id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5022403 | <reponame>AskNowQA/VANiLLa
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchtext.data import BucketIterator
import random
import math
import time
import spacy
import numpy as np
from models.attn_model import *
from data.dataset import *
def attn_train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch of the attention seq2seq model.

    Iterates the BucketIterator, back-propagates the criterion loss with
    gradient clipping, and returns the mean per-batch loss.
    """
    model.train()
    total_loss = 0
    for batch in iterator:
        src, src_len = batch.QnA
        trg_seq = batch.Ans_Sen.permute(1, 0)  # batch-first -> seq-first
        src = src.permute(1, 0)
        optimizer.zero_grad()
        output = model(src, src_len, trg_seq)
        vocab_size = output.shape[-1]
        # Skip position 0 (the <sos> token) when computing the loss.
        flat_output = output[1:].view(-1, vocab_size)
        flat_target = trg_seq[1:].contiguous().view(-1)
        loss = criterion(flat_output, flat_target)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(iterator)
def attn_eval(model, iterator, criterion):
    """Evaluate the model for one epoch (no gradients, teacher forcing off).

    Returns the mean per-batch loss over the iterator.
    """
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for batch in iterator:
            src, src_len = batch.QnA
            trg_seq = batch.Ans_Sen.permute(1, 0)  # batch-first -> seq-first
            src = src.permute(1, 0)
            output = model(src, src_len, trg_seq, 0)  # 0 => teacher forcing off
            vocab_size = output.shape[-1]
            flat_output = output[1:].view(-1, vocab_size)
            flat_target = trg_seq[1:].contiguous().view(-1)
            loss = criterion(flat_output, flat_target)
            total_loss += loss.item()
    return total_loss / len(iterator)
def attn_predict(sentence, src_field, trg_field, model, device, max_len = 50):
    """Greedily decode a single sentence with the attention model.

    Returns (predicted tokens without <sos>, attention weights per step).
    """
    model.eval()
    # Tokenize: run spaCy for raw strings, otherwise just lowercase the tokens.
    if isinstance(sentence, str):
        nlp = spacy.load('en')
        tokens = [tok.text.lower() for tok in nlp(sentence)]
    else:
        tokens = [tok.lower() for tok in sentence]
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[tok] for tok in tokens]
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(0).to(device)
    src_len = torch.LongTensor([len(src_indexes)]).to(device)
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor, src_len)
    mask = model.create_mask(src_tensor)
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
    for step in range(max_len):
        # Feed back the most recent prediction as the next decoder input.
        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
        with torch.no_grad():
            output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs, mask)
        attentions[step] = attention
        pred_token = output.argmax(1).item()
        trg_indexes.append(pred_token)
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    # Drop <sos> from the output and trim unused attention rows.
    return trg_tokens[1:], attentions[:len(trg_tokens) - 1]
| StarcoderdataPython |
8058022 | #!/usr/bin/env python3.6
import argparse
import sys
import os
from datetime import datetime
import subprocess
import shutil
import time
from stat import *
import netCDF4
def timeString2DateTime(time_string):
    """Parse a 'YYYYMMDDHHMM' string into a datetime (seconds forced to 0)."""
    return datetime(int(time_string[0:4]),
                    int(time_string[4:6]),
                    int(time_string[6:8]),
                    int(time_string[8:10]),
                    int(time_string[10:12]),
                    0)
def getMdvTimes(directory, start_time, end_time, filename_debug):
    """Collect datetimes of all *.mdv files under *directory* within [start_time, end_time].

    Expected layout: <root>/<YYYYMMDD>/<HHMMSS>.mdv — the date comes from the
    directory name, the time of day from the file name.  Only subdirectories
    whose names are exactly 8 characters (date dirs) are descended into.
    Returns the matching datetimes sorted ascending.
    """
    stack = [directory]
    times = []
    while stack:
        directory = stack.pop()
        date_of_data = os.path.basename(directory)
        year = date_of_data[0:4]
        month = date_of_data[4:6]
        day = date_of_data[6:8]
        for file in os.listdir(directory):
            fullname = os.path.join(directory, file)
            if os.path.isdir(fullname) and not os.path.islink(fullname):
                # Only descend into date-named (8-character) directories.
                if len(file.split("/")[-1]) == 8:
                    stack.append(fullname)
                continue
            if file.split(".")[-1] != "mdv":
                continue
            hour = file[0:2]
            minute = file[2:4]
            second = file[4:6]
            if filename_debug:
                print("PROCESSING:", file, "Under directory", directory.split("/")[-1])
            this_time = datetime(int(year),
                                 int(month),
                                 int(day),
                                 int(hour),
                                 int(minute),
                                 int(second))
            if start_time <= this_time <= end_time:
                times.append(this_time)
                print("Adding time", this_time.strftime("%Y%m%d %H%M%S"))
    times.sort()
    return times
def main(arguments):
    """Parse CLI arguments and drive CIDD to dump one image per MDV time found.

    Exits with -1 when the source directory does not exist.
    """
    parser = argparse.ArgumentParser(description="Archive MRRD data to a new location.",
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('start_time', help= "A required start time YYYYMMDDHHMM", type=str)
    parser.add_argument('end_time', help= "A required end time YYYYMMDDHHMM", type=str)
    parser.add_argument('CIDD_pfile', help= "A required CIDD parameter file name.", type=str)
    parser.add_argument('-source_directory', help= "Path to the data. Defaults to /home/nowcast/data/lakevic/mdv/satellite/meteosat-11.", type=str, default="/home/nowcast/data/lakevic/mdv/satellite/meteosat-11")
    parser.add_argument('-debug', help= "Turn on debug messages", action="store_true")
    parser.add_argument('-filename_debug', help= "Prints file names that are found processed.", action="store_true")
    # BUG FIX: the original ignored the `arguments` parameter and re-read
    # sys.argv; parse the list that was actually passed in.
    args = parser.parse_args(arguments)
    if not os.path.exists(args.source_directory):
        print("ERROR: No Such directory", args.source_directory)
        sys.exit(-1)
    start_time = timeString2DateTime(args.start_time)
    end_time = timeString2DateTime(args.end_time)
    if args.debug:
        print("Archive start time:", start_time.strftime("%Y%m%d %H:%M"))
        print("Archive end time:", end_time.strftime("%Y%m%d %H:%M"))
    os.chdir(args.source_directory)
    if args.debug:
        print()
        print("chdir", args.source_directory)
    if args.debug:
        print("Compiling list of mdv files found for this time range.")
        print()
    mdv_times = getMdvTimes(args.source_directory, start_time, end_time, args.filename_debug)
    os.chdir(os.environ["DISPLAY_HOME"] + "/params")
    os.environ["DISPLAY"] = ":99"
    for this_time in mdv_times:
        # Dump one image per timestamp, then give CIDD time to write it out.
        print()
        print("CIDD -p", args.CIDD_pfile, "-t", this_time.strftime("%Y%m%d%H%M"))
        subprocess.call(["CIDD", "-p", args.CIDD_pfile, "-t", this_time.strftime("%Y%m%d%H%M")])
        time.sleep(5)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
import hashlib
import re
import secrets
import sqlite3

from flask import Flask, request, make_response, render_template, redirect, abort
# Flask application object; template auto-reload lets edits show up without
# restarting the development server.
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
def createSessionAuthenticated(userName):
    """Create (or refresh) a one-hour session row for *userName*.

    Returns (session_id, max_age_seconds) for the session cookie.

    NOTE(security): the session ID is simply SHA-512(userName), so it is fully
    predictable for anyone who knows a username — confirm whether this app is
    an intentionally vulnerable demo before hardening.
    """
    digest = hashlib.sha512()
    digest.update(str.encode(userName))
    sid = digest.hexdigest()
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("INSERT OR REPLACE INTO sessions VALUES (:sid, (SELECT datetime('now','+1 hour')), :userName);", {"sid": sid, "userName": userName})
    conn.commit()
    conn.close()
    return (sid, 3600)
def removeSession(sessionID):
    """Delete the session row and return ("", 0) — an expired blank cookie value."""
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("DELETE FROM sessions WHERE sessionID = :sid;", {"sid": sessionID})
    conn.commit()
    conn.close()
    return ("", 0)
@app.before_request
def removeSessionsExpired():
    """Purge expired sessions before every request is handled."""
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("DELETE FROM sessions WHERE expiresAfter < (SELECT datetime('now'));")
    conn.commit()
    conn.close()
def createUser(userName, password):
    """Insert a new user with a random salt and SHA-512(salt + password).

    Returns True on success, False when the username is already taken.
    """
    salt = secrets.token_hex(32)
    digest = hashlib.sha512()
    digest.update(str.encode(salt))
    digest.update(str.encode(password))
    hash = digest.hexdigest()
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO users VALUES (:userName, :salt, :hash);", {"userName": userName, "salt": salt, "hash": hash})
    except sqlite3.IntegrityError:
        # UNIQUE(userName) violated -> the name is taken.
        conn.close()
        return False
    conn.commit()
    conn.close()
    return True
def getSession(request):
    """Look up the session referenced by the request's "session" cookie.

    The session's expiry is pushed one hour into the future (sliding window).
    Returns (sessionID, remaining_max_age_seconds, userName) or None.
    """
    sessionCookie = request.cookies.get("session")
    if sessionCookie is None:
        return None
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    # Touch the session first so max_age below reflects the refreshed expiry.
    cur.execute("UPDATE sessions SET expiresAfter = (SELECT datetime('now','+1 hour')) WHERE sessionID = :sid;", {"sid": sessionCookie})
    conn.commit()
    cur.execute("SELECT sessionID, strftime('%s', expiresAfter) - strftime('%s','now') as max_age, userName FROM sessions WHERE sessionID = :sid;", {"sid": sessionCookie})
    session = cur.fetchone()
    conn.close()
    return session
def auth(userName, password):
    """Check *password* against the stored salted SHA-512 hash for *userName*.

    Returns False for unknown users or wrong passwords, True otherwise.
    """
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("SELECT salt, hash FROM users WHERE userName = :userName;", {"userName": userName})
    row = cur.fetchone()
    conn.close()
    if row is None:
        return False  # unknown user name
    digest = hashlib.sha512()
    digest.update(str.encode(row[0]))  # salt
    digest.update(str.encode(password))
    return row[1] == digest.hexdigest()
def login(userName, password):
    """Authenticate; on success return (session_id, max_age), otherwise None."""
    if not auth(userName, password):
        return None
    return createSessionAuthenticated(userName)
def vote(user, voteID, votedYes):
    """Record *user*'s yes/no vote on poll *voteID*.

    Returns False when the poll does not exist or the user already voted.
    """
    if getPoll(voteID) is None:
        return False
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    try:
        cur.execute("INSERT INTO votes VALUES (:pollID, :userName, :votedYes);", {"pollID": voteID, "userName": user, "votedYes": votedYes})
    except sqlite3.IntegrityError:
        # PRIMARY KEY(pollID, userName) violated -> already voted.
        conn.close()
        return False
    conn.commit()
    conn.close()
    return True
def getPoll(pollID):
    """Return (pollID, title, description, creator, creatorsNotes) or None."""
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("SELECT pollID, title, description, creator, creatorsNotes FROM polls WHERE pollID = :id;", {"id": pollID})
    poll = cur.fetchone()
    conn.close()
    return poll
def createPoll(user, title, description, notes):
    """Insert a new poll and return its ID.

    NOTE(review): the ID is derived from count(*)+1 in a separate statement,
    which is racy under concurrent creation — confirm before relying on it.
    """
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("SELECT count(*) + 1 FROM polls;")
    pollID = cur.fetchone()[0]
    cur.execute("INSERT INTO polls VALUES (:id, :title, :description, :creator, :creatorsNotes);",
                {"id": pollID, "title": title, "description": description, "creator": user, "creatorsNotes": notes})
    conn.commit()
    conn.close()
    return pollID
def getVotes(pollID):
    """Return (yes_count, no_count) for the given poll.

    Consistency fix: the original bound :yes=True for the yes-count but used a
    literal 0 for the no-count; both branches now use the same parameterized
    query (sqlite stores the booleans as 1/0, so behavior is unchanged).
    """
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("SELECT count(*) FROM votes WHERE pollID = :id AND votedYes = :yes;", {"id": pollID, "yes": True})
    votesYes = cur.fetchone()
    cur.execute("SELECT count(*) FROM votes WHERE pollID = :id AND votedYes = :yes;", {"id": pollID, "yes": False})
    votesNo = cur.fetchone()
    conn.close()
    return (votesYes[0], votesNo[0])
def votedYes(pollID, username):
    """Return the user's stored vote (1/0) on a poll, or None if they have not voted."""
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("SELECT votedYes FROM votes WHERE pollID = :id AND userName = :username;", {"id": pollID, "username": username})
    row = cur.fetchone()
    conn.close()
    return None if row is None else row[0]
def initDB():
    """Create all tables and, when the polls table is empty, seed demo data."""
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS sessions (sessionID TEXT NOT NULL UNIQUE, expiresAfter TEXT NOT NULL, userName TEXT NOT NULL, PRIMARY KEY(sessionID));")
    cur.execute("CREATE TABLE IF NOT EXISTS users (userName TEXT NOT NULL UNIQUE, salt TEXT NOT NULL, hash TEXT NOT NULL, PRIMARY KEY(userName));")
    cur.execute("CREATE TABLE IF NOT EXISTS polls (pollID INTEGER NOT NULL UNIQUE, title TEXT NOT NULL, description TEXT NOT NULL, creator TEXT NOT NULL, creatorsNotes TEXT, PRIMARY KEY(pollID));")
    cur.execute("CREATE TABLE IF NOT EXISTS votes (pollID INTEGER NOT NULL, userName TEXT NOT NULL, votedYes INTEGER NOT NULL, PRIMARY KEY(pollID, userName));")
    conn.commit()
    # Seed demo users / polls / votes only on a fresh database.
    cur.execute("SELECT count(*) FROM polls;")
    if cur.fetchone()[0] == 0:
        users = ["Jade", "Sara", "Andrew", "Emma", "Cole", "Reece"]
        polls = [
            ("Party Hard 🥳", "Vote yes 👍 for a state-aided 24/7 party with free drinks and food in all major cities. Improve society!"),
            ("Ban Annoying Selfies 🤳", "Selfies where invented by the devil 👿 and therefore should not be allowed!"),
            ("Anti Alien 👽 Act", "Aliens threaten the earth 🌏 and this should be forbidden."),
            ("Support Organic Farming 👩🌾", "Organic Farming is a very good way to increase food quality 🍆🥕🌶 and decrease environmental damage. The earth 🌏 needs this!"),
            ("Strengthen Offensive Cyber War Capabilities 👩💻", "All cool states need offensive cyber war capabilities to show how cool they are! Burn it down! 🔥🔥🔥"),
            ("Ban Wizards & Vampires from Public Places 🧙🧛♀️", "Groups of violent wizards and vampires are hanging out in the streets threatening defenceless grandmas. Stop them!"),
            ("Implement Basic Income 🤑", "A basic income enables social participation and a happy life for everyone. Stop working until you break! Take a break, start living!"),
            ("Add Unicorns to the IUCN Red List 🦄", "Have you saw any unicorns in the recent time? No! Save unicorns by adding them to the Red List."),
        ]
        # Demo accounts get random salt AND random hash, so nobody can log into them.
        for user in users:
            cur.execute("INSERT OR IGNORE INTO users VALUES (:userName, :salt, :hash);", {"userName": user, "salt": secrets.token_hex(32), "hash": secrets.token_hex(64)})
        conn.commit()
        for id, poll in enumerate(polls, 1):
            cur.execute("INSERT OR IGNORE INTO polls VALUES (:id, :title, :description, :creator, '');", {"id": id, "title": poll[0], "description": poll[1], "creator": secrets.choice(users)})
        conn.commit()
        # Every demo user casts a random vote on every poll.
        for user in users:
            for poll in range(1, len(polls) + 1):
                cur.execute("INSERT OR IGNORE INTO votes VALUES (:id, :userName, :votedYes);", {"id": poll, "userName": user, "votedYes": secrets.choice([True, False])})
        conn.commit()
    conn.close()
def validUserName(userName):
    """A valid user name is a str of at least 4 and at most 32 characters."""
    return type(userName) is str and 3 < len(userName) < 33
def validPassword(password):
    """A valid password is a str of at least 4 and at most 64 characters."""
    return type(password) is str and 3 < len(password) < 65
def validVoteID(voteID):
    """A valid vote ID is a non-empty all-digit string encoding an integer > 0."""
    if re.match(r"^[0-9]+$", voteID) is None:
        return False
    return int(voteID) > 0
def validVoteType(voteType):
    """Only the literal strings "Yes" and "No" are accepted."""
    return voteType in ("Yes", "No")
def validPollTitle(title):
    """A valid poll title is a str of at least 4 and at most 48 characters."""
    return type(title) is str and 3 < len(title) < 49
def validPollDescription(description):
    """A valid poll description is a str of at least 4 and at most 512 characters."""
    return type(description) is str and 3 < len(description) < 513
def validPollPrivateNotes(notes):
    """A valid private note is a str of at most 128 characters (empty allowed)."""
    return type(notes) is str and len(notes) < 129
@app.route("/index.html")
def pageIndex():
    """Front page: latest 50 polls with yes/total tallies plus the viewer's own votes."""
    session = getSession(request)
    conn = sqlite3.connect("data.sqlite3")
    cur = conn.cursor()
    # sum(votedYes) is NULL for polls with no votes (count(votedYes) == 0).
    cur.execute("SELECT polls.pollID, title, sum(votedYes), count(votedYes) FROM polls LEFT JOIN votes ON polls.pollID == votes.pollID GROUP BY polls.pollID ORDER BY polls.pollID DESC LIMIT 50;")
    polls = cur.fetchall()
    if session is not None:
        cur.execute("SELECT pollID, votedYes FROM votes WHERE userName = :userName;", {"userName": session[2]})
        userVotedYes = dict(cur.fetchall())  # {pollID: 1/0, ...}
    else:
        userVotedYes = {}
    conn.close()
    response = make_response(render_template("index.html", session = session, polls = polls, votedYes = userVotedYes))
    if session:
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
    return response
@app.route("/login.html", methods=['GET', 'POST'])
def pageLogin():
    """Login page: GET renders the form, POST authenticates and sets the cookie."""
    session = getSession(request)
    if session is not None:
        # Already logged in -> refresh the cookie and go home.
        response = redirect("index.html")
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    if request.method != "POST":
        return render_template("login.html", current = "login")
    try:
        userProvided = request.form["user"]
        passwordProvided = request.form["password"]
    except KeyError:
        abort(400)
    if not validUserName(userProvided) or not validPassword(passwordProvided):
        return render_template("login.html", msg = "Wrong username / password", current = "login")
    result = login(userProvided, passwordProvided)
    if result is None:
        return render_template("login.html", msg = "Wrong username / password", user = userProvided, current = "login")
    # Successful login: set the session cookie and redirect home.
    response = redirect("index.html")
    response.set_cookie(key = "session", value = result[0], max_age = result[1])
    return response
@app.route("/logout.html", methods=['POST'])
def pageLogout():
    """Destroy the current session and clear the cookie."""
    session = getSession(request)
    if session is None:
        return redirect("index.html")
    cleared = removeSession(session[0])
    response = redirect("index.html")
    # cleared == ("", 0): overwrite the cookie with an immediately-expiring blank.
    response.set_cookie(key = "session", value = cleared[0], max_age = cleared[1])
    return response
@app.route("/register.html", methods=['GET', 'POST'])
def pageRegister():
    """Registration page: GET renders the form, POST creates the user and logs in."""
    session = getSession(request)
    if session is not None:
        # Already logged in -> refresh the cookie and go home.
        response = redirect("index.html")
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    if request.method != "POST":
        return render_template("register.html", current = "reg")
    try:
        userProvided = request.form["user"]
        passwordProvided = request.form["password"]
    except KeyError:
        abort(400)
    if not validUserName(userProvided) or not validPassword(passwordProvided):
        return render_template("register.html", msg = "Illegal input", current = "reg")
    if not createUser(userProvided, passwordProvided):
        return render_template("register.html", msg = "Username already exists", user = userProvided, current = "reg")
    # Log the freshly created user in and hand out the session cookie.
    result = login(userProvided, passwordProvided)
    response = redirect("index.html")
    response.set_cookie(key = "session", value = result[0], max_age = result[1])
    return response
@app.route("/vote.html", methods=['GET', 'POST'])
def pageVote():
    """Poll page: GET shows a poll and its tallies, POST casts the user's vote."""
    session = getSession(request)
    if request.method == "POST":
        if session is None:
            # Voting requires a login.
            return redirect("login.html")
        try:
            voteIDProvided = request.args["v"]
            voteTypeProvided = request.form["vote"]
        except KeyError:
            abort(400)
        if not validVoteID(voteIDProvided) or not validVoteType(voteTypeProvided):
            response = make_response(render_template("vote.html", msg = "Illegal input", session = session))
            response.set_cookie(key = "session", value = session[0], max_age = session[1])
            return response
        if not vote(session[2], voteIDProvided, voteTypeProvided == "Yes"):
            response = make_response(render_template("vote.html", msg = "Vote failed. Already participated, vote ended or not found.", session = session))
            response.set_cookie(key = "session", value = session[0], max_age = session[1])
            return response
        # Redirect back to the poll so the new tally is shown.
        response = redirect("vote.html?v={}".format(voteIDProvided))
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    else:
        try:
            voteIDProvided = request.args["v"]
        except KeyError:
            response = redirect("index.html")
            if session:
                response.set_cookie(key = "session", value = session[0], max_age = session[1])
            return response
        if not validVoteID(voteIDProvided):
            response = make_response(render_template("vote.html", msg = "Vote not found.", session = session), 404)
            if session:
                response.set_cookie(key = "session", value = session[0], max_age = session[1])
            return response
        pollInfo = getPoll(voteIDProvided)
        if pollInfo is None:
            response = make_response(render_template("vote.html", msg = "Vote not found.", session = session), 404)
            if session:
                response.set_cookie(key = "session", value = session[0], max_age = session[1])
            return response
        (votesYes, votesNo) = getVotes(voteIDProvided)
        userVotedYes = votedYes(voteIDProvided, session[2]) if session is not None else None
        response = make_response(render_template("vote.html", session = session, pollID = pollInfo[0],
                                                 pollTitle = pollInfo[1], pollDescription = pollInfo[2],
                                                 pollCreator = pollInfo[3], pollCreatorsNotes = pollInfo[4],
                                                 votesYes = votesYes, votesNo = votesNo, votedYes = userVotedYes))
        if session:
            response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
@app.route("/create.html", methods=['GET', 'POST'])
def pageCreate():
    """Poll creation page: GET renders the form, POST creates the poll (login required)."""
    session = getSession(request)
    if session is None:
        return redirect("login.html")
    if request.method != "POST":
        response = make_response(render_template("create.html", session = session, current = "create"))
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    try:
        titleProvided = request.form["title"]
        descriptionProvided = request.form["description"]
        notesProvided = request.form["notes"]
    except KeyError:
        abort(400)
    if not validPollTitle(titleProvided) or not validPollDescription(descriptionProvided) or not validPollPrivateNotes(notesProvided):
        response = make_response(render_template("create.html", session = session, current = "create",
                                                 title = titleProvided, description = descriptionProvided, notes = notesProvided, msg = "Illegal input."))
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    result = createPoll(session[2], titleProvided, descriptionProvided, notesProvided)
    if result is None:
        response = make_response(render_template("create.html", session = session, current = "create",
                                                 title = titleProvided, description = descriptionProvided, notes = notesProvided, msg = "Creation failed."))
        response.set_cookie(key = "session", value = session[0], max_age = session[1])
        return response
    # Jump straight to the newly created poll.
    response = redirect("vote.html?v={}".format(result))
    response.set_cookie(key = "session", value = session[0], max_age = session[1])
    return response
# Ensure the schema and demo data exist before the first request is served.
initDB()
| StarcoderdataPython |
8146933 | <reponame>DASTUDIO/MyVHost
# coding=utf-8
import hashlib
def verify(signature, timestamp, nonce, echostr):
    """WeChat-style signature check: SHA-1 over the sorted (token, timestamp, nonce).

    Returns *echostr* when the computed hash matches *signature*, otherwise a
    debug string.  NOTE(security): the failure branch echoes the token and the
    raw inputs back to the caller — leaks the shared secret; review before use.
    """
    token = ""
    parts = sorted([token, timestamp, nonce])
    digest = hashlib.sha1()
    digest.update("".join(parts).encode('utf-8'))
    hashcode = digest.hexdigest()
    print(hashcode)
    if hashcode == signature:
        return echostr
    return "tk: " + token + "ts" + timestamp + "nonce" + nonce + "-1 " + hashcode
if __name__ == "__main__":
    # Manual smoke test: the signature will not match, so the debug string prints.
    print(verify('123','456','789','000',))
| StarcoderdataPython |
6410000 | from flask import render_template, flash, redirect, url_for
from app import app
from app.forms import LoginForm, RecordForm, PostForm, MultiPostForm
from app.models import Post, User
from app.worker_local import UserData
from app.worker_s3 import DataFile
@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Landing page: stores a submitted PostForm locally and lists permissions."""
    user = {'username': 'guest'}
    form = PostForm()
    if form.validate_on_submit():
        data = UserData()
        data.savedata(form.post.data)
        # Fixed user-facing typo ("You data" -> "Your data").
        flash('Your data has been properly recorded locally')
        return redirect(url_for('index'))
    # Static role descriptions rendered on the page.
    permissions = [
        {
            "username": "guest",
            "body": "read and send data",
            "id": 1
        },
        {
            "username": "admin",
            "body": "read, collect by api, send and modify data",
            "id": 2
        }
    ]
    return render_template('index.html', title='Home', user=user, permissions=permissions, form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login form page; submission only flashes the request (no real auth yet)."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', title='Sign In', form=form)
    flash('Login requested for user {}, remember_me={}'.format(
        form.username.data, form.remember_me.data))
    return redirect(url_for('index'))
@app.route('/show_data', methods=['GET', 'POST'])
def show_data():
    """Record submitted text for the guest user and flash the storage response."""
    form = PostForm()
    user = "guest"
    if form.validate_on_submit():
        recorder = UserData()
        storage_respond = recorder.data_record(data=form.post.data, user=user)
        flash(storage_respond)
        return redirect(url_for('show_data'))
    return render_template('show_data.html', title='Leave the data', form=form)
@app.route('/multidata', methods=['GET', 'POST'])
def multidata():
    """Store a multi-field post (title/category/use-case/text) via the S3 worker."""
    form = MultiPostForm()
    if form.validate_on_submit():
        store = DataFile()
        # NOTE(review): 'categoty' matches DataFile.data_record's keyword —
        # presumably a typo carried through the API; confirm before renaming.
        store.data_record(
            title=form.title.data,
            categoty=form.category.data,
            user_case=form.userCase.data,
            text=form.text.data
        )
        return redirect(url_for('multidata'))
    return render_template('multidata.html', title='Explore', form=form)
@app.route('/more')
def more():
    """Render the static 'more' page."""
    return render_template('more.html')
| StarcoderdataPython |
328066 | # encoding: utf-8
from sqlalchemy import *
from migrate import *
import uuid
def make_uuid():
    """Return a random UUID4 as a unicode string (Python 2 `unicode` builtin)."""
    return unicode(uuid.uuid4())
def upgrade(migrate_engine):
    """Create the `rating` table with foreign keys into `package` and `user`."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    # Autoload the referenced tables so the foreign keys below can resolve.
    package_table = Table('package', metadata, autoload=True)
    user_table = Table('user', metadata, autoload=True)
    rating_table = Table(
        'rating', metadata,
        Column('id', UnicodeText, primary_key=True, default=make_uuid),
        Column('user_id', UnicodeText, ForeignKey('user.id')),
        # Fallback identity for raters who are not logged in.
        Column('user_ip_address', UnicodeText),
        Column('package_id', Integer, ForeignKey('package.id')),
        Column('rating', Float)
    )
    rating_table.create()
def downgrade(migrate_engine):
    """Downgrading this migration is not supported."""
    raise NotImplementedError()
| StarcoderdataPython |
5010822 | # flake8: noqa
from .conversion import localize_pydatetime, normalize_date
from .nattype import NaT, NaTType, iNaT, is_null_datetimelike
from .np_datetime import OutOfBoundsDatetime
from .period import IncompatibleFrequency, Period
from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta
from .timestamps import Timestamp
from .tzconversion import tz_convert_single
# import fails if we do this before np_datetime
from .c_timestamp import NullFrequencyError # isort:skip
| StarcoderdataPython |
5102642 | #!/usr/bin/env python
"""Implement client side components.
Client components are managed, versioned modules which can be loaded at runtime.
"""
import importlib
import logging
import os
import StringIO
import sys
import zipfile
from grr.client import actions
from grr.lib import config_lib
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
LOADED_COMPONENTS = {}
class Site(object):
  """A copy of the relevant functions of the site Python package.

  PyInstaller removes site.py and replaces it with its own version for
  some reason so if we want to use site.addsitedir(), we need to
  provide it ourselves. This code is basically based on
  https://github.com/python-git/python/blob/715a6e5035bb21ac49382772076ec4c630d6e960/Lib/site.py
  """

  def MakePath(self, *paths):
    # Returns (absolute_path, case-normalized_path); the normalized form is
    # what gets stored in the known-paths set for duplicate detection.
    dir_ = os.path.abspath(os.path.join(*paths))
    return dir_, os.path.normcase(dir_)

  def InitPathinfo(self):
    """Return a set containing all existing directory entries from sys.path."""
    d = set()
    for dir_ in sys.path:
      try:
        if os.path.isdir(dir_):
          dir_, dircase = self.MakePath(dir_)
          d.add(dircase)
      except TypeError:
        # Non-string sys.path entries are simply skipped.
        continue
    return d

  def AddSiteDir(self, sitedir):
    """Add 'sitedir' argument to sys.path if missing."""
    known_paths = self.InitPathinfo()
    sitedir, sitedircase = self.MakePath(sitedir)
    if sitedircase not in known_paths and os.path.exists(sitedir):
      sys.path.append(sitedir)
    try:
      names = os.listdir(sitedir)
    except os.error:
      return
    # Process any *.pth files found in the directory, in sorted order.
    dotpth = os.extsep + "pth"
    names = [name for name in names if name.endswith(dotpth)]
    for name in sorted(names):
      self.AddPackage(sitedir, name, known_paths)

  def AddPackage(self, sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory."""
    if known_paths is None:
      self.InitPathinfo()
    fullname = os.path.join(sitedir, name)
    try:
      f = open(fullname, "rU")
    except IOError:
      return
    with f:
      for line in f:
        if line.startswith("#"):
          continue
        # .pth files may contain executable import lines (Python 2 exec stmt).
        if line.startswith(("import ", "import\t")):
          exec line  # pylint: disable=exec-used
          continue
        line = line.rstrip()
        dir_, dircase = self.MakePath(sitedir, line)
        if dircase not in known_paths and os.path.exists(dir_):
          sys.path.append(dir_)
          known_paths.add(dircase)
class LoadComponent(actions.ActionPlugin):
  """Launches an external client action through a component."""
  in_rdfvalue = rdf_client.LoadComponent
  out_rdfvalues = [rdf_client.LoadComponent]

  def LoadComponent(self, summary):
    """Import all the required modules as specified in the request."""
    # Refuse to mix versions: if a different version of this component is
    # already loaded, restart the client process rather than risk conflicts.
    if (summary.name in LOADED_COMPONENTS and
        summary.version != LOADED_COMPONENTS[summary.name]):
      logging.error("Component %s is already loaded at version %s. Exiting!",
                    summary.name, LOADED_COMPONENTS[summary.name])
      os._exit(0)  # pylint: disable=protected-access
    for mod_name in summary.modules:
      logging.debug("Will import %s", mod_name)
      importlib.import_module(mod_name)

  def Run(self, request):
    """Load the component requested.

    The component defines a set of python imports which should be imported into
    the running program. The purpose of this client action is to ensure that the
    imports are available and of the correct version. We ensure this by:

    1) Attempt to import the relevant modules.

    2) If that fails checks for the presence of a component installed at the
       require path. Attempt to import the modules again.

    3) If no component is installed, we fetch and install the component from the
       server. We then attempt to use it.

    If all imports succeed we return a success status, otherwise we raise an
    exception.

    Args:
      request: The LoadComponent request.

    Raises:
      RuntimeError: If the component is invalid.
    """
    summary = request.summary
    # Just try to load the required modules.
    try:
      self.LoadComponent(summary)
      # If we succeed we just report this component is done.
      self.SendReply(request)
      return
    except ImportError:
      pass
    # Try to add an existing component path.
    component_path = utils.JoinPath(
        config_lib.CONFIG.Get("Client.component_path"), summary.name,
        summary.version)
    # Add the component path to the site packages:
    site = Site()
    site.AddSiteDir(component_path)
    LOADED_COMPONENTS[summary.name] = summary.version
    try:
      self.LoadComponent(summary)
      logging.info("Component %s already present.", summary.name)
      self.SendReply(request)
      return
    except ImportError:
      pass
    # Could not import component - will have to fetch it.
    logging.info("Unable to import component %s.", summary.name)
    # Derive the name of the component that we need depending on the current
    # architecture. The client build system should have burned its environment
    # into the client config file. This is the best choice because it will
    # choose the same component that was built together with the client
    # itself (on the same build environment).
    build_environment = config_lib.CONFIG.Get("Client.build_environment")
    if not build_environment:
      # Failing this we try to get something similar to the running system.
      build_environment = rdf_client.Uname.FromCurrentSystem().signature()
    url = "%s/%s" % (summary.url, build_environment)
    logging.info("Fetching component from %s", url)
    http_result = self.grr_worker.http_manager.OpenServerEndpoint(url)
    if http_result.code != 200:
      raise RuntimeError("Error %d while downloading component %s." %
                         (http_result.code, url))
    crypted_data = http_result.data
    # Decrypt and check signature. The cipher is created when the component is
    # uploaded and contains the key to decrypt it.
    signed_blob = rdf_crypto.SignedBlob(summary.cipher.Decrypt(crypted_data))
    # Ensure the blob is signed with the correct key.
    signed_blob.Verify(config_lib.CONFIG[
        "Client.executable_signing_public_key"])
    component = rdf_client.ClientComponent(signed_blob.data)
    # Make sure its the component we actually want.
    if (component.summary.name != summary.name or
        component.summary.version != summary.version):
      raise RuntimeError("Downloaded component is not the correct version")
    # Make intermediate directories.
    try:
      os.makedirs(component_path)
    except (OSError, IOError):
      pass
    # Unzip the component into the path.
    logging.info("Installing component to %s", component_path)
    component_zip = zipfile.ZipFile(StringIO.StringIO(component.raw_data))
    component_zip.extractall(component_path)
    # Add the component to the site packages:
    site.AddSiteDir(component_path)
    LOADED_COMPONENTS[component.summary.name] = component.summary.version
    # If this does not work now, we just fail.
    self.LoadComponent(summary)
    # If we succeed we just report this component is done.
    self.SendReply(request)
| StarcoderdataPython |
9751884 | <reponame>mattjw/sparkql
from pyspark.sql.types import StructType, StructField, StringType, ArrayType
from sparkql import merge_schemas
schema_a = StructType([
StructField("message", StringType()),
StructField("author", ArrayType(
StructType([
StructField("name", StringType())
])
))
])
schema_b = StructType([
StructField("author", ArrayType(
StructType([
StructField("address", StringType())
])
))
])
merged_schema = merge_schemas(schema_a, schema_b)
pretty_merged_schema = """
StructType(List(
StructField(message,StringType,true),
StructField(author,
ArrayType(StructType(List(
StructField(name,StringType,true),
StructField(address,StringType,true))),true),
true)))
"""
| StarcoderdataPython |
3372071 | from __future__ import absolute_import, division, print_function
import numpy as np
import h5py
import pandas as pd
import sys
sys.path.append('../')
import get_unique_craters as guc
class TestLongLatEstimation(object):
    """Tests for ``guc.estimate_longlatdiamkm`` against pre-generated sample data.

    Reads sample craters (pixel coords plus predicted / ground-truth
    long/lat/radius columns) from ``sample_crater_csv.hdf5`` and the matching
    image metadata (long/lat bounds, pixel distortion coefficient) from
    ``sample_crater_csv_metadata.hdf5``.
    """

    def setup(self):
        ctrs = pd.HDFStore('./sample_crater_csv.hdf5', 'r')
        ctrs_meta = h5py.File('./sample_crater_csv_metadata.hdf5', 'r')
        self.craters = ctrs['craters']
        # Image dimensions (pixels) used when the sample data was generated.
        self.dim = (256, 256)
        self.llbd = ctrs_meta['longlat_bounds'][...]
        self.dc = ctrs_meta['pix_distortion_coefficient'][...]
        ctrs.close()
        ctrs_meta.close()

    def test_estimate_longlatdiamkm(self):
        # ``.values`` replaces the deprecated DataFrame.as_matrix(), which was
        # removed in pandas 0.25; behaviour is identical for this use and
        # ``.values`` also exists in the old pandas versions this code targets.
        coords = self.craters[['x', 'y', 'Radius (pix)']].values
        craters_unique = guc.estimate_longlatdiamkm(
            self.dim, self.llbd, self.dc, coords)
        # Check that estimate is same as predictions in sample_crater_csv.hdf5.
        assert np.all(np.isclose(craters_unique[:, 0],
                                 self.craters['Predicted Long'],
                                 atol=0., rtol=1e-10))
        assert np.all(np.isclose(craters_unique[:, 1],
                                 self.craters['Predicted Lat'],
                                 atol=0., rtol=1e-10))
        assert np.all(np.isclose(craters_unique[:, 2],
                                 self.craters['Predicted Radius (km)'],
                                 atol=0., rtol=1e-10))
        # Check that estimate is within expected tolerance from ground truth
        # values in sample_crater_csv.hdf5.
        assert np.all(abs(craters_unique[:, 0] - self.craters['Long']) /
                      (self.llbd[1] - self.llbd[0]) < 0.01)
        assert np.all(abs(craters_unique[:, 1] - self.craters['Lat']) /
                      (self.llbd[3] - self.llbd[2]) < 0.02)
        # Radius is exact, since we use the inverse estimation from km to pix
        # to get the ground truth crater pixel radii/diameters in
        # input_data_gen.py.
        assert np.all(np.isclose(craters_unique[:, 2],
                                 self.craters['Radius (km)'],
                                 atol=0., rtol=1e-10))
| StarcoderdataPython |
9675990 | import collections
class Solution:
    """
    @param N:
    @return: return true or false
    """

    def reorderedPowerOf2(self, N):
        # Some permutation of N's digits forms a power of two iff N's digit
        # multiset matches that of a power of two; 2**30 is the largest power
        # of two checked (31 candidates: 2**0 .. 2**30).
        digit_count = collections.Counter(str(N))
        for shift in range(31):
            if collections.Counter(str(1 << shift)) == digit_count:
                return True
        return False
| StarcoderdataPython |
6519258 |
# Ensure that no users have access keys that have never been used.
# Description: Checks that all users have only active access keys.
#
# Trigger Type: Change Triggered
# Scope of Changes: IAM:User
import json
import logging
import boto3
APPLICABLE_RESOURCES = ["AWS::IAM::User"]
def evaluate_compliance(configuration_item):
    """Evaluate one AWS Config item for active-but-never-used IAM access keys.

    Returns a ``(compliance_type, annotation)`` tuple where compliance_type
    is "COMPLIANT", "NON_COMPLIANT" or "NOT_APPLICABLE", and annotation is a
    space-joined human-readable explanation.
    """
    compliant = "COMPLIANT"
    annotations = []
    # Only IAM users are evaluated by this rule.
    if configuration_item["resourceType"] not in APPLICABLE_RESOURCES:
        compliant = "NOT_APPLICABLE"
        annotations.append(
            "Cannot use this rule for resource of type {}.".format(
                configuration_item["resourceType"]))
        return compliant, " ".join(annotations)
    user_name = configuration_item["configuration"]["userName"]
    iam = boto3.client("iam")
    access_keys = iam.list_access_keys(UserName=user_name)["AccessKeyMetadata"]
    if access_keys:
        for access_key in access_keys:
            access_key_id = access_key["AccessKeyId"]
            access_key_status = access_key["Status"]
            # LastUsedDate is absent (None) when the key has never been used.
            last_used_date = iam.get_access_key_last_used(
                AccessKeyId=access_key_id
            ).get("AccessKeyLastUsed").get("LastUsedDate")
            if access_key_status == "Active" and last_used_date is None:
                # One active never-used key marks the whole user non-compliant.
                compliant = "NON_COMPLIANT"
                annotations.append(
                    "Access key with ID {} was never used.".format(
                        access_key_id))
            else:
                annotations.append(
                    "Access key with ID {} key was last used {}.".format(
                        access_key_id, last_used_date))
    else:
        annotations.append("User do not have any active access key.")
    return compliant, " ".join(annotations)
def lambda_handler(event, context):
    """AWS Lambda entry point for this change-triggered Config rule.

    Parses the invoking AWS Config event, evaluates the configuration item
    via evaluate_compliance, and reports the result back with
    ``config.put_evaluations``. ``context`` is the standard Lambda context
    object (unused here).
    """
    logging.debug("Input event: %s", event)
    invoking_event = json.loads(event["invokingEvent"])
    configuration_item = invoking_event["configurationItem"]
    # resultToken may be absent (e.g. when the rule is invoked for testing).
    result_token = "No token found."
    if "resultToken" in event:
        result_token = event["resultToken"]
    try:
        compliant, annotation = evaluate_compliance(configuration_item)
        config = boto3.client("config")
        config.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType":
                        configuration_item["resourceType"],
                    "ComplianceResourceId":
                        configuration_item["resourceId"],
                    "ComplianceType": compliant,
                    "Annotation": annotation,
                    "OrderingTimestamp":
                        configuration_item["configurationItemCaptureTime"]
                },
            ],
            ResultToken=result_token,
        )
    except Exception as exception:
        # Broad catch so the Lambda itself never errors out: log and return.
        logging.error("Error computing compliance status: %s", exception)
| StarcoderdataPython |
9655900 | # ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [<NAME>]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import unittest
import numpy as np
import numpy.testing as npt
import itertools
from pydl.nn.layers import FC
from pydl import conf
class TestLayers(unittest.TestCase):
def test_score_fn(self):
    """FC.score_fn computes X·W (plus bias) for hand-built and random cases."""
    def test(inp, w, true_out, bias=False):
        fc = FC(inp, w.shape[-1], w, bias)
        out_fc = fc.score_fn(inp)
        npt.assert_almost_equal(out_fc, true_out, decimal=5)

    # Manually calculated
    # -------------------
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    true_out = np.array([[38, 44, 50, 56],
                         [83, 98, 113, 128]], dtype=conf.dtype)
    test(X, w, true_out)
    test(X, w, true_out + bias, bias)

    # Combinatorial Test Cases
    # ------------------------
    batch_size = [1, 2, 3, 6, 11]
    feature_size = [1, 2, 3, 6, 11]
    num_neurons = [1, 2, 3, 6, 11]
    scale = [1e-6, 1e-3, 1e-1, 1e-0, 2, 3, 10]
    for batch, feat, neur, scl in list(itertools.product(batch_size, feature_size, num_neurons,
                                                         scale)):
        X = np.random.uniform(-scl, scl, (batch, feat))
        w = np.random.randn(feat, neur) * scl
        bias = np.zeros(neur)
        # Expected score is a plain matrix product (bias is all-zero here).
        true_out = np.matmul(X, w)
        test(X, w, true_out)
        test(X, w, true_out + bias, bias)
def test_forward(self):
def test(inp, w, true_out, bias=False, actv_fn='Sigmoid', bchnorm=False, p=None, mask=None):
fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=bchnorm, dropout=p)
out_fc = fc.forward(inp, mask=mask)
npt.assert_almost_equal(out_fc, true_out, decimal=5)
# Manually calculated
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
score_out = np.array([[38, 44, 50, 56],
[83, 98, 113, 128]], dtype=conf.dtype)
true_out = 1.0 / (1.0 + np.exp(-score_out))
test(X, w, true_out)
true_out = 1.0 / (1.0 + np.exp(-(score_out + bias)))
test(X, w, true_out, bias)
# Combinatorial Test Cases
# ------------------------
batch_size = [1, 2, 3, 6, 11]
feature_size = [1, 2, 3, 6, 11]
num_neurons = [1, 2, 3, 6, 11]
scale = [1e-6, 1e-3, 1e-1, 1e-0, 2]
batchnorm = [True, False]
dropout = [True, False]
for batch, feat, scl, neur, bn, dout in \
list(itertools.product(batch_size, feature_size, scale, num_neurons, batchnorm,
dropout)):
X = np.random.uniform(-scl, scl, (batch, feat))
w = np.random.randn(feat, neur) * scl
bias = np.zeros(neur)
score = np.matmul(X, w) + bias
if bn:
score = (score - np.mean(score, axis=0)) / np.sqrt(np.var(score, axis=0) + 1e-32)
if dout:
p = np.random.rand()
mask = np.array(np.random.rand(*score.shape) < p, dtype=conf.dtype)
else:
p = None
mask = None
true_out_sig = 1.0 / (1.0 + np.exp(-np.matmul(X, w)))
if dout:
true_out_sig *= mask
test(X, w, true_out_sig, bias=False, actv_fn='Sigmoid', bchnorm=False, p=p, mask=mask)
true_out_sig = 1.0 / (1.0 + np.exp(-score))
if dout:
true_out_sig *= mask
test(X, w, true_out_sig, bias, actv_fn='Sigmoid', bchnorm=bn, p=p, mask=mask)
true_out_tanh = (2.0 / (1.0 + np.exp(-2.0 * score))) - 1.0
if dout:
true_out_tanh *= mask
test(X, w, true_out_tanh, bias, actv_fn='Tanh', bchnorm=bn, p=p, mask=mask)
unnorm_prob = np.exp(score)
true_out_softmax = unnorm_prob / np.sum(unnorm_prob, axis=-1, keepdims=True)
if dout:
true_out_softmax *= mask
test(X, w, true_out_softmax, bias, actv_fn='Softmax', bchnorm=bn, p=p, mask=mask)
true_out_relu = np.maximum(0, score)
if dout:
mask /= p
true_out_relu *= mask
test(X, w, true_out_relu, bias, actv_fn='ReLU', bchnorm=bn, p=p, mask=mask)
true_out_linear = score
if dout:
true_out_linear *= mask
test(X, w, true_out_linear, bias, actv_fn='Linear', bchnorm=bn, p=p, mask=mask)
def test_gradients_manually(self):
    """FC weight/bias/input gradients match hand-computed values."""
    def test(inp, w, inp_grad, true_weights_grad, true_inputs_grad, bias=False,
             true_bias_grad=None):
        # NOTE(review): `inputs=X` captures the enclosing X, not the `inp`
        # parameter — identical here since every call passes X, but confirm.
        fc = FC(inp, w.shape[-1], w, bias)
        weights_grad = fc.weight_gradients(inp_grad, inputs=X)
        bias_grad = fc.bias_gradients(inp_grad)
        inputs_grad = fc.input_gradients(inp_grad)
        npt.assert_almost_equal(weights_grad, true_weights_grad, decimal=5)
        npt.assert_almost_equal(bias_grad, true_bias_grad, decimal=5)
        npt.assert_almost_equal(inputs_grad, true_inputs_grad, decimal=5)

    # Manually calculated - Unit input gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.ones((2, 4), dtype=conf.dtype)
    true_weights_grad = np.sum(X, axis=0, keepdims=True).T * np.ones(w.shape, dtype=conf.dtype)
    true_inputs_grad = np.sum(w, axis=-1, keepdims=True).T * np.ones(X.shape, dtype=conf.dtype)
    true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
    test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)

    # Manually calculated
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.array([[3, 3, 3, 3],
                         [2, 2, 2, 2]], dtype=conf.dtype)
    true_weights_grad = np.array([[11, 11, 11, 11],
                                  [16, 16, 16, 16],
                                  [21, 21, 21, 21]], dtype=conf.dtype)
    true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
    true_inputs_grad = np.array([[30, 78, 126],
                                 [20, 52, 84]], dtype=conf.dtype)
    test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)
def test_gradients_finite_difference(self):
    """FC analytic gradients agree with central finite differences of score_fn."""
    self.delta = 1e-5

    def test(inp, w, inp_grad, bias=False):
        # NOTE(review): `inputs=X` captures the enclosing X, not the `inp`
        # parameter — identical here since every call passes X, but confirm.
        fc = FC(inp, w.shape[-1], w, bias)
        weights_grad = fc.weight_gradients(inp_grad, inputs=X)
        bias_grad = fc.bias_gradients(inp_grad)
        inputs_grad = fc.input_gradients(inp_grad)
        # Weights finite difference gradients
        weights_finite_diff = np.empty(weights_grad.shape)
        for i in range(weights_grad.shape[0]):
            w_delta = np.zeros(w.shape, dtype=conf.dtype)
            w_delta[i] = self.delta
            weights_finite_diff[i] = np.sum(((fc.score_fn(inp, w + w_delta) -
                                              fc.score_fn(inp, w - w_delta)) /
                                             (2 * self.delta)) * inp_grad, axis=0)
        # Bias finite difference gradients
        fc.bias = bias + self.delta
        lhs = fc.score_fn(inp)
        fc.bias = bias - self.delta
        rhs = fc.score_fn(inp)
        bias_finite_diff = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad, axis=0)
        fc.bias = bias
        # Inputs finite difference gradients
        inputs_finite_diff = np.empty(inputs_grad.shape)
        for i in range(inputs_grad.shape[1]):
            i_delta = np.zeros(inp.shape, dtype=conf.dtype)
            i_delta[:, i] = self.delta
            inputs_finite_diff[:, i] = np.sum(((fc.score_fn(inp + i_delta, w) -
                                                fc.score_fn(inp - i_delta, w)) /
                                               (2 * self.delta)) * inp_grad, axis=-1,
                                              keepdims=False)
        # Threshold Gradient Diff Check
        npt.assert_almost_equal(weights_grad, weights_finite_diff, decimal=5)
        npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=5)
        npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=5)
        # # Relative gradient error check
        # max_abs_w_grads = np.maximum(np.abs(weights_grad), np.abs(weights_finite_diff))
        # max_abs_w_grads[max_abs_w_grads==0] = 1
        # w_grads_accuracy = np.abs(weights_grad - weights_finite_diff) / max_abs_w_grads
        # npt.assert_almost_equal(np.zeros_like(w_grads_accuracy), w_grads_accuracy, decimal=5)
        #
        # max_abs_b_grads = np.maximum(np.abs(bias_grad), np.abs(bias_finite_diff))
        # max_abs_b_grads[max_abs_b_grads==0] = 1
        # b_grads_accuracy = np.abs(bias_grad - bias_finite_diff) / max_abs_b_grads
        # npt.assert_almost_equal(np.zeros_like(b_grads_accuracy), b_grads_accuracy, decimal=5)
        #
        # max_abs_inp_grads = np.maximum(np.abs(inputs_grad), np.abs(inputs_finite_diff))
        # max_abs_inp_grads[max_abs_inp_grads==0] = 1
        # inp_grads_accuracy = np.abs(inputs_grad - inputs_finite_diff) / max_abs_inp_grads
        # npt.assert_almost_equal(np.zeros_like(inp_grads_accuracy), inp_grads_accuracy,
        #                         decimal=5)

    # Manually calculated - Unit input gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.ones((2, 4), dtype=conf.dtype)
    test(X, w, inp_grad, bias)

    # Manually calculated
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.array([[1, 2, 3, 4],
                         [-5, -6, -7, -8]], dtype=conf.dtype)
    test(X, w, inp_grad, bias)

    # Combinatorial Test Cases
    # ------------------------
    batch_size = [1, 2, 3, 6, 11]
    feature_size = [1, 2, 3, 6, 11]
    num_neurons = [1, 2, 3, 6, 11]
    scale = [1e-4, 1e-3, 1e-1, 1e-0, 2, 3, 10]
    unit_inp_grad = [True, False]
    for batch, feat, neur, scl, unit in list(itertools.product(batch_size, feature_size,
                                                               num_neurons, scale,
                                                               unit_inp_grad)):
        X = np.random.uniform(-scl, scl, (batch, feat))
        w = np.random.randn(feat, neur) * scl
        bias = np.random.rand(neur) * scl
        inp_grad = np.ones((batch, neur), dtype=conf.dtype) if unit else \
            np.random.uniform(-10, 10, (batch, neur))
        test(X, w, inp_grad, bias)
def test_backward_gradients_finite_difference(self):
    """FC.backward gradients agree with central finite differences of forward,
    across activations, batchnorm and dropout combinations."""
    self.delta = 1e-8

    def test(inp, w, inp_grad, bias=False, actv_fn='Sigmoid', batchnorm=False, p=None,
             mask=None):
        fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=batchnorm,
                dropout=p)
        _ = fc.forward(inp, mask=mask)
        inputs_grad = fc.backward(inp_grad)
        weights_grad = fc.weights_grad
        bias_grad = fc.bias_grad
        # Weights finite difference gradients
        weights_finite_diff = np.empty(weights_grad.shape)
        for i in range(weights_grad.shape[0]):
            for j in range(weights_grad.shape[1]):
                w_delta = np.zeros(w.shape, dtype=conf.dtype)
                w_delta[i, j] = self.delta
                fc.weights = w + w_delta
                lhs = fc.forward(inp, mask=mask)
                fc.weights = w - w_delta
                rhs = fc.forward(inp, mask=mask)
                weights_finite_diff[i, j] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad)
                # Replace finite-diff gradients calculated close to 0 with NN calculated
                # gradients to pass assertion test
                grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
                if grad_kink > 0:
                    weights_finite_diff[i, j] = weights_grad[i, j]
        fc.weights = w
        # Bias finite difference gradients
        bias_finite_diff = np.empty(bias_grad.shape)
        for i in range(bias_grad.shape[0]):
            bias_delta = np.zeros(bias.shape, dtype=conf.dtype)
            bias_delta[i] = self.delta
            fc.bias = bias + bias_delta
            lhs = fc.forward(inp, mask=mask)
            fc.bias = bias - bias_delta
            rhs = fc.forward(inp, mask=mask)
            bias_finite_diff[i] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad)
            # Replace finite-diff gradients calculated close to 0 with NN calculated
            # gradients to pass assertion test
            grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
            if grad_kink > 0:
                bias_finite_diff[i] = bias_grad[i]
        fc.bias = bias
        # Inputs finite difference gradients
        inputs_finite_diff = np.empty(inputs_grad.shape)
        for i in range(inputs_grad.shape[0]):
            for j in range(inputs_grad.shape[1]):
                i_delta = np.zeros(inp.shape, dtype=conf.dtype)
                i_delta[i, j] = self.delta
                lhs = fc.forward(inp + i_delta, mask=mask)
                rhs = fc.forward(inp - i_delta, mask=mask)
                inputs_finite_diff[i, j] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad,
                                                  keepdims=False)
                # Replace finite-diff gradients calculated close to 0 with NN calculated
                # gradients to pass assertion test
                grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
                if grad_kink > 0:
                    inputs_finite_diff[i, j] = inputs_grad[i, j]
        npt.assert_almost_equal(weights_grad, weights_finite_diff, decimal=2)
        npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=2)
        npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=2)

    # Manually calculated - Unit input gradients
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.ones((2, 4), dtype=conf.dtype)
    activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax']
    batchnorm = [True, False]
    dropout = [True, False]
    for actv, bn, dout in list(itertools.product(activation_fn, batchnorm, dropout)):
        if dout and actv == 'Softmax':
            continue
        if dout:
            p = np.random.rand()
            mask = np.array(np.random.rand(*inp_grad.shape) < p, dtype=conf.dtype)
            # Linear/ReLU use inverted dropout: scale kept units by 1/p.
            if actv in ['Linear', 'ReLU']:
                mask /= p
        else:
            p = None
            mask = None
        test(X, w, inp_grad, bias, actv, bn, p, mask)

    # Manually calculated
    X = np.array([[1, 2, 3],
                  [4, 5, 6]], dtype=conf.dtype)
    w = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12]], dtype=conf.dtype)
    bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
    inp_grad = np.array([[5, 6, 7, 8],
                         [1, 2, 3, 4]], dtype=conf.dtype)
    activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax']
    batchnorm = [True, False]
    dropout = [True, False]
    for actv, bn, dout in list(itertools.product(activation_fn, batchnorm, dropout)):
        if dout and actv == 'Softmax':
            continue
        if dout:
            p = np.random.rand()
            mask = np.array(np.random.rand(*inp_grad.shape) < p, dtype=conf.dtype)
            if actv in ['Linear', 'ReLU']:
                mask /= p
        else:
            p = None
            mask = None
        test(X, w, inp_grad, bias, actv, bn, p, mask)

    # Combinatorial Test Cases
    # ------------------------
    batch_size = [1, 2, 8, 11]
    feature_size = [1, 2, 3, 11]
    num_neurons = [1, 2, 3, 11]
    scale = [1e-3, 1e-0, 2]
    unit_inp_grad = [True, False]
    activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax', 'ReLU']
    batchnorm = [True, False]
    dropout = [True, False]
    for batch, feat, neur, scl, unit, actv, bn, dout in \
            list(itertools.product(batch_size, feature_size, num_neurons, scale, unit_inp_grad,
                                   activation_fn, batchnorm, dropout)):
        if dout and actv == 'Softmax':
            continue
        X = np.random.uniform(-scl, scl, (batch, feat))
        w = np.random.randn(feat, neur) * scl
        # bias = np.random.randn(neur) * scl
        bias = np.zeros(neur)
        inp_grad = np.ones((batch, neur), dtype=conf.dtype) if unit else \
            np.random.uniform(-1, 1, (batch, neur))
        if dout:
            p = np.random.rand()
            mask = np.array(np.random.rand(batch, neur) < p, dtype=conf.dtype)
            if actv in ['Linear', 'ReLU']:
                mask /= p
        else:
            p = None
            mask = None
        test(X, w, inp_grad, bias, actv, bn, p, mask)
if __name__ == '__main__':
    # Run the layer test suite when this module is executed directly.
    unittest.main()
| StarcoderdataPython |
6596865 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 16 15:04:40 2020
@author: user
"""
from playwithml import predictor as p

# Build a predictor over the iris dataset and run the library's full
# pipeline. `c=True` presumably selects classification mode — TODO confirm
# against playwithml's predictor.do_all documentation.
P = p('datasets/iris.csv')
print(P.do_all(c=True))
| StarcoderdataPython |
8159583 | """
Date: 2022.02.03 9:11
Description: Omit
LastEditors: <NAME>
LastEditTime: 2022.02.03 9:11
"""
import os
from enum import IntEnum
from .common import writer
def pypirc(read_only=True):
    """Write a template ``~/.pypirc`` (PyPI / TestPyPI / private repo entries).

    Delegates the actual write to :func:`writer` from ``.common``;
    *read_only* is forwarded — presumably controls file permissions, confirm
    against ``common.writer``.
    """
    # Link to the official format specification, passed along to writer().
    official = "https://packaging.python.org/en/latest/specifications/pypirc/"
    conf = os.path.join(os.path.expanduser("~"), ".pypirc")
    content = """\
# https://pypi.org/manage/account/#API%20tokens
[distutils]
index-servers =
    pypi
    testpypi
    private-repository
[pypi]
username = __token__
password = <PyPI token>
[testpypi]
username = __token__
password = <TestPyPI token>
[private-repository]
repository = <private-repository URL>
username = <private-repository username>
password = <private-repository password>
"""
    writer(conf, content=content, read_only=read_only, official=official)
class Method(IntEnum):
    """Selector for the supported configuration generators."""

    pypirc = 1

    @classmethod
    def func(cls, method):
        """Map *method* to its generator; unknown values fall back to pypirc."""
        dispatch = {cls.pypirc: pypirc}
        return dispatch.get(method, pypirc)
def python(method=Method.pypirc, read_only=True):
    """Generate the Python config file selected by *method* (default .pypirc),
    forwarding *read_only* to the chosen generator."""
    Method.func(method)(read_only)
| StarcoderdataPython |
3430490 | import logging
import socketio
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from src import routers
from src.utils.global_instances import sio
from src import socketio_events
# Reuse uvicorn's error logger for this module.
logger = logging.getLogger("uvicorn.error")
app = FastAPI(debug=True)
# Fully permissive CORS: any origin/method/header, credentials allowed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Wrap the FastAPI app so Socket.IO traffic on /socket.io/ is served by `sio`.
sio_asgi_app = socketio.ASGIApp(sio, app, socketio_path="/socket.io/")
app.mount("/socket.io/", sio_asgi_app)
app.include_router(routers.router)
# Register the Socket.IO event handlers defined in socketio_events.
socketio_events.register_routes()
| StarcoderdataPython |
9760383 | <reponame>Junkbite/smoked-salmon
import re
from collections import defaultdict
from html import unescape
from salmon.common import RE_FEAT, parse_copyright, re_split
from salmon.sources import DeezerBase
from salmon.tagger.sources.base import MetadataMixin
# Maps Deezer's `record_type` values to the release-type labels used here.
RECORD_TYPES = {
    "album": "Album",
    "ep": "EP",
    "single": "Single",
}
class Scraper(DeezerBase, MetadataMixin):
    """Metadata scraper for Deezer releases.

    Each ``parse_*`` method extracts one field from the Deezer API payload
    (``soup``); the track/artist helpers build the structures expected by
    MetadataMixin.
    """

    def parse_release_title(self, soup):
        # Strip any "feat. ..." suffix from the release title.
        return RE_FEAT.sub("", soup["title"])

    def parse_cover_url(self, soup):
        return soup["cover_xl"]

    def parse_release_year(self, soup):
        try:
            return int(re.search(r"(\d{4})", soup["release_date"])[1])
        except TypeError as e:
            # No 4-digit year found (re.search returned None): treat as
            # unknown rather than raising.
            return None
            # raise ScrapeError('Could not parse release year.') from e

    def parse_release_date(self, soup):
        return soup["release_date"]

    def parse_release_label(self, soup):
        return parse_copyright(soup["label"])

    def parse_genres(self, soup):
        return {g["name"] for g in soup["genres"]["data"]}

    def parse_release_type(self, soup):
        try:
            return RECORD_TYPES[soup["record_type"]]
        except KeyError:
            # Unknown record_type: leave the release type unset.
            return None

    def parse_upc(self, soup):
        return soup["upc"]

    def parse_tracks(self, soup):
        """Build a {disc_number: {track_number: track}} mapping."""
        tracks = defaultdict(dict)
        for track in soup["tracklist"]:
            tracks[str(track["DISK_NUMBER"])][
                str(track["TRACK_NUMBER"])
            ] = self.generate_track(
                trackno=track["TRACK_NUMBER"],
                discno=track["DISK_NUMBER"],
                artists=self.parse_artists(
                    track["SNG_CONTRIBUTORS"], track["ARTISTS"], track["SNG_TITLE"]
                ),
                title=self.parse_title(track["SNG_TITLE"], track.get("VERSION", None)),
                isrc=track["ISRC"],
                explicit=track["EXPLICIT_LYRICS"],
                stream_id=track["SNG_ID"],
                md5_origin=track.get("MD5_ORIGIN"),
                media_version=track.get("MEDIA_VERSION"),
                lossless=True,
                mp3_320=True,
            )
        return dict(tracks)

    def process_label(self, data):
        # A label equal (case-insensitively) to a main artist means the
        # release is self-released.
        if isinstance(data["label"], str):
            if any(
                data["label"].lower() == a.lower() and i == "main"
                for a, i in data["artists"]
            ):
                return "Self-Released"
        return data["label"]

    def parse_artists(self, artists, default_artists, title):
        """
        Iterate over all artists and roles, returning a compliant list of
        artist tuples.
        """
        result = []
        # Artists named in a "feat. ..." title suffix are guests.
        feat = RE_FEAT.search(title)
        if feat:
            for artist in re_split(feat[1]):
                result.append((unescape(artist), "guest"))
        if artists:
            for a in artists.get("mainartist") or artists.get("main_artist", []):
                for b in re_split(a):
                    if (b, "main") not in result:
                        result.append((b, "main"))
            for a in artists.get("featuredartist", []):
                for b in re_split(a):
                    if (b, "guest") not in result:
                        result.append((b, "guest"))
        else:
            # No contributor data: fall back to the track's artist list.
            for artist in default_artists:
                for b in re_split(artist["ART_NAME"]):
                    if (b, "main") not in result:
                        result.append((b, "main"))
        return result
| StarcoderdataPython |
1785685 | from django import forms
from .models import CreditApplication
class CreditApplicationForm(forms.ModelForm):
    """ModelForm exposing every field of the CreditApplication model."""

    class Meta:
        model = CreditApplication
        # NOTE(review): fields = '__all__' exposes every model field on the
        # form; if the model ever gains sensitive/internal fields, switch to
        # an explicit field list.
        fields = '__all__'
| StarcoderdataPython |
9630535 | <filename>config.py
import os

basedir = os.path.abspath(os.path.dirname(__file__))

# BASIC APP CONFIG
WTF_CSRF_ENABLED = os.getenv('CSRF_ENABLED', 'yes') == 'yes'
SECRET_KEY = os.getenv('SECRET_KEY', 'secret')
BIND_ADDRESS = os.getenv('BIND_ADDRESS', '0.0.0.0')
# Cast so PORT is always an int: os.getenv returns a *string* when the
# environment variable is set, but the default here was an int.
PORT = int(os.getenv('PORT', 9393))
LOGIN_TITLE = os.getenv('LOGIN_TITLE', 'PDNS')

# TIMEOUT - for large zones (same str-vs-int inconsistency fixed as PORT)
TIMEOUT = int(os.getenv('TIMEOUT', 10))

# LOG CONFIG
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
LOG_FILE = ''

# Upload
UPLOAD_DIR = os.path.join(basedir, 'upload')

# DATABASE CONFIG
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = os.getenv('DATABASE_TRACK_MODIFICATIONS', 'yes') == 'yes'

# LDAP CONFIG (these names are only defined when LDAP_TYPE is configured)
if os.getenv('LDAP_TYPE') is not None:
    LDAP_TYPE = os.getenv('LDAP_TYPE')
    LDAP_URI = os.getenv('LDAP_URI', 'ldaps://your-ldap-server:636')
    LDAP_USERNAME = os.getenv('LDAP_USERNAME', 'cn=dnsuser,ou=users,ou=services,dc=duykhanh,dc=me')
    LDAP_PASSWORD = os.getenv('LDAP_PASSWORD', '<PASSWORD>')
    LDAP_SEARCH_BASE = os.getenv('LDAP_SEARCH_BASE', 'ou=System Admins,ou=People,dc=duykhanh,dc=me')
    # Additional options only if LDAP_TYPE=ldap
    LDAP_USERNAMEFIELD = os.getenv('LDAP_USERNAMEFIELD', 'uid')
    LDAP_FILTER = os.getenv('LDAP_FILTER', '(objectClass=inetorgperson)')

# Github Oauth
GITHUB_OAUTH_ENABLE = os.getenv('GITHUB_OAUTH_ENABLE', 'no') == 'yes'
GITHUB_OAUTH_KEY = os.getenv('GITHUB_OAUTH_KEY')
GITHUB_OAUTH_SECRET = os.getenv('GITHUB_OAUTH_SECRET')
GITHUB_OAUTH_SCOPE = os.getenv('GITHUB_OAUTH_SCOPE', 'email')
GITHUB_OAUTH_URL = os.getenv('GITHUB_OAUTH_URL', 'https://github.com/api/v3/')
GITHUB_OAUTH_TOKEN = os.getenv('GITHUB_OAUTH_TOKEN', 'https://github.com/oauth/token')
GITHUB_OAUTH_AUTHORIZE = os.getenv('GITHUB_OAUTH_AUTHORIZE', 'https://github.com/oauth/authorize')

# Default Auth
BASIC_ENABLED = os.getenv('BASIC_ENABLED', 'yes') == 'yes'
SIGNUP_ENABLED = os.getenv('SIGNUP_ENABLED', 'yes') == 'yes'

# POWERDNS CONFIG
PDNS_STATS_URL = os.getenv('PDNS_STATS_URL')
PDNS_API_KEY = os.getenv('PDNS_API_KEY', '')
PDNS_VERSION = os.getenv('PDNS_VERSION', '4.0.1')

# RECORDS ALLOWED TO EDIT
RECORDS_ALLOW_EDIT = os.getenv('RECORDS_ALLOW_EDIT', 'A,AAAA,CNAME,PTR,MX,TXT,NS').split(',')

# EXPERIMENTAL FEATURES
PRETTY_IPV6_PTR = False
| StarcoderdataPython |
4844197 | <filename>Calibration.py
class Calibration:
    """Container bundling the results of the separate calibration procedures.

    Each attribute holds the object produced by the corresponding step
    (radiometric, intrinsic, geometric); any may be None when that step has
    not been run.
    """

    def __init__(self, radio_calib=None, intr_calib=None, geo_calib=None):
        self.radio_calib, self.intr_calib, self.geo_calib = (
            radio_calib, intr_calib, geo_calib)
| StarcoderdataPython |
3273114 | import asyncio
import functools
def wrap_sync_writer(writer):
    """Return an object whose ``write`` is an awaitable version of *writer*'s.

    Only ``write`` is exposed; wrapping is done by wrap_sync_func, so an
    already-async write method is passed through unchanged.
    """
    class AsyncWriter:
        def __init__(self, writer):
            self.write = wrap_sync_func(writer.write)
    return AsyncWriter(writer)
def wrap_sync_reader(reader):
    """Return an object whose ``read`` is an awaitable version of *reader*'s.

    Only ``read`` is exposed; wrapping is done by wrap_sync_func, so an
    already-async read method is passed through unchanged.
    """
    class AsyncReader:
        def __init__(self, reader):
            self.read = wrap_sync_func(reader.read)
    return AsyncReader(reader)
def wrap_sync_func(func):
    """Return *func* as a coroutine function.

    Coroutine functions are returned unchanged; a plain callable is wrapped
    so each call runs in the event loop's default executor.
    """
    if asyncio.iscoroutinefunction(func):
        return func

    @functools.wraps(func)
    async def async_wrapper(*args, **kwargs):
        running_loop = asyncio.get_event_loop()
        bound_call = functools.partial(func, *args, **kwargs)
        return await running_loop.run_in_executor(None, bound_call)

    return async_wrapper
def run_async(coro):
    """Run *coro* to completion on a fresh event loop and return its result.

    The new loop is installed as the thread's current loop (so code using
    asyncio.get_event_loop() inside the coroutine sees it) and is always
    closed afterwards — the original leaked the loop when the coroutine
    raised, since close() was only reached on success.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(coro)
    finally:
        loop.close()
| StarcoderdataPython |
9717538 | __author__ = "<NAME> <http://intertwingly.net/> and <NAME> <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 <NAME> and <NAME>"
from .base import validatorBase
from .validators import *
#
# Atom link element
#
class link(nonblank,xmlbase,iso639,nonhtml,nonNegativeInteger,rfc3339):
validRelations = [
# http://www.iana.org/assignments/link-relations.html
'alternate', # RFC4287
'current', # RFC5005
'describedby', # http://www.w3.org/TR/powder-dr/#assoc-linking
'edit', # RFC-ietf-atompub-protocol-17.txt
'edit-media', # RFC-ietf-atompub-protocol-17.txt
'enclosure', # RFC4287
'first', # RFC5005
'hub', # http://pubsubhubbub.googlecode.com/
'last', # RFC5005
'license', # RFC4946
'next', # RFC5005
'next-archive', # RFC5005
'payment', # Kinberg
'prev-archive', # RFC5005
'previous', # RFC5005
'related', # RFC4287
'replies', # RFC4685
'search', # http://www.opensearch.org/Specifications/OpenSearch/1.1
'self', # RFC4287
'service', # Snell
'up', # Slater
'via' # RFC4287
]
rfc5005 = [
'current', # RFC5005
'first', # RFC5005
'last', # RFC5005
'next', # RFC5005
'next-archive', # RFC5005
'prev-archive', # RFC5005
'previous', # RFC5005
]
def getExpectedAttrNames(self):
return [(None, 'type'), (None, 'title'), (None, 'rel'),
(None, 'href'), (None, 'length'), (None, 'hreflang'),
('http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'type'),
('http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'resource'),
('http://purl.org/syndication/thread/1.0', 'count'),
('http://purl.org/syndication/thread/1.0', 'when'),
('http://purl.org/syndication/thread/1.0', 'updated')]
def validate(self):
self.type = ""
self.rel = "alternate"
self.href = ""
self.hreflang = ""
self.title = ""
if (None, "rel") in self.attrs:
self.value = self.rel = self.attrs.getValue((None, "rel"))
if self.rel.startswith('http://www.iana.org/assignments/relation/'):
self.rel=self.rel[len('http://www.iana.org/assignments/relation/'):]
if self.rel in self.validRelations:
self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
elif rfc2396_full.rfc2396_re.match(self.rel.encode('idna').decode('utf-8')):
self.log(ValidAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
else:
self.log(UnregisteredAtomLinkRel({"parent":self.parent.name, "element":self.name, "attr":"rel", "value":self.rel}))
nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "rel"})
if self.rel in self.rfc5005 and self.parent.name == 'entry':
self.log(FeedHistoryRelInEntry({"rel":self.rel}))
if (None, "type") in self.attrs:
self.value = self.type = self.attrs.getValue((None, "type"))
if not mime_re.match(self.type):
self.log(InvalidMIMEType({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
elif self.rel == "self" and self.type not in ["application/atom+xml", "application/rss+xml", "application/rdf+xml"]:
self.log(SelfNotAtom({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
else:
self.log(ValidMIMEAttribute({"parent":self.parent.name, "element":self.name, "attr":"type", "value":self.type}))
if (None, "title") in self.attrs:
self.log(ValidTitle({"parent":self.parent.name, "element":self.name, "attr":"title"}))
self.value = self.title = self.attrs.getValue((None, "title"))
nonblank.validate(self, errorClass=AttrNotBlank, extraParams={"attr": "title"})
nonhtml.validate(self)
if (None, "length") in self.attrs:
self.name = 'length'
self.value = self.attrs.getValue((None, "length"))
nonNegativeInteger.validate(self)
nonblank.validate(self)
if (None, "hreflang") in self.attrs:
self.name = 'hreflang'
self.value = self.hreflang = self.attrs.getValue((None, "hreflang"))
iso639.validate(self)
if (None, "href") in self.attrs:
self.name = 'href'
self.value = self.href = self.attrs.getValue((None, "href"))
xmlbase.validate(self, extraParams={"attr": "href"})
if self.rel == "self" and self.parent.name in ["feed","channel"]:
# detect relative self values
from urllib.parse import urlparse
from xml.dom import XML_NAMESPACE
absolute = urlparse(self.href)[1]
element = self
while not absolute and element and hasattr(element,'attrs'):
pattrs = element.attrs
if pattrs and (XML_NAMESPACE, 'base') in pattrs:
absolute=urlparse(pattrs.getValue((XML_NAMESPACE, 'base')))[1]
element = element.parent
if not absolute:
self.log(RelativeSelf({"value":self.href}))
from urllib.parse import urljoin
if urljoin(self.xmlBase,self.value) not in self.dispatcher.selfURIs:
if urljoin(self.xmlBase,self.value).split('#')[0] != self.xmlBase.split('#')[0]:
from .uri import Uri
if self.value.startswith('http://feeds.feedburner.com/'):
if self.value.endswith('?format=xml'):
self.value = self.value.split('?')[0]
value = Uri(self.value)
for docbase in self.dispatcher.selfURIs:
if value == Uri(docbase): break
# don't complain when validating feedburner's xml view
if docbase.startswith('http://feeds.feedburner.com/'):
if docbase.endswith('?format=xml'):
if value == Uri(docbase.split('?')[0]): break
else:
self.log(SelfDoesntMatchLocation({"parent":self.parent.name, "element":self.name}))
self.dispatcher.selfURIs.append(urljoin(self.xmlBase,self.value))
else:
self.log(MissingHref({"parent":self.parent.name, "element":self.name, "attr":"href"}))
if ('http://purl.org/syndication/thread/1.0', 'count') in self.attrs:
if self.rel != "replies":
self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:count"}))
self.value = self.attrs.getValue(('http://purl.org/syndication/thread/1.0', 'count'))
self.name="thr:count"
nonNegativeInteger.validate(self)
if ('http://purl.org/syndication/thread/1.0', 'when') in self.attrs:
self.log(NoThrWhen({"parent":self.parent.name, "element":self.name, "attribute":"thr:when"}))
if ('http://purl.org/syndication/thread/1.0', 'updated') in self.attrs:
if self.rel != "replies":
self.log(UnexpectedAttribute({"parent":self.parent.name, "element":self.name, "attribute":"thr:updated"}))
self.value = self.attrs.getValue(('http://purl.org/syndication/thread/1.0', 'updated'))
self.name="thr:updated"
rfc3339.validate(self)
    def startElementNS(self, name, qname, attrs):
        # atom:link carries all its information in attributes; any child
        # element is handed to an ``eater`` handler (which, as the name
        # suggests, appears intended to consume/ignore the subtree — confirm
        # against the eater class definition elsewhere in this file).
        self.push(eater(), name, attrs)
def characters(self, text):
if text.strip():
self.log(AtomLinkNotEmpty({"parent":self.parent.name, "element":self.name}))
| StarcoderdataPython |
def longest_bitonic_subsequence(arr):
    """Return the length of the longest bitonic subsequence of *arr*.

    A bitonic subsequence first strictly increases, then strictly decreases
    (either part may be empty).  Classic O(n^2) dynamic programming:
    longest increasing run ending at i, plus longest decreasing run
    starting at i, minus one for the shared peak element.
    """
    n = len(arr)
    if n == 0:
        return 0
    inc = [1] * n  # inc[i]: longest strictly increasing subsequence ending at i
    dec = [1] * n  # dec[i]: longest strictly decreasing subsequence starting at i
    for i in range(1, n):
        for j in range(i):
            if arr[j] < arr[i]:
                inc[i] = max(inc[i], inc[j] + 1)
    for i in range(n - 2, -1, -1):
        for j in range(n - 1, i, -1):
            if arr[j] < arr[i]:
                dec[i] = max(dec[i], dec[j] + 1)
    # The peak is counted in both inc[i] and dec[i], hence the -1.
    return max(inc[i] + dec[i] - 1 for i in range(n))


if __name__ == "__main__":
    # Input format: n on the first line, then n space-separated integers.
    input()  # n is implied by the list itself
    print(longest_bitonic_subsequence(list(map(int, input().split()))))
| StarcoderdataPython |
import argparse
import random
import numpy as np
import torch
from nner import *
from transformers import *
# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--source_language", default='en', type=str,
                    help="The source language")  # fixed: help text previously said "target"
parser.add_argument("--target_language", default='en', type=str,
                    help="The target language")
parser.add_argument("--bert_model", default='', type=str,
                    help="Bert pre-trained model selected in the list: bert-base-uncased, "
                         "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                         "bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir", default='save', type=str,
                    help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--ckpt", default=None, type=str,
                    help="Checkpoint for previously saved model")
parser.add_argument("--exp_name", default=None, type=str,
                    help="Checkpoint and config save prefix")
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--num_exp", default=None, type=int,
                    help="Number of additional examples from source language")
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--max_epoch", default=5, type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--gpuid", default='0', type=str)
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--num_duplicate", default=20, type=int)
parser.add_argument("--warmup_proportion", default=0.4, type=float)
parser.add_argument("--gradient_accumulation_steps", default=1, type=int,
                    help="Number of updates steps to accumulate before performing a backward/update pass.")
args = parser.parse_args()
if __name__ == '__main__':
    # Seed every RNG in play (python, numpy, torch) for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    save_ckpt = args.exp_name + '.ckpt'
    save_config = args.exp_name + '.cfg'
    # parse source domains
    print('F1 ================== EXP =====================')
    source_language = args.source_language
    target_language = args.target_language
    print('F1 Target language: %s' % target_language)
    print('batchsize: %d' % args.batchsize)
    print('learning rate: %.7f' % args.learning_rate)
    print('max epochs: %d' % args.max_epoch)
    print('max_seq_length: %d' % args.max_seq_length)
    print('num_depulicate: %d' % args.num_duplicate)
    print('warmup proportion: %.5f' % args.warmup_proportion)
    print('model ckpt will be saved at: %s' % save_ckpt)
    print('model config will be saved at: %s' % save_config)
    # Label inventory comes from the ACE data processor (defined in nner).
    processor = ACEProcessor()
    label_list = processor.get_labels()
    num_labels = len(label_list)
    device = torch.device('cuda:' + args.gpuid)
    # build model: pick the architecture matching --bert_model.
    if args.bert_model == 'bert-base-multilingual-cased':
        model = BertForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels = num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
    elif args.bert_model == 'xlm-roberta-base':
        # NOTE(review): weights come from a hard-coded local TLM checkpoint
        # path, not from the hub name in --bert_model.
        model = XLMRobertaForNER.from_pretrained('/data/lan/BiBERT/data/xlm-robert-base-pre-training/tlm/checkpoints/',
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
    elif args.bert_model == 'xlm-mlm-xnli15-1024':
        model = XLMForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
    elif args.bert_model == 'xlm-mlm-tlm-xnli15-1024':
        model = XLMForNER.from_pretrained(args.bert_model,
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
    elif args.bert_model == 'xlm-roberta-large':
        # NOTE(review): also a hard-coded local checkpoint directory.
        model = XLMRobertaForNER.from_pretrained('/data/lan/BiBERT/saved_model/'+ args.bert_model + '/giga/',
cache_dir=args.output_dir,
num_labels=num_labels,
output_hidden_states=True) # if you want to get all layer hidden states
    else:
        # Anything else is treated as a local model directory containing
        # bert_config.json, pytorch_model.bin and vocab.txt.
        config = BertConfig.from_json_file(args.bert_model+'/bert_config.json') # config file
        config.num_labels = num_labels
        config.output_hidden_states = True
        #print('num_labels: ', num_labels)
        #sys.exit()
        model = BertForNER(config=config)
        model.load_state_dict(torch.load(args.bert_model+'/pytorch_model.bin', map_location=device), strict=False) # pytorch ckpt file
    model.set_label_map(label_list)
    model.to(device)
    model.set_device('cuda:' + args.gpuid)
    # Standard BERT fine-tuning recipe: no weight decay on biases/LayerNorm.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    # preprocess the data to json file and use loader to convert it to training format
    training_data_path = source_language + '/train.txt'
    # Experiments tagged "source" validate on the source language; otherwise
    # the dev set comes from the target language.
    if 'source' in args.exp_name:
        dev_data_path = source_language + '/dev.txt'
    else:
        dev_data_path = target_language + '/dev.txt'
    test_data_path = target_language + '/test.txt'
    train_examples = processor.get_examples(training_data_path)
    num_train_optimization_steps = int(
len(train_examples) / args.batchsize / args.gradient_accumulation_steps) * args.max_epoch
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * num_train_optimization_steps), num_training_steps=num_train_optimization_steps)
    #scheduler = WarmupLinearSchedule(optimizer, warmup_steps=int(args.warmup_proportion * num_train_optimization_steps), t_total=num_train_optimization_steps)
    # Build the tokenizer and normalize its special-token names per model family.
    if args.bert_model == 'bert-base-multilingual-cased':
        tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
        tokenizer.bos_token = '[CLS]'
        tokenizer.eos_token = '[SEP]'
        tokenizer.unk_token = '[UNK]'
        tokenizer.sep_token = '[SEP]'
        tokenizer.cls_token = '[CLS]'
        tokenizer.mask_token = '[MASK]'
        tokenizer.pad_token = '[PAD]'
    elif args.bert_model == 'xlm-roberta-base':
        tokenizer = XLMRobertaTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
    elif args.bert_model == 'xlm-roberta-large':
        tokenizer = XLMRobertaTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
    elif args.bert_model == 'xlm-mlm-xnli15-1024':
        tokenizer = XLMTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
        tokenizer.bos_token = '<s>'
        tokenizer.eos_token = '</s>'
        tokenizer.unk_token = '<unk>'
        tokenizer.sep_token = '</s>'
        tokenizer.cls_token = '</s>'
        tokenizer.mask_token = '<special1>'
        tokenizer.pad_token = '<pad>'
    elif args.bert_model == 'xlm-mlm-tlm-xnli15-1024':
        tokenizer = XLMTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
        tokenizer.bos_token = '<s>'
        tokenizer.eos_token = '</s>'
        tokenizer.unk_token = '<unk>'
        tokenizer.sep_token = '</s>'
        tokenizer.cls_token = '</s>'
        tokenizer.mask_token = '<special1>'
        tokenizer.pad_token = '<pad>'
    else:
        #if args.bert_model=='bibert-64k' or args.bert_model == 'csbert' or args.bert_model == 'bibert':
        #    lower_case_flag=True
        #else:
        lower_case_flag=True
        print('lower_case_flag: ', lower_case_flag)
        tokenizer = BertTokenizer.from_pretrained(args.bert_model+'/vocab.txt', do_lower_case=lower_case_flag) # bert vocab file
        tokenizer.bos_token = '[CLS]'
        tokenizer.eos_token = '[SEP]'
        tokenizer.unk_token = '[UNK]'
        tokenizer.sep_token = '[SEP]'
        tokenizer.cls_token = '[CLS]'
        tokenizer.mask_token = '[MASK]'
        tokenizer.pad_token = '[PAD]'
    # make data loader for train/dev/test
    print('Loading training data...\n')
    train_dataloader, _ = create_dataloader(training_data_path, set_type='train', batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
    print('Loading development data...\n')
    dev_dataloader, dev_size = create_dataloader(dev_data_path, set_type='dev',
batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
    print('Loading testing data...\n')
    test_dataloader, test_size = create_dataloader(test_data_path, set_type='test',
batchsize=args.batchsize,
max_seq_length=args.max_seq_length, tokenizer=tokenizer,
num_duplicate=args.num_duplicate)
    # train
    print('Training started...')
    model = train(model, train_dataloader=train_dataloader, dev_dataloader=dev_dataloader,
dev_size=dev_size, optimizer=optimizer, scheduler=scheduler, max_epochs=args.max_epoch,
save_ckpt=save_ckpt, save_config=save_config, dev_ref=dev_data_path.replace('txt', 'json'))
    # Load best checkpoint (saved by train() with a 'best_' prefix).
    print('Loading best check point...')
    output_model_file = 'best_' + save_ckpt
    model.load_state_dict(torch.load(output_model_file, map_location=device))
    # Final evaluation on dev and test with the best-dev checkpoint.
    print('Evaluating on dev set...\n')
    f1, avg_loss = evaluate(model, dev_dataloader, dev_size, ref=dev_data_path.replace('txt', 'json'))
    print('DEV F1: %.5f, avg loss: %.5f' % (f1, avg_loss))
    print('Evaluating on test set...\n')
    f1, avg_loss = evaluate(model, test_dataloader, test_size, ref=test_data_path.replace('txt', 'json'))
    print('Test F1: %.5f, avg loss: %.5f' % (f1, avg_loss))
| StarcoderdataPython |
'''
Created on Feb 3, 2017
@author: Akash
link_preview is now a fashionable way of sharing links
in social media. The contents of what the preview is
made up are:-
1. og.title:-
    Title of the preview.
    In HTML: <meta property="og:title" content="XYZ">
    Value: XYZ
2. og.description:-
    Description of the preview.
    In HTML: <meta property="og:description" content="XYZ">
    Value: XYZ
3. og.image:-
    Image of the preview.
    In HTML: <meta property="og:image" content="XYZ">
    Value: XYZ
4. title:-
if 'og:title' is not found, this becomes the Title.
In HTML: <title>XYZ</title>
Value: XYZ
5. meta description:-
if 'og:description' is not found, this becomes the Description.
In HTML: <meta name="description" content="XYZ">
Value: XYZ
6. favicon:-
if 'og:image' is not found, this becomes the Image.
In HTML: <link rel="shortcut icon" href="XYZ" type="image/x-icon">
Value: XYZ
7. website:-
Host website for the link.
Reference:- https://richpreview.com/ (from where I learned)
This module fetches all these data and combines those into
a dictionary.
A sample WhatsApp link_preview:
#######################################
# I # Title #
# M # Description #
# A # #
# G # website #
# E # #
#######################################
Usage:-
from link_preview import link_preview
dict_elem = link_preview.generate_dict(url) # this is a dict()
# Access values
title = dict_elem['title']
description = dict_elem['description']
image = dict_elem['image']
website = dict_elem['website']
'''
import urllib.request as req
import re
def generate_dict(url):
    '''
    returns dictionary containing elements of link_preview:
        dict_keys :
            'title' : '',
            'description': '',
            'image': '',
            'website': ''
    if Exception occurs, it raises Exception of urllib.request module.
    '''
    return_dict = {}
    # Any urllib error (bad URL, HTTP failure, decode error) propagates to the
    # caller unchanged; the previous ``except Exception as e: raise e`` wrapper
    # added nothing and has been removed.
    html = req.urlopen(url).read().decode('utf-8')
    # Only the three basic Open Graph tags; the (?!:) lookahead rejects
    # structured sub-properties such as og:image:width.
    meta_elems = re.findall(r'<[\s]*meta[^<>]+og:(?:title|image|description)(?!:)[^<>]+>', html)
    og_dict = dict(return_og(elem) for elem in meta_elems)
    # title: prefer og:title, fall back to the <title> element
    try:
        return_dict['title'] = og_dict['og.title']
    except KeyError:
        return_dict['title'] = find_title(html)
    # description: prefer og:description, fall back to <meta name="description">
    try:
        return_dict['description'] = og_dict['og.description']
    except KeyError:
        return_dict['description'] = find_meta_desc(html)
    # website: host part of the requested URL
    return_dict['website'] = find_host_website(url)
    # image: prefer og:image, fall back to the favicon (made absolute if relative)
    try:
        return_dict['image'] = og_dict['og.image']
    except KeyError:
        image_path = find_image(html)
        if 'http' not in image_path:
            image_path = 'http://' + return_dict['website'] + image_path
        return_dict['image'] = image_path
    return return_dict
def return_og(elem):
    '''
    returns content of og_elements as a ("og.title" | "og.image" |
    "og.description", value) pair; None when *elem* matches none of the three.
    '''
    # Raw strings fix the invalid \s escapes of the original patterns.
    content = re.findall(r'content[\s]*=[\s]*"[^<>"]+"', elem)[0]
    p = re.findall(r'"[^<>]+"', content)[0][1:-1]
    if 'og:title' in elem:
        return ("og.title", p)
    elif 'og:image' in elem and 'og:image:' not in elem:
        return ("og.image", p)
    elif 'og:description' in elem:
        return ("og.description", p)
def find_title(html):
    '''
    returns the <title> of html, or '' when it is absent or empty
    '''
    try:
        title_elem = re.findall(r'<[\s]*title[\s]*>[^<>]+<[\s]*/[\s]*title[\s]*>', html)[0]
        title = re.findall(r'>[^<>]+<', title_elem)[0][1:-1]
    except IndexError:
        # No match: bare ``except`` narrowed to the only expected failure.
        title = ''
    return title
def find_meta_desc(html):
    '''
    returns the description (<meta name="description") of html, or ''
    '''
    try:
        meta_elem = re.findall(r'<[\s]*meta[^<>]+name[\s]*=[\s]*"[\s]*description[\s]*"[^<>]*>', html)[0]
        content = re.findall(r'content[\s]*=[\s]*"[^<>"]+"', meta_elem)[0]
        description = re.findall(r'"[^<>]+"', content)[0][1:-1]
    except IndexError:
        # No description tag (or no content attribute) found.
        description = ''
    return description
def find_image(html):
    '''
    returns the favicon (shortcut icon link) of html, or ''
    '''
    try:
        favicon_elem = re.findall(r'<[\s]*link[^<>]+rel[\s]*=[\s]*"[\s]*shortcut icon[\s]*"[^<>]*>', html)[0]
        href = re.findall(r'href[\s]*=[\s]*"[^<>"]+"', favicon_elem)[0]
        image = re.findall(r'"[^<>]+"', href)[0][1:-1]
    except IndexError:
        # No favicon link found.
        image = ''
    return image
def find_host_website(url):
    '''
    returns host website from the url (first '/'-separated chunk with a dot)
    '''
    dotted_chunks = [chunk for chunk in url.split('/') if '.' in chunk]
    return dotted_chunks[0]
| StarcoderdataPython |
5175401 | <reponame>fragmuffin/howto-micropython
import machine
# Configure the board's 'SW' (user switch) pin as a digital input, trying
# each of the three internal pull-resistor options in turn.  Each call
# rebinds ``pin`` to a newly constructed Pin object.
pin = machine.Pin('SW', machine.Pin.IN, machine.Pin.PULL_UP)
pin = machine.Pin('SW', machine.Pin.IN, machine.Pin.PULL_DOWN)
pin = machine.Pin('SW', machine.Pin.IN, machine.Pin.PULL_NONE)
# These 2 initialize the pin in the same way
pin = machine.Pin('SW') # defaults for SW are...
pin = machine.Pin('SW', machine.Pin.IN, machine.Pin.PULL_UP)
| StarcoderdataPython |
from django.conf.urls.defaults import *
from django.views.generic.list_detail import object_list
from tagging.views import tagged_object_list
from badges.models import Badge
from badges.feeds import RecentlyClaimedAwardsFeed, RecentlyClaimedAwardsJSONFeed
from badges.feeds import AwardsClaimedForProfileFeed, AwardsClaimedForProfileJSONFeed
from badges.feeds import AwardsClaimedForBadgeFeed, AwardsClaimedForBadgeJSONFeed
from voting.views import vote_on_object
# URL routes for the badges app (legacy string-view ``patterns()`` urlconf).
urlpatterns = patterns("badges.views",
    # Landing page and paginated browse list of all badges.
    url(r'^$', 'index', name='badge_index'),
    url(r'^all/$', object_list,
        dict(queryset=Badge.objects.all(), template_object_name='badge',
template_name='badges/badge_list.html', paginate_by=25,
allow_empty=True),
        name='badge_browse'),
    # Badges filtered by a tag (django-tagging generic view).
    url(r'^tag/(?P<tag>[^/]+)/$', tagged_object_list,
        dict(queryset_or_model=Badge, paginate_by=25, allow_empty=True,
template_object_name='badge'),
        name='badge_tag'),
    # Up/down/clear voting on a badge; AJAX requests are allowed.
    url(r'^badge/(?P<slug>[^/]+)/(?P<direction>up|down|clear)vote/?$', vote_on_object,
        dict(slug_field='slug', model=Badge, template_object_name='badge',
allow_xmlhttprequest=True),
        name='badge_vote'),
    url(r"^create$", "create",
        name="create_badge"),
    url(r"^verify/(.*)$", "awardee_verify",
        name="awardee_verify"),
    # Nominations and awards scoped to a single badge.
    url(r"^badge/(.*)/nominations/$", "nomination_create",
        name="badge_nomination_create"),
    url(r"^badge/(.*)/nominations/(.*)$", "nomination_details",
        name="badge_nomination"),
    #url(r"^badge/(.*)/awards/$", "award_list",
    #    name="badge_award_recent"),
    url(r"^badge/(.*)/awards/(.*)/$", "award_history",
        name="badge_award_list"),
    url(r"^badge/(.*)/awards/(.*)/showhide$", "award_show_hide_bulk",
        name="badge_award_show_hide"),
    url(r"^badge/(.*)/awards/(.*)/(.*)$", "award_details",
        name="badge_award"),
    url(r"^badge/(.*)/awards/(.*)/(.*)/showhide$", "award_show_hide_single",
        name="badge_award_show_hide_single"),
    url(r"^badge/(.*)/edit$", "edit",
        name="badge_edit"),
    url(r"^badge/(.*)$", "badge_details",
        name="badge_details"),
    (r'^api/', include('badges.api.urls')),
    # Atom and JSON feeds for recently claimed awards, per profile and per badge.
    url(r'feeds/atom/recentawards/', RecentlyClaimedAwardsFeed(),
        name="badge_feed_recentawards"),
    url(r'feeds/atom/profiles/(.*)/awards/', AwardsClaimedForProfileFeed(),
        name="badge_feed_profileawards"),
    url(r'feeds/atom/badges/(.*)/awards/', AwardsClaimedForBadgeFeed(),
        name="badge_feed_badgeawards"),
    url(r'feeds/json/recentawards/', RecentlyClaimedAwardsJSONFeed(),
        name="badge_json_recentawards"),
    url(r'feeds/json/profiles/(.*)/awards/', AwardsClaimedForProfileJSONFeed(),
        name="badge_json_profileawards"),
    url(r'feeds/json/badges/(.*)/awards/', AwardsClaimedForBadgeJSONFeed(),
        name="badge_json_badgeawards"),
)
| StarcoderdataPython |
'''
flask_miracle.functions
-----------------------
functions callable from within a Flask context
'''
from flask import current_app
def check_any(resource, permission, roles=None):
    """Return the ACL manager's check_any() verdict for *resource*/*permission*.

    *roles* is forwarded to the manager.  (Bug fix: the original hard-coded
    ``roles=None`` in the delegated call, silently discarding any
    caller-supplied roles.)
    """
    return current_app.miracle_acl_manager.check_any(resource, permission, roles=roles)
def check_all(resource, permission, roles=None):
    """Return the ACL manager's check_all() verdict for *resource*/*permission*.

    *roles* is forwarded to the manager.  (Bug fix: the original hard-coded
    ``roles=None`` in the delegated call, silently discarding any
    caller-supplied roles.)
    """
    return current_app.miracle_acl_manager.check_all(resource, permission, roles=roles)
def set_current_roles(roles):
    """Forward *roles* to the current app's ACL manager and return its result."""
    manager = current_app.miracle_acl_manager
    return manager.set_current_roles(roles)
| StarcoderdataPython |
1918332 | <gh_stars>1000+
# SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
(MVEX) EH bit value
"""
import typing
# ``MvexEHBit`` is a real type only during static analysis; at runtime the
# constants below are plain ints aliased to ``int``.
if typing.TYPE_CHECKING:
	from ._iced_x86_py import MvexEHBit
else:
	MvexEHBit = int
NONE: MvexEHBit = 0 # type: ignore
"""
Not hard coded to 0 or 1 so can be used for other purposes
"""
EH0: MvexEHBit = 1 # type: ignore
"""
EH bit must be 0
"""
EH1: MvexEHBit = 2 # type: ignore
"""
EH bit must be 1
"""
| StarcoderdataPython |
4900233 | <reponame>PraveenKumar-Rajendran/CarND-Behavioral-Cloning<gh_stars>0
version https://git-lfs.github.com/spec/v1
oid sha256:c9bd3410a4d0bb585f7dc4ff96eb71361c8acfacbab10e278230b6be4a8cb983
size 3457
| StarcoderdataPython |
6648819 | <filename>psims/mzmlb/components.py<gh_stars>10-100
from ..mzml.components import BinaryDataArray, Binary, NullMap
from ..xml import _element
# cvParam name used to point a <binaryDataArray> at values stored in an
# external HDF5 dataset; the commented line is an alternative name kept
# for reference.
EXTERNAL_DATASET_PARAM = "external HDF5 dataset"
# EXTERNAL_DATASET_PARAM = "external dataset"
class ExternalBinaryDataArray(BinaryDataArray):
    """A ``<binaryDataArray>`` whose values live outside the XML document,
    in an external HDF5 dataset, instead of being base64-encoded inline.

    The element carries only reference parameters: the dataset name, the
    external array length, and the offset of this array's data within the
    dataset.  ``encodedLength`` is 0 because nothing is encoded inline.
    """
    def __init__(self, external_dataset_name, data_processing_reference=None,
                 offset=None, array_length=None, params=None, context=NullMap, **kwargs):
        # external_dataset_name: name of the HDF5 dataset holding the values.
        # offset / array_length: location and length of this array's data
        #     within that dataset.
        if (params is None):
            params = []
        self.external_dataset_name = external_dataset_name
        self.array_length = array_length
        self.offset = offset
        self.data_processing_reference = data_processing_reference
        if data_processing_reference:
            # Resolve the reference through the id-tracking context.
            self._data_processing_reference = context[
                'DataProcessing'][data_processing_reference]
        else:
            self._data_processing_reference = None
        self.params = self.prepare_params(params, **kwargs)
        self.element = _element(
            'binaryDataArray',
            encodedLength=0,
            dataProcessingRef=self._data_processing_reference)
        self.context = context
        self._array_type = None
        self._prepare_external_refs()
        # An empty inline <binary> element is still emitted alongside the
        # external references.
        self.binary = Binary(b"", context=self.context)
    def _prepare_external_refs(self):
        """Attach the cvParams that reference the external dataset; returns self."""
        self.add_param({
            "name": EXTERNAL_DATASET_PARAM,
            "value": self.external_dataset_name
        }).add_param({
            "name": "external array length",
            "value": self.array_length,
        }).add_param({
            "name": "external offset",
            "value": self.offset
        })
        return self
| StarcoderdataPython |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# DESCRIPTION: Given an excel file and text files passed as arguments to the script,
# metadata headers are added to each individual text files
# Windows run with Anaconda Prompt example:
# python add_headers.py --directory="Fall 2018/normalized/" --master_file="Metadata_Fall_2018_updated.csv"
import argparse
import csv
import pandas
import os
import re
from pandas import DataFrame
# Define how we retrieve arguments sent to the script.
parser = argparse.ArgumentParser(description='Add Headers to Individual Textfile')
parser.add_argument('--directory', action="store", dest='directory', default='')  # folder tree of .txt files to process
parser.add_argument('--master_file', action="store", dest='master_file', default='')  # .xlsx/.csv metadata spreadsheet
parser.add_argument('--overwrite', action='store_true')  # passed through below but not otherwise used in this file
args = parser.parse_args()
#----------------------------------------------------------------------------------------------------------------------------------------
# function 1 is defined
# File-name token -> corpus codes.  File paths follow the pattern
# <name>_p<project>d<draft>  (e.g. Lan_p1d3), where project 1..5 maps to an
# assignment code and draft 1/2/3 maps to first/second/final draft.
ASSIGNMENT_BY_PROJECT = {'1': 'LN', '2': 'RP', '3': 'IR', '4': 'SY', '5': 'AR'}
DRAFT_BY_NUMBER = {'1': '1', '2': '2', '3': 'F'}


def _assignment_and_draft(filename):
    """Infer the (assignment, draft) header codes from the pNdM token in *filename*.

    Returns ('', '') when no token is present, matching the original behavior.
    """
    match = re.search(r'[a-zA-Z]+_p([1-5])d([1-3])', filename)
    if match:
        return ASSIGNMENT_BY_PROJECT[match.group(1)], DRAFT_BY_NUMBER[match.group(2)]
    return '', ''


def _single_value(filtered, column, missing='NA'):
    """Render one column of the (single-row) frame *filtered* as a stripped string.

    pandas prints missing values as 'NaN'; map that to *missing*.
    """
    text = filtered[column].to_string(index=False).strip()
    return re.sub(r'NaN', missing, text)


def _year_in_school_numeric(description):
    """Map a STUDENT_CLASS_BOAP_DESC string (e.g. 'Junior: 45-60 hours') to '1'-'4'.

    Bug fix: the original used an if/if/if/else chain whose trailing ``else``
    bound only to the Senior test, so every non-Senior match was clobbered
    back to 'NA'.
    """
    if re.search(r'Freshmen(\:.*)', description):
        return '1'
    elif re.search(r'Sophomore(\:.*)', description):
        return '2'
    elif re.search(r'Junior(\:.*)', description):
        return '3'
    elif re.search(r'Senior(\:.*)', description):
        return '4'
    return 'NA'


def _proficiency_scores(toefl, ielts):
    """Combine TOEFL/IELTS score dicts into
    (exam, total, reading, listening, speaking, writing) strings.

    Each argument maps 'total'/'reading'/'listening'/'speaking'/'writing' to a
    score string or 'NA'.  Bug fix: the both-exams case is tested first — in
    the original it sat after the single-exam ``elif`` branches and was
    unreachable.
    """
    keys = ('total', 'reading', 'listening', 'speaking', 'writing')
    has_toefl = toefl['total'] != 'NA'
    has_ielts = ielts['total'] != 'NA'
    if has_toefl and has_ielts:
        return ('TOEFL;IELTS',) + tuple(toefl[k] + ';' + ielts[k] for k in keys)
    if has_toefl:
        return ('TOEFL',) + tuple(toefl[k] for k in keys)
    if has_ielts:
        return ('IELTS',) + tuple(ielts[k] for k in keys)
    return ('NA',) + ('NA',) * 5


def add_header_to_file(filename, master, overwrite=False):
    """Write a metadata-headed copy of one student .txt file.

    *filename* is matched against the module-global ``career_account_list``
    (the ``_<career_account>_`` token must occur in the path); the matching
    row of the *master* DataFrame supplies the metadata.  The annotated copy
    is written under ``files_with_headers/<term>/ENGL <course>/<assignment>/<draft>/``
    with a name built from the metadata fields.

    Returns True when *filename* is a .txt file (whether or not any student
    matched), False otherwise.  ``overwrite`` is accepted for interface
    compatibility but is not used here.
    """
    global career_account_list
    global assignment
    global draft
    if '.txt' not in filename:
        return False
    for career_account in career_account_list:
        token = '_' + career_account + '_'
        # re.escape keeps account names containing regex metacharacters literal.
        if not re.search('_' + re.escape(career_account) + '_', filename):
            continue
        print('>>>>> matched: ', token, 'is in', filename, 'and adding headers...')
        filtered_master = master[master['User_ID'] == career_account]
        course = filtered_master['COURSE_NUMBER'].to_string(index=False)
        assignment, draft = _assignment_and_draft(filename)
        country_code = _single_value(filtered_master, 'COUNTRY_CODE', missing='NAN')
        year_in_school = filtered_master['STUDENT_CLASS_BOAP_DESC'].to_string(index=False).strip()
        year_in_school_numeric = _year_in_school_numeric(year_in_school)
        gender = _single_value(filtered_master, 'GENDER')
        crow_id = _single_value(filtered_master, 'Crow ID')
        institution_code = 'PRD'  # hard coding: PRD = Purdue University
        # course assignment draft country yearinschool gender studentID institution '.txt'
        parts = [course, assignment, draft, country_code, year_in_school_numeric,
                 gender, crow_id, institution_code]
        output_filename = '_'.join(parts) + '.txt'
        output_filename = re.sub(r'\s', r'', output_filename)
        output_filename = re.sub(r'__', r'_NA_', output_filename)
        term = filtered_master['Semester'].to_string(index=False).strip()
        # Output directory rooted at the current working directory.
        out_dir = os.path.join(os.getcwd(), "files_with_headers", term,
                               "ENGL " + course, assignment, draft)
        os.makedirs(out_dir, exist_ok=True)
        semester, year = term.split()[0], term.split()[1]
        country = _single_value(filtered_master, 'NATION_OF_CITIZENSHIP_DESC')
        institution = 'Purdue University'
        college = filtered_master['COLLEGE'].to_string(index=False).strip()
        program = filtered_master['PROGRAM_DESC'].to_string(index=False).strip()
        toefl = {
            'total': _single_value(filtered_master, 'TIBT - TOEFL IBT Total Score'),
            'reading': _single_value(filtered_master, 'TIBR - TOEFL IBT Reading Score'),
            'listening': _single_value(filtered_master, 'TIBL - TOEFL IBT Listening Score'),
            'speaking': _single_value(filtered_master, 'TIBS - TOEFL IBT Speaking Score'),
            'writing': _single_value(filtered_master, 'TIBW - TOEFL IBT Writing Score'),
        }
        ielts = {
            'total': _single_value(filtered_master, 'ILT2 - IELTS Overall'),
            'reading': _single_value(filtered_master, 'ILT3 - IELTS Reading'),
            'listening': _single_value(filtered_master, 'ILT1 - IELTS Listening'),
            'speaking': _single_value(filtered_master, 'ILT4 - IELTS Speaking'),
            'writing': _single_value(filtered_master, 'ILT5 - IELTS Writing'),
        }
        (proficiency_exam, exam_total, exam_reading, exam_listening,
         exam_speaking, exam_writing) = _proficiency_scores(toefl, ielts)
        instructor = filtered_master['Instructor_Code'].to_string(index=False).strip()
        section = filtered_master['COURSE_REFERENCE_NUMBER'].to_string(index=False).strip()
        mode = filtered_master['Mode'].to_string(index=False).strip()
        length = filtered_master['Length'].to_string(index=False).strip()
        header_lines = [
            "<Student ID: " + crow_id + ">",
            "<Country: " + country + ">",
            "<Institution: " + institution + ">",
            "<Course: ENGL " + course + ">",
            "<Mode: " + mode + ">",
            "<Length: " + length + ">",
            "<Assignment: " + assignment + ">",
            "<Draft: " + draft + ">",
            "<Year in School: " + year_in_school_numeric + ">",
            "<Gender: " + gender + ">",
            "<Course Year: " + year + ">",
            "<Course Semester: " + semester + ">",
            "<College: " + college + ">",
            "<Program: " + program + ">",
            "<Proficiency Exam: " + proficiency_exam + ">",
            "<Exam total: " + exam_total + ">",
            "<Exam reading: " + exam_reading + ">",
            "<Exam listening: " + exam_listening + ">",
            "<Exam speaking: " + exam_speaking + ">",
            "<Exam writing: " + exam_writing + ">",
            "<Instructor: " + instructor + ">",
            "<Section: " + section + ">",
            "<End Header>",
            "",
        ]
        # Bug fix: os.path.join replaces the original ``path + output_filename``
        # concatenation, which fused the draft folder name onto the file name.
        out_path = os.path.join(out_dir, output_filename)
        with open(filename, 'r') as textfile, open(out_path, 'w') as output_file:
            for header_line in header_lines:
                print(header_line, file=output_file)
            # Body: collapse whitespace runs to single spaces, drop blank lines.
            for line in textfile:
                this_line = re.sub(r'\r?\n', r'\r\n', line)
                if this_line != '\r\n':
                    print(re.sub(r'\s+', r' ', this_line).strip(), file=output_file)
    return True
#---------------------------------------------------------------------------------------------------------------------------------------
# function 2 is defined (master here is the master_data in the main program that has been excel_read())
# function 2: walks the directory tree and runs add_header_to_file() on every file
def add_headers_recursive(directory, master, overwrite=False):
    """Recursively add metadata headers to every text file under *directory*.

    :param directory: root directory to walk
    :param master: master spreadsheet data (pandas object) with student metadata
    :param overwrite: passed through to add_header_to_file(); whether to
        rewrite files that already have a header
    """
    found_text_files = False
    # dirpath = path without file names (C:\folder1\folder2\folder3\)
    # files = file names within dirpath (p1d1.txt)
    # os.path.join(dirpath, name) = full path with file name
    for dirpath, dirnames, files in os.walk(directory):
        for name in files:
            # function 1 processes one file; returns truthy iff it was a text file
            is_this_a_text_file = add_header_to_file(os.path.join(dirpath, name), master, overwrite)
            if is_this_a_text_file:
                found_text_files = True
    if not found_text_files:
        print('No text files found in the directory.')
#---------------------------------------------------------------------------------------------------------------------------------------
# the main program starts here:
# ---------------------------------------------------------------------------
# Main program: load the master spreadsheet (.xlsx or .csv), build the list
# of career account names, then add headers to the student files.
if args.master_file and args.directory:
    master_data = None
    if '.xlsx' in args.master_file:
        master_data = pandas.read_excel(args.master_file)
    elif '.csv' in args.master_file:
        # BUG FIX: the csv branch previously re-read the file with
        # read_excel() on an undefined variable (master_file) and then
        # wrapped the *filename string* — not the data — in a DataFrame.
        master_data = pandas.read_csv(args.master_file)
    if master_data is not None:
        master_data_frame = pandas.DataFrame(master_data)
        # career account names, used to match against file names in the functions
        career_account_list = master_data_frame['User_ID'].tolist()
        # function 2 is called with (1) directory (2) master_data (3) overwrite
        add_headers_recursive(args.directory, master_data, args.overwrite)
    else:
        # BUG FIX: previously an unsupported extension fell through and
        # crashed with NameError on the undefined master_data.
        print('>>>>> Error report: master_file must be a .xlsx or .csv file.')
else:
    print('>>>>> Error report: provide a valid master_file and directory with student files.')
| StarcoderdataPython |
3481422 | <reponame>1995chen/jingdong_financial
# -*- coding: utf-8 -*-
from typing import Optional
import inject
import template_logging
from template_pagination import Pagination
from sqlalchemy.orm import Query
from sqlalchemy import desc
from template_transaction import CommitContext
from app.models import GoldPrice
from app.dependencies import MainDBSession
logger = template_logging.getLogger(__name__)
pagination: Pagination = inject.instance(Pagination)
"""
Service 中不应该出现Schema
理想情况下所有涉及参数校验均应该在dataclass中的__post_init__方法内完成
"""
def get_current_price() -> Optional[GoldPrice]:
    """Return the most recent gold price record, or None when none exists."""
    db_session = inject.instance(MainDBSession)
    with CommitContext(db_session):
        latest: Optional[GoldPrice] = (
            db_session.query(GoldPrice).order_by(desc(GoldPrice.time)).first()
        )
        return latest
@pagination.with_paginate()
def get_latest_price() -> Query:
    """Build a query over all gold price records.

    Pagination itself is applied by the ``with_paginate`` decorator.
    """
    db_session = inject.instance(MainDBSession)
    with CommitContext(db_session):
        price_query: Query = db_session.query(GoldPrice)
        return price_query
| StarcoderdataPython |
4843431 | <reponame>leonardcser/waldo-video-preprocessor
from utils.command_utils import check_docker_installed, run_cmd
from variables import IMAGE_NAME
from utils.logger import logger
def main() -> None:
    """Ask for confirmation, then force-remove the project's docker image."""
    check_docker_installed()
    prompt = (
        "[INPUT] Are you sure you want to delete the container? "
        "You can always rebuild it. (Y/n): "
    )
    answer = input(prompt)
    if answer.lower() != "y":
        logger.info("Cancelled.")
        return
    # Remove every image matching IMAGE_NAME, forcing removal.
    run_cmd(f"docker rmi $(docker images '{IMAGE_NAME}' -a -q) --force")
    logger.success(f"Sucessfully removed '{IMAGE_NAME}' image!")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
def main():
    """Read three sample files and print their contents line by line.

    Demonstrates text-mode iteration (file.txt, file2.txt) and binary-mode
    iteration (file3.txt).
    """
    # BUG FIX: file.txt was opened without a context manager and leaked
    # its handle if printing raised; use `with` for all three files.
    with open("file.txt") as fh:
        for line in fh:
            print(line)

    with open("file2.txt") as fh2:
        for line in fh2:
            print(line)

    with open("file3.txt", "rb") as fh3:
        for l in fh3:
            print(l)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
385065 | <reponame>azagajewski/ColiCoords
from colicoords.data_models import BinaryImage, BrightFieldImage, FluorescenceImage, STORMTable, Data
from colicoords.fileIO import load_thunderstorm, load
from colicoords.cell import Cell, CellList
from test.testcase import ArrayTestCase
from test.test_functions import load_testdata
from scipy.ndimage.interpolation import rotate as scipy_rotate
import os
import numpy as np
import unittest
class TestDataElements(ArrayTestCase):
    """Slicing a data element must preserve its dclass and its name."""

    def test_binaryimage(self):
        raw = np.round(np.random.rand(512, 512)).astype(int)
        binary_img = BinaryImage(raw, name='test1234', metadata={'no_entries': 123})
        self.assertArrayEqual(raw, binary_img)

        cropped = binary_img[20:100, 100:200]
        self.assertTrue(cropped.dclass == 'binary')
        self.assertTrue(cropped.name == 'test1234')

    def test_brightfieldimage(self):
        raw = np.round(np.random.rand(512, 512)) * 2**16-1
        bf_img = BrightFieldImage(raw, name='test1234', metadata={'no_entries': 123})

        cropped = bf_img[20:100, 100:200]
        self.assertTrue(cropped.dclass == 'brightfield')
        self.assertTrue(cropped.name == 'test1234')

    def test_fluorescence_img(self):
        raw = np.round(np.random.rand(512, 512)) * 2**16-1
        fl_img = FluorescenceImage(raw, name='test1234', metadata={'no_entries': 123})

        cropped = fl_img[20:100, 100:200]
        self.assertTrue(cropped.dclass == 'fluorescence')
        self.assertTrue(cropped.name == 'test1234')

    def test_fluorescence_mov(self):
        raw = np.round(np.random.rand(512, 512, 10)) * 2**16-1
        fl_mov = FluorescenceImage(raw, name='test1234', metadata={'no_entries': 123})

        cropped = fl_mov[:5, 20:100, 100:200]
        self.assertTrue(cropped.dclass == 'fluorescence')
        self.assertTrue(cropped.name == 'test1234')

    def test_data_class_storm(self):
        f_path = os.path.dirname(os.path.realpath(__file__))
        storm_data = load_thunderstorm(os.path.join(f_path, 'test_data/ds3/storm_table.csv'))
        storm_table = STORMTable(storm_data, name='test1234', metadata={'no_entries:': 123})

        storm_slice = storm_table[5: 20]
        self.assertTrue(storm_table.dclass == 'storm')
        self.assertTrue(storm_table.name == 'test1234')
        self.assertTrue(storm_slice.shape == (15,))
class TestMakeData(ArrayTestCase):
    """Validation behaviour of Data.add_data()."""

    def test_add_data(self):
        """Checks dtype/shape/uniqueness validation when adding data elements."""
        testdata_int = np.round(np.random.rand(512, 512)).astype(int)
        testdata_float = np.round(np.random.rand(512, 512)) * 2**16-1
        testdata_mov = np.round(np.random.rand(10, 512, 512)) * 2**16-1

        data = Data()
        with self.assertRaises(TypeError):  # Invalid dtype: binary must be int
            data.add_data(testdata_float, dclass='binary')

        data.add_data(testdata_int, dclass='binary')
        self.assertArrayEqual(testdata_int, data.data_dict['binary'])
        self.assertArrayEqual(testdata_int, data.binary_img)

        with self.assertRaises(ValueError):  # Invalid shape
            data.add_data(testdata_float.reshape(256, -1), 'fluorescence')

        with self.assertRaises(ValueError):  # Binary has to be unique
            data.add_data(testdata_int, dclass='binary', name='newbinaryname')

        data.add_data(testdata_float, dclass='brightfield')
        with self.assertRaises(ValueError):  # Same dclass data elements which will have the same name
            data.add_data(testdata_float, dclass='brightfield')

        self.assertEqual(testdata_float.shape, data.shape)
        data.add_data(testdata_mov, 'fluorescence', name='fluorescence_movie')
class TestData(ArrayTestCase):
    """Copy, rotation and iteration semantics of the Data container."""

    def setUp(self):
        """Load reference datasets and build STORM-only cells without fluorescence."""
        self.data = load_testdata('ds1')
        f_path = os.path.dirname(os.path.realpath(__file__))
        self.storm_cells_1 = load(os.path.join(f_path, 'test_data/test_single_spot_storm.hdf5'))
        self.storm_cells_2 = load(os.path.join(f_path, 'test_data/test_double_spot_storm.hdf5'))

        # Rebuild the double-spot cells keeping only binary + storm channels.
        cells_no_flu = []
        for c in self.storm_cells_2:
            d = Data()
            d.add_data(c.data.binary_img, 'binary')
            d.add_data(c.data.data_dict['storm_1'], 'storm', 'storm_1')
            d.add_data(c.data.data_dict['storm_2'], 'storm', 'storm_2')
            cell = Cell(d)
            cells_no_flu.append(cell)
        self.storm_cells_2_no_flu = CellList(cells_no_flu)

    def test_copying(self):
        """A copy must be deep: mutating the original leaves the copy intact."""
        data_copy = self.data.copy()
        for k, v in self.data.data_dict.items():
            self.assertArrayEqual(v, data_copy.data_dict[k])

        i = self.data.data_dict['fluorescence'][5, 10, 10]
        self.data.data_dict['fluorescence'][5, 10, 10] += 20
        self.assertEqual(self.data.data_dict['fluorescence'][5, 10, 10], i + 20)
        self.assertEqual(i, data_copy.data_dict['fluorescence'][5, 10, 10])

    def test_rotation(self):
        """Data.rotate must match scipy's rotate (note the opposite sign convention)."""
        data_rotated = self.data[:2].rotate(60)
        rotated = scipy_rotate(self.data.binary_img[:2], -60, mode='nearest', axes=(-1, -2))

        self.assertArrayEqual(rotated, data_rotated.binary_img)
        self.assertEqual(len(data_rotated), 2)

    def test_rotation_storm(self):
        """After rotation the STORM spot must stay at the fluorescence centroid."""
        for cell in self.storm_cells_1:
            for th in np.arange(90, 370, 90):
                data_r = cell.data.copy().rotate(th)
                flu = data_r.data_dict['fluorescence']
                storm = data_r.data_dict['storm']
                x, y = storm['x'], storm['y']

                nc = Cell(data_r, init_coords=False)
                nc.coords.shape = data_r.shape

                # Intensity-weighted centroid of the fluorescence channel.
                x_fl = np.sum(nc.coords.x_coords * flu) / np.sum(flu)
                y_fl = np.sum(nc.coords.y_coords * flu) / np.sum(flu)

                self.assertAlmostEqual(x[0], np.array(x_fl), 2)
                self.assertAlmostEqual(y[0], np.array(y_fl), 2)

        # https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249
        # for cell in self.storm_cells_2_no_flu:
        #     storm = cell.data.data_dict['storm_1']
        #     x1, y1 = storm['x'][0], storm['y'][0]
        #
        #     storm = cell.data.data_dict['storm_2']
        #     x2, y2 = storm['x'][0], storm['y'][0]
        #
        #     d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
        #     angle = np.arctan2(y1-y2, x1-x2)
        #
        #     data = cell.data.copy()
        #     for th in range(0, 740, 20):
        #         data_r = data.rotate(th)
        #
        #         storm = data_r.data_dict['storm_1']
        #         x1, y1 = storm['x'][0], storm['y'][0]
        #
        #         storm = data_r.data_dict['storm_2']
        #         x2, y2 = storm['x'][0], storm['y'][0]
        #
        #         d1 = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
        #         self.assertAlmostEqual(d, d1, 5)
        #
        #         angle1 = np.arctan2(y1-y2, x1-x2)
        #         rounded = np.round((angle - angle1)*(180/np.pi) + th, 10)
        #         self.assertAlmostEqual(rounded % 360, 0)

    def test_iteration(self):
        """Iterating a movie Data yields one frame per index."""
        for i, d in enumerate(self.data):
            with self.subTest(i=i):
                self.assertArrayEqual(self.data.binary_img[i], d.binary_img)
        self.assertEqual(len(self.data), 10)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3410666 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test rights schema."""
import pytest
from marshmallow import ValidationError
from invenio_rdm_records.services.schemas.metadata import MetadataSchema, \
ReferenceSchema
def test_valid_reference():
    """A fully-populated reference loads unchanged."""
    full_reference = {
        "reference": "Reference to something et al.",
        "identifier": "0000 0001 1456 7559",
        "scheme": "isni"
    }
    assert ReferenceSchema().load(full_reference) == full_reference
def test_valid_minimal_reference():
    """A reference with only the free-text field loads unchanged."""
    minimal_reference = {
        "reference": "Reference to something et al."
    }
    assert ReferenceSchema().load(minimal_reference) == minimal_reference
def test_invalid_no_reference():
    """Loading must fail when the required ``reference`` field is missing."""
    invalid_no_reference = {
        "identifier": "0000 0001 1456 7559",
        "scheme": "isni"
    }
    # Cleanup: the result was previously bound to an unused ``data`` variable.
    with pytest.raises(ValidationError):
        ReferenceSchema().load(invalid_no_reference)
def test_invalid_scheme_reference():
    """An unknown scheme is auto-corrected from the identifier value."""
    invalid_scheme = {
        "reference": "Reference to something et al.",
        "identifier": "0000 0001 1456 7559",
        "scheme": "Invalid"
    }
    # Cleanup: was ``loaded = data = ...`` — the ``data`` alias was never used.
    loaded = ReferenceSchema().load(invalid_scheme)
    # Check the backend forced the change to the correct scheme
    assert loaded["scheme"] == "isni"
def test_invalid_extra_right():
    """Loading must fail when an unknown extra field is present.

    NOTE(review): the name says "right" but the payload is a reference —
    presumably copied from the rights tests; name kept for compatibility.
    """
    invalid_extra = {
        "reference": "Reference to something et al.",
        "identifier": "0000 0001 1456 7559",
        "scheme": "Invalid",
        "extra": "field"
    }
    # Cleanup: the result was previously bound to an unused ``data`` variable.
    with pytest.raises(ValidationError):
        ReferenceSchema().load(invalid_extra)
@pytest.mark.parametrize("references", [
    ([]),
    ([{
        "reference": "Reference to something et al.",
        "identifier": "0000 0001 1456 7559",
        "scheme": "isni"
    }, {
        "reference": "Reference to something et al."
    }])
])
def test_valid_rights(references, minimal_record, vocabulary_clear):
    """Metadata with an empty or populated references list round-trips.

    NOTE(review): despite the name (apparently copied from the rights tests),
    this exercises the ``references`` field of MetadataSchema.
    """
    metadata = minimal_record['metadata']
    # NOTE: this is done to get possible load transformations out of the way
    metadata = MetadataSchema().load(metadata)
    metadata['references'] = references
    assert metadata == MetadataSchema().load(metadata)
| StarcoderdataPython |
# **************************** Challenge 094 ******************************** #
# Combining dictionaries and lists                                            #
# Write a program that reads the name, gender and age of several people,      #
# storing each person's data in a dictionary and all dictionaries in a list.  #
# At the end, show:                                                           #
# A) How many people were registered                                          #
# B) The average age                                                          #
# C) A list with the women                                                    #
# D) A list of people whose age is above the average                          #
# ************************************************************************** #
linha = '+=' * 24
linha1 = '\033[1;34m*=\033[m' * 30
título = ' \033[1;3;4;7;34mUnindo dicionários e listas\033[m '
print(f'\n{título:*^64}\n')
print(linha)
# ************************************************************************** #
cad = dict()
lista = list()
while True:
    # Read one person's data:
    cad['nome'] = str(input('Nome: ')).capitalize().strip()
    while True:
        # Keep only the first letter; accept only M or F.
        cad['sexo'] = str(input('Sexo (M/F): ')).upper().strip()[0]
        if cad['sexo'] not in "MF":
            print('Entrada INVÁLIDA.', end=' ')
        else:
            break
    cad['idade'] = int(input('Idade: '))
    # Append a copy, otherwise every list entry would share the same dict.
    lista.append(cad.copy())
    while True:
        resp = str(input('Deseja continuar (S/N)? ')).upper().strip()[0]
        if resp not in "SN":
            print('Entrada INVÁLIDA.', end=' ')
        else:
            break
    if resp == 'N':
        break
# End of data entry.
print(f'\n{linha1}')
# A) total number of people registered:
print(f"A) Ao todo foram cadastradas {len(lista)} pessoas.")
# B) average age:
tot = 0
for i, v in enumerate(lista):
    tot += v['idade']
média = tot / len(lista)
print(f'B) A média de idades cadastradas foi de {média:.2f} anos.')
# C) names of the registered women:
cont = 0
print('C) As mulheres cadastradas foram: ', end='')
for i, v in enumerate(lista):
    if v['sexo'] in 'F':
        print(f"{v['nome']}", end=' ')
        cont += 1
if cont == 0:
    print('Não houve cadastro de mulheres!')
print()
# D) people whose age is above the average:
print('D) Lista de pessoas com idade acima da média:')
for i, d in enumerate(lista):
    if d['idade'] > média:
        print(f" nome = {d['nome']}; sexo = {d['sexo']}; idade = {d['idade']}")
print(linha1)
print(f'{"<< ENCERRADO >>":^60}')
| StarcoderdataPython |
9740208 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import filecmp
class PreprocessCSVTest(unittest.TestCase):
def test_generate_tmcf(self):
output_columns = ['Date', 'GeoId',
'COVID19CumulativeTestResults', 'COVID19NewTestResults',
'COVID19CumulativePositiveTestResults', 'COVID19NewPositiveTestResults',
'COVID19CumulativeNegativeTestResults', 'COVID19NewNegativeTestResults']
TEMPLATE_MCF_GEO = """
Node: E:COVIDTracking_States->E0
typeOf: schema:State
dcid: C:COVIDTracking_States->GeoId
"""
TEMPLATE_MCF_TEMPLATE = """
Node: E:COVIDTracking_States->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
observationAbout: E:COVIDTracking_States->E0
observationDate: C:COVIDTracking_States->Date
value: C:COVIDTracking_States->{stat_var}
"""
stat_vars = output_columns[2:]
with open('test_tmcf.tmcf', 'w', newline='') as f_out:
f_out.write(TEMPLATE_MCF_GEO)
for i in range(len(stat_vars)):
f_out.write(TEMPLATE_MCF_TEMPLATE.format_map({'index': i + 1, 'stat_var': output_columns[2:][i]}))
same = filecmp.cmp('test_tmcf.tmcf', 'test_expected_tmcf.tmcf')
os.remove('test_tmcf.tmcf')
self.assertTrue(same)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
1856540 | __author__ = 'wangfeng'
import time
import os
import shutil
from functools import wraps
from oslo.config import cfg
from libcloud.compute.types import StorageVolumeState,NodeState
from libcloud.compute.base import NodeSize, NodeImage,NodeAuthSSHKey
from libcloud.storage.types import ObjectDoesNotExistError
import sshclient
import random
from nova import utils
from nova import exception as exception
from nova.i18n import _, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import imageutils
from nova.openstack.common import fileutils as fileutils
from nova.openstack.common import log as logging
from nova.compute import task_states
from nova.volume.cinder import API as cinder_api
from nova.image.api import API as glance_api
from nova.compute import power_state
from nova.virt import driver
from nova.network import neutronv2
from nova.context import RequestContext
from nova.compute import utils as compute_utils
from nova.image import glance
from wormholeclient.client import Client
from wormholeclient import errors
from wormholeclient import constants as wormhole_constants
import traceback
import adapter
import exception_ex
from nova.virt.aws import image_utils
# Top-level provider selection (currently only 'aws' is handled below).
hybrid_cloud_opts = [
    cfg.StrOpt('provide_cloud_type',
               default='aws',
               help='provider cloud type ')
]

# NOTE(review): hypernode_api_opts is not registered in the visible
# register_opts() calls below — confirm it is registered elsewhere.
hypernode_api_opts = [
    cfg.StrOpt('my_ip', help='internal base ip of rabbit host, for injecting in to hyper_vm')
]
# Provider (EC2) connection, gateway and networking options; registered
# under the 'provider_opts' group below.
ec2_opts = [
    cfg.StrOpt('conversion_dir',
               default='/tmp',
               help='where conversion happens'),
    cfg.StrOpt('access_key_id',
               help='the access key id for connection to EC2 '),
    cfg.StrOpt('secret_key',
               help='the secret key for connection to EC2 '),
    cfg.StrOpt('region',
               default='us-east-1',
               help='the region for connection to EC2 '),
    cfg.StrOpt('availability_zone',
               default='us-east-1a',
               help='the availability_zone for connection to EC2 '),
    cfg.StrOpt('base_linux_image',
               default='ami-68d8e93a',
               help='use for create a base ec2 instance'),
    cfg.StrOpt('storage_tmp_dir',
               default='wfbucketse',
               help='a cloud storage temp directory '),
    cfg.StrOpt('cascaded_node_id',
               help='az31 node id in provider cloud'),
    cfg.StrOpt('subnet_api',
               help='api subnet'),
    cfg.StrOpt('subnet_data',
               help='data subnet'),
    cfg.StrOpt('cgw_host_ip',
               help='compute gateway ip'),
    cfg.StrOpt('cgw_host_id',
               help='compute gateway id in provider cloud'),
    cfg.StrOpt('cgw_user_name',
               help='compute gateway user name'),
    cfg.StrOpt('cgw_certificate',
               help='full name of compute gateway public key'),
    cfg.StrOpt('security_group',
               help=''),
    cfg.StrOpt('rabbit_host_ip_public',
               help=''),
    cfg.StrOpt('rabbit_password_public',
               help=''),
    cfg.StrOpt('vpn_route_gateway',
               help=''),
    # NOTE(review): 't2.micro3' for m1.medium looks like a typo for
    # 't2.micro' — confirm against the intended instance type.
    cfg.DictOpt('flavor_map',
                default={'m1.tiny': 't2.micro', 'm1.small': 't2.micro', 'm1.medium': 't2.micro3',
                         'm1.large': 't2.micro', 'm1.xlarge': 't2.micro'},
                help='map nova flavor name to aws ec2 instance specification id'),
    cfg.StrOpt('driver_type',
               default='agent',
               help='the network soulution type of aws driver'),
    cfg.StrOpt('image_user',
               default='',
               help=''),
    cfg.StrOpt('image_password',
               default='',
               help=''),
    cfg.StrOpt('agent_network',
               default='False',
               help=''),
    cfg.StrOpt('iscsi_subnet',
               default='',
               help=''),
    cfg.StrOpt('iscsi_subnet_route_gateway',
               default='',
               help=''),
    cfg.StrOpt('iscsi_subnet_route_mask',
               default='',
               help=''),
    cfg.StrOpt('tunnel_cidr',
               help='The tunnel cidr of provider network.'),
    cfg.StrOpt('route_gw',
               help='The route gw of the provider network.'),
    cfg.StrOpt('dst_path',
               default='/home/neutron_agent_conf.txt',
               help='The config location for hybrid vm.'),
    # NOTE(review): help text looks copy-pasted from 'route_gw' — confirm.
    cfg.StrOpt('hybrid_service_port',
               default='7127',
               help='The route gw of the provider network.')
]

# Module-level map of in-flight tasks, keyed per instance.
instance_task_map = {}
class NodeState(object):
    """Integer state codes for provider nodes.

    NOTE(review): this shadows the ``NodeState`` imported from
    libcloud.compute.types at the top of the file — confirm intentional.
    """
    RUNNING = 0
    TERMINATED = 2
    PENDING = 3
    UNKNOWN = 4
    STOPPED = 5
# Map provider node states to nova power states.
AWS_POWER_STATE = {
    NodeState.RUNNING: power_state.RUNNING,
    NodeState.TERMINATED: power_state.CRASHED,
    NodeState.PENDING: power_state.BUILDING,
    NodeState.UNKNOWN: power_state.NOSTATE,
    NodeState.STOPPED: power_state.SHUTDOWN,
}

# Upper bound on polling retries used by driver operations.
MAX_RETRY_COUNT = 20
# Glance container format that marks a hybrid (docker-based) vm image.
CONTAINER_FORMAT_HYBRID_VM = 'hybridvm'
class aws_task_states:
    """Extra task states reported while provisioning resources on AWS."""
    IMPORTING_IMAGE = 'importing_image'
    CREATING_VOLUME = 'creating_volume'
    CREATING_VM = 'creating_vm'
# Mount points currently in use by attached volumes.
MOUNTPOINT_LIST = []

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(hybrid_cloud_opts)
CONF.register_opts(ec2_opts, 'provider_opts')

# Read/write chunk size (bytes) for streaming transfers.
CHUNK_SIZE = 1024*4

# EC2 = get_driver(CONF.ec2.driver_type)
class RetryDecorator(object):
    """Decorator for retrying a function upon suggested exceptions.

    The decorated function is retried for the given number of times, with the
    sleep time between retries capped at max_sleep_time. If the max retry
    count is set to -1, the decorated function is invoked indefinitely until
    it succeeds, or until an exception is thrown that is not in the list of
    suggested exceptions.
    """

    def __init__(self, max_retry_count=-1, inc_sleep_time=5,
                 max_sleep_time=60, exceptions=()):
        """Configure the retry object using the input params.

        :param max_retry_count: maximum number of times the given function
                                is attempted when one of the input
                                'exceptions' is caught. When set to -1, it
                                is retried indefinitely until an exception
                                is thrown and the caught exception is not
                                in param exceptions.
        :param inc_sleep_time: sleep time in seconds between retries
        :param max_sleep_time: max sleep time in seconds; the actual sleep
                               never exceeds this threshold
        :param exceptions: suggested exceptions for which the function must
                           be retried
        """
        self._max_retry_count = max_retry_count
        self._inc_sleep_time = inc_sleep_time
        self._max_sleep_time = max_sleep_time
        self._exceptions = exceptions
        # Kept for interface compatibility with the original implementation.
        self._retry_count = 0
        self._sleep_time = 0

    def __call__(self, f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            attempt = 0
            while True:
                try:
                    return f(*args, **kwargs)
                except self._exceptions:
                    attempt += 1
                    # BUG FIX: traceback.format_exc() takes no exception
                    # argument (its first parameter is 'limit'); the caught
                    # exception is the one being formatted anyway.
                    LOG.error('retry times: %s, exception: %s' %
                              (str(attempt), traceback.format_exc()))
                    # BUG FIX: the original loop ('while max_retries > 1')
                    # performed no retries at all when max_retry_count was
                    # -1, contradicting the documented 'retry indefinitely'.
                    # Total attempts for a positive count is unchanged.
                    if self._max_retry_count != -1 and attempt >= self._max_retry_count:
                        LOG.error('func: %s, retry times: %s, failed'
                                  % (f.__name__, str(self._max_retry_count)))
                        raise
                    time.sleep(min(self._inc_sleep_time, self._max_sleep_time))
        return f_retry  # true decorator
class AwsEc2Driver(driver.ComputeDriver):
    def __init__(self, virtapi):
        """Set up provider adapters and network/security configuration.

        :param virtapi: nova VirtAPI handle (unused directly here)
        """
        if CONF.provide_cloud_type == 'aws':
            if (CONF.provider_opts.access_key_id is None or
                    CONF.provider_opts.secret_key is None):
                raise Exception(_("Must specify access_key_id and "
                                  "secret_key to use aws ec2"))
            # libcloud-style adapters for EC2 compute and S3 storage.
            self.compute_adapter = adapter.Ec2Adapter(CONF.provider_opts.access_key_id,
                                                      secret=CONF.provider_opts.secret_key,
                                                      region=CONF.provider_opts.region,
                                                      secure=False)
            self.storage_adapter = adapter.S3Adapter(CONF.provider_opts.access_key_id,
                                                     secret=CONF.provider_opts.secret_key,
                                                     region=CONF.provider_opts.region,
                                                     secure=False)
        self.location = CONF.provider_opts.availability_zone
        self.cinder_api = cinder_api()
        self.glance_api = glance_api()
        self.provider_security_group_id = None
        self.provider_interfaces = []
        if CONF.provider_opts.driver_type == 'agent':
            self.provider_subnet_data = CONF.provider_opts.subnet_data
            self.provider_subnet_api = CONF.provider_opts.subnet_api
            # for agent solution by default: one NIC per configured subnet
            self.provider_interfaces = []
            if CONF.provider_opts.subnet_data:
                provider_interface_data = adapter.NetworkInterface(name='eth_data',
                                                                   subnet_id=self.provider_subnet_data,
                                                                   # security_groups=self.provider_security_group,
                                                                   device_index=0)
                self.provider_interfaces.append(provider_interface_data)
            if CONF.provider_opts.subnet_api:
                provider_interface_api = adapter.NetworkInterface(name='eth_control',
                                                                  subnet_id=self.provider_subnet_api,
                                                                  # security_groups=self.provider_security_group,
                                                                  device_index=1)
                self.provider_interfaces.append(provider_interface_api)
        else:
            # non-agent mode: only a provider security group may be set
            if not CONF.provider_opts.security_group:
                self.provider_security_group_id = None
            else:
                self.provider_security_group_id = CONF.provider_opts.security_group
    def _get_auth(self, key_data, key_name):
        # SSH key authentication is not used for provider nodes: returns None.
        return None
    def init_host(self, host):
        # No per-host initialization is required for this driver.
        pass
def list_instances(self):
"""List VM instances from all nodes."""
instances = []
try:
nodes = self.compute_adapter.list_nodes()
except Exception as e:
LOG.error('list nodes failed')
LOG.error(e.message)
return instances
if nodes is None:
LOG.error('list nodes failed, Nodes are null!')
return instances
for node in nodes:
instance_uuid = node.extra.get('tags').get('hybrid_cloud_instance_id')
instances.append(instance_uuid)
return instances
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        # Not implemented for this driver: deliberate no-op.
        pass
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshot an instance into the glance image *image_id*.

        Dispatches to the hybrid-vm (docker) path when the instance's image
        container format is CONTAINER_FORMAT_HYBRID_VM, otherwise to the
        export/convert path (_do_snapshot_2).
        """
        LOG.debug('start to do snapshot')
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        image_container_type = instance.system_metadata.get('image_container_format')
        LOG.debug('image container type: %s' % image_container_type)
        if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
            self._do_snapshot_for_hybrid_vm(context, instance, image_id, update_task_state)
        else:
            self._do_snapshot_2(context, instance, image_id, update_task_state)
    def _do_snapshot_for_hybrid_vm(self, context, instance, image_id, update_task_state):
        """Snapshot a hybrid (docker-based) vm via the hybrid service clients.

        Creates the image in the docker image repository through the hybrid
        service agent, then records the resulting image size in glance.
        """
        image_object_of_hybrid_cloud = self.glance_api.get(context, image_id)
        LOG.debug('get image object: %s' % image_object_of_hybrid_cloud)
        clients = self._get_hybrid_service_clients_by_instance(instance)
        LOG.debug('get clients: %s' % clients)
        # create image in docker repository
        create_image_task = self._clients_create_image_task(clients, image_object_of_hybrid_cloud)
        self._wait_for_task_finish(clients, create_image_task)
        LOG.debug('create image in docker image repository success')
        docker_image_info = self._clients_get_image_info(clients, image_object_of_hybrid_cloud)
        size = docker_image_info['size']
        LOG.debug('docker image size: %s' % size)
        image_object_of_hybrid_cloud['size'] = size
        LOG.debug('image with size: %s' % image_object_of_hybrid_cloud)
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
        self._put_image_info_to_glance(context, image_object_of_hybrid_cloud, update_task_state, instance)
        LOG.debug('finish do snapshot for create image')
    def _put_image_info_to_glance(self, context, image_object, update_task_state, instance):
        """Register metadata for a hybrid-vm snapshot image in glance.

        NOTE(review): a truncated (sparse) temporary file of the image's size
        is uploaded rather than real image bits — presumably the actual
        content lives in the docker registry; confirm against the design.
        """
        LOG.debug('start to put image info to glance, image obj: %s' % image_object)
        image_id = image_object['id']
        LOG.debug('image id: %s' % image_id)
        image_metadata = self._create_image_metadata(context, instance, image_object)
        LOG.debug('image metadata: %s' % image_metadata)
        # self.glance_api.update(context, image_id, image_metadata)
        with image_utils.temporary_file() as tmp:
            image_service, image_id = glance.get_remote_image_service(context, image_id)
            with fileutils.file_open(tmp, 'wb+') as f:
                f.truncate(image_object['size'])
                image_service.update(context, image_id, image_metadata, f)
        self._update_vm_task_state(instance, task_state=instance.task_state)
        LOG.debug('success to put image to glance')
    def _create_image_metadata(self, context, instance, image_object):
        """Build the glance metadata dict for a hybrid-vm snapshot image."""
        base_image_ref = instance['image_ref']
        base = compute_utils.get_image_metadata(context, self.glance_api, base_image_ref, instance)
        metadata = {'is_public': False,
                    'status': 'active',
                    'name': image_object['name'],
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                    }
                    }
        if instance['os_type']:
            metadata['properties']['os_type'] = instance['os_type']

        # NOTE(vish): glance forces ami disk format to be ami
        if base.get('disk_format') == 'ami':
            metadata['disk_format'] = 'ami'
        else:
            metadata['disk_format'] = image_object['disk_format']

        metadata['container_format'] = CONTAINER_FORMAT_HYBRID_VM
        metadata['size'] = image_object['size']
        return metadata
def _do_snapshot_1(self, context, instance, image_id, update_task_state):
# 1) get provider node
provider_node_id = self._get_provider_node_id(instance)
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance.uuid)
raise exception.InstanceNotFound(instance_id=instance.uuid)
if len(provider_nodes)>1:
LOG.error('instance %s are more than one' % instance.uuid)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
# 2) get root-volume id
provider_volumes = self.compute_adapter.list_volumes(node=provider_node)
if not provider_volumes:
raise exception.VolumeNotFound
provider_volume = provider_volumes[0]
# 3) export
self.compute_adapter.export_volume(provider_volume.id,
CONF.provider_opts.conversion_dir,
image_id,
cgw_host_id=CONF.provider_opts.cgw_host_id,
cgw_host_ip=CONF.provider_opts.cgw_host_ip,
cgw_username=CONF.provider_opts.cgw_username,
cgw_certificate=CONF.provider_opts.cgw_certificate,
transfer_station=CONF.provider_opts.storage_tmp_dir)
# 4) upload to glance
src_file_name = '%s/%s' %(CONF.provider_opts.conversion_dir, image_id)
file_size = os.path.getsize(src_file_name)
metadata = self.glance_api.get(context, image_id)
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
src_file_handle = fileutils.file_open(src_file_name, "rb")
self.glance_api.create(context,image_metadata,src_file_handle)
src_file_handle.close()
def _do_snapshot_2(self, context, instance, image_id, update_task_state):
# a) get provider node id
provider_node_id = self._get_provider_node_id(instance)
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance.uuid)
raise exception.InstanceNotFound(instance_id=instance.uuid)
if len(provider_nodes)>1:
LOG.error('instance %s are more than one' % instance.uuid)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
# b) export-instance to s3
# self.compute_adapter.ex_stop_node(provider_node)
try:
task = self.compute_adapter.create_export_instance_task(provider_node_id,
CONF.provider_opts.storage_tmp_dir)
except:
task = self.compute_adapter.create_export_instance_task(provider_node_id,
CONF.provider_opts.storage_tmp_dir)
while not task.is_completed():
time.sleep(10)
task = self.compute_adapter.get_task_info(task)
obj_key = task.export_to_s3_info.s3_key
obj_bucket = task.export_to_s3_info.s3_bucket
# c) download from s3
obj = self.storage_adapter.get_object(obj_bucket,obj_key)
conv_dir = '%s/%s' % (CONF.provider_opts.conversion_dir,image_id)
fileutils.ensure_tree(conv_dir)
org_full_name = '%s/%s.vmdk' % (conv_dir,image_id)
self.storage_adapter.download_object(obj,org_full_name)
# d) convert to qcow2
dest_full_name = '%s/%s.qcow2' % (conv_dir,image_id)
convert_image(org_full_name,
dest_full_name,
'qcow2')
# upload to glance
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
file_size = os.path.getsize(dest_full_name)
metadata = self.glance_api.get(context, image_id)
image_metadata = {"disk_format": "qcow2",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"owner_id": instance['project_id']}}
src_file_handle = fileutils.file_open(dest_full_name, "rb")
self.glance_api.create(context,image_metadata,src_file_handle)
src_file_handle.close()
    def _generate_provider_node_name(self, instance):
        """Provider-side node name: the nova instance hostname."""
        return instance.hostname
    def _get_provider_node_size(self, flavor):
        """Map a nova flavor to a provider NodeSize via the flavor_map option."""
        return NodeSize(id=CONF.provider_opts.flavor_map[flavor.name],
                        name=None, ram=None, disk=None, bandwidth=None, price=None,
                        driver=self.compute_adapter)
def _get_image_id_from_meta(self,image_meta):
if 'id' in image_meta:
# create from image
return image_meta['id']
elif 'image_id' in image_meta:
# attach
return image_meta['image_id']
elif 'properties' in image_meta:
# create from volume
return image_meta['properties']['image_id']
else:
return None
def _get_image_name_from_meta(self, image_meta):
if 'name' in image_meta:
return image_meta['name']
elif 'image_name' in image_meta:
return image_meta['image_name']
else:
return NodeState
def _spawn_from_image(self, context, instance, image_meta, injected_files,
<PASSWORD>_password, network_info, block_device_info):
# 0.get provider_image,
LOG.info('begin time of _spawn_from_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
retry_time = 3
container_format = image_meta.get('container_format')
provider_image_id = None
provider_image = None
while (not provider_image) and retry_time>0:
provider_image = self._get_provider_image(image_meta)
retry_time = retry_time-1
if provider_image is None:
image_uuid = self._get_image_id_from_meta(image_meta)
LOG.error('Get image %s error at provider cloud' % image_uuid)
return
# 1. if provider_image do not exist,, import image first
vm_task_state = instance.task_state
if not provider_image :
LOG.debug('begin import image')
#save the original state
self._update_vm_task_state(
instance,
task_state=aws_task_states.IMPORTING_IMAGE)
image_uuid = self._get_image_id_from_meta(image_meta)
container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)
try:
self.storage_adapter.get_object(container.name,image_uuid)
except ObjectDoesNotExistError:
# 1.1 download qcow2 file from glance
this_conversion_dir = '%s/%s' % (CONF.provider_opts.conversion_dir,image_uuid)
orig_file_full_name = '%s/%s.qcow2' % (this_conversion_dir,'orig_file')
fileutils.ensure_tree(this_conversion_dir)
self.glance_api.download(context,image_uuid,dest_path=orig_file_full_name)
# 1.2 convert to provider image format
converted_file_format = 'vmdk'
converted_file_name = '%s.%s' % ('converted_file', converted_file_format)
converted_file_full_name = '%s/%s' % (this_conversion_dir,converted_file_name)
convert_image(orig_file_full_name,
converted_file_full_name,
converted_file_format,
subformat='streamoptimized')
# 1.3 upload to provider_image_id
object_name = image_uuid
extra = {'content_type': 'text/plain'}
with open(converted_file_full_name,'rb') as f:
obj = self.storage_adapter.upload_object_via_stream(container=container,
object_name=object_name,
iterator=f,
extra=extra)
task = self.compute_adapter.create_import_image_task(CONF.provider_opts.storage_tmp_dir,
image_uuid,
image_name=image_uuid)
try:
task_list = instance_task_map[instance.uuid]
if not task_list:
task_list.append(task)
instance_task_map[instance.uuid]=task_list
except KeyError:
task_list=[task]
instance_task_map[instance.uuid]=task_list
while not task.is_completed():
time.sleep(5)
task = self.compute_adapter.get_task_info(task)
provider_image = self.compute_adapter.get_image(task.image_id)
set_tag_func = getattr(self.compute_adapter, 'ex_create_tags')
if set_tag_func:
set_tag_func(provider_image, {'hybrid_cloud_image_id': image_uuid})
# 2.1 map flovar to node size, from configuration
provider_size = self._get_provider_node_size(instance.get_flavor())
# 2.2 get a subnets and create network interfaces
# provider_interface_data = adapter.NetworkInterface(name='eth_data',
# subnet_id=CONF.provider_opts.subnet_data,
# device_index=0)
#
# provider_interface_api = adapter.NetworkInterface(name='eth_control',
# subnet_id=CONF.provider_opts.subnet_api,
# device_index=1)
# provider_interfaces = [provider_interface_data,provider_interface_api]
# 2.3 generate provider node name, which useful for debugging
provider_node_name = self._generate_provider_node_name(instance)
# 2.4 generate user data, which use for network initialization
user_data = self._generate_user_data(instance)
# 2.5 create data volumes' block device mappings, skip boot volume
provider_bdms = None
data_bdm_list = []
source_provider_volumes=[]
bdm_list = block_device_info.get('block_device_mapping',[])
if len(bdm_list)>0:
self._update_vm_task_state(
instance,
task_state=aws_task_states.CREATING_VOLUME)
root_volume_name = block_device_info.get('root_device_name',None)
# if data volume exist: more than one block device mapping
# 2.5.1 import volume to aws
provider_volume_ids = []
for bdm in bdm_list:
# skip boot volume
if bdm.get('mount_device') == root_volume_name:
continue
data_bdm_list.append(bdm)
if container_format != CONTAINER_FORMAT_HYBRID_VM:
connection_info = bdm.get('connection_info', None)
volume_id = connection_info['data']['volume_id']
provider_volume_id = self._get_provider_volume_id(context,volume_id)
# only if volume DO NOT exist in aws when import volume
if not provider_volume_id:
provider_volume_id = self._import_volume_from_glance(
context,
volume_id,
instance,
CONF.provider_opts.availability_zone)
provider_volume_ids.append(provider_volume_id)
# 2.5.2 create snapshot
# if container format is hybridvm, then need to attach volume after create node one by one
if container_format != CONTAINER_FORMAT_HYBRID_VM:
provider_snapshots = []
if len(provider_volume_ids) > 0:
source_provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=provider_volume_ids)
for provider_volume in source_provider_volumes:
provider_snapshots.append(self.compute_adapter.create_volume_snapshot(provider_volume))
provider_snapshot_ids = []
for snap in provider_snapshots:
provider_snapshot_ids.append(snap.id)
self._wait_for_snapshot_completed(provider_snapshot_ids)
# 2.5.3 create provider bdm list from bdm_info and snapshot
provider_bdms = []
if len(provider_snapshots) > 0:
for ii in range(0, len(data_bdm_list)):
provider_bdm = {'DeviceName':
self._trans_device_name(data_bdm_list[ii].get('mount_device')),
'Ebs': {'SnapshotId':provider_snapshots[ii].id,
'DeleteOnTermination': data_bdm_list[ii].get('delete_on_termination')}
}
provider_bdms.append(provider_bdm)
# 3. create node
try:
self._update_vm_task_state(
instance,
task_state=aws_task_states.CREATING_VM)
if (len(self.provider_interfaces)>1):
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
# ex_subnet=provider_subnet_data,
ex_blockdevicemappings=provider_bdms,
ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
elif(len(self.provider_interfaces)==1):
provider_subnet_data_id = self.provider_interfaces[0].subnet_id
provider_subnet_data = self.compute_adapter.ex_list_subnets(subnet_ids=[provider_subnet_data_id])[0]
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
ex_subnet=provider_subnet_data,
ex_security_group_ids=self.provider_security_group_id,
ex_blockdevicemappings=provider_bdms,
# ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
else:
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
# ex_subnet=provider_subnet_data,
ex_security_group_ids=self.provider_security_group_id,
ex_blockdevicemappings=provider_bdms,
# ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
except Exception as e:
LOG.warning('Provider instance is booting error')
LOG.error(e.message)
provider_node=self.compute_adapter.list_nodes(ex_filters={'tag:name':provider_node_name})
if not provider_node:
raise e
# 4. mapping instance id to provider node, using metadata
instance.metadata['provider_node_id'] = provider_node.id
instance.save()
set_tag_func = getattr(self.compute_adapter, 'ex_create_tags')
try:
if set_tag_func:
set_tag_func(provider_node, {'hybrid_cloud_instance_id': instance.uuid})
except Exception as e:
time.sleep(5)
aws_node=self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id':instance.uuid})
if not aws_node:
set_tag_func(provider_node, {'hybrid_cloud_instance_id': instance.uuid})
# 5 wait for node avalaible
while provider_node.state!=NodeState.RUNNING and provider_node.state!=NodeState.STOPPED:
try:
#modified by liuling
#provider_node = self.compute_adapter.list_nodes(ex_node_ids=[provider_node.id])[0]
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node.id])
if len(provider_nodes) ==0:
break
else:
provider_node = provider_nodes[0]
except:
LOG.warning('Provider instance is booting but adapter is failed to get status. Try it later')
time.sleep(10)
if container_format == CONTAINER_FORMAT_HYBRID_VM:
self._create_hyper_service_container(context,
instance,
provider_node,
network_info,
block_device_info,
image_meta,
injected_files,
admin_password)
else:
# 6 mapp data volume id to provider
provider_bdm_list = provider_node.extra.get('block_device_mapping')
for ii in range(0, len(data_bdm_list)):
provider_volume_id = provider_bdm_list[ii+1].get('ebs').get('volume_id')
provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
connection_info = data_bdm_list[ii].get('connection_info',[])
volume_id = connection_info['data']['volume_id']
self._map_volume_to_provider(context, volume_id, provider_volumes[0])
# delete the tmp volume
for provider_volume in source_provider_volumes:
self.compute_adapter.destroy_volume(provider_volume)
#reset the original state
self._update_vm_task_state(
instance,
task_state=vm_task_state)
LOG.info('end time of _spawn_from_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return provider_node
def _wait_for_volume_is_attached(self, provider_hybrid_volume):
LOG.debug('wait for volume is attached')
not_in_status = [StorageVolumeState.ERROR, StorageVolumeState.DELETED, StorageVolumeState.DELETING]
status = self._wait_for_volume_in_specified_status(provider_hybrid_volume, StorageVolumeState.INUSE,
not_in_status)
LOG.debug('volume status: %s' % status)
LOG.debug('volume is attached.')
return
def _wait_for_volume_is_available(self, provider_hybrid_volume):
LOG.debug('wait for volume is available')
not_in_status = [StorageVolumeState.ERROR, StorageVolumeState.DELETED, StorageVolumeState.DELETING]
# import pdb; pdb.set_trace()
status = self._wait_for_volume_in_specified_status(provider_hybrid_volume, StorageVolumeState.AVAILABLE,
not_in_status)
LOG.debug('volume status: %s' % status)
LOG.debug('volume is available')
return status
@RetryDecorator(max_retry_count=10,inc_sleep_time=5,max_sleep_time=60,exceptions=(exception_ex.RetryException))
def _wait_for_volume_in_specified_status(self, provider_hybrid_volume, status, not_in_status_list):
"""
:param provider_hybrid_volume:
:param status: StorageVolumeState
:return: specified_status
"""
LOG.debug('wait for volume in specified status: %s' % status)
LOG.debug('not_in_status_list: %s' % not_in_status_list)
provider_volume_id = provider_hybrid_volume.id
LOG.debug('wait for volume:%s in specified status: %s' % (provider_volume_id, status))
created_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
if not created_volumes:
error_info = 'created docker app volume failed.'
raise exception_ex.RetryException(error_info=error_info)
created_volume = created_volumes[0]
current_status = created_volume.state
LOG.debug('current_status: %s' % current_status)
error_info = 'volume: %s status is %s' % (provider_hybrid_volume.id, current_status)
if status == current_status:
LOG.debug('current status: %s is the same with specified status %s ' % (current_status, status))
elif not_in_status_list:
if status in not_in_status_list:
raise Exception(error_info)
else:
raise exception_ex.RetryException(error_info=error_info)
else:
raise exception_ex.RetryException(error_info=error_info)
return current_status
    def _get_provider_volumes_map_from_bdm(self, context, instance, block_device_info):
        """
        if there isn't any provider volume tag with hybrid cloud volume id, then import it from image of glance.
        if there is provider volume mapped with hybrid cloud volume id, return it directly.
        The root/boot volume is always skipped; only data volumes are mapped.
        :param context: nova request context
        :param instance: nova instance object (used for the glance import)
        :param block_device_info:
        {
            'block_device_mapping': [{
                'guest_format': None,
                'boot_index': None,
                'mount_device': u'/dev/sdb',
                'connection_info': {
                    u'driver_volume_type': u'provider_volume',
                    'serial': u'8ff7107a-74b9-4acb-8fab-46d8901f5bf2',
                    u'data': {
                        u'access_mode': u'rw',
                        u'qos_specs': None,
                        u'provider_location': u'vol-e4005a3e',
                        u'volume_id': u'8ff7107a-74b9-4acb-8fab-46d8901f5bf2'
                    }
                },
                'disk_bus': None,
                'device_type': None,
                'delete_on_termination': False
            }],
            'root_device_name': u'/dev/sda',
            'ephemerals': [],
            'swap': None
        }
        :return: tuple of two dicts:
            ({hybrid_volume_id: provider_volume, ...},
             {hybrid_volume_id: bdm, ...})
        """
        LOG.debug('start to get provider volumes map.')
        provider_volume_map = {}
        bdm_map = {}
        bdm_list = block_device_info.get('block_device_mapping')
        if bdm_list and len(bdm_list) > 0:
            root_volume_name = block_device_info.get('root_device_name', None)
            LOG.debug('root_volume_name: %s' % root_volume_name)
            for bdm in bdm_list:
                # skip boot volume
                if bdm.get('mount_device') == root_volume_name:
                    continue
                else:
                    connection_info = bdm.get('connection_info', None)
                    volume_id = connection_info['data']['volume_id']
                    provider_volume = self._get_provider_volume(volume_id)
                    # only if volume DO NOT exist in aws when import volume
                    if not provider_volume:
                        LOG.debug('provider volume is not exist for volume: %s' % volume_id)
                        # import from glance, then persist the cinder<->provider
                        # mapping before re-reading the provider volume
                        provider_volume_id = self._import_volume_from_glance(
                            context,
                            volume_id,
                            instance,
                            CONF.provider_opts.availability_zone)
                        created_provider_volume = self._get_provider_volume_by_provider_volume_id(provider_volume_id)
                        self._map_volume_to_provider(context, volume_id, created_provider_volume)
                        provider_volume = self._get_provider_volume(volume_id)
                    # volumes that still cannot be resolved are silently left
                    # out of both maps
                    if provider_volume:
                        provider_volume_map[volume_id] = provider_volume
                        bdm_map[volume_id] = bdm
        LOG.debug('end to get provider volumes map.')
        return provider_volume_map, bdm_map
def _deal_with_spawn_docker_app_failed(self, error_info, volume=None):
LOG.error(error_info)
if volume:
self._delete_volume(volume)
raise exception.NovaException(error_info)
def _delete_volume(self, volume):
"""
:param volume:
:return: boolean
"""
LOG.debug('start to delete container volume')
destroy_result = self.compute_adapter.destroy_volume(volume)
LOG.debug('end to delete container volume')
return destroy_result
def _trans_device_name(self, orig_name):
if not orig_name:
return orig_name
else:
return orig_name.replace('/dev/vd', '/dev/sd')
def _wait_for_snapshot_completed(self, provider_id_list):
is_all_completed = False
while not is_all_completed:
snapshot_list = self.compute_adapter.list_snapshots(snapshot_ids=provider_id_list)
is_all_completed = True
for snapshot in snapshot_list:
if snapshot.extra.get('state') != 'completed':
is_all_completed = False
time.sleep(10)
break
def _generate_user_data(self, instance):
return 'RABBIT_HOST_IP=%s;RABBIT_PASSWORD=%s;VPN_ROUTE_GATEWAY=%s' % (CONF.provider_opts.rabbit_host_ip_public,
CONF.provider_opts.rabbit_password_public,
CONF.provider_opts.vpn_route_gateway)
def _spawn_from_volume(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
self._create_node_ec2(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def _spawn_from_volume_for_hybrid_vm(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
try:
self._create_hypervm_from_volume(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
except Exception, e:
LOG.error('spawn from volume failed!!,exception: %s' % traceback.format_exc(e))
time.sleep(5)
raise e
def _get_root_bdm_from_bdms(self, bdms, root_device_name):
root_bdm = None
for bdm in bdms:
if bdm['mount_device'] == root_device_name:
root_bdm = bdm
break
return root_bdm
def _get_volume_from_bdm(self, context, bdm):
volume_id = bdm['connection_info']['data']['volume_id']
volume = self.cinder_api.get(context, volume_id)
if not volume:
raise Exception('can not find volume for volume id: %s' % volume_id)
return volume
def _get_image_metadata_from_volume(self, volume):
volume_image_metadata = volume.get('volume_image_metadata')
return volume_image_metadata
def _get_image_metadata_from_bdm(self, context, bdm):
volume = self._get_volume_from_bdm(context, bdm)
image_metadata = self._get_image_metadata_from_volume(volume)
return image_metadata
    @RetryDecorator(max_retry_count=10, inc_sleep_time=5, max_sleep_time=60, exceptions=(Exception))
    def _set_tag_for_provider_instance(self, instance, provider_node):
        """Tag the provider node with the hybrid cloud instance uuid.

        Idempotent: returns immediately when some provider node already
        carries this instance's tag. Any raised exception re-triggers the
        whole method via RetryDecorator.
        """
        LOG.debug('start to set tag')
        aws_node = self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id': instance.uuid})
        if aws_node:
            LOG.debug('Already exist tag for provider_node: %s' % provider_node)
            return
        else:
            set_tag_func = getattr(self.compute_adapter, 'ex_create_tags')
            LOG.debug('get function of set tag')
            if set_tag_func:
                set_tag_func(provider_node, {'hybrid_cloud_instance_id': instance.uuid})
            else:
                # NOTE(review): this verification branch only runs when
                # ex_create_tags is missing — presumably it was meant to run
                # after tagging as well; confirm intended control flow.
                aws_node = self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id': instance.uuid})
                if not aws_node:
                    raise Exception('There is no node taged.')
        LOG.debug('end to set tag')
@RetryDecorator(max_retry_count=10, inc_sleep_time=5, max_sleep_time=60, exceptions=(Exception))
def _set_tag_for_provider_volume(self, provider_volume, volume_id):
set_tag_func = getattr(self.compute_adapter, 'ex_create_tags')
if set_tag_func:
set_tag_func(provider_volume, {'hybrid_cloud_volume_id': volume_id})
else:
LOG.warning('No ex_create_tags function, '
'so did not set tag for provider_volume: %s with hybrid cloud volume id: %s') %\
(provider_volume, volume_id)
def _create_node(self, instance, provider_node_name, provider_image, provider_size, provider_bdms, user_data):
try:
self._update_vm_task_state(
instance,
task_state=aws_task_states.CREATING_VM)
LOG.info('provider_interfaces: %s' % self.provider_interfaces)
if len(self.provider_interfaces) > 1:
LOG.debug('Create provider node, length: %s' % len(self.provider_interfaces))
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
# ex_subnet=provider_subnet_data,
ex_blockdevicemappings=provider_bdms,
ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
elif len(self.provider_interfaces) == 1:
LOG.debug('Create provider node, length: %s' % len(self.provider_interfaces))
provider_subnet_data_id = self.provider_interfaces[0].subnet_id
provider_subnet_data = self.compute_adapter.ex_list_subnets(subnet_ids=[provider_subnet_data_id])[0]
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
ex_subnet=provider_subnet_data,
ex_security_group_ids=self.provider_security_group_id,
ex_blockdevicemappings=provider_bdms,
# ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
else:
LOG.debug('Create provider node, length: %s' % len(self.provider_interfaces))
provider_node = self.compute_adapter.create_node(name=provider_node_name,
image=provider_image,
size=provider_size,
location=CONF.provider_opts.availability_zone,
# ex_subnet=provider_subnet_data,
ex_security_group_ids=self.provider_security_group_id,
ex_blockdevicemappings=provider_bdms,
# ex_network_interfaces=self.provider_interfaces,
ex_userdata=user_data,
auth=self._get_auth(instance._key_data,
instance._key_name))
except Exception as e:
LOG.ERROR('Provider instance is booting error')
LOG.error(e.message)
provider_node = self.compute_adapter.list_nodes(ex_filters={'tag:name':provider_node_name})
if not provider_node:
raise e
raise e
LOG.debug('create node success, provider_node: %s' % provider_node)
#mapping instance id to provider node, using metadata
instance.metadata['provider_node_id'] = provider_node.id
instance.save()
self._set_tag_for_provider_instance(instance, provider_node)
node_is_ok = False
while not node_is_ok:
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node.id])
if not provider_nodes:
error_info = 'There is no node created in provider. node id: %s' % provider_node.id
LOG.error(error_info)
continue
else:
provider_node = provider_nodes[0]
if provider_node.state == NodeState.RUNNING or provider_node.state == NodeState.STOPPED:
LOG.debug('Node %s is created, and status is: %s' % (provider_node.name, provider_node.state))
node_is_ok = True
time.sleep(10)
return provider_node
    def _create_hypervm_from_volume(self, context, instance, image_meta, injected_files,
                                    admin_password, network_info, block_device_info):
        """Create a hybrid VM from a boot volume.

        Resolves the provider image via the image metadata recorded on the
        root volume, creates the provider node, then provisions the hyper
        service container on it and restores the instance task state.

        :raises Exception: when no root bdm is found in block_device_info
        """
        LOG.debug('Start to create hypervm from volume')
        LOG.debug('instance: %s' % instance)
        LOG.debug('image_meta: %s' % image_meta)
        LOG.debug('injected_files: %s' % injected_files)
        LOG.debug('admin_pasword: %s' % admin_password)
        LOG.debug('network_info: %s' % network_info)
        LOG.debug('block_device_info: %s' % block_device_info)
        # remember the task state so it can be restored at the end
        vm_task_state = instance.task_state
        bdms = block_device_info.get('block_device_mapping',[])
        root_device_name = block_device_info.get('root_device_name', '')
        root_bdm = self._get_root_bdm_from_bdms(bdms, root_device_name)
        if root_bdm is None:
            error_info = 'boot bdm is None.'
            LOG.error(error_info)
            raise Exception(error_info)
        LOG.debug('root_bdm: %s' % root_bdm)
        # the provider image is derived from the image recorded on the
        # boot volume, not from image_meta
        image_metadata_of_root_volume = self._get_image_metadata_from_bdm(context, root_bdm)
        LOG.debug('get image metadata of root volume: %s' % image_metadata_of_root_volume)
        image_id = image_metadata_of_root_volume['image_id']
        LOG.debug('image id of boot volume is: %s' % image_id)
        image_name = image_metadata_of_root_volume['image_name']
        LOG.debug('image name of boot volume is: %s' % image_name)
        provider_image = self._get_provider_image_by_id(image_id)
        LOG.debug('provider_image: %s' % provider_image)
        provider_size = self._get_provider_node_size(instance.get_flavor())
        LOG.debug('privoder size: %s' % provider_size)
        provider_node_name = self._generate_provider_node_name(instance)
        LOG.debug('provider_node_name: %s' % provider_node_name)
        user_data = self._generate_user_data(instance)
        LOG.debug('Start to create node.')
        # no provider-side bdms here: the container data volume is created
        # and attached later by _create_hyper_service_container
        provider_bdms = None
        provider_node = self._create_node(instance,
                                          provider_node_name,
                                          provider_image,
                                          provider_size,
                                          provider_bdms,
                                          user_data)
        LOG.debug('node: %s' % provider_node)
        LOG.debug('-------------Start to create hyper service container.-------------')
        self._create_hyper_service_container(context,
                                             instance,
                                             provider_node,
                                             network_info,
                                             block_device_info,
                                             image_metadata_of_root_volume,
                                             injected_files, admin_password)
        LOG.debug('-------------SUCCESS to create hyper service container.---------------')
        #reset the original state
        self._update_vm_task_state(
            instance,
            task_state=vm_task_state)
def _get_inject_file_data(self, instance):
rabbit_host = CONF.hypernode_api.my_ip
if not rabbit_host:
raise ValueError('rabbit host is None' +
' please config it in /etc/nova/nova-compute.conf, ' +
'hypernode_api section, my_ip option')
LOG.info('rabbit_host: %s' % rabbit_host)
LOG.info('host: %s' % instance.uuid)
file_data = 'rabbit_userid=%s\nrabbit_password=%s\nrabbit_host=%s\n' % \
(CONF.rabbit_userid, CONF.rabbit_password, rabbit_host)
file_data += 'host=%s\ntunnel_cidr=%s\nroute_gw=%s\n' % \
(instance.uuid, CONF.provider_opts.tunnel_cidr,CONF.provider_opts.vpn_route_gateway)
LOG.info('end to composite user data: %s' % file_data)
return file_data
    def _create_hyper_service_container(self, context, instance, provider_node,
                                        network_info, block_device_info,
                                        image_metadata, inject_file, admin_password):
        """Provision the hyper service docker container on a provider node.

        Creates (or reuses) the container data volume, attaches it, waits
        for the in-guest docker service, injects the rabbit/tunnel config,
        then creates and starts the container. Each failure path delegates
        to _deal_with_spawn_docker_app_failed, which raises NovaException.
        """
        LOG.debug('Start to create hyper service container')
        # mark the instance so other driver code paths treat it as hybrid VM
        instance.metadata['is_hybrid_vm'] = True
        instance.save()
        image_name = self._get_image_name_from_meta(image_metadata)
        image_uuid = self._get_image_id_from_meta(image_metadata)
        # update port bind host
        self._binding_host(context, network_info, instance.uuid)
        size = instance.get_flavor().get('root_gb')
        provider_location = self._get_location()
        root_volume = self._get_root_volume(context, block_device_info)
        if not root_volume:
            # if not exist hybrid root volume, then it is spawn from image.
            provider_hybrid_volume = self._create_data_volume_for_container(provider_node, size, provider_location)
            try:
                self._wait_for_volume_is_available(provider_hybrid_volume)
            except Exception, e:
                LOG.error('exception: %s' % traceback.format_exc(e))
                time.sleep(2)
                self._deal_with_spawn_docker_app_failed(e.message, provider_hybrid_volume)
        else:
            # if exist hybrid root volume, it means spawn from volume, need to check if exist mapped root volume in aws.
            # if not exist mapped root volume of aws, means it is first time spawn from root volume, then need to create
            # mapped root volume in aws. if exist mapped root volume of aws, use it directly.
            provider_hybrid_volume = self._get_provider_volume(root_volume.get('id'))
            if not provider_hybrid_volume:
                provider_hybrid_volume = self._create_data_volume_for_container(provider_node, size, provider_location)
                try:
                    self._wait_for_volume_is_available(provider_hybrid_volume)
                except Exception, e:
                    self._deal_with_spawn_docker_app_failed(e.message, provider_hybrid_volume)
                self._map_volume_to_provider(context, root_volume.get('id'), provider_hybrid_volume)
        # attach the container data volume at a fixed device path
        device = '/dev/sdz'
        self._attache_volume_and_wait_for_attached(provider_node, provider_hybrid_volume, device)
        LOG.debug('Start to get clients.')
        clients = self._get_hybrid_service_clients_by_node(provider_node)
        try:
            LOG.debug('wait for docker service starting')
            is_docker_up = self._clients_wait_hybrid_service_up(clients)
        except Exception, e:
            error_info = 'docker server is not up, create docker app failed, exception: %s' %\
                         traceback.format_exc(e)
            self._deal_with_spawn_docker_app_failed(error_info, volume=provider_hybrid_volume)
        LOG.info('start to composite user data.')
        try:
            LOG.debug('Start to inject file')
            file_data = self._get_inject_file_data(instance)
            inject_result = self._hype_inject_file(clients, file_data)
            LOG.debug('inject_file result: %s' % inject_result)
        except Exception, e:
            LOG.error('inject file failed, exception: %s' % traceback.format_exc(e))
            self._deal_with_spawn_docker_app_failed(e.message, volume=provider_hybrid_volume)
        LOG.debug('old block_device_info: %s' % block_device_info)
        # attaching data volumes may add entries to the bdm list
        block_device_info = self._attache_volume_and_get_new_bdm(context, instance, block_device_info, provider_node)
        LOG.debug('new block_device_info: %s' % block_device_info)
        try:
            create_container_task = self._hyper_create_container_task(clients, image_name, image_uuid,
                                                                      inject_file, admin_password, network_info,
                                                                      block_device_info)
            self._wait_for_task_finish(clients, create_container_task)
        except Exception, e:
            LOG.error('create container failed, exception: %s' % traceback.format_exc(e))
            self._deal_with_spawn_docker_app_failed(e.message)
        # try:
        #     LOG.debug('Start to create container by using image: %s' % image_name)
        #     created_container = self._hype_create_container(clients, image_name)
        #     LOG.debug('created_container: %s' % created_container)
        # except Exception, e:
        #     LOG.error('create container failed, exception: %s' % traceback.format_exc(e))
        #     self._deal_with_spawn_docker_app_failed(e.message)
        try:
            LOG.info('network_info: %s' % network_info)
            LOG.info('block device info: %s' % block_device_info)
            LOG.debug('Star to start container.')
            started_container = self._hype_start_container(clients,
                                                           network_info=network_info,
                                                           block_device_info=block_device_info)
            LOG.debug('end to start container: %s' % started_container)
        except Exception, e:
            LOG.error('start container failed:%s' % traceback.format_exc(e))
            self._deal_with_spawn_docker_app_failed(e.message)
        # provider_volume_map, bdm_map = self._get_provider_volumes_map_from_bdm(context, instance, block_device_info)
        # LOG.debug('get provider volume map: %s' % provider_volume_map)
        # LOG.debug('get bdm_map: %s' % bdm_map)
        # if provider_volume_map:
        #     for hybrid_volume_id, provider_volume in provider_volume_map.items():
        #         if bdm_map.get(hybrid_volume_id):
        #             mount_point = bdm_map.get(hybrid_volume_id).get('mount_device')
        #             LOG.debug('mount_point: %s' % mount_point)
        #             self._attache_volume_for_docker_app(context, instance, hybrid_volume_id,
        #                                                 mount_point,
        #                                                 provider_node,
        #                                                 provider_volume)
        #         else:
        #             LOG.debug('can not get mount_device for hybrid_volume_id: %s' % hybrid_volume_id)
        #
        # if inject_file:
        #     try:
        #         # self._hype_inject_file_to_container(clients, inject_file)
        #         LOG.debug('inject file success.')
        #     except Exception, e:
        #         LOG.error('inject file to container failed. exception: %s' % exception)
        #         self._deal_with_spawn_docker_app_failed(e.message)
        # re-bind the port host now that the container is up
        self._binding_host(context, network_info, instance.uuid)
def _attache_volume_and_wait_for_attached(self, provider_node, provider_hybrid_volume, device):
LOG.debug('Start to attach volume')
attache_result = self.compute_adapter.attach_volume(provider_node, provider_hybrid_volume, device)
self._wait_for_volume_is_attached(provider_hybrid_volume)
LOG.info('end to attache volume: %s' % attache_result)
def _get_location(self):
LOG.debug('Start to get location')
provider_location = self.compute_adapter.get_location(self.location)
LOG.debug('provider_location: %s' % provider_location)
if not provider_location:
error_info = 'No provider_location, release resource and return'
raise ValueError(error_info)
LOG.debug('get location: %s' % provider_location)
return provider_location
def _get_root_volume(self, context, block_device_info):
LOG.debug('start to get root volume for block_device_info: %s' % block_device_info)
bdms = block_device_info.get('block_device_mapping', [])
root_device_name = block_device_info.get('root_device_name', '')
if root_device_name:
root_bdm = self._get_root_bdm_from_bdms(bdms, root_device_name)
if root_bdm:
root_volume = self._get_volume_from_bdm(context, root_bdm)
else:
root_volume = None
else:
root_volume = None
LOG.debug('end to get root volume: %s' % root_volume)
return root_volume
def _get_root_volume_by_index_0(self, context, block_device_info):
LOG.debug('start to get root volume by index 0 for block_device_info: %s' % block_device_info)
bdms = block_device_info.get('block_device_mapping', [])
root_bdm = self._get_root_bdm_from_bdms_by_index_0(bdms)
if root_bdm:
root_volume = self._get_volume_from_bdm(context, root_bdm)
else:
root_volume = None
LOG.debug('end to get root volume: %s' % root_volume)
return root_volume
def _get_root_bdm_from_bdms_by_index_0(self, bdms):
root_bdm = None
for bdm in bdms:
if bdm['boot_index'] == 0:
root_bdm = bdm
break
return root_bdm
def _create_data_volume_for_container(self, provider_node, size, provider_location):
LOG.info('start to create volume')
volume_name = provider_node.id
provider_hybrid_volume = self.compute_adapter.create_volume(size, volume_name, provider_location)
if not provider_hybrid_volume:
error_info = 'provider_hybrid_volume is None, release resource and return'
raise error_info
#self._wait_for_volume_available(provider_hybrid_volume)
LOG.info('end to create volume: %s' % provider_hybrid_volume)
return provider_hybrid_volume
    def _create_node_ec2(self, context, instance, image_meta, injected_files,
                         admin_password, network_info, block_device_info):
        """Create a provider VM from the configured base image and swap its
        root volume (boot-from-volume path).
        """
        # 1. create a common vm
        # 1.1 map flovar to node size, from configuration
        provider_size = self._get_provider_node_size(instance.get_flavor())
        # 1.2 get common image
        provder_image = self.compute_adapter.get_image(CONF.provider_opts.base_linux_image)
        # 1.3. create_node, and get_node_stat, waiting for node creation finish
        provider_node_name = self._generate_provider_node_name(instance)
        provider_node = self.compute_adapter.create_node(name=provider_node_name, image=provder_image,
                                                         size=provider_size,
                                                         auth=self._get_auth(instance._key_data,
                                                                             instance._key_name))
        # 2. power off the vm
        self.compute_adapter.ex_stop_node(provider_node)
        # 3. detach origin root volume
        provider_volumes = self.compute_adapter.list_volumes(node=provider_node)
        provider_volume = provider_volumes[0]
        self.compute_adapter.detach_volume(provider_volume)
        # 4. attach this volume
        # NOTE(review): this re-attaches the very volume that was just
        # detached — presumably a provider volume derived from
        # block_device_info was intended here; confirm against callers.
        self.compute_adapter.attach_volume(provider_node,
                                           provider_volume,
                                           self._trans_device_name(provider_volume.extra.get('device')))
def _get_volume_ids_from_bdms(self, bdms):
volume_ids = []
for bdm in bdms:
volume_ids.append(bdm['connection_info']['data']['volume_id'])
return volume_ids
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create VM instance.

    Dispatch on how the instance boots:

    * boot-from-volume with a hybrid-vm container image -> hybrid-vm path;
    * boot-from-volume, no provider volume yet -> import the image in the
      provider cloud, boot from it, and map the resulting provider root
      volume to the cinder root volume;
    * boot-from-volume, exactly one provider volume -> boot from it;
    * anything else with bdms -> MultiVolumeConfusion;
    * boot-from-image -> import image in provider cloud and boot.
    """
    LOG.debug(_("image meta is:%s") % image_meta)
    LOG.debug(_("instance is:%s") % instance)
    LOG.debug(_("network_info is: %s") % network_info)
    LOG.debug(_("block_device_info is: %s") % block_device_info)
    bdms = block_device_info.get('block_device_mapping', [])
    image_container_type = instance.system_metadata.get('image_container_format')
    if not instance.image_ref and len(bdms) > 0:
        LOG.debug('image_container_type: %s' % image_container_type)
        if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
            self._spawn_from_volume_for_hybrid_vm(context, instance, image_meta, injected_files,
                                                  admin_password, network_info, block_device_info)
        else:
            volume_ids = self._get_volume_ids_from_bdms(bdms)
            root_volume_id = volume_ids[0]
            provider_root_volume_id = self._get_provider_volume_id(context, root_volume_id)
            if provider_root_volume_id is not None:
                provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[provider_root_volume_id])
            else:
                provider_volumes = []
            if not provider_volumes:
                # if has no provider volume, boot from image: (import image in provider cloud, then boot instance)
                provider_node = self._spawn_from_image(context, instance, image_meta, injected_files,
                                                       admin_password, network_info, block_device_info)
                provider_bdm_list = provider_node.extra.get('block_device_mapping')
                provider_root_volume_id = provider_bdm_list[0].get('ebs').get('volume_id')
                provider_root_volume = self.compute_adapter.list_volumes(ex_volume_ids=[provider_root_volume_id])[0]
                self._map_volume_to_provider(context, root_volume_id, provider_root_volume)
            elif len(provider_volumes) == 1:
                # if has provider volume, boot from volume.
                # bug fix: this branch previously tested `== 0`, which is
                # already covered by `not provider_volumes` above, so a
                # single existing provider volume wrongly fell through to
                # MultiVolumeConfusion and boot-from-volume never ran.
                self._spawn_from_volume(context, instance, image_meta, injected_files,
                                        admin_password, network_info, block_device_info)
            else:
                LOG.error('create instance %s faild: multi volume confusion' % instance.uuid)
                raise exception_ex.MultiVolumeConfusion
    else:
        # if boot from image: (import image in provider cloud, then boot instance)
        self._spawn_from_image(context, instance, image_meta, injected_files,
                               admin_password, network_info, block_device_info)
    LOG.debug("creating instance %s success!" % instance.uuid)
def _map_volume_to_provider(self, context, volume_id, provider_volume):
    """Record (or, when provider_volume is falsy, clear) the provider
    volume mapping in the cinder volume's metadata, and tag the provider
    volume with the hybrid volume id."""
    if provider_volume:
        self.cinder_api.update_volume_metadata(
            context, volume_id, {'provider_volume_id': provider_volume.id})
        self._set_tag_for_provider_volume(provider_volume, volume_id)
    else:
        self.cinder_api.delete_volume_metadata(
            context, volume_id, ['provider_volume_id'])
def _get_provider_image_id(self, image_obj):
    """Return the provider-cloud image id tagged with this image's uuid.

    :param image_obj: image metadata object/dict
    :returns: the provider image id, or None when the image is missing
              or any step of the lookup fails
    """
    # bug fix: initialise before the try block so the except handler can
    # log it even when extracting the uuid itself raises (previously that
    # path hit a NameError inside the handler).
    image_uuid = None
    try:
        image_uuid = self._get_image_id_from_meta(image_obj)
        provider_image = self.compute_adapter.list_images(ex_filters={'tag:hybrid_cloud_image_id': image_uuid})
        if provider_image is None:
            raise exception_ex.ProviderRequestTimeOut
        if len(provider_image) == 0:
            # raise exception.ImageNotFound
            LOG.warning('Image %s NOT Found at provider cloud' % image_uuid)
            return None
        elif len(provider_image) > 1:
            raise exception_ex.MultiImageConfusion
        else:
            return provider_image[0].id
    except Exception as e:
        # NOTE: the raises above are intentionally swallowed here too and
        # turned into None, preserving the original control flow.
        LOG.error('Can NOT get image %s from provider cloud tag' % image_uuid)
        LOG.error(str(e))
        return None
def _get_provider_image(self, image_obj):
    """Return the provider image tagged with this image's uuid.

    :returns: the single matching provider image; the adapter's raw
              result (None or empty list) when the lookup fails or
              nothing matches; None on unexpected errors
    """
    # bug fix: defined up-front so the except handler never hits a
    # NameError when _get_image_id_from_meta itself raises.
    image_uuid = None
    try:
        image_uuid = self._get_image_id_from_meta(image_obj)
        provider_image = self.compute_adapter.list_images(
            ex_filters={'tag:hybrid_cloud_image_id': image_uuid})
        if provider_image is None:
            LOG.error('Can NOT get image %s from provider cloud tag' % image_uuid)
            return provider_image
        if len(provider_image) == 0:
            LOG.debug('Image %s NOT exist at provider cloud' % image_uuid)
            return provider_image
        elif len(provider_image) > 1:
            # typo fix in log message ('ore than one' -> 'More than one')
            LOG.error('More than one image are found through tag:hybrid_cloud_instance_id %s' % image_uuid)
            raise exception_ex.MultiImageConfusion
        else:
            return provider_image[0]
    except Exception as e:
        # MultiImageConfusion raised above is also caught here and mapped
        # to None, matching the original behaviour.
        LOG.error('get provider image failed: %s' % str(e))
        return None
@RetryDecorator(max_retry_count=3,inc_sleep_time=1,max_sleep_time=60,
                exceptions=(Exception))
def _get_provider_image_by_id(self, image_uuid):
    """Return exactly one provider image tagged with *image_uuid*.

    Retried by the decorator; raises on lookup failure, on a missing
    image, and on duplicate matches.
    """
    provider_images = self.compute_adapter.list_images(
        ex_filters={'tag:hybrid_cloud_image_id': image_uuid})
    if provider_images is None:
        msg = 'Can NOT get image %s from provider cloud tag' % image_uuid
        LOG.error(msg)
        raise Exception(msg)
    matches = len(provider_images)
    if matches == 1:
        return provider_images[0]
    if matches == 0:
        msg = 'Image %s NOT exist at provider cloud' % image_uuid
        LOG.debug(msg)
        raise Exception(msg)
    msg = 'More than one image are found through tag:hybrid_cloud_instance_id %s' % image_uuid
    LOG.error(msg)
    raise Exception(msg)
def _check_image_exist(self, image_id):
    """Return True when a provider image tagged with *image_id* exists."""
    try:
        self._get_provider_image_by_id(image_id)
        return True
    except Exception:
        return False
def _update_vm_task_state(self, instance, task_state):
instance.task_state = task_state
instance.save()
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """No-op: provider-cloud instances need no host-boot state resume."""
    pass
def _import_volume_from_glance(self, context, volume_id, instance, volume_loc):
    """Materialise a glance-backed cinder volume as a provider volume.

    Downloads the volume's glance image, converts it to streamoptimized
    VMDK, uploads it to the provider temp storage and runs an
    import-volume task, blocking until the task completes.

    :param volume_id: cinder volume id whose image metadata is used
    :param volume_loc: provider location for the imported volume
    :returns: the provider volume id created by the import task
    :raises exception_ex.VolumeNotFoundAtProvider: no image metadata
    :raises exception_ex.UploadVolumeFailure: the import task was cancelled
    """
    LOG.debug('start to import volume from glance')
    volume = self.cinder_api.get(context, volume_id)
    image_meta = volume.get('volume_image_metadata')
    if not image_meta:
        LOG.error('Provider Volume NOT Found!')
        # bug fix: the exception was previously a bare expression
        # statement and never actually raised.
        raise exception_ex.VolumeNotFoundAtProvider
    # 1.1 download qcow2 file from glance
    image_uuid = self._get_image_id_from_meta(image_meta)
    orig_file_name = 'orig_file.qcow2'
    this_conversion_dir = '%s/%s' % (CONF.provider_opts.conversion_dir, volume_id)
    orig_file_full_name = '%s/%s' % (this_conversion_dir, orig_file_name)
    fileutils.ensure_tree(this_conversion_dir)
    self.glance_api.download(context, image_uuid, dest_path=orig_file_full_name)
    # 1.2 convert to provider image format
    converted_file_format = 'vmdk'
    converted_file_name = '%s.%s' % ('converted_file', converted_file_format)
    converted_file_path = '%s/%s' % (CONF.provider_opts.conversion_dir, volume_id)
    converted_file_full_name = '%s/%s' % (converted_file_path, converted_file_name)
    convert_image(orig_file_full_name,
                  converted_file_full_name,
                  converted_file_format,
                  subformat='streamoptimized')
    # 1.3 upload volume file to provider storage (S3, eg)
    container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)
    object_name = volume_id
    extra = {'content_type': 'text/plain'}
    with open(converted_file_full_name, 'rb') as f:
        obj = self.storage_adapter.upload_object_via_stream(container=container,
                                                            object_name=object_name,
                                                            iterator=f,
                                                            extra=extra)
    # 1.4 import volume
    obj = self.storage_adapter.get_object(container.name, volume_id)
    task = self.compute_adapter.create_import_volume_task(CONF.provider_opts.storage_tmp_dir,
                                                          volume_id,
                                                          'VMDK',
                                                          obj.size,
                                                          str(volume.get('size')),
                                                          volume_loc=volume_loc)
    # record the task so destroy() can cancel it if the instance goes away.
    # bug fix: the task was previously only recorded when the existing
    # list was empty; append it unconditionally instead.
    instance_task_map.setdefault(instance.uuid, []).append(task)
    while not task.is_completed():
        time.sleep(10)
        if task.is_cancelled():
            LOG.error('import volume fail!')
            raise exception_ex.UploadVolumeFailure
        task = self.compute_adapter.get_task_info(task)
    task.clean_up()
    LOG.debug('finish to import volume, id: %s' % task.volume_id)
    return task.volume_id
def _add_route_to_iscsi_subnet(self, ssh_client,
                               iscsi_subnet,
                               iscsi_subnet_route_gateway,
                               iscsi_subnet_route_mask):
    """Ensure the guest has a route to the iscsi subnet via the gateway.

    Runs over ssh inside the guest: lists current routes, replaces any
    route for the same destination that points at a different gateway,
    adds the wanted route, and persists it in /etc/init.d/rc.local.
    Retries forever on SSHError, assuming the guest network is still
    coming up.

    :raises Exception: when any of the route commands returns non-zero
    """
    while True:
        try:
            # list routes
            cmd1 = "ip route show"
            cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
            LOG.debug("cmd1 info status=%s ,out=%s, err=%s " %
                      (cmd1_status, cmd1_out, cmd1_err))
            if cmd1_status != 0:
                raise Exception("fail to show routes")
            # parse "DEST via GATEWAY ..." lines for our destination only
            routes = [{'dest': p.split(" via ")[0],
                       'gateway': p.split(" via ")[1].split(" ")[0]}
                      for p in cmd1_out.splitlines() if
                      p.startswith(iscsi_subnet + "/" + iscsi_subnet_route_mask)]
            # assume same dest only allows one route, lazy to test len(routes) > 1
            if len(routes) > 0:
                if routes[0]['gateway'] == iscsi_subnet_route_gateway:
                    # wanted route already present -- nothing to do
                    LOG.debug("already got the route:%s" % routes)
                    return
                else:
                    # same destination, wrong gateway: remove before re-adding
                    cmd_del_route = "sudo ip route delete %s" % routes[0]['dest']
                    cmd_del_status, cmd_del_out, cmd_del_err = \
                        ssh_client.execute(cmd_del_route)
                    LOG.debug("cmd delete route info status=%s ,out=%s, err=%s " %
                              (cmd_del_status, cmd_del_out, cmd_del_err))
                    if cmd_del_status != 0:
                        raise Exception("fail to delete existed route")
            # route got deleted or no route, add one to route table
            cmd_add_route = "sudo ip route add %s via %s" % \
                            (iscsi_subnet + "/" + iscsi_subnet_route_mask,
                             iscsi_subnet_route_gateway)
            cmd_add_status, cmd_add_out, cmd_add_err = \
                ssh_client.execute(cmd_add_route)
            LOG.debug("cmd add route info status=%s ,out=%s, err=%s " %
                      (cmd_add_status, cmd_add_out, cmd_add_err))
            if cmd_add_status != 0:
                raise Exception("fail to add route")
            # write route into rc.local so it survives a guest reboot
            cmd_write_local = "sudo sed -i '/PATH=/a ip route add %s via %s' /etc/init.d/rc.local" \
                              % (iscsi_subnet + "/" + iscsi_subnet_route_mask,
                                 iscsi_subnet_route_gateway)
            cmd_write_status, cmd_write_out, cmd_write_err = \
                ssh_client.execute(cmd_write_local)
            LOG.debug("cmd write route info status=%s ,out=%s, err=%s " %
                      (cmd_write_status, cmd_write_out, cmd_write_err))
            if cmd_write_status != 0:
                raise Exception("fail to write route into rc.local")
            LOG.info("added route succeeds!")
            break
        except sshclient.SSHError:
            # guest not reachable yet -- keep polling
            LOG.debug("wait for vm to initialize network")
            time.sleep(5)
def _attach_volume_iscsi(self, provider_node, connection_info):
    """Attach an iscsi volume by logging into the target from inside the guest.

    Connects over ssh to the node's first private ip, optionally installs
    a static route to the iscsi subnet, registers the iscsi node record
    if missing, and logs in unless a session for this portal+iqn already
    exists.

    :raises Exception: when the node has no private ip
    """
    user = CONF.provider_opts.image_user
    pwd = CONF.provider_opts.image_password
    if provider_node.private_ips:
        host = provider_node.private_ips[0]
    else:
        LOG.error("provider_node.private_ips None ,attach volume failed")
        raise Exception(_("provider_node.private_ips None ,attach volume failed"))
    # bug fix: the password keyword previously referenced an undefined
    # placeholder token; pass the configured image password.
    ssh_client = sshclient.SSH(user, host, password=pwd)
    # add route if config exists
    if CONF.provider_opts.agent_network == 'True' and \
            CONF.provider_opts.iscsi_subnet and \
            CONF.provider_opts.iscsi_subnet_route_gateway and \
            CONF.provider_opts.iscsi_subnet_route_mask:
        LOG.debug("add route to vm:%s, %s, %s" % (CONF.provider_opts.iscsi_subnet,
                                                  CONF.provider_opts.iscsi_subnet_route_gateway,
                                                  CONF.provider_opts.iscsi_subnet_route_mask))
        self._add_route_to_iscsi_subnet(ssh_client,
                                        CONF.provider_opts.iscsi_subnet,
                                        CONF.provider_opts.iscsi_subnet_route_gateway,
                                        CONF.provider_opts.iscsi_subnet_route_mask)
    target_iqn = connection_info['data']['target_iqn']
    target_portal = connection_info['data']['target_portal']
    cmd1 = "sudo iscsiadm -m node -T %s -p %s" % (target_iqn, target_portal)
    # retry until the guest network is up (SSHError means not ready yet)
    while True:
        try:
            cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
            LOG.debug("sudo cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
            # status 21/255: no node record yet -- create one before login
            if cmd1_status in [21, 255]:
                cmd2 = "sudo iscsiadm -m node -T %s -p %s --op new" % (target_iqn, target_portal)
                cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
                LOG.debug("sudo cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
            break
        except sshclient.SSHError:
            LOG.debug("wait for vm to initialize network")
            time.sleep(5)
    cmd3 = "sudo iscsiadm -m session"
    cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
    portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
               for p in cmd3_out.splitlines() if p.startswith("tcp:")]
    stripped_portal = connection_info['data']['target_portal'].split(",")[0]
    # only log in when no session for this portal+iqn exists already
    if len(portals) == 0 or len([s for s in portals
                                 if stripped_portal ==
                                 s['portal'].split(",")[0]
                                 and
                                 s['iqn'] ==
                                 connection_info['data']['target_iqn']]
                                ) == 0:
        cmd4 = "sudo iscsiadm -m node -T %s -p %s --login" % (target_iqn, target_portal)
        cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
        LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
        cmd5 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v automatic" % \
               (target_iqn, target_portal)
        cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
        LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
    ssh_client.close()
def _get_provider_volume_by_provider_volume_id(self, provider_volume_id):
    """Fetch one AVAILABLE provider volume by its provider-side id.

    Returns None when the lookup yields nothing; raises on duplicate
    matches or when the volume is not in the AVAILABLE state.
    """
    candidates = self.compute_adapter.list_volumes(ex_volume_ids=[provider_volume_id])
    if not candidates:
        LOG.error('get volume %s error at provider cloud' % provider_volume_id)
        return None
    if len(candidates) > 1:
        LOG.error('volume %s are more than one' % provider_volume_id)
        raise exception_ex.MultiVolumeConfusion
    found = candidates[0]
    if found.state != StorageVolumeState.AVAILABLE:
        LOG.error('volume %s is not available' % provider_volume_id)
        raise exception.InvalidVolume
    return found
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None):
    """Attach volume storage to VM instance.

    iscsi volumes are attached from inside the guest over ssh.  Provider
    volumes are imported from glance on first use and mapped to the
    cinder volume, then attached either through the in-guest hybrid
    service (hybrid-vm container on a RUNNING node) or via the provider
    compute API.
    """
    volume_id = connection_info['data']['volume_id']
    instance_id = instance.uuid
    driver_type = connection_info['driver_volume_type']
    LOG.info("attach volume")
    provider_node = self._get_provider_node(instance)
    if not provider_node:
        # nothing to attach to; logged and silently skipped
        LOG.error('get instance %s error at provider cloud' % instance_id)
        return
    if driver_type == 'iscsi':
        self._attach_volume_iscsi(provider_node, connection_info)
        return
    # 2.get volume exist or import volume
    provider_volume_id = self._get_provider_volume_id(context, volume_id)
    if not provider_volume_id:
        # first attach: import the glance image as a provider volume in
        # the node's availability zone
        provider_volume_id = self._import_volume_from_glance(context, volume_id, instance,
                                                             provider_node.extra.get('availability'))
        provider_volume = self._get_provider_volume_by_provider_volume_id(provider_volume_id)
        LOG.debug('get provider_volume: %s' % provider_volume)
        # map imported provider_volume id with hybrid cloud volume id by tagging hybrid_cloud_volume_id
        LOG.debug('start to map volume')
        self._map_volume_to_provider(context, volume_id, provider_volume)
        LOG.debug('end to map volume')
    else:
        provider_volume = self._get_provider_volume_by_provider_volume_id(provider_volume_id)
    image_container_type = instance.system_metadata.get('image_container_format')
    LOG.debug('image_container_type: %s' % image_container_type)
    # if is hybrid_vm, need to attache volume for docker app(container).
    if image_container_type == CONTAINER_FORMAT_HYBRID_VM and provider_node.state == NodeState.RUNNING:
        self._attache_volume_for_docker_app(context,
                                            instance,
                                            volume_id,
                                            mountpoint,
                                            provider_node,
                                            provider_volume)
    else:
        self.compute_adapter.attach_volume(provider_node, provider_volume,
                                           self._trans_device_name(mountpoint))
def _get_volume_devices_list_for_docker_app(self, instance, clients):
    """Return the guest's volume device names for a hybrid-vm instance.

    :param instance: nova instance (its system metadata selects the path)
    :param clients: hybrid service clients for the node
    :return: type list, e.g. [u'/dev/xvdb', u'/dev/xvdz']; empty for
             non hybrid-vm images
    """
    LOG.debug('Start to get volume list for docker app')
    device_names = []
    container_format = instance.system_metadata.get('image_container_format')
    LOG.debug('image_container_type: %s' % container_format)
    if container_format == CONTAINER_FORMAT_HYBRID_VM:
        # only query the guest once its hybrid service answers
        self._clients_wait_hybrid_service_up(clients)
        response = self._clients_list_volume_devices_for_docker_app(clients)
        device_names = response.get('devices')
    LOG.debug('End to get volume list for docker app, volumes list: %s ' % device_names)
    return device_names
def _attache_volume_for_docker_app(self, context, instance, volume_id, mountpoint, provider_node, provider_volume):
    """Attach a provider volume and hand the new device to the docker app.

    Snapshots the in-guest device list, attaches the volume, waits for
    the hybrid service, diffs the device list to find the new device and
    asks the hybrid service to attach that device to the container.

    :raises exception.NovaException: when the container attach fails
    """
    LOG.debug('start attach volume for docker app')
    clients = self._get_hybrid_service_clients_by_node(provider_node)
    old_volumes_list = self._get_volume_devices_list_for_docker_app(instance, clients)
    LOG.debug('old_volumes_list: %s' % old_volumes_list)
    self._attache_volume_and_wait_for_attached(provider_node, provider_volume, self._trans_device_name(mountpoint))
    try:
        is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
    except Exception:
        # bug fixes: traceback.format_exc() takes no exception argument,
        # and a bare `raise` preserves the original traceback (Py2's
        # `raise e` discarded it).
        LOG.error('docker is not start, exception: %s' % traceback.format_exc())
        raise
    LOG.debug('start to get added device')
    added_device = self._get_added_device(instance, clients, old_volumes_list)
    if is_docker_service_up:
        try:
            LOG.debug('start attach to docker app')
            self._clients_attach_volume_for_docker_app(clients, volume_id, added_device, mountpoint)
        except Exception:
            error_info = 'Start container failed, exception: %s' % traceback.format_exc()
            LOG.error(error_info)
            raise exception.NovaException(error_info)
def _attache_volume_and_get_new_bdm(self, context, instance, block_device_info, provider_node):
    """Attach every volume in *block_device_info* and enrich its bdms.

    For each bdm: look up (or import from glance and map) the provider
    volume, attach it to the node, diff the guest device list to learn
    the in-guest device name, and annotate the bdm with 'real_device'
    and the cinder volume 'size'.

    :returns: the mutated block_device_info
    """
    bdm_list = block_device_info.get('block_device_mapping')
    for bdm in bdm_list:
        hybrid_cloud_volume_id = bdm.get('connection_info').get('data').get('volume_id')
        provider_volume_id = self._get_provider_volume_id(context, hybrid_cloud_volume_id)
        # if volume doesn't exist in aws, it need to import volume from image
        if not provider_volume_id:
            LOG.debug('provider volume is not exist for volume: %s' % hybrid_cloud_volume_id)
            provider_volume_id = self._import_volume_from_glance(context,
                                                                 hybrid_cloud_volume_id,
                                                                 instance,
                                                                 CONF.provider_opts.availability_zone)
            created_provider_volume = self._get_provider_volume_by_provider_volume_id(provider_volume_id)
            self._map_volume_to_provider(context, hybrid_cloud_volume_id, created_provider_volume)
            provider_volume = self._get_provider_volume(hybrid_cloud_volume_id)
        else:
            provider_volume = self._get_provider_volume(hybrid_cloud_volume_id)
        mount_device = bdm.get('mount_device')
        clients = self._get_hybrid_service_clients_by_node(provider_node)
        # snapshot guest devices before the attach so the new one can be
        # identified by diffing afterwards
        old_volumes_list = self._get_volume_devices_list_for_docker_app(instance, clients)
        LOG.debug('old_volumes_list: %s' % old_volumes_list)
        self._attache_volume_and_wait_for_attached(provider_node, provider_volume, self._trans_device_name(mount_device))
        try:
            # NOTE(review): result is unused below; the call doubles as a
            # readiness barrier for the hybrid service
            is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
        except Exception, e:
            LOG.error('docker is not start, exception: %s' % traceback.format_exc(e))
            raise e
        added_device = self._get_added_device(instance, clients, old_volumes_list)
        bdm['real_device'] = added_device
        hybrid_volume = self._get_volume_from_bdm(context, bdm)
        bdm['size'] = hybrid_volume.get('size')
    return block_device_info
@RetryDecorator(max_retry_count=60, inc_sleep_time=2, max_sleep_time=60, exceptions=(Exception))
def _get_added_device(self, instance, clients, old_volumes_list):
    """Return the first guest device not present in *old_volumes_list*.

    Retried by the decorator until the newly attached device shows up
    inside the guest; raises when no new device is visible yet.
    """
    LOG.debug('start to get added device')
    current_devices = self._get_volume_devices_list_for_docker_app(instance, clients)
    LOG.debug('new_volumes_list: %s' % current_devices)
    new_devices = [device for device in current_devices if device not in old_volumes_list]
    if not new_devices:
        e_info = 'added device in docker is empty, can not do container attach operation'
        LOG.error(e_info)
        raise Exception(e_info)
    added_device = new_devices[0]
    LOG.debug('end to get added device: %s' % added_device)
    return added_device
def _detach_volume_for_docker_app(self, clients, volume_id):
    """Ask the in-guest hybrid service to detach *volume_id* from the container.

    :raises exception.NovaException: when the container-side detach fails
    """
    try:
        is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
    except Exception:
        # bug fixes: traceback.format_exc() takes no exception argument,
        # and a bare `raise` preserves the original traceback (Py2's
        # `raise e` discarded it).
        LOG.error('docker is not start, exception: %s' % traceback.format_exc())
        raise
    if is_docker_service_up:
        try:
            self._clients_detach_volume_for_docker_app(clients, volume_id)
        except Exception:
            error_info = 'detach volume for docker app failed, exception: %s' % traceback.format_exc()
            LOG.error(error_info)
            raise exception.NovaException(error_info)
def _get_provider_volume_id(self, context, volume_id):
    """Return the provider-side volume id mapped to *volume_id*.

    Reads the cinder metadata first; falls back to a provider tag lookup
    and caches the result back into cinder metadata.  Returns None when
    no mapping can be found.
    """
    provider_volume_id = self.cinder_api.get_volume_metadata_value(
        context, volume_id, 'provider_volume_id')
    if provider_volume_id:
        return provider_volume_id
    try:
        candidates = self.compute_adapter.list_volumes(
            ex_filters={'tag:hybrid_cloud_volume_id': volume_id})
        if len(candidates) == 1:
            provider_volume_id = candidates[0].id
            # cache the discovered mapping for subsequent calls
            self.cinder_api.update_volume_metadata(
                context, volume_id, {'provider_volume_id': provider_volume_id})
        elif len(candidates) > 1:
            LOG.warning('More than one instance are found through tag:hybrid_cloud_volume_id %s' % volume_id)
        else:
            LOG.warning('Volume %s NOT Found at provider cloud' % volume_id)
    except Exception as e:
        LOG.error('Can NOT get volume %s from provider cloud tag' % volume_id)
        LOG.error(e.message)
    return provider_volume_id
def _get_provider_volume(self, volume_id):
    """Look up the provider volume tagged with the hybrid volume id.

    Returns the volume when exactly one match exists; the adapter's raw
    None result when the lookup itself fails; otherwise None (logged).
    """
    found = None
    try:
        candidates = self.compute_adapter.list_volumes(
            ex_filters={'tag:hybrid_cloud_volume_id': volume_id})
        if candidates is None:
            LOG.warning('Can not get volume through tag:hybrid_cloud_volume_id %s' % volume_id)
            return candidates
        if len(candidates) == 1:
            found = candidates[0]
        elif len(candidates) > 1:
            LOG.warning('More than one volumes are found through tag:hybrid_cloud_volume_id %s' % volume_id)
        else:
            LOG.warning('Volume %s NOT Found at provider cloud' % volume_id)
    except Exception as e:
        LOG.error('Can NOT get volume %s from provider cloud tag' % volume_id)
        LOG.error(e.message)
    return found
def _detach_volume_iscsi(self, provider_node, connection_info):
    """Detach an iscsi volume from inside the guest over ssh.

    Flushes the backing block device, deletes it from the scsi subsystem,
    then logs out of and deletes the iscsi node record.

    :raises Exception: when the node has no private ip
    """
    user = CONF.provider_opts.image_user
    pwd = CONF.provider_opts.image_password
    if provider_node.private_ips:
        host = provider_node.private_ips[0]
    else:
        # bug fix: a bare `raise` here had no active exception to re-raise;
        # raise an explicit exception (message also corrected to 'detach').
        LOG.debug("provider_node.private_ips None ,detach volume failed")
        raise Exception("provider_node.private_ips None ,detach volume failed")
    # bug fix: the password keyword previously referenced an undefined
    # placeholder token; pass the configured image password.
    ssh_client = sshclient.SSH(user, host, password=pwd)
    target_iqn = connection_info['data']['target_iqn']
    target_portal = connection_info['data']['target_portal']
    # resolve the local device node backing this iscsi target
    cmd1 = "ls -l /dev/disk/by-path/ | grep %s | awk -F '/' '{print $NF}'" % target_iqn
    cmd1_status, cmd1_out, cmd1_err = ssh_client.execute(cmd1)
    LOG.debug(" cmd1 info status=%s ,out=%s, err=%s " % (cmd1_status, cmd1_out, cmd1_err))
    device = "/dev/" + cmd1_out.split('\n')[0]
    path = "/sys/block/" + cmd1_out.split('\n')[0] + "/device/delete"
    cmd2 = "sudo blockdev --flushbufs %s" % device
    cmd2_status, cmd2_out, cmd2_err = ssh_client.execute(cmd2)
    LOG.debug(" cmd2 info status=%s ,out=%s, err=%s " % (cmd2_status, cmd2_out, cmd2_err))
    cmd3 = "echo 1 | sudo tee -a %s" % path
    cmd3_status, cmd3_out, cmd3_err = ssh_client.execute(cmd3)
    LOG.debug("sudo cmd3 info status=%s ,out=%s, err=%s " % (cmd3_status, cmd3_out, cmd3_err))
    cmd4 = "sudo iscsiadm -m node -T %s -p %s --op update -n node.startup -v manual" % (target_iqn, target_portal)
    cmd4_status, cmd4_out, cmd4_err = ssh_client.execute(cmd4)
    LOG.debug("sudo cmd4 info status=%s ,out=%s, err=%s " % (cmd4_status, cmd4_out, cmd4_err))
    cmd5 = "sudo iscsiadm -m node -T %s -p %s --logout" % (target_iqn, target_portal)
    cmd5_status, cmd5_out, cmd5_err = ssh_client.execute(cmd5)
    LOG.debug("sudo cmd5 info status=%s ,out=%s, err=%s " % (cmd5_status, cmd5_out, cmd5_err))
    cmd6 = "sudo iscsiadm -m node -T %s -p %s --op delete" % (target_iqn, target_portal)
    cmd6_status, cmd6_out, cmd6_err = ssh_client.execute(cmd6)
    LOG.debug("sudo cmd6 info status=%s ,out=%s, err=%s " % (cmd6_status, cmd6_out, cmd6_err))
    # bug fix: the ssh connection was previously leaked (cf. the close in
    # _attach_volume_iscsi)
    ssh_client.close()
def detach_interface(self, instance, vif):
    """Detach a network interface from the container via the hybrid service."""
    LOG.debug("detach interface: %s, %s" % (instance, vif))
    node = self._get_provider_node(instance)
    is_hybrid_vm = instance.system_metadata.get('image_container_format') == CONTAINER_FORMAT_HYBRID_VM
    if is_hybrid_vm and self._node_is_active(node):
        clients = self._get_hybrid_service_clients_by_node(node)
        self._clients_detach_interface(clients, vif)
def detach_volume(self, connection_info, instance, mountpoint,
                  encryption=None):
    """Detach the disk attached to the instance.

    iscsi volumes are detached from inside the guest; provider volumes
    are (for hybrid-vm containers) first detached from the docker app,
    then detached via the provider API, polling until the provider
    reports the volume AVAILABLE with no attachment left.
    """
    LOG.info("detach volume")
    volume_id = connection_info['data']['volume_id']
    instance_id = instance.uuid
    driver_type = connection_info['driver_volume_type']
    provider_node=self._get_provider_node(instance)
    if not provider_node:
        # nothing to detach from; logged and silently skipped
        LOG.error('get instance %s error at provider cloud' % instance_id)
        return
    if driver_type == 'iscsi':
        self._detach_volume_iscsi(provider_node, connection_info)
        return
    provider_volume=self._get_provider_volume(volume_id)
    if not provider_volume:
        LOG.error('get volume %s error at provider cloud' % volume_id)
        return
    if provider_volume.state != StorageVolumeState.ATTACHING:
        # NOTE(review): this only logs -- the detach below still proceeds
        LOG.error('volume %s is not attaching' % volume_id)
    image_container_type = instance.system_metadata.get('image_container_format')
    LOG.debug('image_container_type: %s' % image_container_type)
    if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
        # detach from the docker container before the provider-side detach
        clients = self._get_hybrid_service_clients_by_node(provider_node)
        self._detach_volume_for_docker_app(clients, volume_id)
    # 2.dettach
    self.compute_adapter.detach_volume(provider_volume)
    time.sleep(3)
    # poll (up to ~60 retries, 2s apart) for the volume to become
    # AVAILABLE with no attachment remaining
    retry_time = 60
    provider_volume=self._get_provider_volume(volume_id)
    while retry_time > 0:
        if provider_volume and \
                provider_volume.state == StorageVolumeState.AVAILABLE and \
                provider_volume.extra.get('attachment_status') is None:
            break
        else:
            time.sleep(2)
            provider_volume=self._get_provider_volume(volume_id)
            retry_time = retry_time-1
def get_available_resource(self, nodename):
    """Retrieve resource info.
    This method is called when nova-compute launches, and
    as part of a periodic task.
    :returns: dictionary describing resources

    NOTE(review): every capacity figure below is a hard-coded placeholder
    (see the xxx marker) rather than a value queried from the provider
    cloud -- confirm before relying on scheduler decisions.
    """
    # xxx(wangfeng):
    return {'vcpus': 32,
            'memory_mb': 164403,
            'local_gb': 5585,
            'vcpus_used': 0,
            'memory_mb_used': 69005,
            'local_gb_used': 3479,
            'hypervisor_type': 'aws',
            'hypervisor_version': 5005000,
            'hypervisor_hostname': nodename,
            'cpu_info': '{"model": ["Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz"], \
            "vendor": ["Huawei Technologies Co., Ltd."], \
            "topology": {"cores": 16, "threads": 32}}',
            'supported_instances': jsonutils.dumps(
                [["i686", "ec2", "hvm"], ["x86_64", "ec2", "hvm"]]),
            'numa_topology': None,
            }
def get_available_nodes(self, refresh=False):
    """Returns nodenames of all nodes managed by the compute service.
    This method is for multi compute-nodes support. If a driver supports
    multi compute-nodes, this method returns a list of nodenames managed
    by the service. Otherwise, this method should return
    [hypervisor_hostname].

    NOTE(review): despite the contract above, a bare string (not a list)
    is returned -- confirm how callers consume this before changing it.
    """
    # return "aws-ec2-hypervisor"
    return "hybrid_%s" % CONF.provider_opts.region
def attach_interface(self, instance, image_meta, vif):
    """Bind a vif to this host and, for hybrid-vm containers on an active
    node, attach it to the container via the hybrid service."""
    LOG.debug("attach interface: %s, %s" % (instance, vif))
    self._binding_host_vif(vif, instance.uuid)
    node = self._get_provider_node(instance)
    is_hybrid_vm = instance.system_metadata.get('image_container_format') == CONTAINER_FORMAT_HYBRID_VM
    if is_hybrid_vm and self._node_is_active(node):
        clients = self._get_hybrid_service_clients_by_node(node)
        self._clients_attach_interface(clients, vif)
    # re-bind after the attach completes, mirroring the original flow
    self._binding_host_vif(vif, instance.uuid)
def get_pci_slots_from_xml(self, instance):
    """Return PCI slot info for *instance*; always empty for this driver.

    :param instance: nova instance (unused)
    :return: an empty list
    """
    return []
def _node_is_active(self, node):
    """A node counts as active when it is RUNNING or STOPPED."""
    return node.state in (NodeState.RUNNING, NodeState.STOPPED)
def get_info(self, instance):
    """Return a minimal power-state info dict for *instance*.

    Unknown or unmapped provider node states map to NOSTATE; memory and
    cpu figures are fixed placeholders.
    """
    LOG.debug('begin get the instance %s info ' % instance.uuid)
    state = power_state.NOSTATE
    # xxx(wangfeng): it is too slow to connect to aws to get info. so I delete it
    node = self._get_provider_node(instance)
    if node:
        LOG.debug('end get the instance %s info ,provider node is %s ' % (instance.uuid, node.id))
        # dict.get replaces the original try/except KeyError -- same result
        state = AWS_POWER_STATE.get(node.state, power_state.NOSTATE)
    return {'state': state,
            'max_mem': 0,
            'mem': 0,
            'num_cpu': 1,
            'cpu_time': 0}
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None):
    """Destroy VM instance.

    Teardown order: cancel outstanding import tasks; for hybrid-vm
    containers delete the container data volume (boot-from-image only)
    and remove the neutron agent; destroy the provider node and wait for
    TERMINATED; delete its network interfaces; clear volume-to-provider
    mappings.
    """
    LOG.debug('begin destroy node %s',instance.uuid)
    LOG.debug('destroy_disks: %s' % destroy_disks)
    # cancel any still-running import-volume tasks recorded for this instance
    try:
        task_list = instance_task_map[instance.uuid]
        if task_list:
            for task in task_list:
                LOG.debug('the task of instance %s is %s' %(instance.uuid, task.task_id))
                task = self.compute_adapter.get_task_info(task)
                if not task.is_completed():
                    task._cancel_task()
        instance_task_map.pop(instance.uuid)
    except KeyError:
        LOG.debug('the instance %s does not have task', instance.uuid)
    node = self._get_provider_node(instance)
    if node is None:
        # lookup error (distinct from "not found" below)
        LOG.error('get instance %s error at provider cloud' % instance.uuid)
        reason = "Error getting instance."
        raise exception.InstanceTerminationFailure(reason=reason)
    if not node:
        # falsy-but-not-None node: already gone remotely, nothing to do
        LOG.error('instance %s not exist at provider cloud' % instance.uuid)
        return
    # 0.1 get network interfaces
    provider_eth_list = node.extra.get('network_interfaces',None)
    # 0.2 get volume
    provider_vol_list = self.compute_adapter.list_volumes(node=node)
    provider_volume_ids = []
    local_volume_ids = []
    all_volume_ids = []
    if len(block_device_info) > 0:
        # get volume id, split iscsi (local) from provider volumes
        bdms = block_device_info.get('block_device_mapping',[])
        for device in bdms:
            volume_id = device['connection_info']['data']['volume_id']
            all_volume_ids.append(volume_id)
            if device['connection_info']['driver_volume_type'] == 'iscsi':
                local_volume_ids.append(volume_id)
            else:
                provider_volume_ids.append(self._get_provider_volume_id(context, volume_id))
    image_container_type = instance.system_metadata.get('image_container_format')
    LOG.debug('image_container_type: %s' % image_container_type)
    # if is hybrid_vm, need to stop docker app(container) first, then stop node.
    if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
        root_volume = self._get_root_volume_by_index_0(context, block_device_info)
        # if not exist root volume, means it is boot from image, need to remote data volume of container.
        # if exist root volume, the data volume is a root volume. How to delete it will decided by manager.
        if not root_volume:
            LOG.debug('image type of instance is hybridvm, need to remove data volume of container.')
            provider_volume_name_for_hybrid_vm_container = node.id
            hybrid_container_volume = self._get_provider_container_data_volume(provider_vol_list,
                                                                               provider_volume_name_for_hybrid_vm_container)
            if node.state != NodeState.STOPPED and node.state != NodeState.TERMINATED:
                self._stop_node(node)
            if hybrid_container_volume:
                self._detach_volume(hybrid_container_volume)
                self._delete_volume(hybrid_container_volume)
            else:
                LOG.warning('There is no container data volume, pass to'
                            ' detach volume and delete volume for node: %s' % node.id)
        # no matter it is boot from volume or image, both need to remove neutron agent.
        self._remove_neutron_agent(instance)
    # 2.destroy node
    if node.state != NodeState.TERMINATED:
        self.compute_adapter.destroy_node(node)
    # poll until the provider reports the node gone or TERMINATED
    while node.state != NodeState.TERMINATED:
        time.sleep(5)
        nodes = self.compute_adapter.list_nodes(ex_node_ids=[node.id])
        if not nodes:
            break
        else:
            node = nodes[0]
    # 3. clean up
    # 3.1 delete network interface anyway
    # NOTE(review): provider_eth_list may be None when the node has no
    # 'network_interfaces' entry -- this loop would then raise; confirm.
    for eth in provider_eth_list:
        try:
            self.compute_adapter.ex_delete_network_interface(eth)
        except:
            LOG.warning('Failed to delete network interface %s', eth.id)
    # 3.2 clear the cinder->provider volume mapping metadata
    # todo: unset volume mapping
    bdms = block_device_info.get('block_device_mapping',[])
    volume_ids = self._get_volume_ids_from_bdms(bdms)
    for volume_id in volume_ids:
        try:
            self._map_volume_to_provider(context, volume_id, None)
        except Exception as e:
            LOG.info("got exception:%s" % str(e))
def _stop_node(self, node):
    """Stop *node* and block until the provider reports it STOPPED."""
    LOG.debug('start to stop node: %s' % node.name)
    self.compute_adapter.ex_stop_node(node)
    self._wait_for_node_in_specified_state(node, NodeState.STOPPED)
    LOG.debug('end to stop node: %s' % node.name)
def _wait_for_node_in_specified_state(self, node, state):
LOG.debug('wait for node is in state: %s' % state)
state_of_current_node = self._get_node_state(node)
time.sleep(2)
while state_of_current_node != state:
state_of_current_node = self._get_node_state(node)
time.sleep(2)
def _get_node_state(self, node):
nodes = self.compute_adapter.list_nodes(ex_node_ids=[node.id])
if nodes and len(nodes) == 1:
current_node = nodes[0]
state_of_current_node = current_node.state
else:
raise Exception('Node is not exist, node id: %s' % node.id)
LOG.debug('state of current is: %s' % state_of_current_node)
return state_of_current_node
    def _detach_volume(self, volume):
        """Detach *volume* on the provider and wait until it is AVAILABLE.

        NOTE(review): the wait has no timeout -- see
        _wait_for_volume_in_specified_state.
        """
        LOG.debug('start to detach volume')
        self.compute_adapter.detach_volume(volume)
        LOG.debug('end to detach volume')
        self._wait_for_volume_in_specified_state(volume, StorageVolumeState.AVAILABLE)
def _wait_for_volume_in_specified_state(self, volume, state):
LOG.debug('wait for volume in state: %s' % state)
state_of_volume = self._get_volume_state(volume)
time.sleep(2)
while state_of_volume != state:
state_of_volume = self._get_volume_state(volume)
time.sleep(2)
def _get_volume_state(self, volume):
volume_id = volume.id
provider_volumes = self.compute_adapter.list_volumes(ex_volume_ids=[volume_id])
if provider_volumes and len(provider_volumes) == 1:
current_volume = provider_volumes[0]
state_of_volume = current_volume.state
else:
raise Exception('There is not provider volume for id: %s' % volume_id)
LOG.debug('current volume state is: %s' % state_of_volume)
return state_of_volume
def _get_provider_container_data_volume(self, provider_volume_list, provider_volume_name_for_hybrid_vm_container):
"""
:param provider_volume_list: volume list of attchement of provider node
:param provider_volume_name_for_hybrid_vm_container: the name of data volume used by docker.
Currently the name is the same as provider vm id.
:return:
"""
hybrid_container_volume = None
#TODO:delete
for volume in provider_volume_list:
if volume.name == provider_volume_name_for_hybrid_vm_container:
hybrid_container_volume = volume
break
else:
continue
return hybrid_container_volume
def _remove_neutron_agent(self, instance):
LOG.debug('start to remove neutron agent for instance: %s' % instance.uuid)
instance_id = instance.uuid
neutron_client = neutronv2.get_client(context=None, admin=True)
agent = neutron_client.list_agents(host=instance_id)
if len(agent['agents']) == 1:
neutron_client.delete_agent(agent['agents'][0]['id'])
else:
LOG.warning('can not find neutron agent for instance: %s, did not delete agent for it' % instance.uuid)
LOG.debug('end to remove neutron agent for instance: %s' % instance.uuid)
def _get_provider_node_id(self, instance_obj):
"""map openstack instance_uuid to ec2 instance id"""
# if instance has metadata:provider_node_id, it's provider node id
provider_node_id = instance_obj.metadata.get('provider_node_id')
# if instance has NOT metadata:provider_node_id, search provider cloud instance's tag
if not provider_node_id:
try:
provider_node = self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id':instance_obj.uuid})
if len(provider_node) == 1:
provider_node_id = provider_node[0].id
instance_obj.metadata.set('provider_node_id', provider_node_id)
instance_obj.save()
elif len(provider_node)>1:
LOG.warning('More than one instance are found through tag:hybrid_cloud_instance_id %s' % instance_obj.uuid)
else:
# raise exception.ImageNotFound
LOG.warning('Instance %s NOT Found at provider cloud' % instance_obj.uuid)
except Exception as e:
LOG.error('Can NOT get instance %s from provider cloud tag' % instance_obj.uuid)
LOG.error(e.message)
return provider_node_id
    def _get_provider_node(self, instance_obj):
        """Map an openstack instance to its provider (ec2) node object.

        Looks up metadata['provider_node_id'] first; otherwise searches by
        the hybrid_cloud_instance_id tag and caches the id into metadata.

        NOTE(review): the failure returns are inconsistent -- None when the
        adapter returned None or the lookup raised, and [] when the node does
        not exist. Callers appear to rely only on truthiness; confirm before
        unifying.
        """
        provider_node_id = instance_obj.metadata.get('provider_node_id')
        provider_node = None
        if not provider_node_id:
            # No cached id: search the provider cloud by tag.
            try:
                provider_nodes = self.compute_adapter.list_nodes(ex_filters={'tag:hybrid_cloud_instance_id':instance_obj.uuid})
                if provider_nodes is None:
                    LOG.error('Can NOT get node through tag:hybrid_cloud_instance_id %s' % instance_obj.uuid)
                    return provider_nodes
                if len(provider_nodes) == 1:
                    provider_node_id = provider_nodes[0].id
                    # Cache the mapping so later calls skip the tag search.
                    instance_obj.metadata['provider_node_id']= provider_node_id
                    instance_obj.save()
                    provider_node = provider_nodes[0]
                elif len(provider_nodes) >1:
                    LOG.debug('More than one instance are found through tag:hybrid_cloud_instance_id %s' % instance_obj.uuid)
                else:
                    LOG.debug('Instance %s NOT exist at provider cloud' % instance_obj.uuid)
                    return []
            except Exception as e:
                LOG.error('Can NOT get instance through tag:hybrid_cloud_instance_id %s' % instance_obj.uuid)
                LOG.error(e.message)
        else:
            # Cached id available: fetch the node directly.
            try:
                nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
                if nodes is None:
                    LOG.error('Can NOT get instance %s from provider cloud tag' % provider_node_id)
                    return nodes
                if len(nodes) == 0:
                    LOG.debug('Instance %s NOT exist at provider cloud' % instance_obj.uuid)
                    return []
                else:
                    provider_node=nodes[0]
            except Exception as e:
                LOG.error('Can NOT get instance %s from provider cloud tag' % provider_node_id)
                LOG.error(e.message)
        return provider_node
    def get_volume_connector(self, instance):
        """Not implemented for this driver; implicitly returns None.

        NOTE(review): confirm callers tolerate a missing connector dict.
        """
        pass
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the provider node backing *instance*.

        For hybrid-vm images the docker app is stopped first, then the node.

        :raises InstanceNotFound: when no provider node matches the instance
        """
        LOG.debug('Power off node %s',instance.uuid)
        node = self._get_provider_node(instance)
        if node:
            image_container_type = instance.system_metadata.get('image_container_format')
            LOG.debug('image_container_type: %s' % image_container_type)
            # if is hybrid_vm, need to stop docker app(container) first, then stop node.
            if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
                self._stop_container_in_loop(node)
            self.compute_adapter.ex_stop_node(node)
        else:
            raise exception.InstanceNotFound(instance_id=instance.uuid)
def _stop_container_in_loop(self, node):
is_stop = False
clients = self._get_hybrid_service_clients_by_node(node)
try:
is_stop = self._clients_stop_container(clients)
except Exception as e:
LOG.error("power off container failed, exception:%s" % traceback.format_exc(e))
return is_stop
def power_on(self, context, instance, network_info,
block_device_info=None):
LOG.debug('Power on node %s',instance.uuid)
# start server of aws
node = self._get_provider_node(instance)
if node:
self.compute_adapter.ex_start_node(node)
else:
raise exception.InstanceNotFound(instance_id=instance.uuid)
LOG.debug('is_hybrid_vm: %s' % instance.metadata.get('is_hybrid_vm', False))
image_container_type = instance.system_metadata.get('image_container_format')
LOG.debug('image_container_type: %s' % image_container_type)
if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
LOG.debug('Start to start container.')
self._start_container_in_loop_clients(node, network_info, block_device_info)
LOG.debug('End to start container.')
def _start_container_in_loop_clients(self, node, network_info, block_device_info):
clients = self._get_hybrid_service_clients_by_node(node)
is_docker_service_up = False
try:
is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
except Exception, e:
LOG.error('docker is not start, exception: %s' % traceback.format_exc(e))
if is_docker_service_up:
try:
self._hype_start_container(clients=clients,
network_info=network_info,
block_device_info=block_device_info)
except Exception, e:
error_info = 'Start container failed, exception: %s' % traceback.format_exc(e)
LOG.error(error_info)
raise exception.NovaException(error_info)
def get_instance_macs(self, instance):
LOG.debug('Start to get macs of instance %s', instance)
filters = {'tag:hybrid_cloud_instance_id': instance['uuid']}
nodes = self.compute_adapter.list_nodes(ex_filters=filters)
instance_macs = dict()
if nodes is not None and len(nodes) == 1:
node = nodes[0]
nw_interfaces = node.extra['network_interfaces']
for nw_interface in nw_interfaces:
subnet_id = nw_interface.extra['subnet_id']
vpc_id = nw_interface.extra['vpc_id']
mac_address = nw_interface.extra['mac_address']
# NOTE(nkapotoxin): Now we make the subnet_id is the provider
# network id
instance_macs[subnet_id] = mac_address
return instance_macs
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
"""
# 1.get node
instance_id = instance.uuid
provider_node_id = self._get_provider_node_id(instance)
if not provider_node_id:
LOG.error('instance %s is not found' % instance_id)
raise exception.InstanceNotFound
else:
provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
if not provider_nodes:
LOG.error('instance %s is not found' % instance_id)
raise exception.InstanceNotFound
if len(provider_nodes)>1:
LOG.error('instance %s are more than one' % instance_id)
raise exception_ex.MultiInstanceConfusion
provider_node = provider_nodes[0]
image_container_type = instance.system_metadata.get('image_container_format')
LOG.debug('image_container_type: %s' % image_container_type)
if image_container_type == CONTAINER_FORMAT_HYBRID_VM:
clients = self._get_hybrid_service_clients_by_node(provider_node)
try:
is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
except Exception, e:
LOG.error('docker is not start, exception: %s' % traceback.format_exc(e))
raise e
if is_docker_service_up:
try:
self._clients_reboot_app(clients,
network_info=network_info,
block_device_info=block_device_info)
except Exception, e:
error_info = 'Start container failed, exception: %s' % traceback.format_exc(e)
LOG.error(error_info)
raise exception.NovaException(error_info)
else:
try:
self.compute_adapter.reboot_node(provider_node)
except Exception as e:
raise e
    @RetryDecorator(max_retry_count= 50,inc_sleep_time=5,max_sleep_time=60,
                    exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
    def _wait_hybrid_service_up(self, client):
        # Probe the hybrid service; the RetryDecorator keeps re-invoking this
        # (up to 50 times, backing off) until get_version() stops raising.
        return client.get_version()
@RetryDecorator(max_retry_count=20,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hypervm_inject_file(self, client, file_data):
LOG.info('start to inject file.')
inject_reslut = client.inject_file(CONF.provider_opts.dst_path, file_data=file_data)
LOG.info('end to inject file....')
return inject_reslut
    @RetryDecorator(max_retry_count= 100,inc_sleep_time=5,max_sleep_time=120,
                    exceptions=(errors.APIError,errors.NotFound,
                                errors.ConnectionError, errors.InternalError, Exception))
    def _start_container(self, client, network_info, block_device_info):
        # Single-client container start; retried by the decorator for up to
        # 100 attempts with increasing sleeps.
        return client.start_container(network_info=network_info, block_device_info=block_device_info)
@RetryDecorator(max_retry_count= MAX_RETRY_COUNT,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_create_container(self, clients, name):
LOG.info('start to create container')
created_container = None
tmp_except = Exception('client is None')
for client in clients:
try:
created_container = client.create_container(name)
break
except Exception, e:
tmp_except = e
LOG.error('exception when create container, exception: %s' % traceback.format_exc(e))
time.sleep(1)
continue
if not created_container:
raise tmp_except
LOG.info('end to create container, created_container: %s' % created_container)
return created_container
@RetryDecorator(max_retry_count=MAX_RETRY_COUNT, inc_sleep_time=5, max_sleep_time=60, exceptions=(
errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError, Exception))
def _hyper_create_container_task(self, clients, image_name, image_uuid, injected_files, admin_password,
network_info, block_device_info):
LOG.info('start to submit task for creating container.')
LOG.debug('admin_password: %s' % admin_password)
LOG.debug('injected_files: %s' % injected_files)
created_task = None
tmp_exception = Exception('empty for creating container')
for client in clients:
try:
created_task = client.create_container(image_name, image_uuid, inject_files=injected_files, admin_password=<PASSWORD>,
network_info=network_info, block_device_info=block_device_info)
except Exception, e:
tmp_exception = e
LOG.error('exception when create container, exception: %s' % traceback.format_exc(e))
continue
if not created_task:
raise tmp_exception
LOG.info('end to submit task for creating container, task: %s' % created_task)
return created_task
@RetryDecorator(max_retry_count=50, inc_sleep_time=5, max_sleep_time=60,
exceptions=(exception_ex.RetryException))
def _wait_for_task_finish(self, clients, task):
task_finish = False
if task['code'] == wormhole_constants.TASK_SUCCESS:
return True
current_task = self._hyper_query_task(clients, task)
task_code = current_task['code']
if wormhole_constants.TASK_DOING == task_code:
LOG.debug('task is DOING, status: %s' % task_code)
raise exception_ex.RetryException(error_info='task status is: %s' % task_code)
elif wormhole_constants.TASK_ERROR == task_code:
LOG.debug('task is ERROR, status: %s' % task_code)
raise Exception('task error, task status is: %s' % task_code)
elif wormhole_constants.TASK_SUCCESS == task_code:
LOG.debug('task is SUCCESS, status: %s' % task_code)
task_finish = True
else:
raise Exception('UNKNOW ERROR, task status: %s' % task_code)
LOG.debug('task: %s is finished' % task )
return task_finish
@RetryDecorator(max_retry_count=MAX_RETRY_COUNT, inc_sleep_time=5, max_sleep_time=60, exceptions=(
errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError, Exception))
def _hyper_query_task(self, clients, task):
LOG.debug('star to query task.')
current_task = None
tmp_exception = 'empty for query task'
for client in clients:
try:
current_task = client.query_task(task)
break
except Exception, e:
tmp_exception = e
LOG.error('exception when query task. exception: %s' % traceback.format_exc(e))
continue
if not current_task:
raise tmp_exception
return current_task
@RetryDecorator(max_retry_count= MAX_RETRY_COUNT,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_start_container(self, clients, network_info, block_device_info):
LOG.info('Start to start container')
started_container = None
tmp_except = None
for client in clients:
try:
started_container = client.start_container(network_info=network_info, block_device_info=block_device_info)
break
except Exception, e:
tmp_except = e
continue
if not started_container:
raise tmp_except
LOG.info('end to start container, started_container: %s' % started_container)
return started_container
@RetryDecorator(max_retry_count=20, inc_sleep_time=5, max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_inject_file_to_container(self, clients, inject_file):
"""
:param clients:
:param inject_file: (path, file_contents)
:return:
"""
LOG.debug('start to inject file to container, inject_file: %s' % inject_file)
inject_result = None
tmp_except = None
for client in clients:
try:
inject_result = client.inject_files(inject_file)
break
except Exception, e:
tmp_except = e
continue
if not inject_result:
raise tmp_except
LOG.info('end to inject file to container, inject_file: %s' % inject_file)
return inject_result
@RetryDecorator(max_retry_count= 20,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound,
errors.ConnectionError, errors.InternalError, Exception))
def _hype_inject_file(self, clients, file_data):
inject_result = None
tmp_except = None
for client in clients:
try:
inject_result = client.inject_file(CONF.provider_opts.dst_path, file_data=file_data)
break
except Exception, e:
tmp_except = e
continue
if not inject_result:
raise tmp_except
return inject_result
def _get_node_private_ips(self, provider_node):
"""
:param provider_node: type Node,
:return: type list, return list of private ips of Node
"""
LOG.debug('start to get node private ips for node:%s' % provider_node.name)
private_ips = []
interfaces = self.compute_adapter.ex_list_network_interfaces(node=provider_node)
for interface in interfaces:
if len(interface.extra.get('private_ips')) > 0:
for private_ip_dic in interface.extra.get('private_ips'):
private_ip = private_ip_dic.get('private_ip')
if private_ip:
private_ips.append(private_ip)
else:
continue
else:
continue
LOG.debug('end to get node private ips, private_ips: %s' % private_ips)
return private_ips
def _get_hybrid_service_clients_by_instance(self, instance):
LOG.debug('start to get hybrid service clients.')
provider_node = self._get_provider_node(instance)
if not provider_node:
error_info = 'get instance %s error at provider cloud' % instance.uuid
LOG.error(error_info)
raise Exception(error_info)
clients = self._get_hybrid_service_clients_by_node(provider_node)
LOG.debug('end to get hybrid service clients')
return clients
def _get_hybrid_service_clients_by_node(self, provider_node):
port = CONF.provider_opts.hybrid_service_port
private_ips = self._get_node_private_ips(provider_node)
LOG.debug('port: %s' % port)
LOG.debug('private ips: %s' % private_ips)
clients = self._get_hybrid_service_client(private_ips, port)
return clients
def _get_hybrid_service_client(self, ips, port):
clients = []
for ip in ips:
clients.append(Client(ip, port))
return clients
@RetryDecorator(max_retry_count=50, inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_wait_hybrid_service_up(self, clients):
is_docker_up = False
tmp_except = Exception('Can not get version of docker server ')
for client in clients:
try:
docker_version = client.get_version()
LOG.debug('docker version: %s, docker is up.' % docker_version)
is_docker_up = True
break
except Exception, e:
tmp_except = e
continue
if not is_docker_up:
raise tmp_except
return is_docker_up
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_reboot_app(self, clients, network_info, block_device_info):
is_rebooted = False
tmp_except = Exception('Reboot app failed.')
for client in clients:
try:
client.restart_container(network_info=network_info, block_device_info=block_device_info)
LOG.debug('Reboot app success.')
is_rebooted = True
break
except Exception, e:
tmp_except = e
continue
if not is_rebooted:
raise tmp_except
return is_rebooted
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_stop_container(self, clients):
is_stop = False
tmp_except = Exception('Reboot app failed.')
for client in clients:
try:
client.stop_container()
LOG.debug('Reboot app success.')
is_stop = True
break
except Exception, e:
tmp_except = e
continue
if not is_stop:
raise tmp_except
return is_stop
@staticmethod
def _binding_host(context, network_info, host_id):
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
for vif in network_info:
neutron.update_port(vif.get('id'), port_req_body)
@staticmethod
def _binding_host_vif(vif, host_id):
context = RequestContext('user_id', 'project_id')
neutron = neutronv2.get_client(context, admin=True)
port_req_body = {'port': {'binding:host_id': host_id}}
neutron.update_port(vif.get('id'), port_req_body)
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_attach_volume_for_docker_app(self, clients, volume_id, device, mount_device):
attached = False
tmp_except = Exception('attach volume for app failed.')
for client in clients:
try:
client.attach_volume(volume_id, device, mount_device)
LOG.debug('attach volume for app success.')
attached = True
break
except Exception, e:
tmp_except = e
continue
if not attached:
raise tmp_except
return attached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_create_image_task(self, clients, image):
image_name = image['name']
LOG.debug('image name : %s' % image_name)
image_id = image['id']
LOG.debug('image id: %s' % image_id)
create_image_task = None
tmp_exception = Exception('tmp exception in create image task')
for client in clients:
try:
create_image_task = client.create_image(image_name, image_id)
LOG.debug('create image task: %s' % create_image_task)
break
except Exception, e:
tmp_exception = e
continue
if not create_image_task:
raise tmp_exception
return create_image_task
@RetryDecorator(max_retry_count=50, inc_sleep_time=5, max_sleep_time=60,
exceptions=(errors.APIError, errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_get_image_info(self, clients, image):
image_name = image['name']
image_id = image['id']
image_info = None
tmp_exception = Exception('tmp exception in get image_info')
for client in clients:
try:
image_info = client.image_info(image_name, image_id)
LOG.debug('get image_info: %s' % image_info)
break
except Exception, e:
tmp_exception = e
continue
if not image_info:
raise tmp_exception
return image_info
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_list_volume_devices_for_docker_app(self, clients):
volume_devices = None
tmp_except = Exception('list volumes devices failed.')
for client in clients:
try:
volume_devices = client.list_volume()
LOG.debug('list volume devices success, volume list: %s' % volume_devices)
break
except Exception, e:
tmp_except = e
continue
if not volume_devices:
raise tmp_except
return volume_devices
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_detach_volume_for_docker_app(self, clients, volume_id):
detached = False
tmp_except = Exception('detach volume for app failed.')
for client in clients:
try:
client.detach_volume(volume_id)
LOG.debug('detach volume for app success.')
detached = True
break
except Exception, e:
tmp_except = e
continue
if not detached:
raise tmp_except
return detached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_detach_interface(self, clients, vif):
detached = False
tmp_except = Exception('detach interface for app failed.')
for client in clients:
try:
client.detach_interface(vif)
LOG.debug('detach interface for app success.')
detached = True
break
except Exception, e:
tmp_except = e
continue
if not detached:
raise tmp_except
return detached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_attach_interface(self, clients, vif):
attached = False
tmp_except = Exception('attach interface for app failed.')
for client in clients:
try:
client.attach_interface(vif)
LOG.debug('attach interface for app success.')
attached = True
break
except Exception, e:
tmp_except = e
continue
if not attached:
raise tmp_except
return attached
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_pause_container(self, clients):
paused = False
tmp_except = Exception('pause container failed.')
for client in clients:
try:
client.pause_container()
LOG.debug('pause container success.')
paused = True
break
except Exception, e:
tmp_except = e
continue
if not paused:
raise tmp_except
return paused
@RetryDecorator(max_retry_count=50,inc_sleep_time=5,max_sleep_time=60,
exceptions=(errors.APIError,errors.NotFound, errors.ConnectionError, errors.InternalError))
def _clients_unpause_container(self, clients):
unpaused = False
tmp_except = Exception('unpause container failed.')
for client in clients:
try:
client.unpause_container()
LOG.debug('unpause container success.')
unpaused = True
break
except Exception, e:
tmp_except = e
continue
if not unpaused:
raise tmp_except
return unpaused
def pause(self, instance):
"""Pause the specified instance.
:param instance: nova.objects.instance.Instance
"""
LOG.debug('start to pause instance: %s' % instance)
node = self._get_provider_node(instance)
LOG.debug("Node is: %s" % node)
if instance.system_metadata.get('image_container_format') == CONTAINER_FORMAT_HYBRID_VM \
and self._node_is_active(node):
clients = self._get_hybrid_service_clients_by_node(node)
is_docker_service_up = False
try:
is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
except Exception, e:
LOG.error('docker is not start, exception: %s' % traceback.format_exc(e))
if is_docker_service_up:
self._clients_pause_container(clients)
LOG.debug('end to pause instance success.')
def unpause(self, instance):
"""Unpause paused VM instance.
:param instance: nova.objects.instance.Instance
"""
LOG.debug('start to unpause instance: %s' % instance)
node = self._get_provider_node(instance)
if instance.system_metadata.get('image_container_format') == CONTAINER_FORMAT_HYBRID_VM \
and self._node_is_active(node):
clients = self._get_hybrid_service_clients_by_node(node)
is_docker_service_up = False
try:
is_docker_service_up = self._clients_wait_hybrid_service_up(clients)
except Exception, e:
LOG.error('docker is not start, exception: %s' % traceback.format_exc(e))
if is_docker_service_up:
self._clients_unpause_container(clients)
LOG.debug('end to unpause instance success.')
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info.

    :raises InvalidDiskInfo: when *path* does not exist or qemu-img
        produced no output
    """
    if not os.path.exists(path):
        msg = (_("Path does not exist %(path)s") % {'path': path})
        raise exception.InvalidDiskInfo(reason=msg)
    # LC_ALL/LANG pinned so qemu-img output is parseable regardless of locale.
    out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
                             'qemu-img', 'info', path)
    if out:
        return imageutils.QemuImgInfo(out)
    msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") %
           {'path': path, 'error': err})
    raise exception.InvalidDiskInfo(reason=msg)
def convert_image(source, dest, out_format, run_as_root=False, **kwargs):
    """Convert image to other format.

    When kwargs contains subformat == 'streamoptimized', the converted vmdk
    is additionally run through ovftool and the stream-optimized disk
    replaces *dest*.
    """
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    utils.execute(*cmd, run_as_root=run_as_root)
    # Fix: dict.has_key() was removed in python 3; .get() covers both the
    # presence and the value check (get returns None when the key is absent).
    if kwargs.get('subformat') == 'streamoptimized':
        dir_name = os.path.dirname(dest)
        base_name = os.path.basename(dest)
        ovf_name = '%s/%s.ovf' % (dir_name, base_name)
        vmx_name_temp = '%s/vmx/template.vmx' % CONF.provider_opts.conversion_dir
        vmx_name = '%s/template.vmx' % dir_name
        shutil.copy2(vmx_name_temp, vmx_name)
        mk_ovf_cmd = ('ovftool', '-o', vmx_name, ovf_name)
        # ovftool reads the source disk under a fixed name next to the vmx.
        convert_file = '%s/converted-file.vmdk' % dir_name
        os.rename(dest, convert_file)
        utils.execute(*mk_ovf_cmd, run_as_root=run_as_root)
        vmdk_file_name = '%s/%s-disk1.vmdk' % (dir_name, base_name)
        fileutils.delete_if_exists(dest)
        os.rename(vmdk_file_name, dest)
        # Drop the intermediate ovf artefacts.
        fileutils.delete_if_exists(ovf_name)
        fileutils.delete_if_exists('%s/%s.mf' % (dir_name, base_name))
        fileutils.delete_if_exists(convert_file)
| StarcoderdataPython |
6455453 | <reponame>LoansBot/database
"""Restore the backup specified to the database. Requires user confirmation or
--confirm
"""
import argparse
import os
import sys
import settings
import subprocess
def main(args=None):
    """CLI entry point: parse arguments, confirm, and restore the dump."""
    parser = argparse.ArgumentParser(description='Restore backup')
    parser.add_argument('--confirm', action='store_true',
                        help='Skip user confirmation requirement.')
    parser.add_argument('dump', help='The path to the .dump file')
    args = parser.parse_args(args=args)

    if not args.confirm:
        print('You are performing a DANGEROUS operation!')
        print('This will DELETE the entire database! Are you sure? [y/N]')
        if input() not in ('y', 'Y'):
            print('Cancelling')
            return

    if not os.path.exists(args.dump):
        print(f'Dump file at {args.dump} does not exist')
        sys.exit(1)
    if not os.path.isfile(args.dump):
        print(f'Dump file at {args.dump} is not a file')
        sys.exit(1)

    restore_database(args.dump)
def restore_database(local_file):
    """Restore the PostgreSQL database from *local_file* using pg_restore.

    Reads connection settings via settings.load_settings(), temporarily
    exports PGPASSWORD for the child processes, and exits the process with
    status 1 when pg_restore fails.
    """
    cfg = settings.load_settings()
    db_host = cfg['DATABASE_HOST']
    db_port = int(cfg['DATABASE_PORT'])
    db_user = cfg['DATABASE_USER']
    db_pass = cfg['DATABASE_PASSWORD']
    auth_str = f'-h {db_host} -p {db_port} -U {db_user}'

    old_pg_pass = os.environ.get('PGPASSWORD')
    os.environ['PGPASSWORD'] = db_pass
    try:
        pg_restore_version = subprocess.check_output('pg_restore --version', shell=True)
        print(f'Initiating restore from {local_file} using {pg_restore_version}')
        # NOTE(review): values are interpolated into a shell command. They come
        # from trusted local config, but shell-quoting them would be safer.
        status = os.system(f'pg_restore -Fc --clean --create --dbname template1 {auth_str} {local_file}')
    finally:
        # Fix: restore the caller's environment even when pg_restore is missing
        # (check_output raises CalledProcessError / FileNotFoundError) --
        # previously the password stayed exported in that case.
        if old_pg_pass is not None:
            os.environ['PGPASSWORD'] = old_pg_pass
        else:
            del os.environ['PGPASSWORD']

    if status == 0:
        print('Restore finished')
    else:
        print(f'Status failed with code {status}')
        sys.exit(1)
# Script entry point: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5195818 | from django.contrib import admin
from .models import Country, Author, Category, Book
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
    """Register Country in the Django admin with default ModelAdmin behavior."""
    pass
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    """Register Author in the Django admin with default ModelAdmin behavior."""
    pass
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Register Category in the Django admin with default ModelAdmin behavior."""
    pass
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Register Book in the Django admin with default ModelAdmin behavior."""
    pass
| StarcoderdataPython |
6477734 | import os
from distutils.core import setup
# Runtime dependencies required to use the package.
install_requires=[
    "numpy",
    "nltk",
    "textblob",
    "keras",
    "pandas",
]
# Build-time dependencies (available before setup() runs).
setup_requires=[
    "numpy",
]
# Optional feature groups: install with `pip install ontokom[fasttext]`.
extras_require = {
    "fasttext": ["fasttext"],
}
setup(
    name="ontokom",
    version="0.1",
    description="",
    url="",
    author="<NAME>",  # NOTE(review): placeholder left by anonymization
    license="MIT",
    install_requires=install_requires,
    setup_requires=setup_requires,
    extras_require=extras_require,
    packages=["ontokom"],
)
| StarcoderdataPython |
334393 | import cv2
DEFAULT_HEIGHT = 720
DEFAULT_WIDTH = 1280
"""
Change image resolution
Defaults to 1280 * 720
"""
class Image():
    """Wrap an image file path and resize it to a target resolution."""

    def __init__(self, image, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH):
        # image: path to the image file on disk
        self.image = image
        self.height = height
        self.width = width

    def read(self):
        """Load the image from disk, keeping any alpha channel."""
        return cv2.imread(self.image, cv2.IMREAD_UNCHANGED)

    def resize(self):
        """Return the image resized to (width, height) with area interpolation."""
        target = (self.width, self.height)
        return cv2.resize(self.read(), target, interpolation=cv2.INTER_AREA)
296096 | <reponame>ANRGUSC/pyREM
import math
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import root
RHO_A = 1.21 #density of air in kg/m^3
RHO_D = 1000 #density of droplet in kg/m^3
RHO = RHO_A
RHO_P = RHO_D
G = 9.81 #gravitational acceleration in m/s^2
VISCOSITY = 1.81*10**-5 #viscosity of air in Pa s
RV = 461.52 #J/kgK specific gas constant for water
D_0 = 1.0*10**-5 #initial diameter of droplet in micro meters
A = 0.06 #given constant in dispersion coefficient equation
B = 0.92 #given constant in dispersion coefficient equation
NUMBER_OF_DROPLETS = 1 #number of droplets emitted (q)
X_0 = 0 #initial horizontal position
Z_0 = 0 #initial vertical position
RESPIRATORY_RATE = 0.25 #avg number of breaths/second
V_X = 1 #horizontal velocity of the air surrounding the droplets in m/s
RELATIVE_HUMIDITY = 60 #default relative humidity
TEMPERATURE = 293.15 #default ambient temperature in Kelvin
X_AWAY = 2 #default distance 2 meters away from source
def diameter_polynomial(time,temp,r_h,initial_D):
    '''This function estimates the droplet's diameter (same units as initial_D)
    by finding the real roots of the diameter polynomial. If the roots are complex,
    the droplet diameter has reached its minimum, dmin, and is estimated at time = t_crit,
    where the discriminant of the polynomial is zero.

    Parameters:
        time (float): time at which the droplet diameter will be calculated
        temp (float): ambient temperature in Kelvin
        r_h (int): relative humidity
        initial_D (float): initial droplet size

    Returns:
        d (float): Returns d, a float value representing the diameter
        of the droplet after t seconds.
    '''
    molec_diff = (2.16*10**-5)*(temp/273.15)**1.8 # molecular diffusivity of water vapor
    p_sat = 611.21*math.exp((19.843-(temp/234.5))*((temp-273.15)/(temp-16.01))) # saturation water vapor pressure
    p_infin = p_sat*r_h/100 # ambient water vapor pressure
    t_crit = (RHO_P*RV*temp*(initial_D**2))/(32*molec_diff*(p_sat-p_infin)) # time when Discriminant is 0
    # quartic in d: d**4 + m*d**2 + k = 0 (evaporation polynomial)
    k = ((8*molec_diff*(p_sat-p_infin)*(initial_D**2)*time)/(RHO_P*RV*temp))
    m = -initial_D**2
    p = np.poly1d([1, 0, m, 0, k])
    # for time <= t_crit the roots are real; take the largest (physical) one
    roots = max(np.roots(p))
    if time <= t_crit:
        d = roots
    else:
        # past t_crit the roots turn complex: the diameter has bottomed out,
        # so evaluate once more exactly at t_crit (recursion depth is 1)
        d = diameter_polynomial(t_crit,temp,r_h,initial_D)
    return d.real
def terminal_velocity(time,temp,r_h,initial_D):
    '''This function estimates the terminal velocity in m/s of the droplet as a function of time,
    temperature, humidity and initial droplet size. For small velocities, v_t is calculated
    using Stoke's Law. Otherwise, it is calculated by finding the roots of the velocity exponential.

    Parameters:
        time (float): time at which the terminal velocity will be calculated
        temp (float): ambient temperature in Kelvin
        r_h (int): relative humidity
        initial_D (float): initial droplet size

    Returns:
        v_t (float): v_t, a float value representing the terminal velocity of the droplet
        after t seconds
    '''
    if time <= 0:
        # NOTE(review): the classic Stokes terminal velocity rho_p*d^2*g/(18*mu)
        # has no pi factor — confirm the extra math.pi here is intended.
        v_t = (RHO_P*initial_D**2*G)/(18*math.pi*VISCOSITY) #Stoke's Law for small velocities
    else:
        # current (evaporated) diameter at this time
        d = diameter_polynomial(time,temp,r_h,initial_D)
        # coefficients of the drag/weight balance n*v^2.687 + m*v^2 - p*v = 0
        n = 10.8*VISCOSITY*((RHO_A*d)/VISCOSITY)**0.687
        p = 4*(d**2)*(RHO_D-RHO_A)*G
        m = 72*VISCOSITY
        # solve numerically, starting the search from v = 1 m/s
        roots = root(lambda v: n*v**(2.687)+m*v**2-p*v,1)
        v_t = roots.x[0]
    return v_t
def position(time,temp,r_h,initial_D):
    ''' This function estimates the horizontal and vertical position of the droplet after t seconds.

    The vertical distance is obtained by integrating the time-varying terminal
    velocity, since the droplet keeps changing speed while its diameter shrinks
    towards dmin.

    Parameters:
        time (float): time at which the x_d and z_d values are calculated
        temp (float): ambient temperature in Kelvin
        r_h (int): relative humidity
        initial_D (float): initial droplet size

    Returns:
        (x_d,z_d): a 2-tuple of float values containing the x and z positions of the droplet in meters
    '''
    if time <= 0:
        return (X_0, Z_0)
    # FIX: a stray terminal_velocity(...) call whose result was discarded has
    # been removed; only the integral below is needed.
    # integrate.quad returns (value, abserr); keep only the integral value.
    v_integral, _ = integrate.quad(terminal_velocity, 0, time, args=(temp,r_h,initial_D,))
    x_d = X_0 + V_X*time  # horizontal drift with the surrounding air
    z_position = Z_0 - v_integral
    # clamp at -2 m: the droplet has reached the ground and stops falling
    z_d = z_position if z_position >= -2 else -2
    return (x_d, z_d)
def concentration(time,x_away,temp,r_h,initial_D):
    ''' Estimate the concentration of one exhaled Gaussian puff at a given time.

    Each breath is modeled as an expanding Gaussian puff containing respiratory
    droplets; the puff widens as it drifts away from the source.

    Parameters:
        time (float): time in seconds
        x_away (float): distance x meters away from an infected source
        temp (float): ambient temperature in Kelvin
        r_h (int): relative humidity
        initial_D (float): initial droplet size

    Returns:
        conc_of_puff (float): concentration of the puff that reaches a person
        x_away meters from the source.
    '''
    x_d, z_d = position(time,temp,r_h,initial_D)
    sigma = A*(x_d**B)  # dispersion coefficient of the expanding puff
    normalization = NUMBER_OF_DROPLETS/((math.sqrt(2*math.pi)*sigma))**3
    gaussian = math.exp((-1/(2*sigma**2))*((x_away-x_d)**2+z_d**2))
    return normalization*gaussian
def exposure_per_breath(time,x_away,temp,r_h,initial_D):
    '''Integrate the puff concentration over [0, time] to get the dose per breath.

    Parameters:
        time (float): upper limit of the integral, in seconds
        x_away (float): distance x meters away from the infected source
        temp (float): ambient temperature in Kelvin
        r_h (int): relative humidity
        initial_D (float): initial droplet size

    Returns:
        exposure (2-tuple of float): the integrated concentration and the
        numerical error estimate reported by scipy's quad.
    '''
    # up to 50 subdivisions for the adaptive quadrature
    return integrate.quad(concentration, 0, time,
                          args=(x_away,temp,r_h,initial_D,), limit=50)
def total_exposure(time,x_away=X_AWAY,temp=TEMPERATURE,r_h=RELATIVE_HUMIDITY, initial_D=D_0):
    '''Total dosage a person accumulates after breathing near a source for t seconds.

    Multiplies the per-breath exposure by the number of breaths taken in
    `time` seconds (RESPIRATORY_RATE breaths per second).

    Parameters:
        time (float): time in seconds
        x_away (float): proximity, default 2 meters
        temp (float): temperature, default 293.15 K (20 C)
        r_h (int): relative humidity, default 60
        initial_D (float): initial droplet size, default D_0

    Returns:
        total_dosage (float): accumulated exposure over all breaths.
    '''
    dose_per_breath, _ = exposure_per_breath(time,x_away,temp,r_h,initial_D)
    breaths_taken = RESPIRATORY_RATE*time
    return dose_per_breath*breaths_taken
# Example usage, for manual testing: compute (but do not print) the total
# accumulated exposure after 5 seconds.
if __name__ == '__main__':
    total_exposure(5) #total accumulated exposure after 5 seconds
| StarcoderdataPython |
1972893 | <filename>leetcode/easy/Binary_Tree_Level_Order_Traversal_II.py
# -*- coding: utf-8 -*-
"""
created by huash06 at 2015-04-13 11:28
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
confused what "{1,#,2,3}" means? > read more on how binary tree is serialized on OJ.
OJ's Binary Tree Serialization:
The serialization of a binary tree follows a level order traversal, where '#'
signifies a path terminator where no node exists below.
Here's an example:
1
/ \
2 3
/
4
\
5
The above binary tree is serialized as "{1,2,3,#,#,4,#,#,5}".
"""
__author__ = 'huash06'
import sys
import os
# Definition for a binary tree node
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    # @param root, a tree node
    # @return a list of lists of integers
    def levelOrderBottom(self, root):
        """BFS the tree level by level, then reverse the per-level lists.

        FIX: uses collections.deque so each dequeue is O(1); the previous
        list.pop(0) shifted the whole queue, making traversal O(n^2).

        Returns [] for an empty tree.
        """
        if not root:
            return []
        from collections import deque  # local import: file has no deque import
        result = []
        queue = deque([(root, 1)])
        while queue:
            node, level = queue.popleft()
            if len(result) < level:
                result.append([node.val])       # first node seen on this level
            else:
                result[level - 1].append(node.val)
            if node.left:
                queue.append((node.left, level + 1))
            if node.right:
                queue.append((node.right, level + 1))
        return list(reversed(result))
# Build the sample tree {3,9,20,#,#,15,7} and print its bottom-up level order.
s = Solution()
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
right = root.right
right.left = TreeNode(15)
right.right = TreeNode(7)
t = s.levelOrderBottom(root)
# Expected output: "15,7" then "9,20" then "3"
for r in t:
    print(','.join(list(map(str, r))))
6632792 | from fpdf import FPDF
class Cards(FPDF):
    """PDF document made of flash-card style pages, one Card per entry."""

    def __init__(self, orientation = 'P', unit = 'mm', format='A4'):
        super().__init__(orientation, unit, format)
        self.cards = []       # Card objects to render, in order
        self.curr_card = 0    # index of the card currently being drawn (used by header())
        # we do not want to auto page break
        self.set_auto_page_break(False)
    def add_card(self, card):
        """Queue a Card for rendering on export()."""
        self.cards.append(card)
    def header(self):
        """Draw the current card's title at the top of every page (FPDF hook)."""
        self.set_font("Arial")
        try:
            self.cards[self.curr_card].title.to_pdf(self)
        except IndexError:
            # no card is active (e.g. before the first page) — draw nothing
            return
    def export(self,filename):
        """Render every queued card and write the PDF to `filename`."""
        # draw each card
        for card in self.cards:
            # draw card
            card.to_pdf(self)
            # check to see if we went over the page; if so, print a warning
            # NOTE(review): landscape uses fw_pt here — confirm that is the
            # intended page-height for the overflow check.
            page_height = self.fw_pt if self.def_orientation == "L" else self.fh_pt
            if self.get_y() > page_height:
                print(f"WARNING: Card \"{card.title.text}\" is too long. Output truncated.")
            # increment card number
            self.curr_card += 1
        # write card to file
        self.output(filename)
class Card:
    """One flash card: a Title plus an ordered list of content elements."""

    def __init__(self, title_str = "Untitled"):
        self.title = Title(title_str)
        self.contents = []   # CardContents queued via add_content()
        self.printed = []    # contents already drawn; replayed after a soft page break
    def add_content(self, content):
        """Append a content element (Subtitle, Text, BulletedPoint, ...)."""
        self.contents.append(content)
    def soft_page_break(self, pdf):
        """Start a new page and re-draw everything printed so far onto it."""
        pdf.add_page()
        for printed in self.printed:
            printed.to_pdf(pdf)
    def to_pdf(self, pdf):
        """Render the card: a title-only page, then the content pages."""
        # blank page with just title
        pdf.add_page()
        # page with information
        pdf.add_page()
        # card contents
        for content in self.contents:
            # insert an extra page break before printing subtitles
            # but only if they are not the first subtitles
            if type(content) is Subtitle and not content.first:
                self.soft_page_break(pdf)
            self.printed.append(content)
            content.to_pdf(pdf)
            # insert an extra page break after printing subtitles
            if type(content) is Subtitle:
                self.soft_page_break(pdf)
class CardContents:
    """Base class for a single piece of card content; subclasses render it."""

    def __init__(self, text="NULL"):
        # Raw text this element will render.
        self.text = text

    def __str__(self):
        return self.text

    def to_pdf(self, pdf):
        # Subclasses must render themselves onto the given FPDF object.
        raise NotImplementedError("This is an abstract method and has no business being called.")
# a card title
class Title(CardContents):
    """A card's main title, rendered large, bold and centred."""

    def to_pdf(self, pdf):
        face = "Arial"
        pdf.set_font(face, "B", 20)   # big bold heading
        pdf.multi_cell(0, 20, txt=self.text, align="C", border=0)
        pdf.set_font(face, "", 12)    # restore body font
        pdf.ln(12)
# a subtitle within a card
class Subtitle(CardContents):
    """A top-level heading inside a card."""

    def __init__(self, text="NULL", first=False):
        super().__init__(text)
        # Whether this is the first subtitle on the card (skips the leading gap).
        self.first = first

    def to_pdf(self, pdf):
        face = "Arial"
        pdf.set_font(face, "B", 16)
        if not self.first:
            pdf.ln(12)  # blank gap before every subtitle except the first
        pdf.multi_cell(0, 16, txt=self.text, align="L", border=0)
        pdf.set_font(face, "", 12)  # restore body font
# a subsubtitle within a card
class Subsubtitle(CardContents):
    """A second-level heading inside a card."""

    def to_pdf(self, pdf):
        face = "Arial"
        pdf.set_font(face, "B", 14)
        pdf.multi_cell(0, 14, txt=self.text, align="L", border=0)
        pdf.set_font(face, "", 12)  # restore body font
# a subsubsubtitle within a card
class Subsubsubtitle(CardContents):
    """A third-level heading inside a card (bold body-sized text)."""

    def to_pdf(self, pdf):
        face = "Arial"
        pdf.set_font(face, "B", 12)
        pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
        pdf.set_font(face, "", 12)  # back to regular weight
# a bulleted point
class BulletedPoint(CardContents):
    """A bulleted list item with indentation controlled by `level`."""

    def __init__(self, text = "NULL", level = 0):
        super().__init__(text)
        # Indentation prefix: one space block per nesting level.
        self.spacing = " " * level
        # Unused for plain bullets; overridden by NumberedPoint.
        self.number = 0
    def to_pdf(self, pdf):
        """Render indentation, the bullet marker, then the item text."""
        # save old font and change family to Courier (monospace keeps indentation even)
        old_font = pdf.font_family
        pdf.set_font("Courier")
        # add spacing
        pdf.cell(pdf.get_string_width(self.spacing) + pdf.c_margin * 2, 14, txt=self.spacing, align="L", border=0)
        # draw bullet point
        self.draw_point(pdf, self.number)
        # return old font
        pdf.set_font(old_font)
        # draw text
        pdf.multi_cell(0, 12, txt=self.text, align="L", border=0)
    def draw_point(self, pdf, number=1):
        """Draw the bullet glyph; `number` is ignored for plain bullets."""
        # set bullet character (chr(149) is the Latin-1 bullet)
        bullet = "".join([" ",chr(149)])
        # we want this to be wide enough to match NumberedPoint
        pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=bullet, align="L", border=0)
# a numbered point
class NumberedPoint(BulletedPoint):
    """A numbered list item; reuses BulletedPoint's layout with a number marker."""

    def __init__(self, text="NULL", level=0, number=1):
        super().__init__(text, level)
        # 1-based item number, passed to draw_point by BulletedPoint.to_pdf.
        self.number = number
    def draw_point(self, pdf, number=1):
        """Draw the `NN. ` marker in a cell wide enough for two digits."""
        # set number string (right-aligned within 2 characters)
        numstr = f"{number:2}. "
        # we want this to be wide enough to fit up to 99 numbers
        pdf.cell(pdf.get_string_width("99.") + 2 + pdf.c_margin * 2, 14, txt=numstr, align="L", border=0)
# a plaintext paragraph
class Text(CardContents):
    """A plain paragraph of body text."""

    def to_pdf(self, pdf):
        size = 12
        pdf.set_font_size(size)
        pdf.multi_cell(0, size, txt=self.text, align="L", border=0)
        pdf.set_font_size(size)  # body size left unchanged after rendering
6414641 | <reponame>zhanghao000/project_news
from flask import render_template, current_app, session, request, jsonify
from info import constants
from info.models import User, News, Category
from info.modules.news import index_blu
from info.utils.response_code import RET
@index_blu.route("/news_list")
def get_news_list():
    """
    Serve the paginated news listing for the home page body.
    :return: JSON containing the query status and the news data for the page
    """
    # 1. Fetch query-string parameters (with defaults)
    cid = request.args.get("cid", "1")
    page = request.args.get("page", "1")
    per_page = request.args.get("per_page", constants.HOME_PAGE_MAX_NEWS)
    # 2. Validate the parameters (all must be integers)
    try:
        cid = int(cid)
        page = int(page)
        per_page = int(per_page)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    # 3. Query the news data; cid == 1 means "all categories", so no filter
    filters = []
    if cid != 1:
        filters.append(News.category_id == cid)
    try:
        paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page, per_page, False)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="数据库查询错误")
    page = paginate.page
    total_page = paginate.pages
    news_list = paginate.items
    # Convert the model objects into plain dicts for JSON serialisation
    news_list = [news.to_basic_dict() for news in news_list]
    # 4. Return the response payload
    data = {
        "cid": cid,
        "page": page,
        "total_page": total_page,
        "news_list": news_list
    }
    return jsonify(errno=RET.OK, errmsg="ok", data=data)
@index_blu.route("/")
def index():
    """
    Render the home page: current user info, click-ranked news, and categories.
    :return: the rendered home page template
    """
    # Load the logged-in user (if any) from the session.
    user_id = session.get("user_id")
    user = None
    if user_id:
        try:
            # FIX: was User.query.filter().get(user_id); .get() is not valid
            # on a filtered query — fetch by primary key directly.
            user = User.query.get(user_id)
        except Exception as e:
            current_app.logger.error(e)
    # Convert the model object into plain data for the template.
    user_info = user.to_dict() if user else None

    # Right-hand "click ranking" news list.
    news_list = []
    try:
        news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
    except Exception as e:
        # FIX: was current_app.logger(e) — the logger object is not callable.
        current_app.logger.error(e)
    # FIX: conversion used to run only under `if not news_list:`, so real rows
    # were handed to the template unconverted. Convert unconditionally.
    news_list = [news.to_basic_dict() for news in news_list]

    # News category tabs.
    category_list = []
    try:
        category_list = Category.query.all()
    except Exception as e:
        current_app.logger.error(e)
    # FIX: same inverted condition as above — convert unconditionally.
    category_list = [category.to_dict() for category in category_list]

    data = {
        "user_info": user_info,
        "news_list": news_list,
        "category_list": category_list
    }
    return render_template("news/index.html", data=data)
@index_blu.route("/favicon.ico")
def favicon():
    """Serve the site favicon from the static news directory."""
    return current_app.send_static_file("news/favicon.ico")
| StarcoderdataPython |
1956734 | <reponame>VirtualVFix/AndroidTestFramework
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
"""
Additional functions integrated to `logging.Logger` class when new logger created in :mod:`src.libs.core.logger` module.
"""
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "29/09/17 15:27"
import logging
from .config import LOCAL
def newline(self, *args, lines=1, level=logging.INFO):
    """
    Print empty line(s) to all handlers of this logger (and any extra loggers)
    by temporarily swapping each handler's formatter for a blank format.

    Args:
        *args (logging.Logger): Additional loggers to repeat the action on
        lines (int): Number of blank lines to emit
        level (int): Logger level to emit the blank records at
    """
    loggers = [x for x in args if isinstance(x, logging.Logger)]
    loggers.insert(0, self)
    for log in loggers:
        # Remember each handler's formatter so it can be restored afterwards.
        formats = []
        for x in log.handlers:
            formats.append(x.formatter)
            x.formatter = logging.Formatter(fmt=LOCAL.BLANK_LOGGER_FORMAT)
        for i in range(lines):
            log.log(level, '')
        # Restore original formatters in the same handler order.
        for i, x in enumerate(log.handlers):
            x.formatter = formats[i]
def info(self, msg, *args, **kwargs):
    """
    Log an INFO message on this logger and mirror it to any extra loggers
    passed positionally. Every logger touched gets a ``_last_message``
    attribute, which :func:`lastmsg` reads back.

    Args:
        msg (str): Message to log
        *args (logging.Logger): Additional loggers to mirror the message to

    Usage:
        .. code-block:: python

            logger.info('spam message', other_logger, syslogger)
    """
    self._last_message = msg
    self._log(logging.INFO, msg, None, **kwargs)
    for extra in args:
        if not isinstance(extra, logging.Logger):
            continue
        extra._last_message = msg
        # mirror only when the extra logger's own level allows INFO
        if extra.level <= logging.INFO:
            extra._log(logging.INFO, msg, None, **kwargs)
def debug(self, msg, *args, **kwargs):
    """
    Print a DEBUG message to the current logger and to all additional loggers
    passed positionally. Also sets the **_last_message** attribute on each
    logger, which is read back by :func:`lastmsg`.

    Args:
        msg (str): Logger message
        *args (logging.Logger): Additional loggers to print
    """
    setattr(self, '_last_message', msg)
    self._log(logging.DEBUG, msg, None, **kwargs)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', msg)
            # mirror only when the extra logger's own level allows DEBUG
            if x.level <= logging.DEBUG:
                x._log(logging.DEBUG, msg, None, **kwargs)
def warning(self, msg, *args, **kwargs):
    """
    Print a WARNING message to the current logger and to all additional loggers
    passed positionally. Also sets the **_last_message** attribute on each
    logger, which is read back by :func:`lastmsg`.

    Args:
        msg (str): Logger message
        *args (logging.Logger): Additional loggers to print
    """
    setattr(self, '_last_message', msg)
    self._log(logging.WARNING, msg, None, **kwargs)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', msg)
            # mirror only when the extra logger's own level allows WARNING
            if x.level <= logging.WARNING:
                x._log(logging.WARNING, msg, None, **kwargs)
def error(self, msg, *args, **kwargs):
    """
    Print an ERROR message to the current logger and to all additional loggers
    passed positionally. Also sets the **_last_message** attribute on each
    logger, which is read back by :func:`lastmsg`.

    Args:
        msg (str): Logger message
        *args (logging.Logger): Additional loggers to print
    """
    setattr(self, '_last_message', msg)
    self._log(logging.ERROR, msg, None, **kwargs)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', msg)
            # mirror only when the extra logger's own level allows ERROR
            if x.level <= logging.ERROR:
                x._log(logging.ERROR, msg, None, **kwargs)
def exception(self, msg, *args, **kwargs):
    """
    Print an EXCEPTION traceback (at ERROR level) to the current logger and to
    all additional loggers passed positionally. Also sets the
    **_last_message** attribute on each logger, read back by :func:`lastmsg`.

    Args:
        msg (str): Logger message
        *args (logging.Logger): Additional loggers to print
    """
    setattr(self, '_last_message', msg)
    # exc_info=True attaches the active exception's traceback to the record
    self._log(logging.ERROR, msg, None, **kwargs, exc_info=True)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', msg)
            if x.level <= logging.ERROR:
                x._log(logging.ERROR, msg, None, **kwargs, exc_info=True)
def critical(self, msg, *args, **kwargs):
    """
    Print a CRITICAL message to the current logger and to all additional
    loggers passed positionally. Also sets the **_last_message** attribute on
    each logger, read back by :func:`lastmsg`.

    Args:
        msg (str): Logger message
        *args (logging.Logger): Additional loggers to print
    """
    setattr(self, '_last_message', msg)
    # NOTE: unlike the other helpers, this one also consults isEnabledFor()
    if self.isEnabledFor(logging.CRITICAL):
        self._log(logging.CRITICAL, msg, None, **kwargs)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', msg)
            if x.level <= logging.CRITICAL and x.isEnabledFor(logging.CRITICAL):
                x._log(logging.CRITICAL, msg, None, **kwargs)
def lastmsg(self):
    """
    Return the most recently logged message, or '' if nothing has been
    logged through the helpers that set **_last_message**.

    Returns:
        last message or empty str
    """
    try:
        return self._last_message
    except AttributeError:
        return ''
def done(self, *args, level=logging.INFO):
    """
    Print the "Done" message to this logger and any additional loggers.

    Args:
        *args (logging.Logger): Additional loggers to print
        level (int): Logger level
    """
    # FIX: previously stored under '_lastmsg', an attribute lastmsg() never
    # reads; use '_last_message' like every other log helper in this module.
    setattr(self, '_last_message', LOCAL.DONE_MESSAGE)
    self._log(level, LOCAL.DONE_MESSAGE, None)
    for x in args:
        if isinstance(x, logging.Logger):
            setattr(x, '_last_message', LOCAL.DONE_MESSAGE)
            x._log(level, LOCAL.DONE_MESSAGE, None)
def warnlist(self, msg, *args, propagate=True):
    """
    Print a warning and append it to ``CONFIG.SYSTEM.WARNINGS`` — that warning
    list is printed after all tests.

    Args:
        msg (str): Message
        *args (logging.Logger): Additional loggers to print
        propagate (bool): Print message to loggers
    """
    # imported locally, presumably to avoid a circular import — TODO confirm
    from config import CONFIG
    CONFIG.SYSTEM.WARNINGS = msg
    if propagate is True:
        self.warning(msg, *args)
def jenkins(self, msg, *args, propagate=False, level=logging.INFO, secured=False):
    """
    Keep a message so it can be printed to a Jenkins job or sent by email.

    Args:
        msg (str): Message
        *args (logging.Logger): Additional loggers to print
        propagate (bool): Print message to loggers
        level (int): Logger level
        secured (bool): Whether the message is secured. Secured messages cannot
            be sent via a regular email client.
    """
    # imported locally, presumably to avoid a circular import — TODO confirm
    from config import CONFIG
    CONFIG.SYSTEM.JENKINS = (msg, level, secured)
    if propagate is True:
        # dispatch to the log helper matching the requested level
        if level == logging.DEBUG:
            self.debug(msg, *args)
        elif level == logging.INFO:
            self.info(msg, *args)
        elif level == logging.WARNING:
            self.warning(msg, *args)
        elif level == logging.ERROR:
            self.error(msg, *args)
        elif level == logging.CRITICAL:
            self.critical(msg, *args)
304074 | <filename>transactions/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.contrib.auth.models import User
from projects.models import Project
from transactions.models import Transaction, Candidate
from transactions.forms import CandidateForm, SourcingForm
from invitations.models import Invitation
from django.core.mail import send_mail, BadHeaderError
# payments view
from payments.views import process_payment
# Create your views here.
def transaction(request, id):
    """Create a transaction for the given project and enter the workflow.

    Note: ``id`` shadows the builtin, but it is part of the URLconf interface
    and is kept for compatibility.
    """
    project = Project.objects.get(id=id)
    user = request.user
    new_transaction = Transaction.objects.create(user=user, project=project, stage='upload-candidates')
    return redirect(reverse('transactions:process_transaction', args=[new_transaction.id]))
def process_transaction(request, id):
    """Route the transaction to the view that handles its current stage."""
    current_transaction = Transaction.objects.get(id=id)
    stage = current_transaction.stage
    if stage == 'complete':
        return redirect(reverse('frontend:index'))
    # Several stages share a handler; dispatch through a table.
    stage_handlers = {
        'upload-candidates': upload_candidates,
        'payment-stage': all_candidates,
        'make-payment': all_candidates,
        'payment-confirmed': invitations,
        'payment-verified': invitations,
    }
    handler = stage_handlers.get(stage)
    if handler is not None:
        return handler(request, current_transaction)
def upload_candidates(request, current_transaction):
    """Collect candidate details for a transaction, one form submission at a time.

    'and_continue' stores the candidate and moves the transaction to the
    payment stage; 'add_another' stores the candidate and re-displays the
    form. Invalid submissions re-render the bound form so the user sees the
    field errors (previously the view returned None / a blank form).
    """
    # TODO: add capability to upload a text document or csv file of Candidates
    if request.method == 'POST':
        candidate_form = CandidateForm(request.POST)
        # Which submit button was pressed (if any).
        and_continue = bool(request.POST.get('and_continue'))
        add_another = bool(request.POST.get('add_another'))
        if (and_continue or add_another) and candidate_form.is_valid():
            # FIX: the candidate-creation code was duplicated per button;
            # create (and thereby save) the candidate once.
            Candidate.objects.create(
                first_name=candidate_form.cleaned_data['first_name'],
                last_name=candidate_form.cleaned_data['last_name'],
                email=candidate_form.cleaned_data['email'],
                transaction=current_transaction,
            )
            current_transaction.stage = 'payment-stage' if and_continue else 'upload-candidates'
            current_transaction.save()
            return redirect(reverse('transactions:process_transaction', args=[current_transaction.id]))
        if not (and_continue or add_another):
            # POST without a recognised button: show a fresh form (original behaviour).
            candidate_form = CandidateForm()
        # An invalid form falls through still bound, so errors are displayed.
    else:
        candidate_form = CandidateForm()
    return render(request, 'transactions/upload_candidate.html',
                  {'candidate_form': candidate_form, 'current_transaction': current_transaction})
def all_candidates(request, current_transaction):
    """Show every candidate attached to the transaction plus the total amount due."""
    context = {
        'candidates': Candidate.objects.filter(transaction=current_transaction),
        'total_amount': current_transaction.amount(),
        'current_transaction': current_transaction,
    }
    return render(request, 'transactions/all_candidates.html', context)
def invitations(request, current_transaction):
    """Send sign-up invitations to every candidate, then mark the transaction complete.

    On GET, renders the invitation page; on POST (with at least one
    candidate), sends one invitation per candidate and redirects back into
    the workflow.
    """
    candidates = Candidate.objects.filter(transaction=current_transaction)
    if request.method == 'POST':
        # exists() avoids issuing a COUNT(*) query just to test for emptiness.
        if candidates.exists():
            for candidate in candidates:
                invite = Invitation.create(candidate.email, inviter=request.user)
                invite.send_invitation(request)
            current_transaction.stage = 'complete'
            current_transaction.save()
            return redirect(reverse('transactions:process_transaction', args=[current_transaction.id]))
    return render(request, 'transactions/invitations.html',
                  {'candidates': candidates, 'current_transaction': current_transaction})
def my_invites(request):
    """List the candidate records whose email matches the logged-in user."""
    candidates = Candidate.objects.filter(email=request.user.email)
    return render(request, 'transactions/send_credentials.html', {'candidates': candidates})
def sourcing(request):
    """Handle the sourcing-request form and email the details to the team.

    On GET (or an invalid POST) the form is (re-)rendered; on a valid POST the
    cleaned fields are emailed and the user is redirected home.
    """
    if request.method == 'POST':
        form = SourcingForm(request.POST)
        if form.is_valid():
            subject = 'Sourcing Request'
            from_email = form.cleaned_data['email_address']
            # FIX: the fields used to be concatenated with no separators or
            # labels, producing one unreadable run-on line in the email body.
            field_names = (
                'name', 'phone_number', 'company_name', 'job_role',
                'engagement_types', 'tech_stack', 'project_description',
                'devs_needed', 'renumeration', 'tech_staff', 'skills_test',
            )
            data = "\n".join(
                "%s: %s" % (field, form.cleaned_data[field]) for field in field_names
            )
            try:
                send_mail(subject, data, from_email, ['<EMAIL>'])
            except BadHeaderError:
                print('invalid error')
            return redirect('frontend:home')
        # FIX: an invalid POST used to be silently redirected home; fall
        # through and re-render the bound form so the errors are shown.
    else:
        form = SourcingForm()
    return render(request, 'transactions/sourcing.html', {'form': form})
| StarcoderdataPython |
376128 | import argparse
from download.download import VideoDownloader
from utils import (
reset_default_params,
set_cookies_path,
set_media_directory,
update_params,
)
def str_to_bool(v):
    """Parse an argparse flag value into a bool.

    Real bools pass through unchanged; otherwise 'true'/'t'/'1' map to True
    and 'false'/'f'/'0' map to False (case-insensitive). Anything else raises
    argparse.ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('true', 't', '1'):
        return True
    if lowered in ('false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
    """CLI entry point: `update` stores downloader params, `reset` restores
    defaults, and `download` fetches the configured video dataset."""
    parser = argparse.ArgumentParser(
        description="video dataset downloader",
    )
    subparsers = parser.add_subparsers(
        dest='subparser_name',
        help='sub-command help',
    )
    # --- `update` sub-command: persist downloader configuration -----------
    update_parser = subparsers.add_parser(
        'update',
        help="updates the downloader parameters",
    )
    update_parser.add_argument(
        '--vid_dir', type=str, default='/PATH/TO/VID/DIR',
        help="directory where videos will be downloaded to",
    )
    update_parser.add_argument(
        '--dataset', type=str, default='kinetics400',
        help=(
            "one of 'kinetics400', 'kinetics600', 'kinetics700', "
            "'kinetics700_2020, 'HACS', 'actnet100', 'actnet200', or 'sports1M'"
        ),
    )
    update_parser.add_argument(
        '--cookies', type=str, default='/PATH/TO/COOKIES/DIR',
        help="cookies to pass to youtube-dl",
    )
    update_parser.add_argument(
        '--conda_path', type=str, default='none',
        help="absolute path to conda package (if running a conda env)",
    )
    update_parser.add_argument(
        '--conda_env', type=str, default='none',
        help="name of your environment (if running a conda env)",
    )
    update_parser.add_argument(
        '--retriever', type=str, default='streamer',
        help="one of 'loader' or 'streamer' (original or processed video)",
    )
    update_parser.add_argument(
        '--num_jobs', type=int, default=5,
        help="number of simultaneous jobs to run with GNU Parallel",
    )
    update_parser.add_argument(
        '--toy_set', type=str_to_bool, default=False,
        help="whether to use a smaller dataset to experiment with or not",
    )
    update_parser.add_argument(
        '--toy_samples', type=int, default=100,
        help="number of samples for toy dataset",
    )
    update_parser.add_argument(
        '--download_batch', type=int, default=20,
        help="batch of videos to download on each iteration",
    )
    update_parser.add_argument(
        '--download_fps', type=int, default=30,
        help="frame rate to download each video with",
    )
    update_parser.add_argument(
        '--time_interval', type=int, default=10,
        help="length of video to be downloaded (in seconds)",
    )
    update_parser.add_argument(
        '--shorter_edge', type=int, default=320,
        help="length of frame's shorter side to download at",
    )
    update_parser.add_argument(
        '--use_sampler', type=str_to_bool, default=False,
        help="whether to use a clip sampler or not",
    )
    update_parser.add_argument(
        '--max_duration', type=int, default=300,
        help="max length of video to sample from",
    )
    update_parser.add_argument(
        '--num_samples', type=int, default=10,
        help="total number of sampled clips",
    )
    update_parser.add_argument(
        '--sampling', type=str, default='random',
        help="one of 'random' or 'uniform' sampling",
    )
    update_parser.add_argument(
        '--sample_duration', type=int, default=1,
        help="duration of each sampled clip",
    )
    # --- `reset` sub-command: restore a named set of defaults -------------
    reset_parser = subparsers.add_parser(
        'reset',
        help="resets params to default values",
    )
    reset_parser.add_argument(
        '--defaults', type=str, default='base',
        help="set of default params",
    )
    # --- `download` sub-command: run the downloader ------------------------
    download_parser = subparsers.add_parser(
        'download',
        help="downloads the video dataset",
    )
    download_parser.add_argument(
        '--setup', action='store_true',
        help="whether to run setup script or not (run only once for full set)",
    )
    # Dispatch on the chosen sub-command.
    args = vars(parser.parse_args())
    if args['subparser_name'] == 'reset':
        reset_default_params(args['defaults'])
        print('Params reset to default values.')
    elif args['subparser_name'] == 'update':
        # vid_dir/cookies are handled by dedicated setters; the remaining
        # key/value pairs are stored as downloader params.
        args.pop('subparser_name')
        set_media_directory(args.pop('vid_dir'))
        set_cookies_path(args.pop('cookies'))
        update_params(args)
        print('Update complete.')
    elif args['subparser_name'] == 'download':
        downloader = VideoDownloader()
        if args['setup']:
            downloader.get_data()
            downloader.setup()
        downloader.download_videos()
# Script entry point: parse CLI args and dispatch to the chosen sub-command.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4900947 | <gh_stars>0
from floodsystem.geo import rivers_by_station_number
from floodsystem.stationdata import build_station_list
from floodsystem.station import MonitoringStation
"""stations = build_station_list()
print(rivers_by_station_number(stations,10))"""
def run():
    """Print the 9 rivers monitored by the greatest number of stations."""
    # Build the full station list, then rank rivers by station count.
    # (The original used bare string literals as pseudo-comments; they were
    # no-op expression statements and are replaced with real comments.)
    stations = build_station_list()
    print('9 rivers with the most stations')
    print(rivers_by_station_number(stations, 9))
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
5123650 | """
A wrapper around Requests to make Restful API calls
"""
from urllib.error import HTTPError
from urllib.error import URLError
import requests
class Base_API:
    """Base class for Requests-based REST clients.

    Each HTTP verb returns a dict with keys:
      'response'      -- HTTP status code,
      'text'          -- raw response body,
      'json_response' -- decoded JSON (or raw text for JSON/HTML bodies),
      'error'         -- the caught exception, or {} on success.

    NOTE(review): the except clauses catch urllib's HTTPError/URLError, but
    ``requests`` raises requests.exceptions.* instead -- confirm which
    exception types callers actually expect here.
    """

    def __init__(self, url=None):
        # Keep the base URL for subclasses; each verb still receives a full URL.
        self.url = url
        # BUG FIX: ``request_obj`` was never created, so every verb raised
        # AttributeError.  A Session also reuses connections between calls.
        self.request_obj = requests.Session()

    def json_or_text(self, response):
        """Return response.json(); fall back to response.text for JSON/HTML
        content types, and None for anything else."""
        try:
            return response.json()
        except Exception:
            # BUG FIX: the old test ``== 'application/json' or 'text/html'``
            # was always true because the non-empty string is truthy.
            content_type = response.headers.get("Content-Type", "")
            if "application/json" in content_type or "text/html" in content_type:
                return response.text
            return None

    def _print_and_raise(self, e, verb, url, payload=None):
        """Print a diagnostic message for *e*, then re-raise it."""
        if isinstance(e, HTTPError):
            error_message = e.read()
            if payload is None:
                print("\n******\n%s Error: %s %s" % (verb, url, error_message))
            else:
                print("\n******\n%s Error: %s %s %s" %
                      (verb, url, error_message, str(payload)))
        elif e.reason.args[0] == 10061:
            print("\033[1;31m\nURL open error: Please check if the API server is \
up or there is any other issue accessing the URL\033[1;m")
        else:
            print(e.reason.args)
        # bubble error back up after printing relevant details
        raise e

    def _result(self, response, json_response, error):
        """Uniform result envelope shared by all verbs."""
        return {'response': response.status_code, 'text': response.text,
                'json_response': json_response, 'error': error}

    def get(self, url, headers=None):
        "GET request."
        try:
            response = self.request_obj.get(url=url, headers=headers)
            json_response = self.json_or_text(response)
        except (HTTPError, URLError) as e:
            self._print_and_raise(e, "GET", url)
        return self._result(response, json_response, {})

    def post(self, url, params=None, data=None, json=None, headers=None):
        "POST request.  ``params`` is kept for interface compatibility only."
        try:
            response = self.request_obj.post(url, data=data, json=json,
                                             headers=headers)
            # BUG FIX: the decoded body was previously computed but discarded,
            # so 'json_response' was always None for POST.
            json_response = self.json_or_text(response)
        except (HTTPError, URLError) as e:
            # BUG FIX: isinstance(e, HTTPError, URLError) raised TypeError.
            self._print_and_raise(e, "POST", url, payload=json)
        return self._result(response, json_response, {})

    def delete(self, url, headers=None):
        "DELETE request."
        try:
            response = self.request_obj.delete(url, headers=headers)
            json_response = self.json_or_text(response)
        except (HTTPError, URLError) as e:
            # BUG FIX: the old message said "PUT" and referenced an undefined
            # ``data`` variable (NameError raised while handling the error).
            self._print_and_raise(e, "DELETE", url)
        return self._result(response, json_response, {})

    def put(self, url, json=None, headers=None):
        "PUT request."
        try:
            response = self.request_obj.put(url, json=json, headers=headers)
            json_response = self.json_or_text(response)
        except (HTTPError, URLError) as e:
            # BUG FIX: the old message referenced an undefined ``data``.
            self._print_and_raise(e, "PUT", url, payload=json)
        return self._result(response, json_response, {})
| StarcoderdataPython |
12803768 | <reponame>shencebebetterme/pyTN
#!/usr/bin/python3
""" A module that generates and stores various results of different
coarse-graining algorithms for different lattice models. The point is
that when a tensor is requested, the module checks whether it already is
stored on the hard drive, and returns it if it is. If not it generates it,
stores it on the hard drive and returns it.
"""
import numpy as np
import toolbox
import initialtensors
import os
import argparse
from tensorstorer import write_tensor_file, read_tensor_file
from timer import Timer
from matplotlib import pyplot
from TNR import tnr_step
from scon import scon
from pathfinder import PathFinder
from custom_parser import parse_argv
filename = os.path.basename(__file__)
global_timer = Timer()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Functions for getting different tensors. These are the user interface.
def get_general(prefix, generator, pars, **kwargs):
    """Return the requested data from the on-disk cache, or generate it.

    *prefix* names the kind of data, *generator* builds it on a cache miss
    (and is itself responsible for writing it to disk).
    """
    pars = get_pars(pars, **kwargs)
    id_pars, pars = get_id_pars_and_set_default_pars(pars)
    try:
        # A stored result with matching identifying parameters wins.
        return read_tensor_file(prefix=prefix, pars=id_pars,
                                filename=filename)
    except RuntimeError:
        # Nothing on disk yet: generate it fresh.
        return generator(pars, id_pars)
def get_tensor(pars=None, infotime=True, **kwargs):
    """Return the coarse-grained tensor and its accumulated log factor."""
    def _generate(p, i):
        return generate_tensor(p, i, infotime=infotime)[0:2]
    T, log_fact = get_general("tensor", _generate, pars, **kwargs)
    return T, log_fact
def get_normalized_tensor(pars=None, infotime=True, **kwargs):
    """Return the tensor normalized so the extensive part of log(Z) is divided out.

    Note: *infotime* is accepted for interface symmetry with the other
    getters but is not forwarded to the generator.
    """
    return get_general("tensor_normalized", generate_normalized_tensor,
                       pars, **kwargs)
def get_gauges(pars=None, infotime=True, **kwargs):
    """Return the gauge tensors produced by the coarse-graining step."""
    kwargs["return_gauges"] = True
    def _generate(p, i):
        # The gauges are the last element of generate_tensor's return value.
        return generate_tensor(p, i, infotime=infotime)[-1]
    return get_general("gauges", _generate, pars, **kwargs)
def get_pieces(pars=None, infotime=True, **kwargs):
    """Return the intermediate 'pieces' tensors from the coarse-graining step."""
    kwargs["return_pieces"] = True
    def _generate(p, i):
        # The pieces sit at index 2 of generate_tensor's return value.
        return generate_tensor(p, i, infotime=infotime)[2]
    return get_general("pieces", _generate, pars, **kwargs)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Functions for modifying the given parameters to the needed form and
# sorting extra parameters from the ones that are important for
# identifying the tensors.
def get_pars(pars, **kwargs):
    """Merge keyword overrides into *pars* without mutating the original dict.

    If *pars* is None the keyword arguments alone form the result.
    """
    if pars is None:
        return kwargs
    merged = dict(pars)
    merged.update(kwargs)
    return merged
# Parameters that need to be always given and will be used for
# identifying files.
global_mandatory_id_pars = {"dtype", "iter_count",
                            "initial2x2", "initial4x4", "symmetry_tensors",
                            "model"}
# Parameters that need to be always given, depend on the model and will
# be used for identifying files.  Keyed by the lower-cased model name.
model_id_pars = {}
model_id_pars["ising"] = {"J", "H", "beta"}
model_id_pars["potts3"] = {"J", "beta"}
# Parameters that need to be always given, depend on the algorithm and
# will be used for identifying files.  Keyed by the lower-cased algorithm name.
algorithm_mandatory_id_pars = {}
algorithm_mandatory_id_pars["tnr"] = {"chis_tnr", "chis_trg", "opt_eps_conv",
                                      "horz_refl", "opt_max_iter",
                                      "opt_iters_tens"}
algorithm_mandatory_id_pars["trg"] = {"chis", "J", "H"}
# Parameters that may be given, depend on the algorithm and will be used
# for identifying files. If not given, the default (the second element
# in the tuple) will be used.  Each entry is a (name, default) tuple.
algorithm_optional_id_pars = {}
algorithm_optional_id_pars["tnr"] = {("A_chis", None),
                                     ("A_eps", 0),
                                     ("opt_eps_chi", 0),
                                     ("fix_gauges", False),
                                     ("reuse_initial", False)}
algorithm_optional_id_pars["trg"] = {("eps", 0)}
# Parameters that may be given and will NOT be used for identifying
# files. If not given, the default (the second element in the tuple)
# will be used.
optional_other_pars = {("save_errors", False),
                       ("print_errors", 0),
                       ("return_gauges", False),
                       ("return_pieces", False),
                       ("save_fit_plot", False)}
def get_id_pars_and_set_default_pars(pars):
    """ Make a copy of pars and populate with defaults as needed. Also
    copy from pars to id_pars the parameters by which different tensors
    should be identified, also using defaults for some of the values as
    needed.

    Returns (id_pars, new_pars): the identifying subset and the full
    parameter dict with the optional_other_pars defaults filled in.
    Raises RuntimeError if a mandatory identifying parameter is missing.
    """
    new_pars = pars.copy()
    id_pars = {}
    mandatory_id_pars = set()
    optional_id_pars = set()
    # The following are necessary regardless of algorithm and model.
    model_name = pars["model"].strip().lower()
    mandatory_id_pars |= global_mandatory_id_pars.copy()
    mandatory_id_pars |= model_id_pars[model_name]
    # Algorithm-specific parameters only matter once coarse-graining has
    # actually been applied (iter_count > 0); the bare initial tensor is
    # algorithm-independent.
    if pars["iter_count"] > 0:
        algorithm_name = pars["algorithm"].strip().lower()
        mandatory_id_pars.add("algorithm")
        mandatory_id_pars |= algorithm_mandatory_id_pars[algorithm_name]
        optional_id_pars |= algorithm_optional_id_pars[algorithm_name]
    # Mandatory parameters must be present in pars.
    for k in mandatory_id_pars:
        if k in pars:
            id_pars[k] = pars[k]
        else:
            raise RuntimeError("The required parameter %s was not given."%k)
    # Optional identifying parameters fall back to their declared defaults.
    for t in optional_id_pars:
        k = t[0]
        d = t[1]
        id_pars[k] = pars.get(k, d)
    # Non-identifying defaults are written into the returned copy of pars.
    for t in optional_other_pars:
        new_pars.setdefault(*t)
    return id_pars, new_pars
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Functions for generating tensors.
def _reflection_swap(T, axis, n_legs):
    """Build the gauge matrix that reverses the order of *n_legs* copies of
    T's index on *axis*.

    Used for the reflection gauges G_vh / G_hv when the initial tensor is
    pre-blocked: the gauge must match the index fusion performed by
    toolbox.contract2x2 below.
    """
    dim = T.shape[axis]
    try:
        qim = T.qhape[axis]
    except TypeError:
        # Plain (non-symmetric) tensors have qhape == None.
        qim = None
    eye = type(T).eye(dim=dim, qim=qim)
    if n_legs == 4:
        # NOTE(review): the original used -9 where -8 would be expected; scon
        # orders free legs by index value, so the result is unchanged -- kept.
        u = scon((eye, eye, eye, eye), ([-1,-5], [-2,-6], [-3,-7], [-4,-9]))
        swap = u.transpose((0,1,2,3,7,6,5,4))
        swap = swap.join_indices([0,1,2,3], [4,5,6,7], dirs=[1,1])
    else:
        u = scon((eye, eye), ([-1,-3], [-2,-4]))
        swap = u.transpose((0,1,3,2))
        swap = swap.join_indices([0,1], [2,3], dirs=[1,1])
    return swap


def generate_first_tensor(pars):
    """Build the iteration-0 tensor: the bare model tensor, optionally
    pre-blocked 2x2 or 4x4, together with the matching reflection gauges.

    Returns (T, log_fact, pieces, gauges); pieces is always empty here.
    The four near-identical gauge-construction stanzas of the original were
    folded into _reflection_swap; behavior is unchanged.
    """
    T = initialtensors.get_initial_tensor(pars)
    log_fact = 0
    gauges = {}
    pieces = {}
    if pars["initial4x4"]:
        # Gauges are built from the *unblocked* tensor's leg dimensions,
        # before the two 2x2 contractions fuse four legs into one.
        gauges["G_vh"] = _reflection_swap(T, 0, 4)
        gauges["G_hv"] = _reflection_swap(T, 1, 4)
        T = toolbox.contract2x2(T)
        T = toolbox.contract2x2(T)
    elif pars["initial2x2"]:
        gauges["G_vh"] = _reflection_swap(T, 0, 2)
        gauges["G_hv"] = _reflection_swap(T, 1, 2)
        T = toolbox.contract2x2(T)
    return T, log_fact, pieces, gauges
def generate_next_tensor(pars):
    """Coarse-grain the tensor of the previous iteration by one step.

    Returns (T, log_fact, pieces, gauges).  For "trg" pieces and gauges
    are None.  NOTE(review): trg_step is not among this module's imports
    (only tnr_step is), so the "trg" branch would raise NameError --
    confirm.  An algorithm name other than "tnr"/"trg" also leaves
    pieces/gauges unbound and raises NameError at the return.
    """
    algo_name = pars["algorithm"].strip().lower()
    # Get the tensor from the previous step.
    T, log_fact = get_tensor(pars, iter_count=pars["iter_count"]-1,
                             infotime=False)
    print('\n / Coarse-graining, iter_count = #%i: / '%(pars["iter_count"]))
    if algo_name == "tnr":
        gauges = {}
        pieces = {}
        # Reuse the previous step's gauges/pieces when the corresponding
        # options request it.
        if pars["horz_refl"]:
            gauges = get_gauges(pars, iter_count=pars["iter_count"]-1,
                                infotime=False)
        if pars["reuse_initial"] or pars["fix_gauges"]:
            pieces = get_pieces(pars, iter_count=pars["iter_count"]-1,
                                infotime=False)
        tnr_result = tnr_step(T, pars=pars, gauges=gauges, pieces=pieces,
                              log_fact=log_fact)
        T, log_fact = tnr_result[0:2]
        if pars["return_pieces"]:
            pieces = tnr_result[2]
        if pars["return_gauges"]:
            gauges = tnr_result[-1]
    elif algo_name == "trg":
        pieces = None
        gauges = None
        T, log_fact = trg_step(T, pars=pars, log_fact=log_fact)
    return T, log_fact, pieces, gauges
def generate_tensor(pars, id_pars, infotime=True):
    """Generate the coarse-grained tensor for pars["iter_count"], write it
    (and optional TNR by-products) to disk, and return it.

    Returns (T, log_fact), extended with pieces and/or gauges when the
    corresponding "return_*" parameters are set and the algorithm is TNR.
    """
    if infotime:
        # - Infoprint and start timer -
        print("\n" + ("="*70) + "\n")
        print("Generating coarse-grained tensor with the following "
              "parameters:")
        for k,v in sorted(pars.items()):
            print("%s = %s"%(k, v))
        global_timer.start()
    # BUG FIX: algo_name was previously assigned only in the else branch
    # below, so the "tnr" checks further down raised NameError whenever
    # iter_count == 0.
    algo_name = pars["algorithm"].strip().lower() if "algorithm" in pars else None
    if pars["iter_count"] == 0:
        T, log_fact, pieces, gauges = generate_first_tensor(pars)
    else:
        T, log_fact, pieces, gauges = generate_next_tensor(pars)
    # Save to file(s)
    pather = PathFinder(filename, id_pars)
    write_tensor_file(data=(T, log_fact), prefix="tensor", pars=id_pars,
                      pather=pather)
    if algo_name == "tnr" and pars["return_pieces"]:
        write_tensor_file(data=pieces, prefix="pieces", pars=id_pars,
                          pather=pather)
    if algo_name == "tnr" and pars["return_gauges"]:
        write_tensor_file(data=gauges, prefix="gauges", pars=id_pars,
                          pather=pather)
    if infotime:
        print("\nDone generating the coarse-grained tensor.")
        global_timer.print_elapsed()
        global_timer.stop()
        print()
    return_value = (T, log_fact)
    if algo_name == "tnr":
        if pars["return_pieces"]:
            return_value += (pieces,)
        if pars["return_gauges"]:
            return_value += (gauges,)
    return return_value
def generate_normalized_tensor(pars, id_pars):
    """Normalize the coarse-grained tensors so the extensive part of log(Z)
    is divided out, store all of them, and return the requested one.

    A linear fit log(Z) ~ A*N + B over the iterations (discarding the first
    few, which are far from the fixed point) determines the normalization.
    """
    # - Infoprint and start timer -
    print("\n" + ("="*70) + "\n")
    print("Generating the normalized, coarse-grained tensor with the "
          "following parameters:")
    for k,v in sorted(pars.items()):
        print("%s = %s"%(k, v))
    global_timer.start()
    algo_name = pars["algorithm"].strip().lower()
    # Number of tensors to use to fix the normalization
    n = max(8, pars["iter_count"] + 4)
    # Number of tensors from the beginning to discard
    n_discard = max(min(pars["iter_count"]-3, 3), 0)
    tensors_and_log_facts = []
    for i in range(n+1):
        T, log_fact = get_tensor(pars=pars, iter_count=i, infotime=False)
        tensors_and_log_facts.append((T, log_fact))
    tensors, log_facts = zip(*tensors_and_log_facts)
    # Z at each iteration from tracing the tensor; the stored log factors
    # re-add the magnitude scaled out during coarse-graining.
    Zs = np.array([scon(T, [1,2,1,2]).norm() for T in tensors])
    log_Zs = np.log(Zs)
    log_Zs += np.array(log_facts)
    # Lattice sites represented at iteration i: TNR blocks by a factor of 4
    # per step, TRG by a factor of 2; pre-blocking multiplies further.
    if algo_name == "tnr":
        Ns = np.array([2*4**i for i in range(n+1)])
    elif algo_name == "trg":
        Ns = np.array([2*2**i for i in range(n+1)])
    if pars["initial4x4"]:
        Ns *= 16
    elif pars["initial2x2"]:
        Ns *= 4
    # BUG FIX: the slice previously read pars["n_discard"], which raised
    # KeyError (and silently ignored the locally computed n_discard above).
    A, B = np.polyfit(Ns[n_discard:], log_Zs[n_discard:], 1)
    tensors = [T / np.exp(N*A - log_fact)
               for T, N, log_fact in zip(tensors, Ns, log_facts)]
    if pars["print_errors"]:
        print("Fit when normalizing Ts: %.3e * N + %.3e"%(A,B))
    if pars["save_fit_plot"]:
        pyplot.plot(Ns, log_Zs, marker='*', linestyle='')
        pyplot.plot(Ns, A*Ns+B)
        pather = PathFinder(filename, id_pars, ignore_pars=['iter_count'])
        path = pather.generate_path("Normalization_fit", extension='.pdf')
        os.makedirs(os.path.dirname(path), exist_ok=True)
        pyplot.savefig(path)
        pyplot.clf()
    for i, T in enumerate(tensors):
        write_tensor_file(data=T, prefix="tensor_normalized", pars=id_pars,
                          filename=filename, iter_count=i)
    print("Returning normalized tensor.")
    global_timer.print_elapsed()
    global_timer.stop()
    return tensors[pars["iter_count"]]
| StarcoderdataPython |
6407937 | from __future__ import annotations
from datetime import datetime
from typing import List, Optional
from config import table, LONGITUDE, LATITUDE, logger, DATETIME_FORMAT, LIMIT_OUTPUT, APPID, DYNAMODB_TABLE
from models import OpenWeatherInsight
from openweather_api import OneCallAPI
def run(event, context):
    """Lambda entry point: pull the next-48h forecast and upsert it into DynamoDB.

    *event* and *context* are the standard AWS Lambda arguments; neither is
    used.  Individual item failures are logged and counted but do not abort
    the batch.
    """
    api = OneCallAPI(latitude=LATITUDE, longitude=LONGITUDE)
    insights: List[OpenWeatherInsight] = api.extract_next_48_hours(
        output_limit=LIMIT_OUTPUT
    )
    errors = 0
    for insight in insights:
        try:
            logger.info(insight.put(table=table))
        except Exception as e:
            # Keep going: one failed item must not abort the whole upload.
            logger.exception(e)
            errors += 1
    logger.info(f"\nerrors = {errors} & successfully uploaded {len(insights)} items")

    def _fmt(insight: Optional[OpenWeatherInsight]) -> Optional[str]:
        # Format an insight's epoch timestamp, or None when absent.
        if insight is None:
            return None
        return datetime.fromtimestamp(insight.dt).strftime(DATETIME_FORMAT)

    # The first and last items bound the extracted window.  BUG FIX: the
    # original rebound date_from/date_to through three types and, when
    # exactly one insight was returned, fell back to a *string* for date_to
    # and crashed on ``date_to.dt`` (str has no attribute 'dt').
    first = insights[0] if insights else None
    last = insights[-1] if insights else None
    logger.info(f"extracted from {_fmt(first)} to {_fmt(last)}")
| StarcoderdataPython |
1793809 | # Generated by Django 2.2.6 on 2019-12-18 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Exam.time an IntegerField defaulting to 20."""

    dependencies = [
        ('a1test', '0006_questiontype_kinds'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exam',
            name='time',
            field=models.IntegerField(default=20),
        ),
    ]
| StarcoderdataPython |
3222831 | <filename>crowdsourcing/viewsets/task.py
from crowdsourcing.serializers.task import *
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from django.shortcuts import get_object_or_404
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.models import Task, TaskWorker, TaskWorkerResult, WorkerRequesterRating
from django.utils import timezone
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated
from crowdsourcing.permissions.task import HasExceededReservedLimit
from crowdsourcing.serializers.rating import WorkerRequesterRatingSerializer
from crowdsourcing.experimental_models import SubModule
from datetime import timedelta
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD for tasks plus comment and per-module monitoring endpoints."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer

    @detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
    def update_task(self, request, id=None):
        """Validate and apply an update to the addressed task."""
        task_serializer = TaskSerializer(data=request.data)
        task = self.get_object()
        if task_serializer.is_valid():
            task_serializer.update(task, task_serializer.validated_data)
            return Response({'status': 'updated task'})
        else:
            return Response(task_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """List the tasks of the module given by ?module=; empty list on failure."""
        try:
            module = request.query_params.get('module')
            task = Task.objects.filter(module=module)
            task_serialized = TaskSerializer(task, many=True)
            return Response(task_serialized.data)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
            # and KeyboardInterrupt.  The empty-list fallback is kept as-is.
            return Response([])

    def destroy(self, request, *args, **kwargs):
        """Delete the task via the serializer's delete hook."""
        task_serializer = TaskSerializer()
        task = self.get_object()
        task_serializer.delete(task)
        return Response({'status': 'deleted task'})

    @detail_route(methods=['get'])
    def retrieve_with_data(self, request, *args, **kwargs):
        """Return a task plus the requesting worker's rating of its requester.

        NOTE(review): ``models`` is not imported by name in the visible
        imports; it presumably comes from the wildcard serializer import --
        confirm.
        """
        task = self.get_object()
        serializer = TaskSerializer(instance=task, fields=('id', 'task_template', 'module_data', 'status', 'has_comments'))
        rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
                                                             target=task.module.owner.profile.id,
                                                             origin_type='worker', module=task.module.id)
        requester_alias = task.module.owner.alias
        module = task.module.id
        target = task.module.owner.profile.id
        if rating.count() != 0:
            rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
                                                                fields=('id', 'weight'))
            return Response({'data': serializer.data,
                             'rating': rating_serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)
        else:
            return Response({'data': serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def list_by_module(self, request, **kwargs):
        """Return all tasks of a module with monitoring fields.

        NOTE(review): ``tasks[0]`` raises IndexError when the module has no
        tasks -- confirm callers guarantee a non-empty module.
        """
        tasks = Task.objects.filter(module=request.query_params.get('module_id'))
        task_serializer = TaskSerializer(instance=tasks, many=True, fields=('id', 'status',
                                                                            'template_items_monitoring',
                                                                            'task_workers_monitoring',
                                                                            'has_comments', 'comments'))
        response_data = {
            'project_name': tasks[0].module.project.name,
            'project_id': tasks[0].module.project.id,
            'module_name': tasks[0].module.name,
            'module_id': tasks[0].module.id,
            'tasks': task_serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def sample_by_submodule(self, request, **kwargs):
        """Return sampled task results for a submodule once its embargo has passed.

        An unused local copy of hours_before_results was removed.
        """
        submodule = SubModule.objects.get(fake_module_id=request.query_params.get('fake_module_id'))
        # Results are only exposed hours_before_results hours after creation.
        if submodule.created_timestamp + timedelta(hours=submodule.hours_before_results) <= timezone.now():
            results_per_round = submodule.results_per_round
            round_exp = submodule.round_exp
            # Sample fresh only when no task workers exist yet.
            sample = len(submodule.taskworkers) == 0
            pool = submodule.owner.pool
            tasks = Task.objects.filter(module=submodule.origin_module.id)
            task_serializer = TaskSerializer(instance=tasks, many=True,
                                             context={'requester': request.user.userprofile.id, 'submodule': submodule.id,
                                                      'round_exp': round_exp, 'results_per_round': results_per_round,
                                                      'sample': sample, 'pool': pool},
                                             fields=('id', 'status', 'template_items_monitoring', 'has_comments',
                                                     'comments', 'task_workers_sampled'))
            # Present sampled workers under the key the monitoring UI expects.
            for task in task_serializer.data:
                task['task_workers_monitoring'] = task['task_workers_sampled']
            response_data = {
                'project_name': tasks[0].module.project.name,
                'project_id': tasks[0].module.project.id,
                'module_name': tasks[0].module.name,
                'module_id': tasks[0].module.id,
                'tasks': task_serializer.data
            }
            return Response(response_data, status.HTTP_200_OK)
        else:
            return Response([], status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def list_comments(self, request, **kwargs):
        """List all comments attached to the addressed task."""
        comments = models.TaskComment.objects.filter(task=kwargs['pk'])
        serializer = TaskCommentSerializer(instance=comments, many=True, fields=('comment', 'id',))
        response_data = {
            'task': kwargs['pk'],
            'comments': serializer.data
        }
        return Response(response_data, status.HTTP_200_OK)

    @detail_route(methods=['post'])
    def post_comment(self, request, **kwargs):
        """Attach a comment to the task.

        NOTE(review): invalid input currently returns {} with HTTP 200 --
        kept as-is for compatibility, but consider returning 400.
        """
        serializer = TaskCommentSerializer(data=request.data)
        task_comment_data = {}
        if serializer.is_valid():
            comment = serializer.create(task=kwargs['pk'], sender=request.user.userprofile)
            task_comment_data = TaskCommentSerializer(comment, fields=('id', 'comment',)).data
        return Response(task_comment_data, status.HTTP_200_OK)
class TaskWorkerViewSet(viewsets.ModelViewSet):
    """Endpoints for a worker's assignment to a task (reserve, skip, pay)."""
    queryset = TaskWorker.objects.all()
    serializer_class = TaskWorkerSerializer
    permission_classes = [IsAuthenticated, HasExceededReservedLimit]
    lookup_field = 'task__id'

    def create(self, request, *args, **kwargs):
        """Reserve a task in the given module for the requesting worker."""
        serializer = TaskWorkerSerializer(data=request.data)
        if serializer.is_valid():
            instance, http_status = serializer.create(worker=request.user.userprofile.worker,
                                                      module=request.data.get('module', None))
            serialized_data = {}
            if http_status == 200:
                serialized_data = TaskWorkerSerializer(instance=instance).data
            return Response(serialized_data, http_status)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Skip the current assignment (status 6) and reserve the next task."""
        serializer = TaskWorkerSerializer()
        obj = self.queryset.get(task=kwargs['task__id'], worker=request.user.userprofile.worker.id)
        instance, http_status = serializer.create(worker=request.user.userprofile.worker, module=obj.task.module_id)
        # 6 == skipped; the replacement assignment was created above.
        obj.task_status = 6
        obj.save()
        serialized_data = {}
        if http_status == 200:
            serialized_data = TaskWorkerSerializer(instance=instance).data
        return Response(serialized_data, http_status)

    @list_route(methods=['post'])
    def bulk_update_status(self, request, *args, **kwargs):
        """Set task_status on many assignments at once; return the updated rows."""
        task_status = request.data.get('task_status', -1)
        task_workers = TaskWorker.objects.filter(id__in=tuple(request.data.get('task_workers', [])))
        task_workers.update(task_status=task_status, last_updated=timezone.now())
        return Response(TaskWorkerSerializer(instance=task_workers, many=True,
                                             fields=('id', 'task', 'task_status', 'task_worker_results_monitoring',
                                                     'worker_alias', 'updated_delta')).data, status.HTTP_200_OK)

    @list_route(methods=['get'])
    def list_by_status(self, request, *args, **kwargs):
        """Group the requesting worker's assignments by human-readable status."""
        status_map = {1: 'In Progress', 2: 'Submitted', 3: 'Accepted', 4: 'Rejected', 5: 'Returned'}
        response = dict()
        # BUG FIX: dict.iteritems() is Python 2 only; items() works everywhere.
        for key, value in status_map.items():
            task_workers = TaskWorker.objects.filter(worker=request.user.userprofile.worker, task_status=key)
            serializer = TaskWorkerSerializer(instance=task_workers, many=True,
                                              fields=(
                                                  'id', 'task_status', 'task', 'requester_alias', 'module', 'project_name',
                                                  'is_paid', 'last_updated'))
            response[value] = serializer.data
        return Response(response, status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def retrieve_with_data_and_results(self, request, *args, **kwargs):
        """Return an assignment with its template plus the worker->requester rating."""
        task_worker = TaskWorker.objects.get(id=request.query_params['id'])
        serializer = TaskWorkerSerializer(instance=task_worker,
                                          fields=('task', 'task_status', 'task_template', 'has_comments'))
        rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
                                                             target=task_worker.task.module.owner.profile.id,
                                                             origin_type='worker', module=task_worker.task.module.id)
        requester_alias = task_worker.task.module.owner.alias
        module = task_worker.task.module.id
        target = task_worker.task.module.owner.profile.id
        if rating.count() != 0:
            rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
                                                                fields=('id', 'weight'))
            return Response({'data': serializer.data,
                             'rating': rating_serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)
        else:
            return Response({'data': serializer.data,
                             'requester_alias': requester_alias,
                             'module': module,
                             'target': target}, status.HTTP_200_OK)

    @list_route(methods=['post'])
    def drop_saved_tasks(self, request, *args, **kwargs):
        """Mark the given saved tasks as skipped (status 6) for this worker."""
        task_ids = request.data.get('task_ids', [])
        self.queryset.filter(task_id__in=task_ids, worker=request.user.userprofile.worker.id).update(
            task_status=6, last_updated=timezone.now())
        return Response('Success', status.HTTP_200_OK)

    @list_route(methods=['post'])
    def bulk_pay_by_module(self, request, *args, **kwargs):
        """Mark every accepted (3) or rejected (4) assignment in a module as paid."""
        module = request.data.get('module')
        accepted, rejected = 3, 4
        task_workers = TaskWorker.objects.filter(task__module=module).filter(
            Q(task_status=accepted) | Q(task_status=rejected))
        task_workers.update(is_paid=True, last_updated=timezone.now())
        return Response('Success', status.HTTP_200_OK)
class TaskWorkerResultViewSet(viewsets.ModelViewSet):
    """Endpoints for individual form results attached to a task assignment."""
    queryset = TaskWorkerResult.objects.all()
    serializer_class = TaskWorkerResultSerializer
    # permission_classes = [IsOwnerOrReadOnly]

    def update(self, request, *args, **kwargs):
        """Set the status flag of a single result (defaults to 1)."""
        task_worker_result = self.queryset.filter(id=kwargs['pk'])[0]
        # Renamed from ``status``: the old local shadowed the imported DRF
        # ``status`` module.  (An unused serializer instance was also removed.)
        result_status = 1
        if 'status' in request.data:
            result_status = request.data['status']
        task_worker_result.status = result_status
        task_worker_result.save()
        return Response("Success")

    def retrieve(self, request, *args, **kwargs):
        """Return the result owned by the requesting worker.

        NOTE(review): ``request.worker`` is unusual for DRF -- confirm it is
        set by middleware, otherwise this raises AttributeError.
        """
        worker = get_object_or_404(self.queryset, worker=request.worker)
        serializer = TaskWorkerResultSerializer(instance=worker)
        return Response(serializer.data)

    @list_route(methods=['post'], url_path="submit-results")
    def submit_results(self, request, *args, **kwargs):
        """Create or update all results for a task; on submission fetch the next task.

        task_status 1 == in progress (partial data allowed); 2 == submitted.
        """
        task = request.data.get('task', None)
        template_items = request.data.get('template_items', [])
        task_status = request.data.get('task_status', None)
        saved = request.data.get('saved')
        task_worker = TaskWorker.objects.get(worker=request.user.userprofile.worker, task=task)
        task_worker.task_status = task_status
        task_worker.save()
        task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=task_worker.id)
        if task_status == 1:
            # In-progress saves may be partial.
            serializer = TaskWorkerResultSerializer(data=template_items, many=True, partial=True)
        else:
            serializer = TaskWorkerResultSerializer(data=template_items, many=True)
        if serializer.is_valid():
            if task_worker_results.count() != 0:
                serializer.update(task_worker_results, serializer.validated_data)
            else:
                serializer.create(task_worker=task_worker)
            if task_status == 1 or saved:
                return Response('Success', status.HTTP_200_OK)
            elif task_status == 2 and not saved:
                # On submission, immediately reserve the next task in the module.
                task_worker_serializer = TaskWorkerSerializer()
                instance, http_status = task_worker_serializer.create(
                    worker=request.user.userprofile.worker, module=task_worker.task.module_id)
                serialized_data = {}
                if http_status == 200:
                    serialized_data = TaskWorkerSerializer(instance=instance).data
                return Response(serialized_data, http_status)
        else:
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class CurrencyViewSet(viewsets.ModelViewSet):
    """Plain CRUD endpoints for Currency objects (default ModelViewSet behavior)."""
    # NOTE(review): importing inside the class body is unconventional and makes
    # Currency a class attribute; consider moving it to the module imports.
    from crowdsourcing.models import Currency
    queryset = Currency.objects.all()
    serializer_class = CurrencySerializer
| StarcoderdataPython |
6563842 | <gh_stars>0
# coding=utf-8
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Tag(models.Model):
    """A free-form label that can be attached to posts."""
    class Meta:
        app_label = 'blog'
        verbose_name = '标签'
        verbose_name_plural = '标签'
    # Tag display name.
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
class Category(models.Model):
    """A category (directory) that posts are filed under."""
    class Meta:
        app_label = 'blog'
        verbose_name = '分类目录'
        verbose_name_plural = '分类目录'
    # Category display name.
    name = models.CharField(max_length=40)
    def __str__(self):
        return self.name
class Post(models.Model):
    """A blog post written by a site user."""
    class Meta:
        app_label = 'blog'
        verbose_name = '文章'
        verbose_name_plural = '文章'
    # Author of the post
    author = models.ForeignKey(User)
    # Title
    title = models.CharField(max_length=200)
    # Body text
    text = models.TextField()
    # Tags attached to the post
    tags = models.ManyToManyField(Tag)
    # Category the post is filed under
    category = models.ForeignKey(Category)
    # View counter
    click = models.IntegerField(default=0)
    # Creation time
    created_date = models.DateTimeField(default=timezone.now)
    # Publication time (null until published)
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Mark the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to a Post."""
    class Meta:
        app_label = 'blog'
        verbose_name = '评论'
        verbose_name_plural = '评论'
    # Commenter's display name.
    author = models.CharField(max_length=20)
    # Commenter's contact e-mail.
    email = models.EmailField()
    # Comment body.
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    post = models.ForeignKey(Post)
    def __str__(self):
        return '{0}: {1}'.format(self.author, self.post.title)
class Evaluate(models.Model):
    """A numeric rating of a Post, recorded together with the rater's IP."""
    class Meta:
        app_label = 'blog'
        verbose_name = '评分'
        verbose_name_plural = '评分'
    # Rater's IP address.
    ip = models.CharField(max_length=40)
    # Numeric score given to the post.
    evaluate = models.IntegerField()
    post = models.ForeignKey(Post)
    def __str__(self):
        return '{0}: {1}'.format(self.ip, self.evaluate)
class Page(models.Model):
    """A standalone site page (e.g. About), distinct from blog posts."""
    class Meta:
        app_label = 'blog'
        verbose_name = '页面'
        verbose_name_plural = '页面'
    # Author of the page
    author = models.ForeignKey(User)
    # Title
    title = models.CharField(max_length=200)
    # Body text
    text = models.TextField()
    # Display order among pages
    porder = models.IntegerField(default=0)
    # Creation time
    created_date = models.DateTimeField(default=timezone.now)
    # Publication time (null until published)
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Mark the page as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| StarcoderdataPython |
1785249 | from trex.models.project import *
from trex.models.user import *
| StarcoderdataPython |
3517777 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 11:12:29 2019
@author: anna
Import the tilt and splay angle data.
make an histogram and fit the histogram with a gaussian.
use the range of the gaussian to fit the PMF
Compute the Kt and Kc as described in Phys. Chem. Chem. Phys. 2017, 19, 16806.
Additional Arguments: Estimate of apl of the disordered and ordered phase
"""
import numpy as np
from numpy import trapz
from scipy.optimize import curve_fit
import MDAnalysis
import matplotlib.pyplot as plt
import sys
apl_Ld = 0.6 #sys.argv[1]  # area per lipid, disordered (Ld) phase -- presumably nm^2, TODO confirm; commented-out argv suggests this should come from the CLI
apl_Lo = 0.4 #sys.argv[2]  # area per lipid, ordered (Lo) phase -- same units as above
top = 'ANALYSIS/recentered_x.gro'   # topology produced by the recentering step
traj = 'ANALYSIS/recentered_x.xtc'  # matching recentered trajectory
u = MDAnalysis.Universe(top,traj)   # loaded at import time: this module has side effects on import
def _gauss(x, *p):
A, mu, sigma = p
return A * np.exp(-(x - mu)**2 / (2. * sigma**2))
def _FitGaussian(bincenters, pa):
mu0 = np.sum(bincenters * pa) / np.sum(pa)
A0 = np.max(pa)
sigma0 = np.sqrt(np.sum(((bincenters - mu0)**2.0) * pa) / np.sum(pa))
# sigma0 = 0.1
#print(mu0, A0, sigma0)
(A, mu, sigma), v = curve_fit(_gauss, bincenters, pa, [A0, mu0, sigma0])
return A, mu, abs(sigma)
#
# =============================================================================
def _parabole(x, a, b, x0):
return a + (b) * (x-x0)**2.0
def first_quadrant(x):
    """Fold an angle in degrees into [0, 90]: angles >= 90 map to 180 - x."""
    return 180 - x if x >= 90 else x
def _FindIndexOfClosestValue(l, v):
return min(enumerate(l), key=lambda x: abs(x[1] - v))[0]
def _FitParabole(bincenters, fa, fitting_range):
first = _FindIndexOfClosestValue(bincenters, fitting_range[0])
last = _FindIndexOfClosestValue(bincenters, fitting_range[1])
mask = fa != np.inf
a = min(fa)
x0 = bincenters[np.argmin(fa)] #argmin return the indices of minimum value
xm = bincenters[mask][np.argmax(fa[mask])]
fm = max(fa[mask])
b = (fm - a) / (xm - x0)**2.0
r, v = curve_fit(_parabole, bincenters[first:last], fa[
first:last], [a, b, x0])
return r
def splay_modulus( leaflet, angles_in_radians, area_per_lipid, status,nbins=100, Plot=True):
    """Estimate the splay modulus from a distribution of splay angles.

    leaflet           -- label used only in the output file names.
    angles_in_radians -- per-lipid-pair splay angles.
    area_per_lipid    -- presumably nm^2, matching apl_Ld/apl_Lo above -- confirm.
    status            -- "disordered" selects a 35-degree Gaussian fitting
                         cutoff, anything else 20 degrees.
    Returns (K, DeltaK, K_list): the modulus from the tightest fitting range,
    the std over all ranges, and all fitted values (in kBT units).
    """
    # Normalized histogram of the splay angles.
    histo, bins = np.histogram(angles_in_radians, bins= nbins , density=True) #bins=len(angles_first_quadrant)
    bincenters = 0.5 * (bins[1:] + bins[:-1])
    # Fit a Gaussian below the phase-dependent cutoff to find the peak
    # position/width used later to choose the PMF fitting ranges.
    if status == "disordered":
        cutoff = 35
        g_range_sa = np.where(bincenters < np.radians(cutoff))[0]
        A, mu, sigma = _FitGaussian(bincenters[g_range_sa], histo[g_range_sa])
    else:
        cutoff = 20
        g_range_sa = np.where(bincenters < np.radians(cutoff))[0]
        A, mu, sigma = _FitGaussian(bincenters[g_range_sa], histo[g_range_sa])
    #plt.plot(bincenters_Lo, _gauss(bincenters_Lo, Ao, muo, sigmao ))
    # sin(theta) Jacobian, normalized to unit area over the histogram range.
    y=np.sin(bincenters)
    Area=trapz(y, bincenters)
    sin_normalized=y/Area
    #plt.plot(bincenters_Ld, sin_normalized)
    """ normlize the probability with the sin(theta) """
    pa2 = histo / sin_normalized
    """ PMF in KbT units """
    PMF = -np.log(pa2)
    #plt.plot(bincenters, PMF)
    # Several fitting windows (1 to 2 Gaussian sigmas around the peak) give
    # an error estimate on the extracted modulus.
    ranges = [ (max(mu - i * sigma, 0), mu + i * sigma)
              for i in [ 1, 1.25, 1.5, 1.75, 2.0]]
    print ("Using the following ranges to fit the PMF:", ranges)
    res_list = [_FitParabole(bincenters, PMF, fitting_range)
                for fitting_range in ranges]
    # Modulus K = 2*b / (area per lipid), with b the fitted PMF curvature.
    K_list = [(2. * r[1])/ area_per_lipid for r in res_list]
    DeltaK = np.std(K_list)
    # Reported value comes from the tightest (1-sigma) window.
    K = K_list[0]
    if Plot:
        # Three panels: raw histogram + Gaussian, Jacobian-corrected
        # distribution, and the PMF with its parabolic fit.
        fig, ax = plt.subplots(3, 1, sharex=True, sharey=False)
        fig.subplots_adjust(hspace=0.05, wspace=0.05)
        ax[0].fill_between(bincenters, _gauss(bincenters,A, mu, sigma), alpha=0.5)
        ax[0].plot(bincenters, histo)
        xcoords = [mu - sigma, mu, mu + sigma]
        for xc in xcoords:
            ax[0].axvline(x=xc, linestyle='--')
        ax[1].plot(bincenters, pa2,'-')
        ax[2].plot(bincenters, PMF,'-')
        ax[2].plot(bincenters, _parabole(bincenters, res_list[0][0],res_list[0][1], res_list[0][2] ), 'g--', label =r'$k$ = %3.1f $\pm$ %3.1f [$k_BT$]' %(K,DeltaK ))
        ax[2].grid('True')
        plt.xlim(0,np.pi/2)
        plt.legend()
        plt.savefig('ANALYSIS/tilts_local_normals/Splay_modulus_'+ str(leaflet)+ '_' + str(status) +'.png', dpi=300)
        plt.savefig('ANALYSIS/tilts_local_normals/Splay_modulus_'+ str(leaflet)+ '_' + str(status) +'.svg')
    return K, DeltaK, K_list
def tilt_modulus(leaflet, angles_in_radians, status, nbins=100, Plot=True):
    """
    Estimate the tilt modulus (in units of k_B*T / nm^2) for one leaflet/phase.

    It will first fit a gaussian y = A exp[(x-mu)^2/sigma^2] to the distribution
    of tilts to determine the fitting range then used to fit the corresponding
    potential of mean force (PMF).  Different fitting ranges are used to
    estimate the error on the extracted tilt modulus.  More details about this
    procedure can be found in ref. [2]_

    Parameters
    ----------
    leaflet : str
        Leaflet label ('up'/'down'); used only in the output file names.
    angles_in_radians : array-like
        Tilt angles in radians.
    status : str
        Phase label ("ordered"/"disordered"); used in the output file names.
    nbins : int
        Number of histogram bins for the tilt-angle distribution.
    Plot : bool
        If True, save diagnostic figures under ANALYSIS/tilts_local_normals/.

    Returns
    -------
    tuple
        (K, DeltaK, K_list): the modulus from the first fitting range, its
        spread (std over all ranges) and the per-range list of moduli.
    """
    # Fold all angles into the first quadrant, i.e. the range [0, 90] degrees.
    angles_in_degree = np.degrees(angles_in_radians)
    angles_first_quadrant = np.array([first_quadrant(x) for x in angles_in_degree])
    # Distribution of tilt angles (density-normalized histogram, in radians).
    histo, bins = np.histogram(np.radians(angles_first_quadrant), bins=nbins, density=True)
    bincenters = 0.5 * (bins[1:] + bins[:-1])
    # NOTE: the original if/else on `status` had two byte-identical branches
    # (cutoff = 30 in both); the duplicated branch was removed.
    cutoff = 30
    g_range_sa = np.where(bincenters < np.radians(cutoff))[0]
    A, mu, sigma = _FitGaussian(bincenters[g_range_sa], histo[g_range_sa])
    # Normalize the probability by sin(theta) (solid-angle correction).
    y = np.sin(bincenters)
    Area = trapz(y, bincenters)
    sin_normalized = y / Area
    pa2 = histo / sin_normalized
    # PMF in k_B*T units.
    PMF = -np.log(pa2)
    # Fit the PMF over several widths around the gaussian peak so the spread
    # of the fitted moduli gives an error estimate.
    ranges = [(max(mu - i * sigma, 0), mu + i * sigma)
              for i in [1, 1.25, 1.5, 1.75, 2.0]]
    print("Using the following ranges to fit the PMF:", ranges)
    res_list = [_FitParabole(bincenters, PMF, fitting_range)
                for fitting_range in ranges]
    # k_t = 2 * (quadratic coefficient of the fitted parabola).
    K_list = [(2. * r[1]) for r in res_list]
    DeltaK = np.std(K_list)
    K = K_list[0]
    if Plot:
        fig, ax = plt.subplots(3, 1, sharex=True, sharey=False)
        fig.subplots_adjust(hspace=0.05, wspace=0.05)
        ax[0].fill_between(bincenters, _gauss(bincenters, A, mu, sigma), alpha=0.5)
        ax[0].plot(bincenters, histo)
        xcoords = [mu - sigma, mu, mu + sigma]
        for xc in xcoords:
            ax[0].axvline(x=xc, linestyle='--')
        ax[1].plot(bincenters, pa2, '-')
        ax[2].plot(bincenters, PMF, '-')
        ax[2].plot(bincenters, _parabole(bincenters, res_list[0][0], res_list[0][1], res_list[0][2]), 'g--', label=r'$k_t$ = %3.1f $\pm$ %3.1f [$k_BT/ nm^2$]' % (K, DeltaK))
        # grid() takes a real boolean; the original passed the string 'True'
        # (which only worked because any non-empty string is truthy).
        ax[2].grid(True)
        plt.xlim(0, np.pi / 2)
        plt.legend()
        plt.savefig('ANALYSIS/tilts_local_normals/Tilt_modulus_' + str(leaflet) + '_' + str(status) + '.png', dpi=300)
        plt.savefig('ANALYSIS/tilts_local_normals/Tilt_modulus_' + str(leaflet) + '_' + str(status) + '.svg')
    return K, DeltaK, K_list
##======== better using the arctan2 method ==================================##
def unit_vector(vector):
    """Return *vector* scaled to unit length (Euclidean norm)."""
    length = np.linalg.norm(vector)
    return vector / length
def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'::

            >>> angle_between((1, 0, 0), (0, 1, 0))
            1.5707963267948966
            >>> angle_between((1, 0, 0), (1, 0, 0))
            0.0
            >>> angle_between((1, 0, 0), (-1, 0, 0))
            3.141592653589793
    """
    # Normalize both vectors (the former unit_vector helper, inlined), then
    # take the arc-cosine of their dot product, clipped against tiny
    # floating-point excursions outside [-1, 1].
    u = v1 / np.linalg.norm(v1)
    w = v2 / np.linalg.norm(v2)
    cosine = np.clip(np.dot(u, w), -1.0, 1.0)
    return np.arccos(cosine)
import numpy.linalg as la
def compute_angle(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'
    The sign of the angle is dependent on the order of v1 and v2
    so acos(norm(dot(v1, v2))) does not work and atan2 has to be used, see:
    https://stackoverflow.com/questions/21483999/using-atan2-to-find-angle-between-two-vectors
    """
    # atan2(|v1 x v2|, v1 . v2) stays numerically stable even for nearly
    # parallel or anti-parallel vectors.
    dot_product = np.dot(v1, v2)
    cross_magnitude = np.linalg.norm(np.cross(v1, v2))
    return np.arctan2(cross_magnitude, dot_product)
def compute_splays(first_neighbors_splay, time, all_tilts_vect_upper):
    """Build an (n_pairs, 4) array of (splay angle, index_a, index_b, time).

    The splay angle of a neighbor pair is the angle between the two lipids'
    tilt vectors; *time* is broadcast to every row.
    """
    idx_a = first_neighbors_splay[0]
    idx_b = first_neighbors_splay[1]
    n_pairs = len(idx_a)
    rows = np.zeros((n_pairs, 4))
    times = np.full(n_pairs, time)
    for row in range(n_pairs):
        a = idx_a[row]
        b = idx_b[row]
        angle = compute_angle(all_tilts_vect_upper[a], all_tilts_vect_upper[b])
        rows[row, :] = angle, a, b, times[row]
    return rows
###=======================Main ==============================================######
# Driver: load per-frame director DataFrames, attach the Lo/Ld phase
# assignment, then fit splay and tilt moduli per leaflet and phase.
# NOTE(review): relies on names defined earlier in this file (u, np,
# splay_modulus, apl_Lo, apl_Ld) -- confirm they are in scope before running.
Kb= 0.0083144621
T =298
input_dir = "ANALYSIS/directors/"
input_tilts_dir = "ANALYSIS/tilts_local_normals/"
input_phase_assignment="ANALYSIS/directors/plots/"
assigned_up_all = []
assigned_down_all = []
leaflet = 'upper'
import pandas as pd
assignment_up_all = []
assignment_down_all = []
appended_data_up = []
appended_data_down = []
# Accumulate the per-frame pickled DataFrames for both leaflets.
for ts in range (0,u.trajectory.n_frames,1) :
    infile_up = 'ANALYSIS/directors/Dataframeup'+ str(ts)
    data_up = pd.read_pickle(infile_up)
    # store DataFrame in list
    appended_data_up.append(data_up)
    # see pd.concat documentation for more info
    # NOTE(review): concatenating inside the loop is O(n^2) in frames; only
    # the final iteration's result is used, so this could move after the loop.
    Data_up = pd.concat(appended_data_up)
    infile_down = 'ANALYSIS/directors/Dataframedown'+ str(ts)
    data_down = pd.read_pickle(infile_down)
    # store DataFrame in list
    appended_data_down.append(data_down)
    # see pd.concat documentation for more info
    Data_down = pd.concat(appended_data_down)
    """ read in the Lo/Ld assignment: ATTENTION: for the lipids you have saved the value two times(one time for chain): CLEAN UP!
    taking only one value per chain!
    Assignment : 1 = Lo, 0 = Ld
    """
    assignment_up = np.load(input_phase_assignment + 'resid_phases'+ 'upper' +'.'+ str(ts) + '.npy')
    assignment_down = np.load(input_phase_assignment + 'resid_phases'+ 'lower' +'.'+ str(ts) + '.npy')
    # Per-species residue lists; their lengths give the slicing offsets below.
    chl_res_up = np.load(input_dir + 'cholesterol_'+'upper'+'_tail_' + str(ts) + '.npy')
    dlipc_res_up = np.load(input_dir + 'dlipc_' + 'upper'+'_tail_' + str(ts) + '.npy')
    dspc_res_up = np.load(input_dir + 'dspc_' + 'upper'+'_tail_' + str(ts) + '.npy')
    ssm_res_up = np.load(input_dir + 'ssm_' + 'upper'+'_tail_' + str(ts) + '.npy')
    chl_res_down = np.load(input_dir + 'cholesterol_'+'lower'+'_tail_' + str(ts) + '.npy')
    dlipc_res_down = np.load(input_dir + 'dlipc_' + 'lower'+'_tail_' + str(ts) + '.npy')
    dspc_res_down = np.load(input_dir + 'dspc_' + 'lower'+'_tail_' + str(ts) + '.npy')
    ssm_res_down = np.load(input_dir + 'ssm_' + 'lower'+'_tail_' + str(ts) + '.npy')
    # Drop the duplicated per-chain entries: keep one value per lipid by
    # slicing around the double-counted DLiPC and SSM stretches.
    cleaned_assignment_up = np.vstack((assignment_up[0:len(chl_res_up) + len(dlipc_res_up)],
                                       assignment_up[len(chl_res_up) + len(dlipc_res_up)*2 : len(chl_res_up) + len(dlipc_res_up)*2 +len(ssm_res_up)],
                                       assignment_up[len(chl_res_up) + len(dlipc_res_up)*2 + len(ssm_res_up)*2 : len(chl_res_up) + len(dlipc_res_up)*2 +len(ssm_res_up)*2 + len(dspc_res_up)] ))
    assigned_up_all.append(cleaned_assignment_up)
    cleaned_assignment_down = np.vstack((assignment_down[0:len(chl_res_down) + len(dlipc_res_down)],
                                         assignment_down[len(chl_res_down) + len(dlipc_res_down)*2 : len(chl_res_down) + len(dlipc_res_down)*2 +len(ssm_res_down)],
                                         assignment_down[len(chl_res_down) + len(dlipc_res_down)*2 + len(ssm_res_down)*2 : len(chl_res_down) + len(dlipc_res_down)*2 +len(ssm_res_down)*2 + len(dspc_res_down)] ))
    assigned_down_all.append(cleaned_assignment_down)
    assignment_down_all.append(cleaned_assignment_down)
# Stack all frames and attach the phase label column (1 = Lo, 0 = Ld).
ass_down_all = np.vstack((assigned_down_all))
ass_up_all = np.vstack((assigned_up_all))
Data_down['Assign'] = ass_down_all[:,1]
Data_up['Assign'] = ass_up_all[:,1]
Data_up_Lo = Data_up[Data_up['Assign'] ==1]
Data_down_Lo = Data_down[Data_down['Assign'] ==1]
Data_up_Ld = Data_up[Data_up['Assign'] ==0]
Data_down_Ld = Data_down[Data_down['Assign'] ==0]
# Fit the four moduli per leaflet; each fit is best-effort (failures are
# printed and the remaining fits still run).
try:
    disordered_Kc_up = splay_modulus('up', Data_up_Ld['Splay'].values, area_per_lipid= apl_Ld, status="disordered", Plot=True, nbins=10 )
except Exception as e:
    print(e)
try:
    ordered_Kc_up = splay_modulus('up', Data_up_Lo['Splay'].values, area_per_lipid= apl_Lo, status="ordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
try:
    disordered_Kt_up = tilt_modulus('up', Data_up_Ld['Tilt_angles'].values , status="disordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
try:
    ordered_Kt_up = tilt_modulus('up', Data_up_Lo['Tilt_angles'].values, status="ordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
try:
    disordered_Kc_down = splay_modulus('down', Data_down_Ld['Splay'].values, area_per_lipid= apl_Ld, status="disordered", Plot=True, nbins=10 )
except Exception as e:
    print(e)
try:
    ordered_Kc_down = splay_modulus('down', Data_down_Lo['Splay'].values, area_per_lipid= apl_Lo, status="ordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
try:
    disordered_Kt_down = tilt_modulus('down', Data_down_Ld['Tilt_angles'].values , status="disordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
try:
    ordered_Kt_down = tilt_modulus('down', Data_down_Lo['Tilt_angles'].values, status="ordered", Plot=True, nbins=20 )
except Exception as e:
    print(e)
| StarcoderdataPython |
8138799 | <reponame>ryanjwise/free-speech
import os
def get_input():
    """Prompt the user and return whatever text they typed."""
    return input("What would you like to say?")
def play_input(input):
    """Speak *input* aloud with the espeak command-line tool.

    The text is passed as its own argv element (no shell involved), so
    apostrophes or shell metacharacters in the user's text can neither break
    the command nor be executed -- the original os.system(f"espeak '{input}'")
    call failed on any single quote and allowed shell injection.
    """
    import subprocess
    subprocess.run(["espeak", input])
def app_loop():
    """Keep reading text from the user and speaking it until they type "exit"."""
    while True:
        text = get_input()
        play_input(text)
        os.system('clear')
        if text == "exit":
            # "exit" is still spoken (above) before the loop terminates.
            print("Goodbye")
            break
app_loop() | StarcoderdataPython |
5193615 | <reponame>david58/gradertools
import os
from .compile_python import CompilerPython
from .compile_cpp import CompilerCpp
#from ..isolation.isolate import Isolate
class Compile:
    """Facade that dispatches source compilation to a per-language back end.

    Exposes a uniform interface (compile(), binarypath, status, errormessage)
    regardless of whether the source is Python or C++.
    """
    def __init__(self, sourcepath, compiler, isolator=None):
        """
        :param sourcepath: path of the source file to compile
        :param compiler: language tag, one of 'python' or 'cpp'
        :param isolator: optional sandbox forwarded to the back end's
            compile(); when None the source is compiled without isolation
        :raises ValueError: if *compiler* is not a supported language
        """
        if compiler == 'python':
            Compiler = CompilerPython
        elif compiler == 'cpp':
            Compiler = CompilerCpp
        else:
            # ValueError is a subclass of the old generic Exception, so
            # existing `except Exception` callers keep working, and the
            # message now names the offending value.
            raise ValueError('Unknown Language: %r' % (compiler,))
        self._isol = isolator
        self._comp = Compiler(sourcepath)
    @property
    def binarypath(self):
        """Path of the produced binary (delegated to the back end)."""
        return self._comp.get_binarypath()
    @property
    def status(self):
        """Compilation status reported by the back end."""
        return self._comp.get_status()
    @property
    def errormessage(self):
        """Compiler error output, if any (delegated to the back end)."""
        return self._comp.get_error()
    def compile(self):
        """Run the compilation, inside the isolator when one was supplied."""
        self._comp.compile(self._isol)
| StarcoderdataPython |
6432882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, "../")
import threading
import time
import logging
import snakemq
import snakemq.link
import snakemq.packeter
import snakemq.messaging
import snakemq.queues
import snakemq.rpc
class B(object):
    """Demo object exposed over RPC; its single method logs to stdout."""
    def wer(self):
        """Print a fixed marker so remote invocations are visible locally."""
        marker = "wer"
        print(marker)
def f():
    """Background worker: wait for a peer connection, then exercise the RPC proxy.

    Runs forever; relies on module globals defined after this function
    (m: Messaging, proxy: RPC proxy, s: Link).
    """
    time.sleep(1)
    c = None
    while True:
        # A non-empty connection table means at least one peer is connected.
        if list(m._conn_by_ident.keys()):
            c = 1
        if c:
            try:
                print(proxy.get_fo())
            except Exception as exc:
                # snakemq attaches the remote traceback to the raised exception
                # (read here via exc.__remote_traceback__).
                print("remote traceback", str(exc.__remote_traceback__))
            # Stops the link's event loop; note this while-loop itself never
            # exits -- it keeps calling the proxy every ~2 s once connected.
            s.stop()
            time.sleep(2)
# Wire up the snakemq stack (link -> packeter -> messaging) for peer "soldier".
snakemq.init_logging()
logger = logging.getLogger("snakemq")
logger.setLevel(logging.DEBUG)
s = snakemq.link.Link()
s.add_connector(("localhost", 4000))
tr = snakemq.packeter.Packeter(s)
m = snakemq.messaging.Messaging("soldier", "", tr, None)
# Daemon thread polls for a connection, then drives the RPC demo (see f()).
t = threading.Thread(target=f)
t.setDaemon(1)
t.start()
# RPC: expose the local object B as "b" and obtain a proxy for remote
# object "abc" living on peer "boss".
rh = snakemq.messaging.ReceiveHook(m)
crpc = snakemq.rpc.RpcClient(rh)
srpc = snakemq.rpc.RpcServer(rh)
srpc.register_object(B(), "b")
proxy = crpc.get_proxy("boss", "abc")
# Presumably marks mysignal as a one-way "signal" call (no reply awaited)
# -- confirm against the snakemq.rpc documentation.
proxy.mysignal.as_signal(10)
proxy.mysignal()
s.loop()
| StarcoderdataPython |
6606027 | from func.firebase_init import db
from func.blackjack import *
from disnake.ext.commands import Param
from disnake.ext import commands
import disnake
class BJ(disnake.ui.View):
    """Interactive blackjack game view with one Stand and one Hit button."""
    def __init__(self, og_inter: disnake.MessageInteraction, bet: int, user_money: int):
        super().__init__()
        self.result = None # lose: -1; tie: 0; win: 1; user BJ: 2
        # Opaque state code consumed by generate_game_embed/get_result_money
        # (20 = stand, 30 = hit; the slash command also sets 0/1/2/69).
        self.action = None
        self.og_inter = og_inter
        self.bet = bet
        self.user_money = user_money
        self.deck = new_deck()
        self.player, self.dealer = deal_first_hand(self.deck)
    @disnake.ui.button(label="Stand", style=disnake.ButtonStyle.gray)
    async def stand(self, button: disnake.ui.Button, inter: disnake.MessageInteraction):
        """Player stands: dealer draws up from 16, game ends, balance settled."""
        # Only the player who started this game may press its buttons.
        if self.og_inter.author.id != inter.author.id:
            return await inter.response.send_message("You are NOT allowed to do this..", ephemeral=True)
        self.action = 20
        # Dealer keeps drawing while at 16 or below.
        while sum(self.dealer) <= 16:
            self.dealer.append(self.deck.pop(0))
        self.stop()
        await self.og_inter.edit_original_message(embed=generate_game_embed(self), view=None)
        return db.child('users').child(self.og_inter.author.id).update({'money': get_result_money(self)})
    @disnake.ui.button(label="Hit", style=disnake.ButtonStyle.success)
    async def hit(self, button: disnake.ui.Button, inter: disnake.MessageInteraction):
        """Player draws one card; the game ends immediately on 21 or a bust."""
        if self.og_inter.author.id != inter.author.id:
            return await inter.response.send_message("You are NOT allowed to do this..", ephemeral=True)
        self.action = 30
        self.player.append(self.deck.pop(0))
        if sum(self.player) > 21 or sum(self.player) == 21:
            self.stop()
            await self.og_inter.edit_original_message(embed=generate_game_embed(self), view=None)
            return db.child('users').child(self.og_inter.author.id).update({'money': get_result_money(self)})
        await self.og_inter.edit_original_message(embed=generate_game_embed(self), view=self)
class BlackJack(commands.Cog):
    """Cog that exposes the /blackjack slash command."""
    def __init__(self, client):
        """Blackjack game."""
        self.client = client
    @commands.slash_command(name="blackjack", description="Game of Black Jack")
    async def _blackjack(self, inter: disnake.MessageInteraction, bet: int = Param(..., desc="Place your bet!")):
        """Validate the bet, settle instant (natural) 21s, else start a game view."""
        user_money = db.child('users').child(inter.author.id).child('money').get().val()
        # user_money = 10
        if bet <= 0:
            return await inter.response.send_message("You can't do that, and you know it..", ephemeral=True)
        if bet > user_money:
            # Replace comma thousands separators with spaces for display.
            message = f"You cannot bet more than you have.. (You have {user_money:,} monies)".replace(',', ' ')
            return await inter.response.send_message(message, ephemeral=True)
        game = BJ(inter, bet, user_money)
        # Settle "natural" 21s straight from the opening deal.
        if sum(game.player) == sum(game.dealer) and sum(game.player) == 21: # Tie
            game.action = 0
            game.result = 0
        elif sum(game.player) != sum(game.dealer) and sum(game.player) == 21: # Player W
            game.action = 1
            # NOTE(review): result stays 0 here although BJ's header comment
            # maps win -> 1 / player blackjack -> 2; confirm get_result_money
            # keys off `action` rather than `result` for this branch.
            game.result = 0
        elif sum(game.dealer) == 21: # Dealer W
            game.action = 2
            game.result = -1
        if game.action is not None:
            await inter.response.send_message(embed=generate_game_embed(game))
            return db.child('users').child(inter.author.id).update({'money': get_result_money(game)})
        game.action = 69
        await inter.response.send_message(embed=generate_game_embed(game), view=game)
def setup(client):
    """Extension entry point: called by the bot's load_extension to register the cog."""
    client.add_cog(BlackJack(client))
| StarcoderdataPython |
1652060 | <reponame>lejion/django-sagepaypi<filename>sagepaypi/urls.py
from django.urls import path
from sagepaypi import views
# URL namespace, e.g. reverse('sagepaypi:complete_3d_secure').
app_name = 'sagepaypi'
urlpatterns = [
    path(
        # <tidb64>/<token> identify and authenticate the transaction; this is
        # the return leg of the 3-D Secure flow (see Complete3DSecureView).
        'transactions/<tidb64>/<token>/3d-secure/complete/',
        views.Complete3DSecureView.as_view(),
        name='complete_3d_secure'
    )
]
| StarcoderdataPython |
6579542 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
TimeSeriesProcessingEntryPoints.SlidingWindowTransform
"""
import numbers
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def timeseriesprocessingentrypoints_slidingwindowtransform(
        source,
        data,
        name,
        output_data=None,
        model=None,
        window_size=2,
        lag=1,
        begin='NaNValues',
        **params):
    """
    **Description**
        Returns the last values for a time series [y(t-d-l+1), y(t-d-l+2),
        ..., y(t-l-1), y(t-l)] where d is the size of the window, l
        the lag and y is a Float.

    :param source: The name of the source column (inputs).
    :param data: Input dataset (inputs).
    :param name: The name of the new column (inputs).
    :param window_size: The size of the sliding window for computing
        the moving average (inputs).
    :param lag: Lag between current observation and last observation
        from the sliding window (inputs).
    :param begin: Define how to populate the first rows of the
        produced series (inputs).
    :param output_data: Transformed dataset (outputs).
    :param model: Transform model (outputs).
    """
    # NOTE: generated code (see file header) -- edits here may be overwritten.
    entrypoint_name = 'TimeSeriesProcessingEntryPoints.SlidingWindowTransform'
    inputs = {}
    outputs = {}
    # Marshal every supplied argument into the entry point's inputs/outputs
    # maps; try_set validates type, none-ness and (for `begin`) enum values.
    if source is not None:
        inputs['Source'] = try_set(
            obj=source,
            none_acceptable=False,
            is_of_type=str,
            is_column=True)
    if data is not None:
        inputs['Data'] = try_set(
            obj=data,
            none_acceptable=False,
            is_of_type=str)
    if name is not None:
        inputs['Name'] = try_set(
            obj=name,
            none_acceptable=False,
            is_of_type=str,
            is_column=True)
    if window_size is not None:
        inputs['WindowSize'] = try_set(
            obj=window_size,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if lag is not None:
        inputs['Lag'] = try_set(
            obj=lag,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if begin is not None:
        inputs['Begin'] = try_set(
            obj=begin,
            none_acceptable=True,
            is_of_type=str,
            values=[
                'NaNValues',
                'FirstValue'])
    if output_data is not None:
        outputs['OutputData'] = try_set(
            obj=output_data,
            none_acceptable=False,
            is_of_type=str)
    if model is not None:
        outputs['Model'] = try_set(
            obj=model,
            none_acceptable=False,
            is_of_type=str)
    # Strings starting with "$" are graph variables that wire entry points
    # together in the experiment graph.
    input_variables = {
        x for x in unlist(inputs.values())
        if isinstance(x, str) and x.startswith("$")}
    output_variables = {
        x for x in unlist(outputs.values())
        if isinstance(x, str) and x.startswith("$")}
    entrypoint = EntryPoint(
        name=entrypoint_name, inputs=inputs, outputs=outputs,
        input_variables=input_variables,
        output_variables=output_variables)
    return entrypoint
| StarcoderdataPython |
1624478 | <reponame>kanglicheng/learn-python-2020<filename>stephen/week1.py
"""
Chooses a random integer in [1, 5] (inclusive). Asks the user to enter a guess;
terminates only when the user guesses correctly
"""
import random
def guessing_game():
    """Pick a secret number in [1, 5] and prompt until the user guesses it.

    Returns the string "correct!" once the user guesses the number.
    Non-numeric input used to crash the game with ValueError; it now simply
    re-prompts.
    """
    number = random.randint(1, 5)
    while True:
        answer = input(
            "Please enter a number between 1 and 5 (inclusive) ")
        try:
            guess = int(answer)
        except ValueError:
            print("Please enter a whole number.")
            continue
        if guess == number:
            return "correct!"
# print(guessing_game())
"""
implement sum function
sum(a, b, c, d, ... m, n) = a+b+c+d+...+m+n
"""
def mysum(*numbers):
    """Return the sum of all positional arguments (0 when called with none)."""
    return sum(numbers)
#print(mysum(1, 2, 3, 4, 5))
def get_avg():
    """Read integers from stdin until a non-integer is entered, then print their mean.

    Keeps a running total and count (nothing is stored in a list, despite the
    original comment).  Prints nothing when no number was entered at all.
    """
    total = 0
    count = 0
    while True:
        user_input = input("enter a number: ")
        # Only a malformed number should end input; the original bare
        # `except:` also swallowed KeyboardInterrupt and SystemExit.
        try:
            n = int(user_input)
        except ValueError:
            break
        count += 1
        total += n
    if count > 0:
        print(total/count)
    return
# Run the interactive averaging loop when the module is executed.
get_avg()
| StarcoderdataPython |
3598588 | import json, logging, os, psutil, requests, sys, traceback
from datetime import datetime
from ischedule import schedule, run_loop
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
from os import environ
def create_logger() :
    """Attach a stdout INFO-level handler to the root logger and return it."""
    root = logging.getLogger('')
    root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    root.addHandler(handler)
    return root
# Shared module logger and our own PID (used by die() to self-terminate).
log = create_logger()
pid = os.getpid()
def die():
    """Log a fatal message and terminate this process.

    Signals our own PID via psutil's terminate() (SIGTERM on POSIX) instead
    of calling sys.exit() -- presumably so the whole process, not just the
    calling thread, goes down; TODO confirm.
    """
    log.fatal("Dying.")
    thisApp = psutil.Process(pid)
    thisApp.terminate()
def parse_metrics_config(config_line):
    """Split a comma-separated metrics string into a list of 1-5 metric names.

    Whitespace is trimmed and blank entries are dropped.  An empty/None input
    or an out-of-range metric count is fatal and terminates the process.
    """
    if not config_line or config_line.strip() == "":
        log.fatal("No metrics found in config. At least 1 metric must be configured!")
        die()
    metrics = [piece.strip() for piece in config_line.split(",") if piece.strip() != ""]
    if len(metrics) < 1 or len(metrics) > 5:
        log.fatal("At least one metric is required and no more than 5 are allowed. Found %d." % len(metrics))
        die()
    return metrics
def construct_data_url(base_url, metrics):
    """Append one `&metric=<name>` query parameter per metric to *base_url*."""
    url = base_url
    for metric in metrics:
        log.info("base_url => [%s]; Result => [%s]; metric => [%s]" % (base_url, url, metric))
        url = "%s&metric=%s" % (url, metric)
    return url
# --- Configuration: everything comes from environment variables. ---
influx_url = environ.get('DOCKER_INFLUXDB_HOST')
influx_org = environ.get('DOCKER_INFLUXDB_INIT_ORG')
influx_bucket = environ.get('DOCKER_INFLUXDB_INIT_BUCKET')
influx_token = environ.get('DOCKER_INFLUXDB_INIT_ADMIN_TOKEN')
data_url = environ.get('DATA_URL')
fetch_interval_mins = float(environ.get('FETCH_INTERVAL_MINUTES'))
fetch_interval_seconds = fetch_interval_mins * 60
json_archive_path = os.path.abspath(environ.get('JSON_ARCHIVE_DIR'))
metrics_str = environ.get('DATA_METRICS')
# Validate the metric list (1-5 entries) and bake it into the API URL.
metrics = parse_metrics_config(metrics_str)
data_url = construct_data_url(data_url, metrics)
log.info("Initialising UK Covid Data Fetcher on PID %s ..." % pid)
log.info("Fetch poll interval set to %s minutes (%s seconds)." % (fetch_interval_mins, fetch_interval_seconds))
# NOTE(review): a missing DATA_URL is logged as fatal but execution continues
# (no die() call here); the first fetch will then fail -- confirm intended.
if not data_url:
    log.fatal("Missing data API URL to fetch data from.")
log.info("Influx DB URL [%s], org [%s], bucket [%s]." % (influx_url, influx_org, influx_bucket))
log.info("API responses will be archived under directory: [%s]." % json_archive_path)
def save_covid_data(data_json):
    """Write every daily record from the API response into InfluxDB.

    Each entry of data_json['body'] becomes one 'uk_covid_day' point, tagged
    with area type/code/name and carrying one field per configured metric.
    """
    log.info("Saving...")
    influx_client = InfluxDBClient(url=influx_url, token=influx_token, org=influx_org)
    influx_write_api = influx_client.write_api(write_options=SYNCHRONOUS)
    json_records = data_json['body']
    for json_record in json_records:
        # The record's ISO date becomes the point's timestamp.
        timestamp = datetime.strptime(json_record['date'], '%Y-%m-%d')
        record = Point("uk_covid_day") \
            .time(timestamp) \
            .tag("areaType", json_record['areaType']) \
            .tag("areaCode", json_record['areaCode']) \
            .tag("areaName", json_record['areaName'])
        for metric in metrics:
            record = record.field(metric, json_record[metric])
        influx_write_api.write(bucket=influx_bucket, record=record)
    influx_write_api.close()
    influx_client.close()
    log.info("Latest data persisted to Influx DB.")
def build_save_path():
    """Return the archive file path: <JSON_ARCHIVE_DIR>/<timestamp>.json."""
    stamp = datetime.today().strftime('%Y-%m-%d_%H%M%S')
    return "%s/%s.json" % (json_archive_path, stamp)
def fetch_data():
    """Poll the data API once: archive the raw JSON, then persist it to InfluxDB.

    Any request failure is treated as fatal (process terminated via die()).
    """
    log.info("Fetching data from: [%s]" % data_url)
    response = None
    try:
        response = requests.get(data_url)
    except Exception as e:
        log.error(traceback.format_exc())
        die()
    # NOTE(review): die() sends a signal rather than raising, so execution can
    # still reach this point with response == None after a failed request.
    status_code = response.status_code
    log.info("Received status code: %s" % status_code)
    log.debug("Received output: %s" % response.text)
    json_output = response.json()
    # Archive the raw payload before any processing so data survives bugs below.
    save_path = build_save_path()
    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(json_output, f, ensure_ascii=False, indent = 4)
    log.info("Data archived under: [%s]" % save_path)
    save_covid_data(json_output)
# Fetch once at startup, then re-run fetch_data on the configured interval
# (the run_loop() call below drives the scheduler forever).
fetch_data()
schedule(fetch_data, interval=fetch_interval_seconds)
run_loop() | StarcoderdataPython |
9610890 | <reponame>guanghuixu/multi-model-forgetting<gh_stars>10-100
from collections import defaultdict, deque
import os
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
# from torch.tensor import ones
from models.cnn_layers import CNN_LAYER_CREATION_FUNCTIONS, initialize_layers_weights, get_cnn_layer_with_names
from scipy.special import expit, logit
from typing import List
from models.shared_base import *
from utils import get_logger, get_variable, keydefaultdict
logger = get_logger()
def node_to_key(node):
    """Serialize a DAG node (idx, jdx, type) as the string 'idx-jdx-typename'.

    The third element is either a type-name string (used verbatim) or a
    class, in which case its __name__ is used.
    """
    idx, jdx, _type = node
    type_name = _type if isinstance(_type, str) else _type.__name__
    return f'{idx}-{jdx}-{type_name}'
def dag_to_keys(dag):
    """Serialize every node of *dag* with node_to_key."""
    return list(map(node_to_key, dag))
class Architecture:
    """Hyperparameters describing the macro-structure of the CNN.

    final_filter_size is the number of filters of the cell just before the
    output layer; each reduction cell doubles the filter count (as it halves
    the width and height).  The network stacks num_modules modules; every
    module except the final one consists of num_repeat_normal normal cells
    followed by one reduction cell (the final module has no reduction cell).
    """
    def __init__(self, final_filter_size, num_repeat_normal, num_modules):
        # Plain value object: just record the three knobs.
        self.num_modules = num_modules
        self.num_repeat_normal = num_repeat_normal
        self.final_filter_size = final_filter_size
class CNN(SharedModel):
"""Represents a Meta-Convolutional network made up of Meta-Convolutional Cells.
Paths through the cells can be selected and moved to the gpu for training and evaluation.
Adapted from online code. need intense modification.
"""
def __init__(self, args, corpus):
"""
# input_channels, height, width, output_classes, gpu, num_cell_blocks,
# architecture=Architecture(final_filter_size=768 // 2, num_repeat_normal=6, num_modules=3)):
:param args: arguments
:param corpus: dataset
"""
super(CNN, self).__init__(args)
self.args = args
self.corpus = corpus
architecture = Architecture(final_filter_size=args.cnn_final_filter_size,
num_repeat_normal=args.cnn_num_repeat_normal,
num_modules=args.cnn_num_modules)
input_channels = args.cnn_input_channels
self.height = args.cnn_height
self.width = args.cnn_width
self.output_classes = args.output_classes
self.architecture = architecture
self.output_height = self.height
self.output_width = self.width
self.num_cell_blocks = args.num_blocks
self.cells = nn.Sequential()
self.reduce_cells = nn.Sequential()
self.normal_cells = nn.Sequential()
self.gpu = torch.device("cuda:0") if args.num_gpu > 0 else torch.device('cpu')
self.cpu_device = torch.device("cpu")
self.dag_variables_dict = {}
self.reducing_dag_variables_dict = {}
last_input_info = _CNNCell.InputInfo(input_channels=input_channels, input_width=self.width)
current_input_info = _CNNCell.InputInfo(input_channels=input_channels, input_width=self.width)
# count connections
temp_cell = _CNNCell(input_infos=[last_input_info, current_input_info],
output_channels=architecture.final_filter_size,
output_width=self.output_width, reducing=False, dag_vars=None,
num_cell_blocks=self.num_cell_blocks)
self.all_connections = list(temp_cell.connections.keys()) # as all possible connections.
self.dag_variables = torch.ones(len(self.all_connections), requires_grad=True, device=self.gpu)
self.reducing_dag_variables = torch.ones(len(self.all_connections), requires_grad=True, device=self.gpu)
for i, key in enumerate(self.all_connections):
self.dag_variables_dict[key] = self.dag_variables[i]
self.reducing_dag_variables_dict[key] = self.reducing_dag_variables[i]
cells = [('normal', architecture.final_filter_size)] * architecture.num_repeat_normal
current_filter_size = architecture.final_filter_size
for module in range(architecture.num_modules - 1):
cells.append(('reducing', current_filter_size))
current_filter_size //= 2
cells.extend([('normal', current_filter_size)] * architecture.num_repeat_normal)
cells.reverse()
for i, (type, num_filters) in enumerate(cells):
if type == 'reducing':
self.output_height /= 2
self.output_width /= 2
reducing = True
else:
reducing = False
assert (type == 'normal')
dag_vars = self.dag_variables_dict if reducing == False else self.reducing_dag_variables_dict
name = f'{i}-{type}-{num_filters}'
a_cell = _CNNCell(input_infos=[last_input_info, current_input_info],
output_channels=num_filters, output_width=self.output_width,
reducing=reducing, dag_vars=dag_vars, num_cell_blocks=self.num_cell_blocks,
args=self.args)
self.cells.add_module(name, a_cell)
# Registering for the WPL later.
if reducing:
self.reduce_cells.add_module(name, a_cell)
else:
self.normal_cells.add_module(name, a_cell)
last_input_info, current_input_info = current_input_info, _CNNCell.InputInfo(input_channels=num_filters,
input_width=self.output_width)
if self.output_classes:
self.conv_output_size = self.output_height * self.output_width * self.architecture.final_filter_size
self.out_layer = nn.Linear(self.conv_output_size, self.output_classes)
torch.nn.init.kaiming_normal_(self.out_layer.weight, mode='fan_out', nonlinearity='relu')
torch.nn.init.constant_(self.out_layer.bias, 0)
self.out_layer.to(self.gpu)
parent_counts = [0] * (2 + self.num_cell_blocks)
for idx, jdx, _type in self.all_connections:
parent_counts[jdx] += 1
probs = np.array(list(2 / parent_counts[jdx] for idx, jdx, _type in self.all_connections))
self.dags_logits = (logit(probs), logit(probs))
self.target_ave_prob = np.mean(probs)
self.cell_dags = ([], [])
self.ignore_module_keys = ['cell', 'out_layer']
self.wpl_monitored_modules = self.cells._modules
self.init_wpl_weights()
def forward(self, inputs,
dag,
is_train=True,
hidden=None
):
"""
:param cell_dags: (normal_cell_dag, reduction_cell_dag)
:param inputs: [last_input, current_input]
:param hidden: don't care. legacy for RNN.
"""
cell_dag, reducing_cell_dag = dag or self.cell_dags
# cell_dag, reducing_cell_dag = dag # support the dynamic dags.
is_train = is_train and self.args.mode in ['train'] # add here for behaviors differs from train and test.
last_input, current_input = inputs, inputs
for cell in self.cells:
if cell.reducing:
dag = reducing_cell_dag
else:
dag = cell_dag
output, extra_out = cell(dag, last_input, current_input)
last_input, current_input = current_input, output
x = output.view(-1, self.conv_output_size)
x = self.out_layer(x)
return x, extra_out
def get_f(self, name):
""" Get the cell structure """
name = name.lower()
# return f
raise NotImplementedError
def get_num_cell_parameters(self, dag):
"""
Returns the parameters of the path through the Meta-network given by the dag.
:param dag: a list of [normal_dag, reduce_dag]
return parameters.
"""
dag, reducing_dag = dag
params = []
for cell in self.cells:
if cell.reducing:
d = reducing_dag
else:
d = dag
params.extend(cell.get_parameters(d))
# return params
raise NotImplementedError
def get_parameters(self, dags):
""" return the parameter of given dags """
dag, reducing_dag = dags
params = []
for cell in self.cells:
if cell.reducing:
d = reducing_dag
else:
d = dag
params.extend(cell.get_parameters(d))
return params
def reset_parameters(self):
""" reset all parameters ? """
params = self.get_parameters(self.cell_dags)
raise NotImplementedError('reset not implemented')
def update_dag_logits(self, gradient_dicts, weight_decay, max_grad=0.1):
"""
Updates the probabilities of each path being selected using the given gradients.
"""
dag_probs = tuple(expit(logit) for logit in self.dags_logits)
current_average_dag_probs = tuple(np.mean(prob) for prob in dag_probs)
for i, key in enumerate(self.all_connections):
for grad_dict, current_average_dag_prob, dag_logits in zip(gradient_dicts, current_average_dag_probs,
self.dags_logits):
if key in grad_dict:
grad = grad_dict[key] - weight_decay * (
current_average_dag_prob - self.target_ave_prob) # *expit(dag_logits[i])
deriv = sigmoid_derivitive(dag_logits[i])
logit_grad = grad * deriv
dag_logits[i] += np.clip(logit_grad, -max_grad, max_grad)
def get_dags_probs(self):
"""Returns the current probability of each path being selected.
Each index corresponds to the connection in self.all_connections
"""
return tuple(expit(logits) for logits in self.dags_logits)
def __to_device(self, device, cell_dags):
cell_dag, reducing_cell_dag = cell_dags
for cell in self.cells:
if cell.reducing:
cell.to_device(device, reducing_cell_dag)
else:
cell.to_device(device, cell_dag)
def set_dags(self, new_cell_dags=([], [])):
"""
Sets the current active path. Moves other variables to the cpu to save gpu memory.
:param new_cell_dags: (normal_cell_dag, reduction_cell_dag)
"""
new_cell_dags = tuple(list(sorted(cell_dag)) for cell_dag in new_cell_dags)
set_cell_dags = [set(cell_dag) for cell_dag in new_cell_dags]
last_set_cell_dags = [set(cell_dag) for cell_dag in self.cell_dags]
cell_dags_to_cpu = [last_set_cell_dag - set_cell_dag
for last_set_cell_dag, set_cell_dag in zip(last_set_cell_dags, set_cell_dags)]
cell_dags_to_gpu = [set_cell_dag - last_set_cell_dag
for last_set_cell_dag, set_cell_dag in zip(last_set_cell_dags, set_cell_dags)]
self.__to_device(self.cpu_device, cell_dags_to_cpu)
self.__to_device(self.gpu, cell_dags_to_gpu)
self.cell_dags = new_cell_dags
# doing this is very important for grouping all the cells and unified the process.
# maybe can move this to outer cells.
# def init_wpl_weights(self):
# """
# Init for WPL operations.
#
# NOTE: only take care of all the weights in self._modules, and others.
# for self parameters and operations, please override later.
#
# :return:
# """
# for cell in self.cells:
# if isinstance(cell, WPLModule):
# cell.init_wpl_weights()
#
# def set_fisher_zero(self):
# for cell in self.cells:
# if isinstance(cell, WPLModule):
# cell.set_fisher_zero()
#
# def update_optimal_weights(self):
# """ Update the weights with optimal """
# for cell in self.cells:
# if isinstance(cell, WPLModule):
# cell.update_optimal_weights()
def update_fisher(self, dags):
""" logic is different here, for dags, update all the cells registered. """
normal, reduce = dags
for cell in self.cells:
if cell.reducing:
d = reduce
else:
d = normal
cell.update_fisher(d)
def compute_weight_plastic_loss_with_update_fisher(self, dags):
    """Sum the weight-plasticity loss over all registered cells.

    Each cell is evaluated on the dag matching its type (reduction dag for
    reducing cells, normal dag otherwise).
    """
    normal_dag, reduction_dag = dags
    return sum(
        cell.compute_weight_plastic_loss_with_update_fisher(
            reduction_dag if cell.reducing else normal_dag)
        for cell in self.cells)
# Represents a Meta-Convolutional cell. It generates a possible forward connection between
# every layer except between the input layers of every type in CNN_LAYER_CREATION_FUNCTIONS
# Any path can then be chosen to run and train with
class _CNNCell(WPLModule):
    """One meta-cell of the CNN architecture search space.

    Builds a candidate layer of every registered type for each allowed
    (source, target) pair; a dag (list of ``(source, target, type_name)``
    triples) then selects which connections are active in ``forward``.
    """

    class InputInfo:
        # Shape description of one external cell input:
        # channel count plus spatial width.
        def __init__(self, input_channels, input_width):
            self.input_channels = input_channels
            self.input_width = input_width

    def __init__(self, input_infos: List[InputInfo],
                 output_channels, output_width,
                 reducing, dag_vars, num_cell_blocks,
                 args=None):
        """Create all candidate connections of the cell.

        :param input_infos: shapes of the cell's external inputs
        :param output_channels: channel count of every internal block output
        :param output_width: spatial width of every internal block output
        :param reducing: True for a reduction cell
        :param dag_vars: per-connection scaling variables, keyed like
            ``self.connections`` -- TODO confirm value semantics with caller
        :param num_cell_blocks: number of internal blocks after the inputs
        :param args: forwarded to WPLModule
        """
        super().__init__(args)
        self.input_infos = input_infos
        self.num_inputs = len(self.input_infos)
        self.num_cell_blocks = num_cell_blocks
        num_outputs = self.num_inputs + num_cell_blocks
        self.output_channels = output_channels
        self.output_width = output_width
        self.reducing = reducing
        self.dag_vars = dag_vars
        self.connections = dict()
        # self._connections = nn.ModuleList()
        # Enumerate every (source idx, target jdx) pair; targets never point
        # back into the input slots (jdx starts at num_inputs at minimum).
        for idx in range(num_outputs - 1):
            for jdx in range(max(idx + 1, self.num_inputs), num_outputs):
                for _type, type_name in get_cnn_layer_with_names():
                    if idx < self.num_inputs:
                        # External input: may need stride 2 to halve its
                        # width down to the cell's output width.
                        input_info = self.input_infos[idx]
                        if input_info.input_width != output_width:
                            assert (input_info.input_width / 2 == output_width)
                            stride = 2
                        else:
                            stride = 1
                        in_planes = input_info.input_channels
                    else:
                        stride = 1
                        in_planes = output_channels
                    out_planes = output_channels
                    try:
                        self.connections[(idx, jdx, type_name)] = _type(in_planes=in_planes, out_planes=out_planes,
                                                                        stride=stride)
                    except RuntimeError as e:
                        # NOTE(review): when _type(...) raises, no entry is
                        # stored, so the two lookups below raise KeyError --
                        # this branch probably needs a `continue`; confirm.
                        logger.error(f'Identity Matching error {e}')
                    initialize_layers_weights(self.connections[(idx, jdx, type_name)])
                    self.add_module(node_to_key((idx, jdx, type_name)), self.connections[(idx, jdx, type_name)])
        self.init_wpl_weights()

    def forward(self, dag, *inputs):
        """
        Define the actual CELL of one CNN structure.
        :param dag: ordered list of (source, target, type_name) triples;
            entries must be ordered so a block's inputs are all accumulated
            before that block is first used as a source.
        :param inputs: the cell's external inputs (one per InputInfo)
        :return:
            output: the final block's averaged activation
            extra_out: dict{string_keys}: to output additional variable/Tensors for regularization.
        """
        assert (len(inputs) == self.num_inputs)
        inputs = list(inputs)
        inputs = inputs + self.num_cell_blocks * [None]
        outputs = [0] * (self.num_inputs + self.num_cell_blocks)
        num_inputs = [0] * (self.num_inputs + self.num_cell_blocks)
        inputs_relu = [None] * (self.num_inputs + self.num_cell_blocks)
        for source, target, _type in dag:
            key = (source, target, _type)
            conn = self.connections[key]
            if inputs[source] is None:
                # First use of an internal block as a source: finalize its
                # value as the dag_vars-weighted average of its inputs.
                outputs[source] /= num_inputs[source]
                inputs[source] = outputs[source]
            layer_input = inputs[source]
            if hasattr(conn, 'input_relu') and conn.input_relu:
                # Cache the ReLU'd input so it is computed once per source.
                if inputs_relu[source] is None:
                    inputs_relu[source] = torch.nn.functional.relu(layer_input)
                layer_input = inputs_relu[source]
            val = conn(layer_input) * self.dag_vars[key]
            outputs[target] += val
            num_inputs[target] += self.dag_vars[key]
        # The final block's average is the cell output.
        outputs[-1] /= num_inputs[-1]
        output = outputs[-1]
        raw_output = output
        extra_out = {'dropped': None,
                     'hiddens': None,
                     'raw': raw_output}
        return output, extra_out

    def to_device(self, device, dag):
        """Moves the parameters on the specified path to the device"""
        for source, target, type_name in dag:
            self.connections[(source, target, type_name)].to(device)

    def get_parameters(self, dag):
        """Returns the parameters of the path through the Cell given by the dag."""
        params = []
        for key in dag:
            params.extend(self.connections[key].parameters())
        return params

    def update_fisher(self, dag):
        """Update Fisher information for a single dag (keys derived from it)."""
        super(_CNNCell, self).update_fisher(dag_to_keys(dag))

    def compute_weight_plastic_loss_with_update_fisher(self, dag):
        # Delegates to WPLModule using the connection keys of this dag.
        return super(_CNNCell, self).compute_weight_plastic_loss_with_update_fisher(dag_to_keys(dag))
def sigmoid_derivitive(x):
    """Return the derivative of the sigmoid (logistic) function at x.

    Uses sigmoid'(x) = s * (1 - s) with s = sigmoid(x); ``expit`` is now
    evaluated once instead of twice.  (The misspelled public name is kept
    for backward compatibility with existing callers.)
    """
    s = expit(x)
    return s * (1.0 - s)
| StarcoderdataPython |
4838053 | import sys
import click
import pprint
import json
import os
import datetime
import pyaurorax
from dateutil.parser import parse
from ..helpers import (print_request_logs_table,
print_request_status,
get_search_data)
from ..templates import EPHEMERIS_SEARCH_TEMPLATE
def __create_search_object_from_query(q):
    """Build a pyaurorax ephemeris Search object from a query dictionary."""
    start = parse(q["start"], ignoretz=True)
    end = parse(q["end"], ignoretz=True)
    data_sources = q["data_sources"]
    programs = data_sources.get("programs")
    platforms = data_sources.get("platforms")
    instrument_types = data_sources.get("instrument_types")
    # metadata filters are optional; both pieces default to None
    metadata_filters = None
    metadata_filters_logical_operator = None
    filters = data_sources.get("ephemeris_metadata_filters", {})
    if ("expressions" in filters):
        metadata_filters = filters["expressions"]
    if ("logical_operator" in filters):
        metadata_filters_logical_operator = filters["logical_operator"]
    return pyaurorax.ephemeris.Search(start,
                                      end,
                                      programs=programs,
                                      platforms=platforms,
                                      instrument_types=instrument_types,
                                      metadata_filters=metadata_filters,
                                      metadata_filters_logical_operator=metadata_filters_logical_operator)
@click.group("ephemeris", help="Interact with ephemeris searches")
def ephemeris_group():
    # Container group for all "ephemeris" sub-commands; no body needed.
    pass
@ephemeris_group.command("get_status",
                         short_help="Get status info for an ephemeris search request")
@click.argument("request_uuid", type=str)
@click.option("--show-logs", "show_logs", is_flag=True,
              help="Show the logs for the request")
@click.option("--show-query", "show_query", is_flag=True,
              help="Show the query for the request")
@click.option("--filter-logs",
              type=click.Choice(["debug", "info", "warn", "error"]),
              help="Filter log messages (used with --show-logs)")
@click.option("--table-max-width", "--max-width", type=int,
              help="Max width for the logs table")
@click.pass_obj
def get_status(config, request_uuid, show_logs, show_query, filter_logs, table_max_width):
    """
    Get information for an ephemeris search request
    \b
    REQUEST_UUID the request unique identifier
    """
    # get request status; exits with code 1 on any API error
    try:
        url = pyaurorax.api.urls.ephemeris_request_url.format(request_uuid)
        s = pyaurorax.requests.get_status(url)
    except pyaurorax.AuroraXNotFoundException as e:
        click.echo("%s occurred: request ID not found" % (type(e).__name__))
        sys.exit(1)
    except pyaurorax.AuroraXException as e:
        click.echo("%s occurred: %s" % (type(e).__name__, e.args[0]))
        sys.exit(1)
    # print status nicely (helper also handles log/query display flags)
    print_request_status(s,
                         show_logs=show_logs,
                         show_query=show_query,
                         filter_logs=filter_logs,
                         table_max_width=table_max_width)
@ephemeris_group.command("get_logs",
                         short_help="Get logs for an ephemeris search request")
@click.argument("request_uuid", type=str)
@click.option("--filter", "--filter-logs", "filter_",
              type=click.Choice(["debug", "info", "warn", "error"]),
              help="Filter log messages")
@click.option("--table-max-width", "--max-width", type=int,
              help="Max width for the logs table")
@click.pass_obj
def get_logs(config, request_uuid, filter_, table_max_width):
    """
    Get the logs for an ephemeris search request
    \b
    REQUEST_UUID the request unique identifier
    """
    # fetch the current status of the request; exit 1 on any API error
    try:
        request_url = pyaurorax.api.urls.ephemeris_request_url.format(request_uuid)
        status = pyaurorax.requests.get_status(request_url)
    except pyaurorax.AuroraXNotFoundException as e:
        click.echo("%s occurred: request ID not found" % (type(e).__name__))
        sys.exit(1)
    except pyaurorax.AuroraXException as e:
        click.echo("%s occurred: %s" % (type(e).__name__, e.args[0]))
        sys.exit(1)
    # render the log table, or a notice when logs are absent
    if ("logs" not in status):
        click.echo("Search logs: missing, unable to display")
        return
    print_request_logs_table(status["logs"],
                             filter_level=filter_,
                             table_max_width=table_max_width)
@ephemeris_group.command("get_query",
                         short_help="Get query for an ephemeris search request")
@click.argument("request_uuid", type=str)
@click.pass_obj
def get_query(config, request_uuid):
    """
    Get the query for an ephemeris search request
    \b
    REQUEST_UUID the request unique identifier
    """
    # get request status; exits with code 1 on any API error
    try:
        url = pyaurorax.api.urls.ephemeris_request_url.format(request_uuid)
        s = pyaurorax.requests.get_status(url)
    except pyaurorax.AuroraXNotFoundException as e:
        click.echo("%s occurred: request ID not found" % (type(e).__name__))
        sys.exit(1)
    except pyaurorax.AuroraXException as e:
        click.echo("%s occurred: %s" % (type(e).__name__, e.args[0]))
        sys.exit(1)
    # print out the query, minus the server-assigned request ID
    if ("query" in s["search_request"]):
        query_to_show = s["search_request"]["query"]
        # BUGFIX: use pop() with a default instead of `del`, which raised
        # KeyError when the response carried no "request_id" field
        query_to_show.pop("request_id", None)
        click.echo(pprint.pformat(query_to_show))
    else:
        click.echo("\nSearch query missing from request status, unable to display")
@ephemeris_group.command("get_data",
                         short_help="Get data for an ephemeris search request")
@click.argument("request_uuid", type=str)
@click.option("--outfile", type=str, help="output file to save data to (a .json file)")
@click.option("--output-to-terminal", type=click.Choice(["dict", "objects"]),
              help="output data to terminal in a certain format (instead of to file)")
@click.option("--indent", type=int, default=2, show_default=True,
              help="indentation when saving data to file")
@click.option("--minify", is_flag=True, help="Minify the JSON data saved to file")
@click.pass_obj
def get_data(config, request_uuid, outfile, output_to_terminal, indent, minify):
    """
    Get the data for an ephemeris search request
    \b
    REQUEST_UUID the request unique identifier
    """
    # the shared helper implements retrieval + output for every search type
    get_search_data("ephemeris", request_uuid, outfile,
                    output_to_terminal, indent, minify)
@ephemeris_group.command("search_resubmit",
                         short_help="Resubmit an ephemeris search request")
@click.argument("request_uuid", type=str)
@click.pass_obj
def search_resubmit(config, request_uuid):
    """
    Resubmit an ephemeris search request
    \b
    REQUEST_UUID the request unique identifier
    """
    # get request status; exits with code 1 on any API error
    try:
        click.echo("Retrieving query for request '%s' ..." % (request_uuid))
        url = pyaurorax.api.urls.ephemeris_request_url.format(request_uuid)
        status = pyaurorax.requests.get_status(url)
    except pyaurorax.AuroraXNotFoundException as e:
        click.echo("%s occurred: request ID not found" % (type(e).__name__))
        sys.exit(1)
    except pyaurorax.AuroraXException as e:
        click.echo("%s occurred: %s" % (type(e).__name__, e.args[0]))
        sys.exit(1)
    # set the query to use for resubmission
    if ("query" not in status["search_request"]):
        click.echo("Error resubmitting: missing query from original request ID")
        sys.exit(1)
    q = status["search_request"]["query"]
    # create search object from the stored query
    click.echo("Preparing new search ...")
    s = __create_search_object_from_query(q)
    # submit search (non-blocking; only the new request ID is reported)
    click.echo("Submitting new search ...")
    s.execute()
    # output new request ID
    click.echo("Request has been resubmitted, new request ID is %s" % (s.request_id))
@ephemeris_group.command("search_template",
                         short_help="Output template for an ephemeris search request")
@click.option("--outfile", type=str, help="save template to a file")
@click.option("--indent", type=int, default=2, show_default=True,
              help="indentation to use when outputing template")
@click.pass_obj
def search_template(config, outfile, indent):
    """
    Output template for an ephemeris search request
    """
    # serialize once; either write it to the requested file or echo it
    rendered = json.dumps(EPHEMERIS_SEARCH_TEMPLATE, indent=indent)
    if (outfile is None):
        click.echo(rendered)
    else:
        with open(outfile, 'w', encoding="utf-8") as fp:
            fp.write(rendered)
        click.echo("Saved template to %s" % (outfile))
@ephemeris_group.command("search",
                         short_help="Perform an ephemeris search request")
@click.argument("infile", type=str)
@click.option("--poll-interval",
              default=pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME,
              show_default=True,
              help="polling interval when waiting for data (seconds)")
@click.option("--outfile", type=str, help="output file to save data to (a .json file)")
@click.option("--output-to-terminal", type=click.Choice(["dict", "objects"]),
              help="output data to terminal in a certain format (instead of to file)")
@click.option("--indent", type=int, default=2, show_default=True,
              help="indentation when saving data to file")
@click.option("--minify", is_flag=True, help="Minify the JSON data saved to file")
@click.option("--quiet", is_flag=True, help="Quiet output")
@click.pass_obj
def search(config, infile, poll_interval, outfile, output_to_terminal, indent, minify, quiet):
    """
    Perform an ephemeris search request
    \b
    INFILE input file with query (must be a JSON)
    """
    # check that infile exists
    if not (os.path.exists(infile)):
        # BUGFIX: the message previously had an unbalanced '(' around the filename
        click.echo("Error: infile doesn't exist (%s)" % (infile))
        sys.exit(1)
    # read in infile
    if (quiet is False):
        click.echo("[%s] Reading in query file ..." % (datetime.datetime.now()))
    with open(infile, 'r', encoding="utf-8") as fp:
        q = json.load(fp)
    # set search params from the query dictionary (optional keys become None)
    if (quiet is False):
        click.echo("[%s] Preparing search ..." % (datetime.datetime.now()))
    start = parse(q["start"], ignoretz=True)
    end = parse(q["end"], ignoretz=True)
    programs = None if "programs" not in q["data_sources"] else q["data_sources"]["programs"]
    platforms = None if "platforms" not in q["data_sources"] else q["data_sources"]["platforms"]
    instrument_types = None if "instrument_types" not in q["data_sources"] else q["data_sources"]["instrument_types"]
    metadata_filters = None
    metadata_filters_logical_operator = None
    if ("ephemeris_metadata_filters" in q["data_sources"]):
        if ("expressions" in q["data_sources"]["ephemeris_metadata_filters"]):
            metadata_filters = q["data_sources"]["ephemeris_metadata_filters"]["expressions"]
        if ("logical_operator" in q["data_sources"]["ephemeris_metadata_filters"]):
            metadata_filters_logical_operator = q["data_sources"]["ephemeris_metadata_filters"]["logical_operator"]
    verbose_search = not quiet  # simplified from the redundant conditional expression
    # start search: submit without blocking, then poll until complete
    s = pyaurorax.ephemeris.search(start,
                                   end,
                                   programs=programs,
                                   platforms=platforms,
                                   instrument_types=instrument_types,
                                   metadata_filters=metadata_filters,
                                   metadata_filters_logical_operator=metadata_filters_logical_operator,
                                   poll_interval=poll_interval,
                                   verbose=verbose_search,
                                   return_immediately=True)
    # wait for data
    s.wait(poll_interval=poll_interval, verbose=verbose_search)
    # search has finished, save results to a file or output to terminal
    get_search_data("ephemeris",
                    s.request_id,
                    outfile,
                    output_to_terminal,
                    indent,
                    minify,
                    show_times=True,
                    search_obj=s)
@ephemeris_group.command("describe",
                         short_help="Describe an ephemeris search request")
@click.argument("infile", type=str)
@click.pass_obj
def describe(config, infile):
    """
    Describe an ephemeris search request using
    "SQL-like" syntax
    \b
    INFILE input file with query (must be a JSON)
    """
    # check that infile exists
    if not (os.path.exists(infile)):
        # BUGFIX: the message previously had an unbalanced '(' around the filename
        click.echo("Error: infile doesn't exist (%s)" % (infile))
        sys.exit(1)
    # read in the query file
    with open(infile, 'r', encoding="utf-8") as fp:
        q = json.load(fp)
    # create the search object and ask the API to describe it
    s = __create_search_object_from_query(q)
    d = pyaurorax.ephemeris.describe(s)
    # output the SQL-like description
    click.echo(d)
| StarcoderdataPython |
3570816 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
__author__ = 'zd'
import joblib
import data_utils
import model
import global_parameters as config
from flask import Flask, request, jsonify
# Flask application plus shared resources, loaded once at import time:
app = Flask(__name__)
stop_words = data_utils.read_stopwords()  # stopword list used by the summarizer
w2v_model = joblib.load(config.w2v_model_path)  # pre-trained word2vec model
@app.route('/get_summary', methods=['POST'])
def get_summary():
    """Summarize the POSTed article and return the summary as JSON."""
    content = request.form.get('content')  # request sent as form data ("Body -> x-www-form-urlencoded")
    # content = request.json['content']  # alternative: raw JSON body ("Body -> raw" with JSON selected)
    print(content)
    # two-stage summarization: candidate sentences, then final selection
    final_list = model.get_first_summaries(content, stop_words, w2v_model)
    summaries = model.get_last_summaries(content, final_list, stop_words, w2v_model)
    summary = ','.join(summaries)
    return jsonify({'summary': summary})
if __name__ == '__main__':
    # Smoke test: summarize a sample article and print the result.
    content = "记得很小的时候,我到楼下去玩,一不小心让碎玻璃割伤了腿,疼得我“哇哇”大哭。爸爸问讯赶来,把我背到了医院,仔仔细细地为我清理伤口《爸爸是医生》、缝合、包扎,妈妈则在一旁流眼泪,一副胆战心惊的样子。我的腿慢慢好了,爸爸妈妈的脸上,才渐渐有了笑容。 一天下午,放学时,忽然下起了倾盆大雨。我站在学校门口,喃喃自语:“我该怎么办?”正在我发愁的时候,爸爸打着伞来了。“儿子,走,回家!”我高兴得喜出望外。这时,爸爸又说话了:“今天的雨太大了,地上到处是水坑,我背你回家!”话音未落,爸爸背起我就走了。一会儿,又听到爸爸说:“把伞往后挪一点,要不挡住我眼了。”我说:“好!”回到家,发现爸爸的衣服全湿透了,接连打了好几个喷嚏。我的眼泪涌出来了。 “可怜天下父母心”,这几年里,妈妈为我洗了多少衣服,爸爸多少次陪我学习玩耍,我已经记不清了。让我看在眼里、记在心里的是妈妈的皱纹、爸爸两鬓的白发。我的每一步成长,都包含了父母太多的辛勤汗水和无限爱心,“可怜天下父母心”!没有人怀疑,父母的爱是最伟大的、最无私的!"
    final_list = model.get_first_summaries(content, stop_words, w2v_model)
    summaries = model.get_last_summaries(content, final_list, stop_words, w2v_model)
    summary = ','.join(summaries)
    print(summary)
    # To serve over HTTP instead: POST data (e.g. via Postman) to
    # http://1172.16.31.10:5000/get_summary after enabling the line below.
    # app.run(host='127.0.0.1', port=5000)
330991 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class EndpointManagerBase(object):
    """ Endpoint Manager base class
    Defines an interface between the GBP opflex Agent and the endpoint policy
    repository. The endpoint manager takes care of policy based connectivity,
    that includes NAT when applicable.
    """
    # Shared VRF lookup table.  NOTE(review): class-level mutable, so it is
    # shared by all subclasses and instances -- confirm that is intentional.
    vrf_dict = {}

    @abc.abstractmethod
    def initialize(self, host, bridge_manager, config):
        """ EP Manager initialization method.
        This method will be called before any other.
        :param host: agent host
        :param bridge_manager: the integration bridge manager.
        :param config: configuration dictionary
        :returns: self
        """

    @abc.abstractmethod
    def declare_endpoint(self, port, mapping):
        """ Process Endpoint Mapping.
        This method takes care of processing server side mapping info into
        usable data for the endpoint repository. When appropriate, this
        method will undeclare the endpoint altogether.
        :param port: Object that represents the Openstack port.
        :param mapping: dictionary containing info retrieved from the Openstack
        server. See the gbp_details RPC
        :return: None
        """

    @abc.abstractmethod
    def undeclare_endpoint(self, port_id):
        """ Undeclare Endpoint Mapping.
        This method takes care of undeclaring the endpoint
        :param port_id: ID of the Openstack port.
        :return: None
        """

    @abc.abstractmethod
    def get_registered_endpoints(self):
        """ Get registered endpoints.
        :return: set of port IDs for each endpoint registered in the EP
        directory
        """

    @abc.abstractmethod
    def get_stale_endpoints(self):
        """ Get stale endpoints that are not tracked by registered endpoints.
        :return: set of stale endpoint IDs
        """

    @abc.abstractmethod
    def get_access_int_for_vif(self, vif):
        """ Get access interface for a given vif id.
        :return: access interface name
        """
| StarcoderdataPython |
3376173 | <reponame>showerbugs/showerbasket
import time
import requests
from api import API
class Account(API):
    """API wrapper for the exchange's /account endpoints."""

    def __init__(self):
        super().__init__()
        # All account calls live under the /account prefix.
        self.base = f'{self.base}/account'

    def balance(self):
        """POST a signed balance request and return the decoded JSON reply."""
        url = f'{self.base}/balance'
        payload = {
            'access_token': self.access_token,
            # millisecond timestamp doubles as the request nonce
            'nonce': int(time.time() * 1000),
        }
        resp = requests.post(url, headers=self._header(payload), data=payload)
        return resp.json()
| StarcoderdataPython |
3594906 | <filename>sources/models/femnist/femnist_model_template.py
import tensorflow as tf
from typing import List, Union, Optional
from sources.global_data_properties import FEMNIST_IMAGE_SIZE, FEMNIST_CLASSES
from sources.metrics.default_metrics_tf import get_default_sparse_categorical_metrics_tf
from sources.models.keras_model_template import KerasModelTemplate
class FemnistKerasModelTemplate(KerasModelTemplate):
    """Keras CNN template for the FEMNIST handwriting dataset.

    Architecture: two conv/max-pool stages, a 2048-unit dense layer,
    class logits, and a softmax output.
    """

    def __init__(self, seed, num_classes=FEMNIST_CLASSES,
                 loss=tf.keras.losses.SparseCategoricalCrossentropy()):
        # NOTE(review): the default loss object is created once at class
        # definition time and shared by all instances -- confirm that is OK.
        super(FemnistKerasModelTemplate, self).__init__(seed, loss, num_classes)

    def get_model(self) -> tf.keras.Model:
        """Build and return the (uncompiled) FEMNIST CNN."""
        model = tf.keras.Sequential()
        # single-channel (grayscale) float32 input image
        model.add(
            tf.keras.layers.InputLayer(input_shape=([FEMNIST_IMAGE_SIZE, FEMNIST_IMAGE_SIZE, 1]),
                                       dtype=tf.float32))
        model.add(tf.keras.layers.Conv2D(32, 5, padding='same', activation='relu'))
        model.add(tf.keras.layers.MaxPooling2D((2, 2), strides=2))
        model.add(tf.keras.layers.Conv2D(64, 5, padding='same', activation='relu'))
        model.add(tf.keras.layers.MaxPooling2D((2, 2), strides=2))
        model.add(tf.keras.layers.Flatten())
        model.add(tf.keras.layers.Dense(units=2048, activation='relu'))
        model.add(tf.keras.layers.Dense(units=self.num_classes))
        model.add(tf.keras.layers.Softmax())
        return model

    def get_centralised_metrics(self) -> List[Union[str, tf.keras.metrics.Metric]]:
        # Default sparse-categorical metric set for this number of classes.
        return get_default_sparse_categorical_metrics_tf(self.num_classes)

    def get_optimizer(self, lr=0.1, model: Optional[tf.keras.models.Model] = None) \
            -> tf.keras.optimizers.Optimizer:
        """Return the preconfigured optimizer, or plain SGD at *lr* otherwise."""
        if self.optimizer is not None:
            return self.optimizer
        else:
            return tf.keras.optimizers.SGD(learning_rate=lr)
| StarcoderdataPython |
381534 | <filename>resources.py
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\xe5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x03\x00\x00\x00\xd7\xa9\xcd\xca\
\x00\x00\x00\x01\x73\x52\x47\x42\x01\xd9\xc9\x2c\x7f\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\
\x9c\x18\x00\x00\x02\x4f\x50\x4c\x54\x45\x00\xff\xff\x00\xff\xff\
\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\
\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\
\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\xff\xff\
\x00\xff\xff\x00\xff\xff\x00\xff\xff\x00\x40\x40\x00\x52\x52\x00\
\x6c\x6c\x00\xb7\xb7\x00\xf2\xf2\x00\xed\xed\x00\x74\x74\x00\x4c\
\x4c\x00\x64\x64\x00\x99\x99\x00\xde\xde\x00\x85\x85\x00\xa0\xa0\
\x00\xd8\xd8\x00\x65\x65\x00\x4d\x4d\x00\x95\x95\x00\xf0\xf0\x00\
\x6d\x6d\x00\x00\x00\x00\x12\x12\x00\x03\x03\x00\x7e\x7e\x00\xcf\
\xcf\x00\x46\x46\x00\x0c\x0c\x00\x09\x09\x00\x3d\x3d\x00\x96\x96\
\x00\x14\x14\x00\x76\x76\x00\xc6\xc6\x00\x22\x22\x00\x5c\x5c\x00\
\xe9\xe9\x00\x2d\x2d\x00\xe1\xe1\x00\x8b\x8b\x00\x16\x16\x00\x19\
\x19\x00\xab\xab\x00\x9a\x9a\x00\xbe\xbe\x00\x43\x43\x00\x06\x06\
\x00\x63\x63\x00\x7f\x7f\x00\x0b\x0b\x00\x66\x66\x00\x10\x10\x00\
\x13\x13\x00\x41\x41\x00\xe7\xe7\x00\xe4\xe4\x00\xc8\xc8\x00\x24\
\x24\x00\xa3\xa3\x00\xd6\xd6\x00\x58\x58\x00\x02\x02\x00\x93\x93\
\x00\x50\x50\x00\xae\xae\x00\x0d\x0d\x00\x2f\x2f\x00\x2c\x2c\x00\
\x01\x01\x00\xaa\xaa\x00\x77\x77\x00\x1d\x1d\x00\x91\x91\x00\xa2\
\xa2\x00\x94\x94\x00\x1b\x1b\x00\x5e\x5e\x00\x1e\x1e\x00\xb6\xb6\
\x00\x34\x34\x00\x04\x04\x00\x71\x71\x00\xca\xca\x00\x82\x82\x00\
\xaf\xaf\x00\x2e\x2e\x00\x6b\x6b\x00\x26\x26\x00\x88\x88\x00\x87\
\x87\x00\x05\x05\x00\x54\x54\x00\x39\x39\x00\xee\xee\x00\xe8\xe8\
\x00\x44\x44\x00\x83\x83\x00\xfa\xfa\x00\xbb\xbb\x00\x36\x36\x00\
\x45\x45\x00\xb5\xb5\x00\x0f\x0f\x00\x59\x59\x00\x07\x07\x00\xfd\
\xfd\x00\x8a\x8a\x00\x17\x17\x00\xc0\xc0\x00\x48\x48\x00\x55\x55\
\x00\xcc\xcc\x00\x2b\x2b\x00\x8f\x8f\x00\xca\xd6\x00\xee\xfd\x00\
\xf0\xff\x00\xda\xe8\x00\x42\x46\x00\x88\x90\x00\xb1\xbc\x00\x43\
\x47\x00\x1a\x1c\x00\x7e\x86\x00\xb8\xc4\x00\x57\x5c\x00\x11\x12\
\x00\x68\x6f\x00\xc7\xd4\x00\x0a\x0b\x00\xa5\xaf\x00\xb4\xd6\x00\
\xd5\xfd\x00\xd7\xff\x00\xc3\xe8\x00\x3b\x46\x00\x7a\x91\x00\xc2\
\xe7\x00\x6e\x83\x00\x14\x18\x00\x36\x41\x00\x84\x9c\x00\x5c\x6e\
\x00\x17\x1b\x00\x72\x88\x00\xb8\xda\x00\x2a\x32\x00\x0b\x0d\x00\
\xa8\xc7\x00\xb8\xff\x00\x95\xff\x00\x70\xff\x00\x4c\xff\x00\x2c\
\xff\x00\x2c\xff\x00\x2c\xff\x00\x12\xff\x00\x12\xff\x00\x12\xff\
\x00\x12\xff\x00\x12\xff\x00\x12\xff\x00\x12\xff\x00\x00\xff\x00\
\x00\xff\x00\x00\xff\x00\x00\xff\x00\x00\xff\x00\x00\xff\x00\x00\
\xff\x00\x00\xff\x00\x00\xff\x00\x00\xff\x00\x00\x00\x00\x00\xff\
\x00\x00\xff\x00\x00\xff\x00\x00\xff\x66\x3f\x32\x00\x00\x00\x00\
\xc5\x74\x52\x4e\x53\x00\x01\x58\xdd\xfd\xff\xf9\xc5\x25\x3d\xd9\
\xfc\xc8\x5e\x08\x61\xd1\xfb\xfe\xf0\x9f\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xfe\xf5\x8f\xe5\xfd\xff\xfe\xf0\x8d\x01\x49\xe4\xfe\xff\
\xfd\xec\x9e\x2c\x03\x00\x73\xfa\xc9\x30\x7f\xe4\x12\x42\x00\x00\
\x01\x5e\x49\x44\x41\x54\x78\x9c\x63\x60\x60\x64\x62\x66\x61\x45\
\x01\x6c\xec\x1c\x8c\x0c\x0c\x8c\x9c\x5c\xdc\xac\x68\x80\x8d\x87\
\x97\x8f\x81\x5f\x40\x10\x5d\x9c\x95\x55\x48\x58\x84\x01\x53\x14\
\x02\xc8\x90\x10\x15\x15\x13\x97\x90\x94\x92\x16\x95\x91\x95\x93\
\x97\x57\x10\x53\x54\x52\x56\x51\x55\x53\x17\x65\xd0\xd0\xd0\xd4\
\xd2\xd0\xd6\xd1\xd5\xd0\xd3\xd7\x30\x30\x14\x37\x32\x36\x31\xd5\
\x33\x33\xb7\xd0\x00\x4a\x58\x5a\x59\xdb\xd8\xea\x6a\xd8\xd9\x3b\
\x38\x3a\x39\xbb\xb8\xda\xbb\xb9\x7b\x78\x5a\x83\x24\xbc\xbc\x7d\
\x34\x7c\x81\x12\x7e\xfe\x01\xea\x81\x5a\x41\xc1\x21\xa1\x61\x5e\
\xe1\x46\x20\x09\x55\x1b\xeb\x08\x5d\x0d\xf1\x48\xd3\xa8\xe8\x98\
\x10\xd1\xd8\xb8\xf8\x84\x44\xad\x24\xa0\x84\x75\xb2\x46\x4a\xaa\
\xae\x86\x86\x86\x7b\x9a\x64\xba\x69\x46\x66\x56\xb6\x75\x4e\x6e\
\x1e\x50\x22\xdf\xdf\xb7\xa0\x50\x57\x23\xbe\x28\xa4\xb8\xa4\xb4\
\x2c\xaa\x3c\xbf\xa2\xb2\xaa\x5a\x1a\x28\xe1\x57\xc3\xca\x0a\x94\
\xb0\xab\xad\x2b\xaf\x37\x69\xa8\xb4\x68\x6c\xaa\x6e\xd6\x6f\x01\
\x4a\xb4\xb6\xb5\xb7\x77\x74\x6a\x74\x75\xf7\xf4\xf6\xf5\x4f\xd0\
\x98\x38\x69\xb2\x91\xc6\x94\xa9\x40\x89\x69\xd3\x67\xcc\x98\x39\
\x4b\x63\xf6\x9c\xb9\xf3\xe6\x2f\x58\xa8\xb1\x68\xf1\x92\xa5\x1a\
\xcb\x96\x33\xac\xc0\x01\x18\x56\xe2\x00\x0c\xab\x70\x00\x86\xd5\
\x38\x00\xc3\x1a\xac\x60\xed\x3a\x86\xf5\x1b\x36\x6e\xc2\x00\x9b\
\xb7\x6c\x65\xd8\xb6\x7d\xc7\xce\x5d\xa8\x60\xf7\x9e\xbd\xfb\xf6\
\x33\x1c\xd8\x76\x70\xc7\x6e\x54\x89\x43\x87\x8f\x6c\x3b\x00\x00\
\x07\x53\xcf\x32\xff\x45\xdf\xc5\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x03\
\x00\x00\x55\x77\
\x00\x50\
\x00\x52\x00\x57\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x71\xa0\xb4\x13\xda\
"
# Select the resource-data format version based on the Qt runtime:
# Qt < 5.8 only understands format version 1; newer Qt uses version 2.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    # Register the embedded resource data with Qt's resource system.
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the embedded resource data from Qt's resource system.
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

# Register the resources as soon as this module is imported.
qInitResources()
| StarcoderdataPython |
3503217 | <gh_stars>0
# coding=utf-8
# Advent of Code 2021 - Day 1
import utils
class Visit:
    """Simple record of a visit: what is allowed plus an occurrence count.

    The exact semantics of ``allowed`` are defined by the caller -- the
    class only stores the two values.
    """

    def __init__(self, allowed, count):
        self.allowed = allowed  # caller-defined "allowed" payload
        self.count = count      # occurrence/visit count

    def __repr__(self):
        # Added for debuggability; construction signature is unchanged.
        return f"Visit(allowed={self.allowed!r}, count={self.count!r})"
| StarcoderdataPython |
11274813 | """
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
https://leetcode.com/problems/letter-combinations-of-a-phone-number/
"""
import itertools
from collections import Counter
from typing import Generator, List, Set, Tuple
class Solution:
    """LeetCode 17: letter combinations of a phone number."""

    # Keypad mapping: digit -> letters.  0 and 1 carry no letters.
    _DIGIT_MAP = {
        "1": "", "2": "abc", "3": "def", "4": "ghi", "5": "jkl",
        "6": "mno", "7": "pqrs", "8": "tuv", "9": "wxyz", "0": "",
    }

    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string the digit sequence can spell.

        Combinations are ordered with the last digit varying fastest (the
        order produced by ``itertools.product``), matching the expected
        LeetCode output.  An empty input yields an empty list, as does any
        digit with no letters (0/1) -- the original hand-rolled index
        generator crashed with an IndexError on those digits.
        """
        if not digits:
            return []
        letter_groups = [self._DIGIT_MAP[digit] for digit in digits]
        return ["".join(combo) for combo in itertools.product(*letter_groups)]
test_cases = ["23"]
results = [["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]]
if __name__ == "__main__":
app = Solution()
for test_case, correct_result in zip(test_cases, results):
my_solution = app.letterCombinations(test_case)
assert my_solution == correct_result, f"My result: {my_solution}, correct result: {correct_result}"
| StarcoderdataPython |
1976719 | <gh_stars>0
class LLNode:
    """A single node of a singly linked list."""

    def __init__(self, n=None):
        # payload stored in this node; the link starts out detached
        self.data = n
        self.next = None
class SLL:
    """Singly linked list holding only a head pointer.

    NOTE(review): the ``n`` parameter is accepted but never used, and the
    operations exercised in the __main__ driver (delete/insertleft/
    insertright/printall) are not defined here -- the class looks
    unfinished; confirm intent before relying on it.
    """
    def __init__(self, n = None):
        self.Head = None
if __name__ == '__main__':
    # NOTE(review): SLL defines none of delete/insertleft/insertright/
    # printall, so every call below raises AttributeError at runtime --
    # this driver appears to be a stub for an unfinished implementation.
    ll = SLL()
    ll.Head = LLNode()
    ll.delete(1)
    ll.insertleft(1)
    ll.insertright(2)
    ll.printall()
    ll.delete(1)
3427694 | '''
create_db.py
create db tables
'''
import sqlite3
def create_db(dbname):
    """Create the corpus schema: document, sentence, word, lemma tables
    plus the lemma_word_sentence association table.

    Fixes over the original:
    - the ``lemma_word_sentence`` DDL was missing commas between its
      FOREIGN KEY clauses, which is a SQLite syntax error (the statement
      raised ``sqlite3.OperationalError``);
    - tables are created with IF NOT EXISTS so the call is idempotent;
    - the connection is closed even if table creation fails.

    :param dbname: path of the SQLite database file to create/open
    """
    conn = sqlite3.connect(dbname)
    try:
        c = conn.cursor()
        # one row per source document, deduplicated by content and hash
        c.execute('''CREATE TABLE IF NOT EXISTS document
                     (id INTEGER PRIMARY KEY,
                      document TEXT UNIQUE,
                      hash TEXT UNIQUE)
                  ''')
        # sentences, ordered within their document by sentence_idx
        c.execute('''CREATE TABLE IF NOT EXISTS sentence
                     (id INTEGER PRIMARY KEY,
                      sentence TEXT,
                      sentence_idx INTEGER,
                      document_id INTEGER,
                      FOREIGN KEY(document_id) REFERENCES document(id))
                  ''')
        # unique surface forms
        c.execute('''CREATE TABLE IF NOT EXISTS word
                     (id INTEGER PRIMARY KEY,
                      word TEXT UNIQUE)
                  ''')
        # unique lemmas
        c.execute('''CREATE TABLE IF NOT EXISTS lemma
                     (id INTEGER PRIMARY KEY,
                      lemma TEXT UNIQUE)
                  ''')
        # association table: lemma/word occurrence counts per sentence
        c.execute('''CREATE TABLE IF NOT EXISTS lemma_word_sentence
                     (lemma_id INTEGER,
                      word_id INTEGER,
                      sentence_id INTEGER,
                      count INTEGER,
                      FOREIGN KEY(lemma_id) REFERENCES lemma(id),
                      FOREIGN KEY(word_id) REFERENCES word(id),
                      FOREIGN KEY(sentence_id) REFERENCES sentence(id))
                  ''')
        conn.commit()
    finally:
        conn.close()
if __name__ == '__main__':
    # Build (or re-open) the database named in the project configuration.
    from config import defaults
    create_db(defaults['DATABASE_NAME'])
3419119 | from django.shortcuts import render
from .models import brand,car_model
from django.views.generic import DetailView
def home(request):
    """Render the landing page listing every brand.

    NOTE(review): this view writes to the database on every GET -- it
    re-syncs each brand's ``cars_to_brand`` M2M from the reverse FK set
    and saves the brand.  That looks like one-off migration work rather
    than request handling; confirm it belongs here.
    """
    for x in brand.objects.all():
        cars_related = x.car_model_set.all()
        x.cars_to_brand.set(cars_related)
        x.save()
    context = {
        'cars': brand.objects.all(),
    }
    return render(request, 'cars/base.html', context)
def brands(request):
    """Render the brand-listing page."""
    return render(request, 'cars/brands.html', {'brands': brand.objects.all()})
class BrandDetailView(DetailView):
    # Generic Django detail view for a single brand record.
    model = brand
4875559 | """Test the TimeoutR2CClient"""
import unittest
from PiCN.Packets import Name, Content, Interest
from PiCN.Layers.NFNLayer.R2C import TimeoutR2CHandler
from PiCN.Layers.NFNLayer.Parser import DefaultNFNParser
from PiCN.Layers.NFNLayer.NFNComputationTable import NFNComputationList
class test_TimeoutR2CClient(unittest.TestCase):
    """Unit tests for TimeoutR2CHandler name mangling and request handling."""

    def setUp(self):
        self.r2cClient = TimeoutR2CHandler()

    def test_create_r2c_message(self):
        """Creating an R2C name must inject the R2C/KEEPALIVE components."""
        plain = Name("/test/NFN")
        keepalive = self.r2cClient.R2C_create_message(plain)
        self.assertEqual(Name("/test/R2C/KEEPALIVE/NFN"), keepalive)

    def test_get_original_r2c_message(self):
        """Stripping an R2C name must restore the original NFN name."""
        keepalive = Name("/test/R2C/KEEPALIVE/NFN")
        restored = self.r2cClient.R2C_get_original_message(keepalive)
        self.assertEqual(Name("/test/NFN"), restored)

    def test_handle_r2c_request(self):
        """A keepalive request for a running computation answers 'Running'."""
        plain = Name("/test/NFN")
        computations = NFNComputationList(self.r2cClient, DefaultNFNParser())
        computations.add_computation(plain, 1, Interest(plain))
        request = self.r2cClient.R2C_create_message(plain)
        reply = self.r2cClient.R2C_handle_request(request, computations)
        self.assertEqual(reply, Content(request, "Running"))
| StarcoderdataPython |
6403106 | # -*- coding: utf-8 -*-
'''
Created on 2016-12-07
@author: hustcc
'''
from __future__ import absolute_import
from app import SQLAlchemyDB as db
class BaseMethod(object):
    """Mixin giving SQLAlchemy models one-call persistence helpers."""

    # Force MyISAM/utf8 for every table derived from this base.
    __table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}

    def save(self):
        """Stage this instance (insert or update) and commit immediately."""
        session = db.session
        session.add(self)
        session.commit()

    def delete(self):
        """Remove this instance from the database and commit immediately."""
        session = db.session
        session.delete(self)
        session.commit()
| StarcoderdataPython |
8049651 | import os
import sys
import copyright
from distutils.core import setup
from setuptools import setup, find_packages
version = copyright.__version__

# Read the long description and the requirements up front so the file
# handles are closed deterministically (the original leaked them via
# bare open().read() inside the setup() call).
with open('README.md', encoding='utf-8') as readme_file:
    long_description = readme_file.read()
with open('requirements.txt', encoding='utf-8') as req_file:
    # Strip blank lines so install_requires contains no empty specifiers.
    install_requires = [line.strip() for line in req_file if line.strip()]

setup(
    name='django-copyright',
    version=version,
    packages=find_packages(),
    license='BSD',
    description="Copyright django app",
    long_description=long_description,
    install_requires=install_requires,
    include_package_data=True,
    author="arteria GmbH",
    author_email='<EMAIL>',
)
| StarcoderdataPython |
11204558 | <gh_stars>1000+
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to fetch IMDB data set."""
import os
import pandas as pd
import tensorflow_datasets as tfds
# Run this file to download and preprocess the entire imdb dataset.
# Remove the imdb_small_with_labels.csv that comes natively in the repo/data
# folder. Make sure imdb.csv is present in the /data folder.
# Change the hyperparameters to better suit the bigger dataset.
# The configurations that were found reasonable are listed below:
# imdb_pipeline_native_keras.py:
# tfma.GenericValueThreshold(lower_bound={'value':0.85}
# trainer_pb2.TrainArgs(num_steps=7000)
# trainer_pb2.EvalArgs(num_steps=800)
# imdb_utils_native_keras.py:
# _TRAIN_BATCH_SIZE=64
# _EVAL_BATCH_SIZE=64
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
# Dataset source acknowledgement:
# @InProceedings{maas-EtAl:2011:ACL-HLT2011,
# author = {<NAME>. and <NAME>. and <NAME>. and
# Huang, Dan and Ng, <NAME>. and <NAME>},
# title = {Learning Word Vectors for Sentiment Analysis},
# booktitle = {Proceedings of the 49th Annual Meeting of the Association for
# Computational Linguistics: Human Language Technologies},
# month = {June},
# year = {2011},
# address = {Portland, Oregon, USA},
# publisher = {Association for Computational Linguistics},
# pages = {142--150},
# url = {http://www.aclweb.org/anthology/P11-1015}
# }
def fetch_data():
    """Download the full IMDB reviews dataset to <cwd>/data/imdb.csv.

    Combines the train and test splits into one CSV with the review text
    decoded from bytes to str.
    """
    ds = tfds.load('imdb_reviews', split='train+test')
    numpy_ds = tfds.as_numpy(ds)
    df = pd.DataFrame(numpy_ds)
    # tfds yields bytes; decode once so the CSV holds plain text.
    df['text'] = df['text'].str.decode('utf-8')
    # Build the destination portably and make sure the folder exists (the
    # original '+'-concatenation assumed POSIX separators and a
    # pre-existing ./data directory).
    dst_dir = os.path.join(os.getcwd(), 'data')
    os.makedirs(dst_dir, exist_ok=True)
    df.to_csv(os.path.join(dst_dir, 'imdb.csv'), index=False)


if __name__ == '__main__':
    fetch_data()
| StarcoderdataPython |
12857636 | import pandas as pd
import numpy as np
# Preprocessing for the UCI Adult census dataset: normalise missing values,
# impute sparse categoricals, binary-encode the target, and split the frame
# into numeric and categorical subsets.
print("Data Loading....")
data = pd.read_csv("adult_2.csv")

# The raw file encodes missing values as '?'; normalise them to NaN.
# (np.nan replaces np.NaN, which was removed in NumPy 2.0.)
data = data.replace('?', np.nan)

# Impute the three sparse categorical columns with their mode.  Assign the
# result instead of fillna(inplace=True) on a column slice, which is
# deprecated chained assignment in modern pandas.
for col in ['workclass', 'occupation', 'native.country']:
    data[col] = data[col].fillna(data[col].mode()[0])

# Binary-encode the target label.
data['income'] = data['income'].map({'<=50K': 0, '>50K': 1})

print("********** Checking Missing Values **********")
print(data.isnull().sum())

# Separate the numeric and categorical variables
numeric_data = data.select_dtypes(include=[np.number])
categorical_data = data.select_dtypes(exclude=[np.number])
print("Numeric Variable")
print(numeric_data.head())
print(numeric_data.info())
print(numeric_data.columns)
print("Shape of Numeric Data :", numeric_data.shape)
print(categorical_data.nunique())
print("Categorical Variable")
print(categorical_data.head())
print("Shape of Numeric Data :", categorical_data.shape)

# We have to rename all the columns of Categorical variable subset
# NOTE(review): this assumes exactly 8 categorical columns in the input
# file -- confirm against adult_2.csv before upgrading the data.
categorical_data.columns = ['Private', 'HSgrad', 'Widowed',
                            'Execmanagerial', 'Unmarried', 'Black', 'Female', 'UnitedStates']
print(categorical_data.head())
print("Shape of Numeric Data :", categorical_data.shape)
| StarcoderdataPython |
396948 | <reponame>jkbjh/sacreddata
import os
try:
import ujson as json
except ImportError:
import json
import dictor
import datetime
import io
import shutil
import pandas as pd
import warnings
class BuildCommandMixin(object):
    """Mixin that reconstructs the command line used to launch a run."""

    def build_command(self):
        """Rebuild the original invocation from ``self.run`` metadata."""
        merged = dict(self.run["experiment"])
        merged.update(self.run["meta"])
        # Drop falsy top-level entries, then falsy option values.
        merged = {key: val for key, val in merged.items() if val}
        merged["options"] = {key: val for key, val in merged["options"].items() if val}
        cfg_updates = merged["options"].pop("UPDATE", {})
        updater = " with " if merged["options"].pop("with", False) else ""
        updater += " ".join(cfg_updates)
        remaining = merged.pop("options", {})
        merged["use_options"] = " ".join(
            "%s %s" % (key, val) for key, val in remaining.items())
        merged["cfg_updates"] = updater
        return "{base_dir}/{mainfile} {command} {use_options} {cfg_updates}".format(**merged)
def _slurp_json(filename):
with open(filename) as fp:
return json.loads(fp.read())
class lazy_property(object):
    """Descriptor that computes a value once per instance and caches it.

    On first access through an instance the wrapped function is called and
    its result is stored in the instance ``__dict__`` under the same name,
    so later accesses bypass the descriptor entirely.
    """

    def __init__(self, func):
        self._func = func
        self.__name__ = func.__name__
        self.__doc__ = func.__doc__

    def __get__(self, obj, klass=None):
        if obj is None:
            # Class-level access: return the descriptor itself.  The
            # original returned None, which hid the descriptor (and its
            # __doc__) from introspection tools.
            return self
        result = obj.__dict__[self.__name__] = self._func(obj)
        return result
class JSONObj(object):
    """Thin read-only wrapper around parsed JSON with dotted-path lookup."""

    @classmethod
    def slurp(cls, filename):
        """Load *filename* as JSON and wrap the parsed result."""
        return cls(_slurp_json(filename))

    def __init__(self, json_data):
        self._json = json_data

    def __getitem__(self, value_path):
        # Dotted-path lookup (e.g. obj["a.b.c"]) delegated to dictor.
        return dictor.dictor(self._json, value_path)

    @property
    def raw(self):
        """The underlying parsed JSON object."""
        return self._json

    def keys(self):
        return self._json.keys()

    def items(self):
        return self._json.items()

    def __repr__(self):
        base = super(JSONObj, self).__repr__()
        return "%s %r>" % (base[:-1], self.keys())
class FileRun(BuildCommandMixin, object):
    """A single sacred run stored on disk (one numbered run directory)."""

    def __init__(self, base_directory, run_directory, run_json):
        self._base_directory = os.path.expanduser(base_directory)
        self._run_directory = os.path.expanduser(run_directory)
        self._run_json = run_json
        # Known artifact file names, for membership checks in open()/extract.
        self._artifacts = set(self["artifacts"])
    @lazy_property
    def config(self):
        # Parsed config.json; loaded lazily and cached on the instance.
        return JSONObj.slurp(os.path.join(self._run_directory, "config.json"))
    @lazy_property
    def metrics(self):
        # Parsed metrics.json; loaded lazily and cached on the instance.
        return JSONObj.slurp(os.path.join(self._run_directory, "metrics.json"))
    @property
    def run(self):
        # run.json wrapped for dotted-path access.
        return JSONObj(self._run_json)
    def __getitem__(self, value_path):
        # Dotted-path lookup into run.json, e.g. run["experiment.name"].
        return dictor.dictor(self._run_json, value_path)
    def keys(self):
        return self._run_json.keys()
    def info(self):
        """Return a summary dict: directory, name, start time and duration."""
        str_format = "%Y-%m-%dT%H:%M:%S.%f"
        start_time = datetime.datetime.strptime(self["start_time"], str_format)
        # stop_time is absent while the run is still active -> duration None.
        stop_time = datetime.datetime.strptime(self['stop_time'], str_format) if self['stop_time'] else None
        return dict(
            run_directory=self._run_directory,
            name=self["experiment.name"],
            start_time=start_time,
            duration=(stop_time - start_time) if stop_time is not None else None)
    @property
    def artifacts(self):
        return self._artifacts
    def __artifact_path(self, artifact):
        return os.path.join(self._run_directory, artifact)
    def open(self, artifact, *a):
        """Open a known artifact file; extra args are passed to io.open."""
        assert artifact in self._artifacts
        return io.open(self.__artifact_path(artifact), *a)
    def __repr__(self):
        return "%s info=%r>" % (
            super(FileRun, self).__repr__()[:-1],
            self.info()
        )
    def extract_artifacts(self, output_path, artifacts, create_output_path=True):
        """Copy the named artifacts into *output_path*; return target paths.

        Raises RuntimeError if any requested artifact is unknown.
        """
        unknown_artifacts = set(artifacts) - self.artifacts
        if unknown_artifacts:
            raise RuntimeError("Unknown artifacts requested: %r" % (sorted(list(unknown_artifacts))))
        if not os.path.exists(output_path) and create_output_path:
            os.makedirs(output_path)
        targets = []
        for artifact in artifacts:
            target_path = os.path.join(output_path, artifact)
            shutil.copyfile(self.__artifact_path(artifact), target_path)
            targets.append(target_path)
        return targets
class FileReporter(object):
    """Read-only index over a sacred file-observer results directory."""

    def __init__(self, directory):
        self.base_directory = os.path.expanduser(directory)
        self.sources_directory = os.path.join(self.base_directory, "_sources")
        # The _sources folder is the marker that this really is a sacred dir.
        if not os.path.exists(self.sources_directory):
            raise RuntimeError(("_sources directory not found, probably "
                                "not a sacred %r results directory!") %
                               (self.base_directory,))
        self._run_json = {}
        self.update()
    def update(self):
        """Re-scan for run folders (numeric names), keeping cached run.json."""
        self._runs = [run for run in os.listdir(self.base_directory) if run.isdigit()]
        self._runs.sort(key=lambda x: int(x))
        old_json = self._run_json
        self._run_json = {}
        for run in self._runs:
            if run in old_json:
                self._run_json[run] = old_json[run]  # use already loaded version
    def _get_run_json(self, run):
        # Lazy-load and cache run.json for a single run directory.
        assert run in self._runs
        json_filename = os.path.join(self.base_directory, run, "run.json")
        if os.path.exists(json_filename):
            self._run_json[run] = _slurp_json(json_filename)
        return self._run_json[run]
    def __getitem__(self, run_key):
        # Accept ints for convenience but warn, since run keys are strings.
        if not isinstance(run_key, str):
            conv_key = str(run_key)
            warnings.warn("Got item %r as run_key but expected a string, will be converted to: %r" % (run_key, conv_key))
            run_key = conv_key
        return FileRun(self.base_directory, os.path.join(self.base_directory, run_key), self._get_run_json(run_key))
    def keys(self):
        return self._runs
    def as_df(self, keyfilter=None):
        """Build a pandas DataFrame summarising all (optionally filtered) runs."""
        result = []
        keys = self.keys()
        if keyfilter is not None:
            keys = keyfilter(keys)
        for key in keys:
            tr = self[key]
            info = tr.info()
            values = dict(run_key=key,
                          name=info["name"],
                          status=tr["status"],
                          start_time=info["start_time"],
                          duration=info["duration"],
                          )
            values.update(dict(tr.config.items()))
            result.append(values)
        return pd.DataFrame(result)
| StarcoderdataPython |
6624006 | <filename>wsm/backend/asyncwhois/cache.py
from .base import BaseCacheHandler, Action, Kind
import json
from ..services import (
get_whois,
create_whois,
get_whois_by_ip,
update_whois_by_ip,
get_cache_by_ip,
)
class IPWhoisCacheHandler(BaseCacheHandler):
    """Cache handler dispatching CRUD actions to the whois service layer."""

    async def create(self, action: Action):
        # Only whois creation is supported; other kinds fall through to None.
        if action.kind == Kind.CREATE_WHOIS:
            return await create_whois(action.payload.ip)
    async def read(self, action: Action):
        # Dispatch the three supported read kinds; unknown kinds yield None.
        if action.kind == Kind.GET_WHOIS_BY_IP:
            return await get_whois_by_ip(action.payload.ip)
        elif action.kind == Kind.GET_WHOIS:
            return await get_whois()
        elif action.kind == Kind.GET_CACHE_BY_IP:
            return await get_cache_by_ip(action.payload.ip)
    async def update(self, action: Action):
        if action.kind == Kind.UPDATE_WHOIS_BY_IP:
            return await update_whois_by_ip(
                action.payload.ip,
                action.payload.country,
                # The whois payload is persisted as a JSON string.
                json.dumps(action.payload.whois),
            )
    async def delete(self, action: Action):
        # NOTE(review): the base-class delete is returned un-awaited; if
        # BaseCacheHandler.delete is a coroutine this returns a coroutine
        # object instead of its result -- confirm intended behavior.
        return super().delete(action)
| StarcoderdataPython |
3227811 | from datetime import datetime, timedelta
import pandas as pd
import flask
from sqlalchemy import extract, asc, desc, func, text
from app import db, app
# Reference dates used throughout the reporting queries.  Computed once at
# import time, so a long-running process keeps the values from startup.
today = datetime.today()
first_of_this_month = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
last_of_prev_month = first_of_this_month - timedelta(days=1)
first_of_prev_month = last_of_prev_month.replace(day=1)
# Roughly 13 months back (390 days), snapped to the first of that month.
minus_13_months = (first_of_this_month - timedelta(days=390)).replace(day=1)
class Account(db.Model):
    """Bank account; transactions reference it via Transaction.acc_id.

    NOTE(review): the query helpers below take no ``self``/``cls`` and are
    called as plain namespace functions (Account.create_one(...)); consider
    decorating them with @staticmethod.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    accName = db.Column(db.String, unique=True, nullable=False)
    #transactions = db.relationship('Transaction', backref=db.backref('trans', lazy=True))
    def __repr__(self):
        return '<Account {}>'.format(self.accName)
    def create_one(newAccName):
        # Insert a new account with the given (unique) name.
        stmt = Account(accName=newAccName)
        db.session.add(stmt)
        db.session.commit()
    def one_acc(accountid):
        # Fetch a single account by primary key (None if missing).
        return Account.query.filter_by(id = accountid).first()
    '''def list_acc():
        q1 = db.session.query(Transaction.acc_id, Transaction.amount.label('balance'), Transaction.traDate)\
                        .distinct(Transaction.acc_id)\
                        .outerjoin(Tag)\
                        .filter(Tag.isBlnc==True)\
                        .order_by(Transaction.acc_id, Transaction.traDate.desc())\
                        .subquery()
        q2 = db.session.query(Account.id, Account.accName, func.max(func.TO_CHAR(Transaction.uplDate,'YYYY-MM-DD')).label('upldate'))\
                        .outerjoin(Transaction)\
                        .group_by(Account.id, Account.accName)\
                        .subquery()
        return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
                        .outerjoin(q1, q2.c.id == q1.c.acc_id)'''
    def list_acc():
        # Latest balance per account (row_number over traDate desc, rn == 1)
        # joined with each account's most recent upload date.
        cte = db.session.query(Transaction.acc_id\
                            ,Transaction.amount.label('balance')\
                            ,func.row_number().over(partition_by=Transaction.acc_id, order_by=desc(Transaction.traDate)).label("rn"))\
                        .outerjoin(Tag)\
                        .filter(Tag.isBlnc==1)\
                        .cte()
        q1 = db.session.query(cte.c.acc_id, cte.c.balance).filter(cte.c.rn == 1).subquery()
        q2 = db.session.query(Account.id, Account.accName, func.max(func.date(Transaction.uplDate)).label('upldate'))\
                        .outerjoin(Transaction)\
                        .group_by(Account.id, Account.accName)\
                        .subquery()
        return db.session.query(q2.c.id, q2.c.accName, q2.c.upldate, q1.c.balance)\
                        .outerjoin(q1, q2.c.id == q1.c.acc_id)
class Transaction(db.Model):
    """A single bank transaction plus the app's reporting/statistics queries.

    NOTE(review): the query helpers take no ``self``/``cls`` and are called
    as plain namespace functions (Transaction.cnt_all(...)); consider
    @staticmethod.  Several queries use func.TO_CHAR / func.TEXT, which are
    PostgreSQL-specific.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    traDate = db.Column(db.Date, nullable=False)
    amount = db.Column(db.Float, nullable=False)
    desc = db.Column(db.String, nullable=False)
    card = db.Column(db.String(1), nullable=False)
    tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=True)
    acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
    uplDate = db.Column(db.DateTime, nullable=False, default=datetime.now)
    confirmed = db.Column(db.Boolean, nullable=True, default=False)
    def __repr__(self):
        return '<Transaction {}>'.format(self.desc)
    def create_one(tDate, tAmnt, tDesc, tag, acc, card, confrm):
        # Insert a single transaction row.
        stmt = Transaction(traDate=tDate, amount=tAmnt, desc=tDesc, card=card, tag_id=tag, acc_id=acc, confirmed=confrm)
        db.session.add(stmt)
        db.session.commit()
    def update_trans(tid, traDate, amount, desc, tag):
        # Full edit of one transaction; marks it as user-confirmed.
        stmt = Transaction.query.filter_by(id=tid).first()
        stmt.traDate = traDate
        stmt.amount = amount
        stmt.desc = desc
        stmt.tag_id = tag
        stmt.confirmed = True
        db.session.commit()
    def update_trans_amount(tid, amount):
        stmt = Transaction.query.filter_by(id=tid).first()
        stmt.amount = amount
        db.session.commit()
    def update_desc(account_id, desc_from, desc_to):
        # Bulk search/replace inside transaction descriptions.
        # NOTE(review): the ``account_id`` parameter is never used, so this
        # rewrites matching descriptions across ALL accounts -- confirm.
        db.session.query(Transaction)\
            .filter(Transaction.desc.like('%'+ desc_from +'%'))\
            .update({Transaction.desc: func.replace(Transaction.desc, desc_from, desc_to)}
                    ,synchronize_session=False)
        db.session.commit()
    def delete_trans(tid):
        stmt = Transaction.query.filter_by(id=tid).first()
        db.session.delete(stmt)
        db.session.commit()
    def cnt_all(account_id):
        # Total number of transactions for the account.
        return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
            .filter(Transaction.acc_id == account_id).one_or_none()
    def cnt_new(account_id):
        # Number of not-yet-confirmed (freshly uploaded) transactions.
        return Transaction.query.with_entities(func.count(Transaction.id).label('cnt'))\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == False).one_or_none()
    def cnt_avg_sum_filtered(account_id, date_from, date_to, sel_tags):
        # Count / average / sum over a date range restricted to given tags.
        return Transaction.query\
            .with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
            .filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags)).one_or_none()
    def list_filtered(account_id, date_from, date_to, sel_tags):
        return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.tag_id.in_(sel_tags))\
            .order_by(Transaction.traDate.desc(), Transaction.amount)
    def cnt_avg_sum_filtered_new(account_id, date_from, date_to):
        # Same aggregates but only over unconfirmed transactions.
        return Transaction.query\
            .with_entities(func.count(Transaction.amount).label('a_cnt'), func.avg(Transaction.amount).label('a_avg'), func.sum(Transaction.amount).label('a_sum'))\
            .filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False).one_or_none()
    def list_filtered_new(account_id, date_from, date_to):
        return Transaction.query.filter(Transaction.acc_id == account_id, Transaction.traDate >= date_from, Transaction.traDate <= date_to, Transaction.confirmed == False)\
            .order_by(Transaction.traDate.desc(), Transaction.amount)
    def list_latest_uploads_by_card(account_id, card):
        # The three most recent transactions for one card (upload preview).
        return db.session.query(Transaction.card, Transaction.desc, Transaction.traDate, Transaction.amount)\
            .filter(Transaction.acc_id == account_id, Transaction.card == card)\
            .order_by(Transaction.traDate.desc()).limit(3).all()
    def first_date(account_id):
        # Earliest transaction date, falling back to today for empty accounts.
        return db.session.query(db.func.min(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
    def last_date(account_id):
        return db.session.query(db.func.max(Transaction.traDate)).filter(Transaction.acc_id==account_id).scalar() or today
    def count_months(account_id):
        # Distinct YYYYMM months with data before the current month.
        return db.session.query(func.TO_CHAR(Transaction.traDate,'YYYYMM'))\
            .filter(Transaction.acc_id == account_id, Transaction.traDate < first_of_this_month)\
            .distinct().count()
    def max_year(account_id):
        return Transaction.query\
            .with_entities(extract('year',func.max(Transaction.traDate).label('max_year')))\
            .filter(Transaction.acc_id == account_id).scalar()
    def list_year(account_id):
        # Distinct years with data, newest first (for dropdown filters).
        return db.session.query(extract('year',Transaction.traDate).label('year'))\
            .filter(Transaction.acc_id == account_id).distinct().order_by(desc('year'))
    def chart_header(column_name, account_id):
        # Tag-group names/colors for groups whose tags have the given chart
        # flag set.  NOTE(review): the subquery filters on Taggroup.acc_id
        # without an explicit join from Tag to Taggroup -- confirm the
        # implicit join is intended.
        subquery = db.session.query(Tag.tgr_id).filter(getattr(Tag, column_name)==True, Taggroup.acc_id==account_id)
        return db.session.query(Taggroup.gName, Taggroup.gColor)\
            .filter(Taggroup.id.in_(subquery))\
            .order_by(Taggroup.gName)
    def chart_data(account_id, column_name, months):
        """Monthly totals per tag group for the last *months* months.

        Returns {month_label: {group_name: total}} with 0 defaults, for the
        stacked chart whose series flag is named by *column_name*.
        """
        first_of_n_month = (first_of_this_month - timedelta(days=months*30)).replace(day=1)
        q = db.session.query(Taggroup.gName
                ,func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
                ,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
                ,func.SUM(Transaction.amount).label('total'))\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
            .filter(Transaction.acc_id == account_id\
                ,Transaction.confirmed == True\
                ,Transaction.traDate >= first_of_n_month\
                ,Transaction.traDate < first_of_this_month\
                ,getattr(Tag, column_name)==True)\
            .group_by(Taggroup.gName\
                ,func.TO_CHAR(Transaction.traDate,'YYYYMM')\
                ,func.TO_CHAR(Transaction.traDate,'MON').label('mnth'))\
            .order_by('orderByCol',Taggroup.gName)
        #get unique groups
        g = []
        prev_val = ''
        for row in q:
            if row.gName != prev_val:
                g.append(row.gName)
            prev_val = row.gName
        #create months/group with default value
        m = {}
        prev_val = ''
        for row in q:
            if row.mnth != prev_val:
                m[row.mnth] = {g_:0 for g_ in g}
            prev_val = row.mnth
        #replace values in dict if exists in q
        for row in q:
            for key in m:
                for mk in m[key]:
                    if row.mnth==key and mk==row.gName :
                        m[key][mk] = row.total
        return m
    def get_dates(what_year_):
        # Helper returning (what_year, prev_year, which_year, which_month,
        # start_12_month, end_12_month) used by the yearly statistics.
        what_year = int(what_year_)
        prev_year = what_year - 1
        prev_month_num = last_of_prev_month.strftime("%m")
        prev_month = int(prev_month_num) - 1 if int(prev_month_num) > 1 else 12
        year_num = last_of_prev_month.strftime("%Y")
        which_year = year_num if int(year_num) == what_year else prev_year
        which_month = prev_month_num if int(year_num) == what_year else prev_month
        end_12_month = last_of_prev_month.replace(year=what_year)
        start_12_month = (end_12_month - timedelta(days=360)).replace(day=1)
        return what_year, prev_year, which_year, which_month, start_12_month, end_12_month
    def get_stats_year(account_id, what_year, lbl1, lbl2):
        # Per-tag-group yearly sums as a subquery with caller-chosen labels.
        return db.session.query(Tag.tgr_id.label(lbl1), func.SUM(Transaction.amount).label(lbl2))\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)==what_year)\
            .group_by(Tag.tgr_id).subquery()
    def get_statsDate(what_year):
        # Date-range strings (prev year, this year, prev month) for display.
        gd = Transaction.get_dates(what_year)
        fopm = first_of_prev_month.replace(year=int(gd[2]))
        lopm = last_of_prev_month.replace(year=int(gd[2]))
        return [str(gd[1])+'-01-01', str(gd[1])+'-12-31', str(gd[0])+'-01-01', str(gd[0])+'-12-31', str(fopm), str(lopm)]
    def get_stat_year(account_id, what_year):
        """Per-tag-group yearly stats: all-time total, previous year, this
        year, YTD percentage, monthly average and previous month."""
        gd = Transaction.get_dates(what_year)
        # NOTE(review): ``tg`` is assigned but never used in this method.
        tg = Taggroup.list_tgroup_id_inSum(account_id)
        q1 = db.session.query(Tag.tgr_id.label('tag1'), Taggroup.gName.label('Category'), Taggroup.gColor.label('color'), func.SUM(Transaction.amount).label('Total'))\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False, extract('year',Transaction.traDate)<=gd[0])\
            .group_by(Tag.tgr_id, Taggroup.gName, Taggroup.gColor)\
            .order_by(Tag.tgr_id).subquery()
        q2 = Transaction.get_stats_year(account_id, gd[1], 'tag2', 'Prev_Year')
        q3 = Transaction.get_stats_year(account_id, gd[0], 'tag3', 'This_Year')
        month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 12 else 12
        q4 = db.session.query(Tag.tgr_id.label('tag4'), func.SUM(Transaction.amount/month_count).label('Avg_Month'))\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Transaction.traDate>=gd[4], Transaction.traDate<gd[5])\
            .group_by(Tag.tgr_id).subquery()
        q5 = db.session.query(Tag.tgr_id.label('tag5'), func.SUM(Transaction.amount).label('Prev_Month'))\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == True, extract('year',Transaction.traDate)==gd[2], extract('month',Transaction.traDate)==gd[3])\
            .group_by(Tag.tgr_id).subquery()
        return db.session.query(q1.c.Category, q1.c.tag1, q1.c.Total, q2.c.Prev_Year, q3.c.This_Year, (100*(q3.c.This_Year/q2.c.Prev_Year)).label('%_YTD'), q4.c.Avg_Month, q5.c.Prev_Month, q1.c.color)\
            .outerjoin(q2, q1.c.tag1 == q2.c.tag2)\
            .outerjoin(q3, q1.c.tag1 == q3.c.tag3)\
            .outerjoin(q4, q1.c.tag1 == q4.c.tag4)\
            .outerjoin(q5, q1.c.tag1 == q5.c.tag5)\
            .order_by(q1.c.tag1)
    def get_stat_year_df(account_id, what_year):
        """Yearly stats as a DataFrame with an appended 'Summary' row."""
        tg = Taggroup.list_tgroup_id_inSum(account_id)
        q = Transaction.get_stat_year(account_id, what_year)
        df = pd.read_sql_query(q.statement, db.session.bind)
        #transform values from object to float
        pd.options.display.float_format = '{:.2f}'.format
        #exclude BILLS from summary
        # NOTE(review): .drop('tag1',1) relies on the positional axis
        # argument removed in pandas 2.0 -- use drop(columns='tag1') when
        # upgrading pandas.
        s = df.mask(~df['tag1'].isin(tg)).drop('tag1',1).sum()
        #calculate '% YTD'
        s.loc['%_YTD'] = 100*(s['This_Year'] / s['Prev_Year'])
        #replace calculated value in specific position
        df.loc[len(df)] = s
        #replace summarised categ name
        df = df.fillna({'Category':'Summary','tag1':0,'color':''})
        #replace 'NaN' to '0', then limit decimals to 2
        return df.fillna(0).round(2)
    def get_stat_year_by_year(account_id):
        """Pivot of yearly totals per category, with Total column and
        Summary row (bills excluded from the summary)."""
        tg = Taggroup.list_tgroup_id_inSum(account_id)
        q = db.session.query( Tag.tgr_id.label('tag')\
                , Taggroup.gName.label('Category')\
                , Transaction.traDate.label('date')\
                , Transaction.amount)\
            .outerjoin(Tag, Transaction.tag_id == Tag.id)\
            .outerjoin(Taggroup, Taggroup.id == Tag.tgr_id)\
            .filter(Transaction.acc_id == account_id, Transaction.confirmed == True, Tag.isBlnc == False)\
            .order_by(Tag.tgr_id)
        df = pd.read_sql_query(q.statement, db.session.bind)
        #add column 'year' based on 'date'
        df['Year'] = pd.DatetimeIndex(df['date']).year
        #groupby
        df = df.groupby(['tag','Category','Year']).sum()
        #pivot
        df = pd.pivot_table(df, values = 'amount', index=['Category','tag'], columns = 'Year')\
            .sort_values(by=['tag'], ascending=True)
        #add column 'Total', to sum horizontally, per category
        df.insert(loc=0, column='Total', value=df.sum(axis=1))
        #add row 'Summary' to sum columns, except BILLS
        df.loc['Summary'] = df.query("tag in @tg").sum()
        #change FLOAT values to INT
        return df.fillna(0).astype(int)
    def chart_in_out(account_id):
        # Previous calendar month's inflow/outflow (balance tags excluded).
        sum_in = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
            .outerjoin(Tag)\
            .filter(Transaction.acc_id == account_id, Transaction.amount > 0 \
                , Tag.isBlnc == False \
                , Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
            .scalar()
        sum_out = Transaction.query.with_entities(func.ABS(func.SUM(Transaction.amount)))\
            .outerjoin(Tag)\
            .filter(Transaction.acc_id == account_id, Transaction.amount < 0 \
                , Tag.isBlnc == False \
                , Transaction.traDate>=first_of_prev_month, Transaction.traDate<first_of_this_month)\
            .scalar()
        return sum_in if sum_in is not None else 0, sum_out if sum_out is not None else 0
    def chart_monthly_trend(account_id):
        # Totals per month over the last 13 months plus a flat monthly
        # average, joined via a dummy column.
        tag_inSum = Tag.list_tag_id_inSum(account_id)
        month_by_month = db.session.query(\
                func.TO_CHAR(Transaction.traDate,'YYYYMM').label('orderByCol')\
                ,func.TO_CHAR(Transaction.traDate,'MON').label('mnth')\
                ,func.SUM(Transaction.amount).label('total')\
                ,func.TEXT('Dummy').label('D'))\
            .filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
            .group_by(func.TO_CHAR(Transaction.traDate,'YYYYMM'),func.TO_CHAR(Transaction.traDate,'MON'),func.TEXT('Dummy'))\
            .subquery()
        month_count = Transaction.count_months(account_id) if Transaction.count_months(account_id) < 13 else 13
        month_avg = db.session.query(\
                func.TEXT('AvgYear').label('orderByCol')\
                ,func.TEXT('AvgMonth').label('MON')\
                ,func.SUM(Transaction.amount/month_count).label('total_avg')\
                ,func.TEXT('Dummy').label('D'))\
            .filter(Transaction.tag_id.in_(tag_inSum), Transaction.traDate>=minus_13_months, Transaction.traDate<first_of_this_month)\
            .subquery()
        return db.session.query(month_by_month.c.orderByCol, month_by_month.c.mnth, month_by_month.c.total, month_avg.c.total_avg)\
            .outerjoin(month_by_month, month_by_month.c.D == month_avg.c.D)\
            .order_by(month_by_month.c.orderByCol)
class Taggroup(db.Model):
    """Category of tags (name + display color), owned by one account.

    NOTE(review): helpers take no ``self``/``cls``; consider @staticmethod.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    gName = db.Column(db.String, nullable=False)
    gColor = db.Column(db.String(11), nullable=False)
    acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
    def __repr__(self):
        return '<TagGroup {}>'.format(self.gName)
    def insert_tag_group(g_name, color, accid):
        stmt = Taggroup(gName=g_name, gColor=color, acc_id=accid)
        db.session.add(stmt)
        db.session.commit()
        # NOTE(review): assigned but never returned or used -- dead store.
        newid = stmt.id
    def update_tag_group(gid, g_name, color):
        stmt = Taggroup.query.filter_by(id=gid).first()
        stmt.gName = g_name
        stmt.gColor = color
        db.session.commit()
    def delete_tag_group(gid):
        stmt = Taggroup.query.filter_by(id=gid).first()
        db.session.delete(stmt)
        db.session.commit()
    def list_tgroup(account_id):
        # All groups of the account, oldest first.
        return Taggroup.query.filter(Taggroup.acc_id == account_id).order_by(Taggroup.id)
    def list_tgroup_id(account_id):
        # Plain list of group ids for the account.
        q = db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id).all()
        return [val for val, in q]
    def list_tgroup_id_one(account_id):
        # The most recently created group id (highest id) as a 1-tuple row.
        return db.session.query(Taggroup.id).filter(Taggroup.acc_id==account_id).order_by(Taggroup.id.desc()).first()
    def list_count(account_id):
        return db.session.query(db.func.count(Taggroup.id)).filter(Taggroup.acc_id==account_id).scalar()
    def list_tgroup_id_inSum(account_id):
        # Ids of groups containing at least one tag included in summaries.
        q = db.session.query(Taggroup.id)\
            .outerjoin(Tag)\
            .filter(Tag.inSum==True, Taggroup.acc_id==account_id)\
            .distinct()
        return [val for val, in q]
class Tag(db.Model):
    """Transaction tag with flags controlling balance, summary and charts.

    Flags: isBlnc marks balance snapshots, inSum includes the tag in
    summaries, chart1..chart3 select which chart(s) display it.
    NOTE(review): helpers take no ``self``/``cls``; consider @staticmethod.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    tName = db.Column(db.String, nullable=False)
    tgr_id = db.Column(db.Integer, db.ForeignKey('taggroup.id'), nullable=False)
    isBlnc = db.Column(db.Boolean, nullable=False, default=0)
    inSum = db.Column(db.Boolean, nullable=False, default=1)
    chart1 = db.Column(db.Boolean, nullable=False, default=0)
    chart2 = db.Column(db.Boolean, nullable=False, default=0)
    chart3 = db.Column(db.Boolean, nullable=False, default=0)
    def __repr__(self):
        return '<Tag {}>'.format(self.tName)
    def insert_tag(t_name, g_id, balance, summary, c1, c2, c3):
        stmt = Tag(tName=t_name, tgr_id=g_id, isBlnc=balance, inSum=summary, chart1=c1, chart2=c2, chart3=c3)
        db.session.add(stmt)
        db.session.commit()
    def update_tag(tid, t_name, g_id, balance, summary, c1, c2, c3):
        stmt = Tag.query.filter_by(id=tid).first()
        stmt.tName = t_name
        stmt.tgr_id = g_id
        stmt.isBlnc = balance
        stmt.inSum = summary
        stmt.chart1 = c1
        stmt.chart2 = c2
        stmt.chart3 = c3
        db.session.commit()
    def delete_tag(tid):
        stmt = Tag.query.filter_by(id=tid).first()
        db.session.delete(stmt)
        db.session.commit()
    def list_tag(account_id):
        # All tags of the account, ordered by group then id.
        return db.session.query(Tag.id ,Tag.tName ,Tag.tgr_id ,Tag.isBlnc ,Tag.inSum ,Tag.chart1 ,Tag.chart2 ,Tag.chart3)\
            .outerjoin(Taggroup)\
            .filter(Taggroup.acc_id==account_id)\
            .order_by(Tag.tgr_id, Tag.id)
    def list_tag_id(account_id):
        q = db.session.query(Tag.id)\
            .outerjoin(Taggroup)\
            .filter(Taggroup.acc_id==account_id)
        return [val for val, in q]
    def list_tag_id_of_group(grpid,account_id):
        q = db.session.query(Tag.id)\
            .outerjoin(Taggroup)\
            .filter(Tag.tgr_id==grpid, Taggroup.acc_id==account_id)
        return [val for val, in q]
    def list_tag_id_inSum(account_id):
        # Ids of tags included in summary calculations.
        q = db.session.query(Tag.id)\
            .outerjoin(Taggroup)\
            .filter(Tag.inSum==True, Taggroup.acc_id==account_id)
        return [val for val, in q]
    def list_count(account_id):
        return db.session.query(db.func.count(Tag.id))\
            .outerjoin(Taggroup)\
            .filter(Taggroup.acc_id==account_id).scalar()
class Condition(db.Model):
    """Auto-tagging rule: description pattern (cName) mapped to a tag.

    NOTE(review): helpers take no ``self``/``cls``; consider @staticmethod.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    cName = db.Column(db.String, nullable=False)
    tag_id = db.Column(db.Integer, db.ForeignKey('tag.id'), nullable=False)
    acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
    def __repr__(self):
        return '<Condition {}>'.format(self.cName)
    def insert_cond(cname, tag, accid):
        stmt = Condition(cName=cname, tag_id=tag, acc_id=accid)
        db.session.add(stmt)
        db.session.commit()
    def update_cond(cid, cName, tag):
        stmt = Condition.query.filter_by(id=cid).first()
        stmt.cName = cName
        stmt.tag_id = tag
        db.session.commit()
    def delete_cond(cid):
        stmt = Condition.query.filter_by(id=cid).first()
        db.session.delete(stmt)
        db.session.commit()
    def list_cond(account_id):
        # All rules of the account, ordered by tag group / tag / rule id.
        return db.session.query(Condition.id, Condition.cName, Condition.tag_id)\
            .outerjoin(Tag, Condition.tag_id == Tag.id)\
            .filter(Condition.acc_id == account_id)\
            .order_by(Tag.tgr_id, Condition.tag_id, Condition.id)
    def list_count(account_id):
        return db.session.query(db.func.count(Condition.id)).filter(Condition.acc_id==account_id).scalar()
class Description(db.Model):
    """Description rewrite rule: replace ``descfrom`` with ``descto``.

    NOTE(review): helpers take no ``self``/``cls``; consider @staticmethod.
    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    descfrom = db.Column(db.String, nullable=False)
    descto = db.Column(db.String, nullable=True)
    acc_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
    def __repr__(self):
        return '<Condition {}>'.format(self.descfrom)
    def insert_desc(descfrom, descto, accid):
        stmt = Description(descfrom=descfrom, descto=descto, acc_id=accid)
        db.session.add(stmt)
        db.session.commit()
    def update_desc(id, descfrom, descto):
        stmt = Description.query.filter_by(id=id).first()
        stmt.descfrom = descfrom
        stmt.descto = descto
        db.session.commit()
    def delete_desc(id):
        stmt = Description.query.filter_by(id=id).first()
        db.session.delete(stmt)
        db.session.commit()
    def list_desc(account_id):
        return Description.query.filter(Description.acc_id == account_id).order_by(Description.descfrom)
    def list_count(account_id):
        return db.session.query(db.func.count(Description.id)).filter(Description.acc_id==account_id).scalar()
# Create all tables for the models defined above. create_all() is a no-op
# for tables that already exist, so this is safe to run on every start.
with app.app_context():
    db.create_all()
| StarcoderdataPython |
1897419 | """Sphinx demo."""
from pathlib import Path
import sys
# On Windows, force the selector-based asyncio event loop — presumably to
# avoid incompatibilities with the default Proactor loop; TODO confirm
# which consumer requires it.
if sys.platform == 'win32':
    import asyncio
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# Package version; __version_full__ mirrors __version__ here.
__version__ = '0.0.2'
__version_full__ = __version__
def get_html_theme_path():
    """Return the absolute path of the bundled "xyzstyle" Sphinx theme."""
    package_dir = Path(__file__).parent.resolve()
    return package_dir / "themes" / "xyzstyle"
def get_html_template_path():
    """Return the ``_templates`` directory inside the theme."""
    return get_html_theme_path() / "_templates"
def update_context(app, pagename, templatename, context, doctree):
    # Sphinx "html-page-context" callback: expose the theme version to
    # every page template.
    context["xyzstyle_version"] = __version_full__
def setup(app):
    """Sphinx extension entry point: register the theme and its templates."""
    theme_dir = get_html_theme_path()
    app.add_html_theme("xyzstyle", str(theme_dir))
    # Inject the theme version into every rendered page's context.
    app.connect("html-page-context", update_context)
    template_path = get_html_template_path()
    app.config.templates_path.append(str(template_path))
    return {
        "version": __version_full__,
        # Declare the extension safe for Sphinx's parallel build modes.
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
| StarcoderdataPython |
3384153 | <filename>src/aptsources_cleanup/util/zipfile.py
# -*- coding: utf-8
from . import strings
from . import collections
from .itertools import filterfalse
import sys
import os
import stat
import errno
import functools
from zipfile import *
import zipfile as _zipfile
__all__ = _zipfile.__all__
try:
    from os import fspath
except ImportError:
    # Fallback for Python < 3.6, which lacks os.fspath().
    def fspath(path, *, _str_types=(str, bytes)):
        # Accept str/bytes directly; otherwise delegate to the
        # __fspath__() protocol and validate its return type.
        if isinstance(path, _str_types):
            return path
        path = path.__fspath__()
        if not isinstance(path, _str_types):
            raise TypeError(str(type(path)))
        return path
class ZipFile(_zipfile.ZipFile):
    """Extends zipfile.ZipFile with in-archive resolution of symbolic links"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lazily computed PC_PATH_MAX limit; see `_max_path`.
        self._max_path_value = None

    def getinfo(self, name, pwd=None, *, follow_symlinks=False,
        fail_missing=True
    ):
        """Return the ZipInfo for `name`, optionally resolving in-archive
        symlinks. Returns None for a missing member when fail_missing is
        false; raises KeyError otherwise."""
        if follow_symlinks:
            return self._resolve_path(name, pwd, fail_missing)
        if isinstance(name, ZipInfo):
            return name
        name = fspath(name)
        return self._check_missing(self.NameToInfo.get(name), name, fail_missing)

    def open(self, path, mode='r', pwd=None, *, follow_symlinks=False,
        fail_missing=True, **kwargs
    ):
        """Like ZipFile.open(), plus symlink resolution and optional
        None-instead-of-raise for missing members."""
        path = self.getinfo(
            path, pwd, follow_symlinks=follow_symlinks, fail_missing=fail_missing)
        return path and super().open(path, mode, pwd, **kwargs)

    def read(self, path, pwd=None, *, follow_symlinks=True, fail_missing=True):
        """Like ZipFile.read(); note symlinks are followed by default here."""
        path = self.getinfo(
            path, pwd, follow_symlinks=follow_symlinks, fail_missing=fail_missing)
        return path and super().read(path, pwd)

    def extract(self, member, path=None, pwd=None, *, follow_symlinks=False,
        fail_missing=True
    ):
        """Extract `member`; returns True on success, False if missing and
        fail_missing is false."""
        # BUG FIX: follow_symlinks/fail_missing are keyword-only in
        # getinfo(); the original passed them positionally, which raised
        # TypeError on every call.
        member = self.getinfo(
            member, pwd, follow_symlinks=follow_symlinks,
            fail_missing=fail_missing)
        success = member is not None
        if success:
            super().extract(member, path, pwd)
        return success

    def _resolve_path(self, path, pwd, fail_missing):
        # Walk `path` component by component, expanding in-archive
        # symlinks as they are encountered.
        if isinstance(path, ZipInfo):
            path = path.filename
        else:
            path = fspath(path)
        inspected = []
        # Components still to visit, stored reversed so pop() yields them
        # in order.
        uninspected = path.split(os.sep)
        uninspected.reverse()
        seen_set = collections.ExtSet()
        c_info = None
        while uninspected:
            # c_info ends up holding the info of the last real component.
            c_info = self._resolve_path_component(
                inspected, uninspected, pwd, seen_set)
        return self._check_missing(c_info, path, fail_missing)

    def _resolve_path_component(self, inspected, uninspected, pwd, seen_set):
        c = uninspected.pop()
        #_eprintf('_resolve_path_component(): {!r}, {!r}, {!r}', inspected, c, uninspected)
        # Skip empty components and '.'.
        if not c or c == os.curdir:
            return None
        if c == os.pardir:
            # '..' above the archive root is an error.
            if not inspected:
                uninspected.append(c)
                uninspected.reverse()
                raise self._OSError(
                    errno.EINVAL, 'Path points outside of this archive',
                    os.sep.join(uninspected))
            inspected.pop()
            return None
        inspected.append(c)
        c_full = os.sep.join(inspected)
        c_info = self.NameToInfo.get(c_full)
        # Unix mode bits live in the high 16 bits of external_attr.
        if c_info is None or not stat.S_ISLNK(c_info.external_attr >> 16):
            if self.debug >= 2:
                _eprintf('{:s}: {!r}',
                    ('Not a symlink', 'Does not exist')[c_info is None],
                    ':'.join((self.filename, c_full)))
            return c_info
        # Guard against symlink targets that would exceed PATH_MAX.
        if len(c_full) - len(c) + c_info.file_size > self._max_path:
            raise self._OSError(errno.ENAMETOOLONG, None, c_full)
        # seen_set.add() presumably reports whether c_full was newly added;
        # a repeat means a symlink loop.
        c_seen = resolved = not seen_set.add(c_full)
        if c_info.file_size == 0:
            resolved = ''
        elif not c_seen:
            # The symlink target is the member's (NUL-terminated) content.
            resolved = strings.prefix(os.fsdecode(super().read(c_info, pwd)), '\0')
        if not resolved:
            raise self._OSError(
                errno.EINVAL, 'Empty symbolic link in archive', c_full)
        if c_seen:
            raise self._OSError(errno.ELOOP, None, c_full)
        if self.debug >= 2:
            _eprintf('Found symbolic link: {!r} => {!r}',
                ':'.join((self.filename, c_full)), resolved)
        # Replace this component with the link target's components.
        inspected.pop()
        uninspected.extend(reversed(resolved.split(os.sep)))
        return c_info

    def _check_missing(self, info, path, fail_missing):
        # Centralized missing-member policy: raise or pass through None.
        if info is None and fail_missing:
            raise KeyError(
                'There is no item named {!r} in the archive {!r}'
                .format(path, self.filename))
        return info

    @property
    def _max_path(self):
        # PC_PATH_MAX of the underlying file (or the current directory
        # when the file object has no fileno), computed once and cached.
        val = self._max_path_value
        if val is None:
            fileno = getattr(self.fp, 'fileno', None)
            fileno = os.curdir if fileno is None else fileno()
            self._max_path_value = val = os.pathconf(fileno, 'PC_PATH_MAX')
        return val

    def _OSError(self, err, msg=None, filename=None, filename2=None):
        # Build an OSError whose filename is prefixed with the archive name.
        if filename is None:
            filename = self.filename
        else:
            filename = ':'.join((self.filename, filename))
        err = OSError(err, msg or os.strerror(err), filename)
        err.filename2 = filename2
        return err
def _eprintf(fmt, *args):
return print(fmt.format(*args), file=sys.stderr)
def _parse_args(args):
    """Parse command-line arguments for the symlink-inspection CLI."""
    import argparse

    class ProxyFunction:
        # Wraps a callable but exposes a chosen __name__, so argparse
        # error messages show a friendly type name (e.g. 'int').
        def __init__(self, fun, name=None):
            self._fun = fun
            self.__name__ = name or fun.__name__
        def __call__(self, *args):
            return self._fun(*args)

    class ArgumentParser(argparse.ArgumentParser):
        # Customize error output to include usage and a help hint.
        def error(self, message):
            self.exit(2,
                '{:s}Error: {:s}\nPlease use the options "-h" or "--help" for more '
                'detailled usage info.\n'
                .format(self.format_usage(), message))

    ap = ArgumentParser(
        description='Show symbolic link targets inside ZIP archives.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
    ap.add_argument('archive',
        type=argparse.FileType('rb'),
        help='Path to a ZIP archive')
    ap.add_argument('paths', nargs='+',
        help='Archive member paths to inspect')
    # Parsed to a bool (N > 0); ProxyFunction makes errors report 'int'.
    ap.add_argument('-L', '--follow-symlinks', metavar='N',
        type=ProxyFunction(lambda s: int(s) > 0, int.__name__), default=1,
        help='Follow symbolic links during archive member inspection if N > 0.')
    # Help is re-added manually (add_help=False above) and hidden from
    # the options listing.
    ap.add_argument('-h', '--help', dest=argparse.SUPPRESS,
        action='help', help=argparse.SUPPRESS)
    # -d (count) and --debug (explicit level) are mutually exclusive.
    apdg = ap.add_mutually_exclusive_group()
    apdg.add_argument('-d', dest='debug',
        action='count', default=0,
        help='Increase debugging level by 1. Can be specified multiple times.')
    apdg.add_argument('--debug', dest='debug',
        metavar='N', type=int, default=0,
        help='Set debugging level directly.')
    return ap.parse_args(args)
def _main(args=None):
    """CLI entry point: resolve and print the symlink targets of the
    requested archive members."""
    args = _parse_args(args)
    with args.archive, ZipFile(args.archive) as archive:
        archive.debug = args.debug
        getinfo = functools.partial(ZipFile.getinfo, archive,
            follow_symlinks=args.follow_symlinks, fail_missing=False)
        # BUG FIX: the positional argument is named 'paths' in
        # _parse_args(); the original read the non-existent 'args.path'
        # and raised AttributeError.
        for path in args.paths:
            resolved_info = getinfo(path)
            if resolved_info is not None:
                print('{:s}: {!r} => {!r}'.format(
                    archive.filename, path, resolved_info.filename))
            else:
                _eprintf(
                    '{:s}: {!r} => No such archive entry or dangling symbolic link',
                    archive.filename, path)
if __name__ == '__main__':
_main()
| StarcoderdataPython |
8174486 | #!/usr/bin/env python2.7
# pylint: disable=bad-indentation, no-member, invalid-name, line-too-long
import os
import shutil
import random
import argparse
import multiprocessing
import cv2
import lmdb
import caffe
import numpy as np
from jfda.config import cfg
from jfda.utils import load_wider, load_celeba
from jfda.utils import get_logger, crop_face
from jfda.detector import JfdaDetector
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
from bbox import bbox_overlaps
logger = get_logger()

# Convenience byte sizes (8/16/24/32 GiB), used as LMDB map sizes below.
G8 = 8*1024*1024*1024
G16 = 2*G8
G24 = 3*G8
G32 = 4*G8
def fill_queues(data, qs):
    """Distribute the items of `data` over the queues in `qs` round-robin."""
    queue_n = len(qs)
    for i, item in enumerate(data):
        qs[i % queue_n].put(item)
def remove_if_exists(db):
    """Remove the directory `db` (an LMDB database dir) if it is present."""
    if not os.path.exists(db):
        return
    logger.info('remove %s'%db)
    shutil.rmtree(db)
def get_detector():
    """Build the JfdaDetector for the configured proposal nets, or None.

    Returns None when no proposal nets are configured for the current net
    type or detection is disabled (cfg.USE_DETECT is false), i.e. for the
    pNet stage, which uses sliding-window proposals instead.
    """
    nets = cfg.PROPOSAL_NETS[cfg.NET_TYPE]
    if nets is None or not cfg.USE_DETECT:
        detector = None
    else:
        # Select GPU vs CPU mode for Caffe before constructing the detector.
        if cfg.GPU_ID >= 0:
            caffe.set_mode_gpu()
            caffe.set_device(cfg.GPU_ID)
        else:
            caffe.set_mode_cpu()
        detector = JfdaDetector(nets)
    return detector
# =========== region proposal =============================
def sliding_windows(x, y, width, height, kw, kh, sw, sh):
    '''Generate sliding-window boxes over a region.

    x, y: region top-left position
    width, height: region width and height
    kw, kh: window width and height
    sw, sh: stride width and height
    Returns an (N, 4) float32 array of (x1, y1, x2, y2) boxes.
    '''
    # All window top-left offsets within the region, row-major over y.
    grid_x, grid_y = np.meshgrid(np.arange(0, width - kw, sw),
                                 np.arange(0, height - kh, sh))
    top_left = np.stack([grid_x.ravel(), grid_y.ravel()], axis=1)
    win_size = np.tile(np.array([kw, kh]), (len(top_left), 1))
    bbox = np.hstack([top_left, win_size])
    # Shift offsets to absolute coordinates and convert (w, h) to (x2, y2).
    bbox[:, 0] += x
    bbox[:, 1] += y
    bbox[:, 2] += bbox[:, 0]
    bbox[:, 3] += bbox[:, 1]
    return bbox.astype(np.float32)
def proposal(img, gt_bboxes, detector=None):
    '''given an image with face bboxes, proposal negatives, positives and part faces
    for rNet and oNet, we use previous networks to proposal bboxes
    Return
      (negatives, positives, part)
      negatives: [(data, bbox)]
      positives: [(data, bbox, bbox_target)]
      part: [(data, bbox, bbox_target)]
    where `data` is the cropped face image, `bbox` the proposal box and
    `bbox_target` the regression offset to the ground truth, normalized by
    the box side length.
    '''
    # ======================= proposal for rnet and onet ==============
    # With a detector (rNet/oNet stages): proposals come from the previous
    # stage's detections, split by IoU with the ground truth.
    if detector is not None:
        assert isinstance(detector, JfdaDetector)
        bboxes = detector.detect(img, **cfg.DETECT_PARAMS)
        # # maybe sort it by score in descending order
        # bboxes = bboxes[bboxes[:, 4].argsort()[::-1]]
        # keep bbox info, drop score, offset and landmark
        bboxes = bboxes[:, :4]
        ovs = bbox_overlaps(bboxes, gt_bboxes)
        ovs_max = ovs.max(axis=1)
        ovs_idx = ovs.argmax(axis=1)
        # IoU thresholds decide positive / negative / part-face labels.
        pos_idx = np.where(ovs_max > cfg.FACE_OVERLAP)[0]
        neg_idx = np.where(ovs_max < cfg.NONFACE_OVERLAP)[0]
        part_idx = np.where(np.logical_and(ovs_max > cfg.PARTFACE_OVERLAP, ovs_max <= cfg.FACE_OVERLAP))[0]
        # pos
        positives = []
        for idx in pos_idx:
            bbox = bboxes[idx].reshape(4)
            gt_bbox = gt_bboxes[ovs_idx[idx]]
            data = crop_face(img, bbox)
            if data is None:
                continue
            # cv2.imshow('pos', data)
            # cv2.waitKey()
            # Regression target: offset to gt, normalized by box width.
            k = bbox[2] - bbox[0]
            bbox_target = (gt_bbox - bbox) / k
            positives.append((data, bbox, bbox_target))
        # part
        part = []
        for idx in part_idx:
            bbox = bboxes[idx].reshape(4)
            gt_bbox = gt_bboxes[ovs_idx[idx]]
            data = crop_face(img, bbox)
            if data is None:
                continue
            # cv2.imshow('part', data)
            # cv2.waitKey()
            k = bbox[2] - bbox[0]
            bbox_target = (gt_bbox - bbox) / k
            part.append((data, bbox, bbox_target))
        # neg
        negatives = []
        np.random.shuffle(neg_idx)
        for idx in neg_idx[:cfg.NEG_DETECT_PER_IMAGE]:
            bbox = bboxes[idx].reshape(4)
            data = crop_face(img, bbox)
            if data is None:
                continue
            # cv2.imshow('neg', data)
            # cv2.waitKey()
            negatives.append((data, bbox))
        return negatives, positives, part
    # ======================= proposal for pnet =======================
    # Without a detector (pNet stage): proposals come from randomized
    # sliding windows around each ground-truth face.
    height, width = img.shape[:-1]
    negatives, positives, part = [], [], []
    # ===== proposal positives =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_positives = []
        for scale in cfg.POS_PROPOSAL_SCALES:
            # Window side and stride are proportional to the face size.
            k = max(w, h) * scale
            stride = cfg.POS_PROPOSAL_STRIDE
            s = k * stride
            # Random jitter of the search region around the face.
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
            ovs = ovs.reshape((1, len(candidates)))[0]
            pos_bboxes = candidates[ovs > cfg.FACE_OVERLAP, :]
            if len(pos_bboxes) > 0:
                np.random.shuffle(pos_bboxes)
            for bbox in pos_bboxes[:cfg.POS_PER_FACE]:
                data = crop_face(img, bbox)
                if data is None:
                    continue
                # cv2.imshow('positive', data)
                # cv2.waitKey()
                bbox_target = (gt_bbox - bbox) / k
                this_positives.append((data, bbox, bbox_target))
        random.shuffle(this_positives)
        positives.extend(this_positives[:cfg.POS_PER_FACE])
    # ===== proposal part faces =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_part = []
        for scale in cfg.PART_PROPOSAL_SCALES:
            k = max(w, h) * scale
            stride = cfg.PART_PROPOSAL_STRIDE
            s = k * stride
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
            ovs = ovs.reshape((1, len(candidates)))[0]
            part_bboxes = candidates[np.logical_and(ovs > cfg.PARTFACE_OVERLAP, ovs <= cfg.FACE_OVERLAP), :]
            if len(part_bboxes) > 0:
                np.random.shuffle(part_bboxes)
            for bbox in part_bboxes[:cfg.PART_PER_FACE]:
                data = crop_face(img, bbox)
                if data is None:
                    continue
                # cv2.imshow('part', data)
                # cv2.waitKey()
                bbox_target = (gt_bbox - bbox) / k
                this_part.append((data, bbox, bbox_target))
        random.shuffle(this_part)
        # NOTE(review): caps with POS_PER_FACE rather than PART_PER_FACE —
        # possibly a copy-paste from the positives branch; confirm intent.
        part.extend(this_part[:cfg.POS_PER_FACE])
    # ===== proposal negatives =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_negatives = []
        for scale in cfg.NEG_PROPOSAL_SCALES:
            k = max(w, h) * scale
            stride = cfg.NEG_PROPOSAL_STRIDE
            s = k * stride
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bboxes)
            neg_bboxes = candidates[ovs.max(axis=1) < cfg.NONFACE_OVERLAP, :]
            if len(neg_bboxes) > 0:
                np.random.shuffle(neg_bboxes)
            for bbox in neg_bboxes[:cfg.NEG_PER_FACE]:
                data = crop_face(img, bbox)
                if data is None:
                    continue
                # cv2.imshow('negative', data)
                # cv2.waitKey()
                this_negatives.append((data, bbox))
        random.shuffle(this_negatives)
        negatives.extend(this_negatives[:cfg.NEG_PER_FACE])
    # negatives from global image random crop
    # Cap the share of negatives taken from face regions, then fill the
    # rest with random square crops over the whole image.
    max_num_from_fr = int(cfg.NEG_PER_IMAGE * cfg.NEG_FROM_FR_RATIO)
    if len(negatives) > max_num_from_fr:
        random.shuffle(negatives)
        negatives = negatives[:max_num_from_fr]
    bbox_neg = []
    range_x, range_y = width - cfg.NEG_MIN_SIZE, height - cfg.NEG_MIN_SIZE
    for i in xrange(cfg.NEG_PROPOSAL_RATIO * cfg.NEG_PER_IMAGE):
        x1, y1 = np.random.randint(range_x), np.random.randint(range_y)
        w = h = np.random.randint(low=cfg.NEG_MIN_SIZE, high=min(width-x1, height-y1))
        x2, y2 = x1 + w, y1 + h
        bbox_neg.append([x1, y1, x2, y2])
        if x2 > width or y2 > height:
            # Debug leftover (Python 2 print statement) — presumably
            # unreachable given the randint bounds above; TODO confirm.
            print 'hhhh'
    bbox_neg = np.asarray(bbox_neg, dtype=gt_bboxes.dtype)
    ovs = bbox_overlaps(bbox_neg, gt_bboxes)
    bbox_neg = bbox_neg[ovs.max(axis=1) < cfg.NONFACE_OVERLAP]
    np.random.shuffle(bbox_neg)
    if not cfg.NEG_FORCE_BALANCE:
        remain = cfg.NEG_PER_IMAGE - len(negatives)
    else:
        # balance ratio from face region and global crop
        remain = len(negatives) * (1. - cfg.NEG_FROM_FR_RATIO) / cfg.NEG_FROM_FR_RATIO
        remain = int(remain)
    bbox_neg = bbox_neg[:remain]
    # for bbox in bbox_neg:
    #   x1, y1, x2, y2 = bbox
    #   x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    #   cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
    # cv2.imshow('neg', img)
    # cv2.waitKey()
    for bbox in bbox_neg:
        data = crop_face(img, bbox)
        negatives.append((data, bbox))
    return negatives, positives, part
# =========== WIDER ================
def gen_wider():
    """Generate positive/negative/part-face LMDBs from the WIDER dataset.

    Fans the image list out to cfg.WORKER_N reader processes (which run
    `proposal` per image) and funnels the crops into a single writer
    process per split.
    """
    logger.info('loading WIDER')
    train_data, val_data = load_wider()
    logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))
    # Count total ground-truth faces (Python 2 builtin `reduce`).
    train_faces = reduce(lambda acc, x: acc + len(x[1]), train_data, 0)
    val_faces = reduce(lambda acc, x: acc + len(x[1]), val_data, 0)
    logger.info('total faces, train: %d, val: %d', train_faces, val_faces)

    def gen(data, db_names):
        # Fresh databases: [positive, negative, part].
        for db_name in db_names: remove_if_exists(db_name)
        logger.info('fill queues')
        q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
        q_out = multiprocessing.Queue(1024)
        fill_queues(data, q_in)
        readers = [multiprocessing.Process(target=wider_reader_func, args=(q_in[i], q_out)) \
                   for i in range(cfg.WORKER_N)]
        for p in readers:
            p.start()
        writer = multiprocessing.Process(target=wider_writer_func, args=(q_out, db_names))
        writer.start()
        for p in readers:
            p.join()
        # Sentinel tells the writer to flush and stop.
        q_out.put(('finish', []))
        writer.join()

    logger.info('writing train data, %d images', len(train_data))
    db_names = ['data/%snet_positive_train'%cfg.NET_TYPE,
                'data/%snet_negative_train'%cfg.NET_TYPE,
                'data/%snet_part_train'%cfg.NET_TYPE]
    gen(train_data, db_names)
    logger.info('writing val data, %d images', len(val_data))
    db_names = ['data/%snet_positive_val'%cfg.NET_TYPE,
                'data/%snet_negative_val'%cfg.NET_TYPE,
                'data/%snet_part_val'%cfg.NET_TYPE]
    gen(val_data, db_names)
def wider_reader_func(q_in, q_out):
    """Worker process: read (img_path, bboxes) items from q_in, run
    `proposal`, and push resized/serialized crops to q_out."""
    input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
    detector = get_detector()
    counter = 0
    while not q_in.empty():
        item = q_in.get()
        counter += 1
        if counter % 1000 == 0:
            logger.info('%s reads %d', multiprocessing.current_process().name, counter)
        img_path, bboxes = item
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            logger.warning('read %s failed', img_path)
            continue
        negatives, positives, part = proposal(img, bboxes, detector)
        # Serialize crops (uint8) and regression targets (float32) as raw
        # bytes for LMDB storage.
        for data, _ in negatives:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            q_out.put(('negative', [data]))
        for data, _, bbox_target in positives:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            bbox_target = bbox_target.astype(np.float32).tostring() # float32
            q_out.put(('positive', [data, bbox_target]))
        for data, _, bbox_target in part:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            bbox_target = bbox_target.astype(np.float32).tostring() # float32
            q_out.put(('part', [data, bbox_target]))
def wider_writer_func(q_out, db_names):
    """Writer process: drain q_out and write shuffled crops into three
    LMDBs (positive, negative, part). Stops on the 'finish' sentinel."""
    db_pos = lmdb.open(db_names[0], map_size=G16)
    db_neg = lmdb.open(db_names[1], map_size=G16)
    db_part = lmdb.open(db_names[2], map_size=G16)
    txn_pos = db_pos.begin(write=True)
    txn_neg = db_neg.begin(write=True)
    txn_part = db_part.begin(write=True)
    idx_pos, idx_neg, idx_part = 0, 0, 0
    # In-memory staging buffers, flushed (shuffled) every SHUFFLE_SIZE items.
    q_pos, q_neg, q_part = [], [], []

    def fill(txn, items, idx, has_bbox=True):
        # Shuffle the staged batch, then write '%08d_data' (and, for
        # positives/part, '%08d_bbox') keys. Returns the next free index.
        random.shuffle(items)
        for item in items:
            data_key = '%08d_data'%idx
            txn.put(data_key, item[0])
            if has_bbox:
                # BUG FIX: this key was a corrupted '<KEY>' placeholder;
                # restored from the '%08d_data'/'%08d_landmark' naming
                # pattern used elsewhere in this file.
                bbox_key = '%08d_bbox'%idx
                txn.put(bbox_key, item[1])
            idx += 1
        return idx

    counter = 0
    pos_counter, neg_counter, part_counter = 0, 0, 0
    while True:
        stat, item = q_out.get()
        counter += 1
        if counter % 10000 == 0:
            logger.info('writes %d positives, %d negatives, %d part', pos_counter, neg_counter, part_counter)
        if stat == 'positive':
            pos_counter += 1
            q_pos.append(item)
            if len(q_pos) >= cfg.SHUFFLE_SIZE:
                idx_pos = fill(txn_pos, q_pos, idx_pos, True)
                q_pos = []
        elif stat == 'negative':
            neg_counter += 1
            q_neg.append(item)
            if len(q_neg) >= cfg.SHUFFLE_SIZE:
                idx_neg = fill(txn_neg, q_neg, idx_neg, False)
                q_neg = []
        elif stat == 'part':
            part_counter += 1
            q_part.append(item)
            if len(q_part) >= cfg.SHUFFLE_SIZE:
                idx_part = fill(txn_part, q_part, idx_part, True)
                q_part = []
        else:
            # stat == 'finish': flush remainders and record final sizes.
            idx_pos = fill(txn_pos, q_pos, idx_pos, True)
            txn_pos.put('size', str(idx_pos))
            idx_neg = fill(txn_neg, q_neg, idx_neg, False)
            txn_neg.put('size', str(idx_neg))
            idx_part = fill(txn_part, q_part, idx_part, True)
            txn_part.put('size', str(idx_part))
            break
    txn_pos.commit()
    txn_neg.commit()
    txn_part.commit()
    db_pos.close()
    db_neg.close()
    db_part.close()
    logger.info('Finish')
# =========== CelebA ===============
def gen_celeba():
    """Generate landmark-training LMDBs from the CelebA dataset, using the
    same reader/writer multiprocessing layout as gen_wider()."""
    logger.info('loading CelebA')
    train_data, val_data = load_celeba()
    logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))

    def gen(data, db_name):
        remove_if_exists(db_name)
        logger.info('fill queues')
        q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
        q_out = multiprocessing.Queue(1024)
        fill_queues(data, q_in)
        readers = [multiprocessing.Process(target=celeba_reader_func, args=(q_in[i], q_out)) \
                   for i in range(cfg.WORKER_N)]
        for p in readers:
            p.start()
        writer = multiprocessing.Process(target=celeba_writer_func, args=(q_out, db_name))
        writer.start()
        for p in readers:
            p.join()
        # Sentinel tells the writer to flush and stop.
        q_out.put(('finish', []))
        writer.join()

    logger.info('writing train data, %d images', len(train_data))
    gen(train_data, 'data/%snet_landmark_train'%cfg.NET_TYPE)
    logger.info('writing val data, %d images', len(val_data))
    gen(val_data, 'data/%snet_landmark_val'%cfg.NET_TYPE)
def celeba_reader_func(q_in, q_out):
    """Worker process: crop positive face proposals from CelebA images and
    emit (crop, normalized-landmark) pairs to q_out."""
    def vertify_bbox(bbox, landmark):
        # NOTE(review): stub — always accepts; the intent (per the comment
        # at the call site) is to reject boxes that cut off landmark
        # points. Confirm whether this is deliberate.
        return True
    input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
    detector = get_detector()
    counter = 0
    while not q_in.empty():
        item = q_in.get()
        counter += 1
        if counter%1000 == 0:
            logger.info('%s reads %d', multiprocessing.current_process().name, counter)
        img_path, bbox, landmark = item
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            logger.warning('read %s failed', img_path)
            continue
        bbox = np.asarray(bbox, dtype=np.float32).reshape((1, -1))
        # Only the positives from `proposal` are used for landmark training.
        _1, bboxes, _2 = proposal(img, bbox, detector)
        np.random.shuffle(bboxes)
        for data, bbox, _ in bboxes[:cfg.LANDMARK_PER_FACE]:
            # make sure landmark points are in bbox
            landmark1 = landmark.reshape((-1, 2)).copy()
            if not vertify_bbox(bbox, landmark1):
                continue
            # # debug
            # img1 = img.copy()
            # x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
            # cv2.rectangle(img1, (x1, y1), (x2, y2), (0, 0, 255), 2)
            # for x, y in landmark1:
            #   x, y = int(x), int(y)
            #   cv2.circle(img1, (x, y), 2, (0, 255, 0), -1)
            # cv2.imshow('landmark', img1)
            # cv2.waitKey(0)
            # normalize landmark to [0, 1] relative to the crop box
            w, h = bbox[2]-bbox[0], bbox[3]-bbox[1]
            landmark1[:, 0] = (landmark1[:, 0] - bbox[0]) / w
            landmark1[:, 1] = (landmark1[:, 1] - bbox[1]) / h
            landmark1 = landmark1.reshape(-1)
            # format data: uint8 image bytes + float32 landmark bytes
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            landmark1 = landmark1.astype(np.float32).tostring() # float32
            q_out.put(('data', [data, landmark1]))
def celeba_writer_func(q_out, db_name):
    """Writer process: drain q_out into a single landmark LMDB; stops on
    the 'finish' sentinel and records the final item count as 'size'."""
    map_size = G16
    db = lmdb.open(db_name, map_size=map_size)
    counter = 0
    with db.begin(write=True) as txn:
        while True:
            stat, item = q_out.get()
            if stat == 'finish':
                txn.put('size', str(counter))
                break
            data, landmark = item
            # Keys follow the '%08d_data' / '%08d_landmark' convention.
            data_key = '%08d_data'%counter
            landmark_key = '%08d_landmark'%counter
            txn.put(data_key, data)
            txn.put(landmark_key, landmark)
            counter += 1
            if counter%1000 == 0:
                logger.info('writes %d landmark faces', counter)
    db.close()
    logger.info('Finish')
def test():
    """Smoke-test `proposal` on one random WIDER training image, dumping
    the generated crops to tmp/{pos,neg,part}/ for visual inspection."""
    os.system('rm -rf tmp/pos/*')
    os.system('rm -rf tmp/neg/*')
    os.system('rm -rf tmp/part/*')
    logger.info('Load WIDER')
    train_data, val_data = load_wider()
    img_path, bboxes = train_data[np.random.choice(len(train_data))]
    bboxes = np.asarray(bboxes)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    detector = JfdaDetector(cfg.PROPOSAL_NETS['r'])
    negatives, positives, part = proposal(img, bboxes, detector)
    logger.info('%d gt_bboxes', len(bboxes))
    logger.info('%d negatives, %d positives, %d part', len(negatives), len(positives), len(part))
    # BUG FIX: `proposal` returns 3-tuples (data, bbox, bbox_target) for
    # positives/part and 2-tuples (data, bbox) for negatives. The original
    # unpacked positives/part as 2-tuples (ValueError) and iterated
    # negatives with `(data)` — a plain name, so cv2.imwrite received the
    # whole tuple.
    for i, (data, _, _) in enumerate(positives):
        cv2.imwrite('tmp/pos/%03d.jpg'%i, data)
    for i, (data, _) in enumerate(negatives):
        cv2.imwrite('tmp/neg/%03d.jpg'%i, data)
    for i, (data, _, _) in enumerate(part):
        cv2.imwrite('tmp/part/%03d.jpg'%i, data)
    cv2.imwrite('tmp/test.jpg', img)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--net', type=str, default='p', help='net type')
    # BUG FIX: the help texts for --celeba and --wider were swapped;
    # --celeba runs gen_celeba() (landmark data) and --wider runs
    # gen_wider() (face data).
    parser.add_argument('--celeba', action='store_true', help='generate landmark data')
    parser.add_argument('--wider', action='store_true', help='generate face data')
    parser.add_argument('--gpu', type=int, default=0, help='gpu device')
    parser.add_argument('--detect', action='store_true', help='use previous network detection')
    parser.add_argument('--worker', type=int, default=8, help='workers to process the data')
    parser.add_argument('--test', action='store_true', help='just simple test')
    args = parser.parse_args()
    # Push CLI choices into the global config before generating data.
    cfg.GPU_ID = args.gpu
    cfg.NET_TYPE = args.net
    cfg.USE_DETECT = args.detect
    cfg.WORKER_N = args.worker
    if args.test:
        test()
    if args.wider:
        gen_wider()
    if args.celeba:
        gen_celeba()
| StarcoderdataPython |
3431939 | <reponame>JohnOmernik/jupyter_mssql<filename>mssql_core/mssql_base.py<gh_stars>0
#!/usr/bin/python
# Base imports for all integrations, only remove these at your own risk!
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
import requests
from integration_core import Integration
from pyodbc_core import Pyodbc
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
import jupyter_integrations_utility as jiu
# Put any additional imports specific to your integration here:
import pyodbc as po
@magics_class
class Mssql(Pyodbc):
    """Jupyter line/cell magic integration for MSSQL via PyODBC.

    Based on the shared Pyodbc/Integration base classes; per-instance
    options live in `myopts` and are merged into `self.opts` at init.
    """
    # The integration's name; used as the magic (%mssql) and as the prefix
    # for environment variables (JUPYTER_MSSQL_*).
    name_str = "mssql"
    instances = {}
    # ENV variables checked at startup (the integration base prepends its
    # prefix, e.g. JUPYTER_MSSQL_CONN_DEFAULT -> opts['mssql_conn_default']).
    custom_evars = ["mssql_conn_default"]
    # opts keys the user may change at runtime (joined with the base's
    # allowed set).
    custom_allowed_set_opts = ["mssql_conn_default"]
    # Custom options for this integration. (The original initialized this
    # dict twice; the redundant re-initialization was removed.)
    myopts = {}
    myopts['mssql_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
    myopts['mssql_conn_default'] = ["default", 'Default instance name for connections']

    # Class Init function - Obtain a reference to the get_ipython()
    def __init__(self, shell, debug=False, *args, **kwargs):
        # BUG FIX: super() previously referenced `Impala` (a copy-paste
        # leftover from the Impala integration), which raised NameError.
        super(Mssql, self).__init__(shell, debug=debug)
        self.debug = debug
        # Merge this integration's options into the shared opts dict.
        for k in self.myopts.keys():
            self.opts[k] = self.myopts[k]
        self.load_env(self.custom_evars)
        self.parse_instances()

    # Overriding customQuery to handle stale-connection errors with one
    # automatic resubmit.
    # NOTE(review): the error-string matching below targets "Impala
    # Thrift" messages — inherited from the Impala integration; confirm
    # whether an MSSQL/ODBC equivalent is intended.
    def customQuery(self, query, instance):
        mydf = None
        status = ""
        resubmit = False
        try:
            self.session.execute(query)
            mydf = self.as_pandas_DataFrame()
            if mydf is not None:
                status = "Success"
            else:
                status = "Success - No Results"
        except Exception as e:
            mydf = None
            str_err = str(e)
            if self.debug:
                print("Error: %s" % str_err)
            if str_err.find("Impala Thrift API") >= 0 and str_err.find("SSL_write: bad write retry") >= 0:
                if resubmit == False:
                    # This is an old connection, let's just resubmit it (once)
                    print("SSL_write Thrift error detected - Likely Stale Connection - Attempting 1 retry")
                    try:
                        resubmit = True # First we make sure we only resubmit once
                        self.session.execute(query)
                        mydf = self.as_pandas_DataFrame()
                        if mydf is not None:
                            status = "Success"
                        else:
                            status = "Success - No Results"
                    except Exception as e1:
                        mydf = None
                        str_err1 = str(e1)
                        final_err = "First Run: %s\nSecond Run: %s" % (str_err, str_err1)
                        if self.debug:
                            print("Second Run Error: %s" % str_err1)
                        # BUG FIX: the format string lacked the %s
                        # conversion, so the % operation raised TypeError.
                        status = "Failure - query_error: %s" % final_err
            else:
                status = "Failure - query_error: " + str_err
        return mydf, status

    # def customDisconnect - In pyodbc
    # def customAuth - In pyodbc
    # def validateQuery - In pyodbc
    # def customQuery - In pyodbc
    # def customHelp - In pyodbc

    def retCustomDesc(self):
        # Short description shown in the integration's help output.
        return "Jupyter integration for working with MSSQL via PyODBC based data sources"

    # This is the magic name.
    @line_cell_magic
    def mssql(self, line, cell=None):
        # Line magic (no cell): dispatch to the shared line handler; cell
        # magic: treat the cell body as a query.
        if cell is None:
            line = line.replace("\r", "")
            line_handled = self.handleLine(line)
            if self.debug:
                print("line: %s" % line)
                print("cell: %s" % cell)
            if not line_handled: # We based on this we can do custom things for integrations.
                if line.lower() == "testintwin":
                    print("You've found the custom testint winning line magic!")
                else:
                    print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
        else: # This is run is the cell is not none, thus it's a cell to process - For us, that means a query
            self.handleCell(cell, line)
| StarcoderdataPython |
6402496 | <reponame>Chenglin-Yang/PatchAttack<filename>PatchAttack/utils.py
import os
import time
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as Transforms
import torchvision.models as Models
import torchvision.datasets as Datasets
from torch.utils.data import DataLoader
# global variables
eps = np.finfo(np.float32).eps.item()
torch_cuda = 0
class data_agent():
# common transformations
normalize = Transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
inv_normalize = Transforms.Normalize(mean=[-(0.485)/0.229, -(0.456)/0.224, -(0.406)/0.225],
std=[1/0.229, 1/0.224, 1/0.225])
process_PIL = Transforms.Compose([Transforms.Resize((224, 224)),
Transforms.ToTensor(),
normalize])
    def __init__(self, ImageNet_train_dir, ImageNet_val_dir,
                 data_name='ImageNet', train_transform=None, val_transform=None,
                 ):
        """Build train/val ImageFolder datasets.

        ImageNet_train_dir / ImageNet_val_dir: dataset root directories.
        train_transform / val_transform: optional overrides; when omitted,
        the standard ImageNet augmentation (train) and resize+center-crop
        (val) pipelines are used.
        """
        self.data_name = data_name
        self.ImageNet_train_dir = ImageNet_train_dir
        self.ImageNet_val_dir = ImageNet_val_dir
        if self.data_name == 'ImageNet':
            if train_transform:
                train_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_train_dir,
                    transform=train_transform,
                )
            else:
                # Default training pipeline: random crop + flip + normalize.
                train_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_train_dir,
                    transform=Transforms.Compose([
                        Transforms.RandomResizedCrop(224),
                        Transforms.RandomHorizontalFlip(),
                        Transforms.ToTensor(),
                        self.normalize,
                    ])
                )
            if val_transform:
                val_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_val_dir,
                    transform=val_transform,
                )
            else:
                # Default evaluation pipeline: resize + center crop.
                val_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_val_dir,
                    transform=Transforms.Compose([
                        Transforms.Resize(256),
                        Transforms.CenterCrop(224),
                        Transforms.ToTensor(),
                        self.normalize,
                    ])
                )
            self.train_dataset = train_dataset
            self.val_dataset = val_dataset
        # Loaders are created lazily via update_loaders() to save memory.
        self.train_loader = None
        self.val_loader = None
        print('Your {} dataset has been prepared, please remember to update the loaders with the batch size'
              .format(self.data_name))
    def update_loaders(self, batch_size):
        """(Re)create train/val DataLoaders with the given batch size.

        Replaces any existing loaders (explicit `del` to release them
        before rebinding).
        """
        self.batch_size = batch_size
        train_loader = DataLoader(
            dataset=self.train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=12,
            pin_memory=True,
        )
        val_loader = DataLoader(
            dataset=self.val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=12,
            pin_memory=True,
        )
        # use del for safety
        del self.train_loader
        self.train_loader = train_loader
        del self.val_loader
        self.val_loader = val_loader
        print('Your {0} dataloaders have been updated with batch size {1}'
              .format(self.data_name, self.batch_size))
def get_indices(self, label, save_dir, correct=False, cnn=None,
                train=True, process_PIL=process_PIL):
    '''Collect dataset indices belonging to one class label, with disk caching.

    input:
        label: int class label to select
        save_dir: directory where the resulting index tensor is cached as a
            .pt file and reused on later calls with the same arguments
        correct: flag to return the indices of the data points which are
            correctly classified by the cnn
        cnn: pytorch model
            [old]model name, which model to use to justify whether the data points are correclty classified
            [old]change from string to torch model in the function
        train: select from the train split (True) or the val split (False)
        process_PIL: transform used in the 'correct' mode
    return:
        torch.tensor containing the indices in the self.train_dataset or self.val_dataset,
        or custom dataset when in 'correct' mode
    '''
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Cache key encodes the label, the split and the correctness filter.
    file_name = os.path.join(save_dir, 'label_{}_train-set_{}_correct_{}.pt'.format(label, train, correct))
    if os.path.exists(file_name):
        indices = torch.load(file_name)
        return indices
    else:
        if train:
            targets_tensor = torch.Tensor(self.train_dataset.targets)
        else:
            targets_tensor = torch.Tensor(self.val_dataset.targets)
        # Positions whose target equals the requested label.
        temp = torch.arange(len(targets_tensor))
        indices = temp[targets_tensor==label]
        if correct:
            # NOTE(review): `torch_cuda` looks like a module-level CUDA device
            # index defined elsewhere in this file — confirm.
            cnn = cnn.cuda(torch_cuda).eval()
            # Re-load images with a deterministic transform for evaluation.
            if train:
                temp_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_train_dir,
                    transform=process_PIL,
                )
            else:
                temp_dataset = Datasets.ImageFolder(
                    root=self.ImageNet_val_dir,
                    transform=process_PIL,
                )
            with torch.no_grad():
                wrong_set = []
                label_tensor = torch.Tensor([label]).long().cuda(torch_cuda)
                # Classify every candidate image one at a time and remember
                # the misclassified ones.
                for index in indices:
                    input_tensor = temp_dataset.__getitem__(index)[0]
                    input_tensor = input_tensor.cuda(torch_cuda).unsqueeze(0)
                    output_tensor = cnn(input_tensor)
                    if output_tensor.argmax() != label_tensor:
                        wrong_set.append(index)
                # Drop the misclassified indices from the result.
                for item in wrong_set:
                    indices = indices[indices!=item]
        torch.save(indices, file_name)
        return indices
@staticmethod
def show_image_from_tensor(img, inv=False, save_dir=None, dpi=300, tight=True):
    '''Display a (C, H, W) image tensor with matplotlib, optionally saving it.

    img: image tensor; a leading batch dimension of size 1 is squeezed away
    inv: flag to recover the nomalization transformation on images from ImageNet
    save_dir: if given, the figure is also written to this path as a PNG
    dpi: resolution used when saving
    tight: strip axes/margins before saving
    '''
    if img.dim() == 4:
        assert img.size(0) == 1, 'this function currently supports showing single image'
        img = img.squeeze(0)
        print('The batch dimension has been squeezed')
    if inv:
        # Undo the ImageNet mean/std normalization before display.
        img = data_agent.inv_normalize(img)
    npimg = img.cpu().numpy()
    #fig = plt.figure(figsize = (5, 15))
    fig = plt.figure()  # bug fix: variable was misspelled `fit`
    if len(npimg.shape) == 2:
        print('It is a gray image')
        plt.imshow(npimg, cmap='gray')
    else:
        # matplotlib expects channel-last (H, W, C).
        plt.imshow(np.transpose(npimg,(1,2,0)))
    #plt.show()
    if save_dir is not None:
        if tight:
            plt.xticks([])
            plt.yticks([])
            plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
        plt.savefig(fname=save_dir,
                    dpi=dpi, facecolor='w', edgecolor='w', format='png')
@staticmethod
def save_with_content(path, image, dpi=300):
    '''Save a numpy image to `path` with no axes, frame or padding.

    path: output file path
    image: numpy image with shape (h, w, c)
    dpi: resolution of the saved figure
    '''
    # Frameless figure whose axes span the full canvas, so only the image
    # content is written out.
    fig = plt.figure(frameon=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    plt.imshow(image)
    plt.savefig(path, dpi=dpi, bbox_inches='tight', pad_inches=0)
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Adapted from
    https://github.com/bearpaw/pytorch-classification/blob/master/utils/eval.py

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, one per k, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Bug fix: use reshape instead of view — the slice of the transposed
        # prediction matrix is non-contiguous, which makes view() raise a
        # RuntimeError on recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
3249217 | # Copyright (c) 2016 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Team(object):
    '''This class will hold a brief team description and the members of the each
    respective team along with their sets.
    '''

    def __init__(self, team_description, team_members, team_sets):
        '''(Team, str, [str], {str:str}) -> None
        Creates a basic team
        '''
        self._team_description = team_description
        self._team_members = team_members
        self._team_sets = team_sets
        # we do this to help with the hashing: member order must not matter
        self._team_members.sort()

    def __hash__(self):
        '''(Team) -> int
        Uses pythons built in hash function to make a hash for the team
        '''
        # Bug fix: the original hashed the (normally undefined) global name
        # `team_members` instead of the instance attribute.
        return hash(str(self._team_members))

    def __str__(self):
        ''' (Team) -> str
        Returns a string representaiton of everything
        '''
        ret = 'Team Description:\n'
        ret += str(self._team_description) + '\n'
        ret += 'Team Sets:\n'
        for i in range(len(self._team_members)):
            ret += str(self._team_sets[self._team_members[i]]) + '\n'
        return ret

    # getters and setters
    def getTeamDescription(self):
        return self._team_description

    def setTeamDescription(self, team_description):
        self._team_description = team_description

    def getTeamMembers(self):
        return self._team_members

    def setTeamMembers(self, team_members):
        # Bug fix: the original assigned the undefined name `_team_members`.
        self._team_members = team_members

    def getTeamSets(self, team_sets=None):
        # NOTE(review): despite the "get" name, this method historically acted
        # only as a setter. The (now optional) argument is still honoured for
        # backward compatibility, and the current sets are returned as well.
        if team_sets is not None:
            self._team_sets = team_sets
        return self._team_sets

    def setTeamSets(self, team_sets):
        # Proper setter counterpart, added for consistency with the other pairs.
        self._team_sets = team_sets
# debugging goes on here
if __name__ == '__main__':
    # Minimal smoke test: build a Team, use it as a dict key (exercises
    # __hash__) and print it (exercises __str__).
    sample_set = 'Azumarill @ Choice Band \n \
Ability: Huge Power \n \
EVs: 172 HP / 252 Atk / 84 Spe \n \
Adamant Nature \n \
- Play Rough \n \
- Waterfall \n \
- Aqua Jet\n \
- Superpower'
    team_description = 'testing team'
    team_members = ['azumarill', 'charizard']
    team_sets = {'azumarill': sample_set, 'charizard': 'dragon claw'}
    myteam = Team(team_description, team_members, team_sets)
    database = {myteam: 1}
    print(myteam)
| StarcoderdataPython |
class Add:
    """Three-operand addition; the third operand defaults to zero."""

    def add(self, n1, n2, n3=0):
        """Return the sum of the operands, announcing which add() ran."""
        print("add of Add")
        total = n1 + n2 + n3
        return total
class Add2:
    """Despite the name, multiplies the first two operands; n3 is ignored."""

    def add(self, n1, n2, n3=0):
        """Return n1 * n2 (n3 is accepted but unused), announcing which add() ran."""
        print("add of Add2")
        product = n1 * n2
        return product
# If several parents define a method with the same signature, the method of
# the parent declared first in the bases list takes precedence (Python's MRO);
# here that is Add2.
class Calculator(Add2, Add):
    def sub(self, n1, n2):
        # Plain subtraction; add() is inherited from Add2.
        return n1 - n2
o = Calculator()
# The MRO resolves add() to Add2.add, so this prints "add of Add2" then 2 (1 * 2).
print(o.add(1, 2))
print(o.sub(1, 2))
# print(o.add(1, 2, 3))
| StarcoderdataPython |
6410027 | <filename>storm_control/sc_library/datareader.py
#!/usr/bin/env python
"""
Classes that handles reading STORM movie files. This is used by
the Steve program and it assumes the existance of an XML file
that describes everything that one needs to know about a movie.
Hazen 07/15
"""
#
# FIXME: Why not just use the version if the storm-analysis project?
#
# Or maybe only support the .dax format as Steve is limited
# to whatever HALs current filetype is anyway?
#
import numpy
import os
from PIL import Image
import re
import storm_control.sc_library.parameters as parameters
def infToXmlObject(filename):
    """
    Creates a StormXMLObject from a .inf file that can be
    used by Steve. Note that this object is missing many
    of the properties of the standard object created from
    a setting xml file.
    """
    xml = parameters.StormXMLObject([])

    # Mark as "fake".
    xml.set("faked_xml", True)

    # Add acquisition sub-object.
    xml.set("acquisition", parameters.StormXMLObject([]))
    xml.set("acquisition.camera", "camera1")

    # Add camera1 sub-object.
    xml.set("camera1", parameters.StormXMLObject([]))

    # Add film sub-object.
    xml.set("film", parameters.StormXMLObject([]))

    # Add mosaic sub-object.
    xml.set("mosaic", parameters.StormXMLObject([]))

    # Figure out movie type.
    no_ext_name = os.path.splitext(filename)[0]
    if os.path.exists(no_ext_name + ".dax"):
        xml.set("film.filetype", ".dax")
    elif os.path.exists(no_ext_name + ".spe"):
        xml.set("film.filetype", ".spe")
    elif os.path.exists(no_ext_name + ".tif"):
        xml.set("film.filetype", ".tif")
    else:
        raise IOError("only .dax, .spe and .tif are supported (case sensitive..)")

    # Extract the movie information from the associated inf file.
    size_re = re.compile(r'frame dimensions = ([\d]+) x ([\d]+)')
    length_re = re.compile(r'number of frames = ([\d]+)')
    endian_re = re.compile(r' (big|little) endian')
    stagex_re = re.compile(r'Stage X = ([\d\.\-]+)')
    stagey_re = re.compile(r'Stage Y = ([\d\.\-]+)')
    scalemax_re = re.compile(r'scalemax = ([\d\.\-]+)')
    scalemin_re = re.compile(r'scalemin = ([\d\.\-]+)')
    parameters_re = re.compile(r'parameters file = (.+)')

    # Bug fix: default the stage position to the origin so that a .inf file
    # without "Stage X" / "Stage Y" lines no longer causes a NameError when
    # formatting the position string below.
    stage_x = 0.0
    stage_y = 0.0

    with open(filename) as fp:
        for line in fp:
            m = size_re.match(line)
            if m:
                xml.set("camera1.y_pixels", int(m.group(1)))
                xml.set("camera1.x_pixels", int(m.group(2)))
            m = length_re.match(line)
            if m:
                xml.set("acquisition.number_frames", int(m.group(1)))
            m = endian_re.search(line)
            if m:
                if (m.group(1) == "big"):
                    xml.set("film.want_big_endian", True)
                else:
                    xml.set("film.want_big_endian", False)
            m = stagex_re.match(line)
            if m:
                stage_x = float(m.group(1))
            m = stagey_re.match(line)
            if m:
                stage_y = float(m.group(1))
            m = scalemax_re.match(line)
            if m:
                xml.set("camera1.scalemax", int(m.group(1)))
            m = scalemin_re.match(line)
            if m:
                xml.set("camera1.scalemin", int(m.group(1)))
            m = parameters_re.match(line)
            if m:
                xml.set("parameters_file", m.group(1))

    pos_string = "{0:.2f},{1:.2f},0.00".format(stage_x, stage_y)
    xml.set("acquisition.stage_position", pos_string)

    return xml
def reader(filename):
    """
    Returns the appropriate object based on the file type as
    saved in the corresponding XML file.

    Movie metadata is taken from a sibling .xml settings file when one
    exists, otherwise it is reconstructed from the .inf file. Raises
    IOError when neither exists or the file type is unsupported.
    """
    no_ext_name = os.path.splitext(filename)[0]

    # Look for XML file.
    if os.path.exists(no_ext_name + ".xml"):
        xml = parameters.parameters(no_ext_name + ".xml", recurse = True)

    # If it does not exist, then create the xml object
    # from the .inf file.
    #
    # FIXME: This is not going to work correctly for films from a multiple
    #        camera setup where all of the cameras are saving films with
    #        an extension.
    #
    elif os.path.exists(no_ext_name + ".inf"):
        xml = infToXmlObject(no_ext_name + ".inf")

    else:
        raise IOError("Could not find an associated .xml or .inf file for " + filename)

    # Dispatch to the reader matching the recorded file type.
    file_type = xml.get("film.filetype")
    if (file_type == ".dax"):
        return DaxReader(filename = filename,
                         xml = xml)
    elif (file_type == ".spe"):
        return SpeReader(filename = filename,
                         xml = xml)
    elif (file_type == ".tif"):
        return TifReader(filename = filename,
                         xml = xml)
    else:
        print(file_type, "is not a recognized file type")
        raise IOError("only .dax, .spe and .tif are supported (case sensitive..)")
class DataReader(object):
    """
    The superclass containing those functions that
    are common to reading a STORM movie file.

    Subclasses should implement:
     1. __init__(self, filename, verbose = False)
       This function should open the file and extract the
       various key bits of meta-data such as the size in XY
       and the length of the movie.

     2. loadAFrame(self, frame_number)
       Load the requested frame and return it as numpy array.
    """
    def __init__(self, filename = None, xml = None, **kwds):
        """
        filename: path of the movie file.
        xml: parsed metadata object describing the movie.
        """
        super().__init__(**kwds)
        # File handle; subclasses that open the movie overwrite this.
        self.fileptr = False
        self.filename = filename
        self.xml = xml

        #
        # FIXME: What was this for? It is likely not that useful anymore
        #        with multiple camera setups. Now different cameras generate
        #        files with different extensions. There is only a single
        #        xml file with the basename, and each camera (at least for
        #        .dax) only has a very simple .inf file.
        #
        #        This is all going to break unless the setup had a "camera1"
        #        camera.
        #
        self.camera = self.xml.get("acquisition.camera", "camera1")

    def __del__(self):
        # Close the file on cleanup.
        self.closeFilePtr()

    def checkFrameNumber(self, frame_number):
        """Raise IOError when frame_number is outside [0, number_frames)."""
        # Check the requested frame number to be sure it is in range.
        if (frame_number < 0):
            raise IOError("frame_number must be greater than or equal to 0")
        if (frame_number >= self.number_frames):
            raise IOError("frame number must be less than " + str(self.number_frames))

    def closeFilePtr(self):
        """Close the file, if one is open."""
        if self.fileptr:
            self.fileptr.close()

    def filmFilename(self):
        """Returns the film name."""
        return self.filename

    def filmParameters(self):
        """Returns the film parameters (the metadata object)."""
        return self.xml

    def filmSize(self):
        """Returns the film size as [width, height, number of frames]."""
        return [self.image_width, self.image_height, self.number_frames]
class DaxReader(DataReader):
    """
    Dax reader class. This is a Zhuang lab custom format.

    Frames are stored as raw 16 bit unsigned integers, one frame after
    another; geometry and byte order come from the metadata object.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)

        # Byte order of the stored pixel data.
        self.bigendian = self.xml.get("film.want_big_endian", False)
        self.image_height = self.xml.get(self.camera + ".y_pixels")
        self.image_width = self.xml.get(self.camera + ".x_pixels")

        #
        # For a long time, HAL was recording the number of frames as a string, so
        # we need to make sure this is int or this will cause trouble in Python3.
        #
        self.number_frames = int(self.xml.get("acquisition.number_frames"))

        # open the dax file
        self.fileptr = open(self.filename, "rb")

    def loadAFrame(self, frame_number):
        """Load the requested frame and return it as a 2D numpy array."""
        if self.fileptr:
            self.checkFrameNumber(frame_number)
            # Each frame is image_height * image_width 16 bit (2 byte) pixels.
            self.fileptr.seek(frame_number * self.image_height * self.image_width * 2)
            image_data = numpy.fromfile(self.fileptr, dtype=numpy.uint16, count = self.image_height * self.image_width)
            image_data = numpy.transpose(numpy.reshape(image_data, [self.image_width, self.image_height]))
            if self.bigendian:
                # In-place swap to the native byte order.
                image_data.byteswap(True)
            return image_data
class SpeReader(DataReader):
    """
    SPE (Roper Scientific) reader class.

    Frame geometry, frame count and pixel format are read from fixed byte
    offsets in the 4100 byte SPE file header.
    """

    def __init__(self, **kwds):
        """Spe specific initialization: open the file & read the header."""
        super().__init__(**kwds)

        # Open the file & read the header.
        self.header_size = 4100
        self.fileptr = open(self.filename, "rb")

        # FIXME: Should check that these match the XML file.
        self.fileptr.seek(42)
        self.image_width = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(656)
        self.image_height = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(1446)
        self.number_frames = int(numpy.fromfile(self.fileptr, numpy.uint32, 1)[0])

        # Pixel storage type code; determines bytes/pixel and the numpy dtype.
        self.fileptr.seek(108)
        image_mode = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        if (image_mode == 0):
            # 32 bit float pixels.
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.float32
        elif (image_mode == 1):
            # 32 bit unsigned int pixels.
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.uint32
        elif (image_mode == 2):
            # 16 bit signed int pixels.
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.int16
        elif (image_mode == 3):
            # 16 bit unsigned int pixels.
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.uint16
        else:
            print("unrecognized spe image format: ", image_mode)

    def loadAFrame(self, frame_number, cast_to_int16 = True):
        """Load the requested frame and return it as a 2D numpy array.

        cast_to_int16: convert the pixel data to int16 regardless of the
            native storage type.
        """
        if self.fileptr:
            self.checkFrameNumber(frame_number)
            # Frames start immediately after the fixed-size header.
            self.fileptr.seek(self.header_size + frame_number * self.image_size)
            image_data = numpy.fromfile(self.fileptr, dtype=self.image_mode, count = self.image_height * self.image_width)
            if cast_to_int16:
                image_data = image_data.astype(numpy.int16)
            image_data = numpy.transpose(numpy.reshape(image_data, [self.image_height, self.image_width]))
            return image_data
class TifReader(DataReader):
    """
    TIF reader class.

    Uses PIL to read multi-page monochrome TIF movies.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)

        # PIL owns the file handle, so the raw file pointer stays unset.
        self.fileptr = False

        # Bug fix: `filename` was not defined in this scope; use the
        # attribute set by the base class instead.
        self.im = Image.open(self.filename)
        self.isize = self.im.size

        # FIXME: Should check that these match the XML file.
        self.image_width = self.isize[1]
        self.image_height = self.isize[0]

        # Cast to int for consistency with DaxReader (older HAL versions
        # recorded the frame count as a string).
        self.number_frames = int(self.xml.get("acquisition.number_frames"))

    def loadAFrame(self, frame_number, cast_to_int16 = True):
        """Load the requested frame and return it as a 2D numpy array.

        cast_to_int16: convert the pixel data to int16.
        """
        self.checkFrameNumber(frame_number)
        # Seek to the requested page of the multi-page TIF.
        self.im.seek(frame_number)
        image_data = numpy.array(list(self.im.getdata()))
        assert len(image_data.shape) == 1, "not a monochrome tif image."
        if cast_to_int16:
            image_data = image_data.astype(numpy.int16)
        image_data = numpy.transpose(numpy.reshape(image_data, (self.image_width, self.image_height)))
        return image_data
#
# The MIT License
#
# Copyright (c) 2013 <NAME>, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| StarcoderdataPython |
1675015 | <reponame>adeogliari/GeekUniversity_Python<filename>s05_estruturas_logicas_e_condicionais/s05_exercicios/s05_exercicio_20.py
"""
20) Dados três valores A, B, C, verificar se eles podem ser valores dos lados de um triângulo e, se forem,
se é um triângulo escaleno, equilátero ou isóscele, considerando os seguintes conceitos:
- O comprimento de cada lado de um triângulo é menor do que a soma dos outros dois lados.
- Chama-se equilátero o triângulo que tem três lados iguais
- Denominam-se isósceles o triângulo que tem o comprimento de dois lados iguais.
- Recebe o nome de escaleno o triângulo que tem os três lados diferentes.
"""
lado_a = float(input('Digite o tamanho do lado A \n'))
lado_b = float(input('Digite o tamanho do lado B \n'))
lado_c = float(input('Digite o tamanho do lado C \n'))
if (lado_a < (lado_b + lado_c)) \
and (lado_b < (lado_a + lado_c)) \
and (lado_c < (lado_a + lado_b)):
if (lado_a == lado_b) and (lado_a == lado_c):
print('Triângulo Equilátero')
elif ((lado_a == lado_b) and (lado_a != lado_c)) \
or ((lado_b == lado_c) and (lado_b != lado_a)) \
or ((lado_c == lado_a) and (lado_c != lado_b)):
print('Triângulo Isósceles')
elif lado_a != lado_b != lado_c:
print('Triângulo Escaleno')
else:
print('Não é um triângulo')
| StarcoderdataPython |
8045541 | """Dyson climate platform."""
import logging
from typing import List, Optional
from libdyson import DysonPureHotCoolLink
from custom_components.dyson_local.utils import environmental_property
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_DIFFUSE,
FAN_FOCUS,
SWING_ON,
SWING_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import Callable, HomeAssistant
from . import DysonEntity
from .const import DATA_DEVICES, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Modes and options this integration exposes to Home Assistant.
HVAC_MODES = [HVAC_MODE_OFF, HVAC_MODE_COOL, HVAC_MODE_HEAT]
FAN_MODES = [FAN_FOCUS, FAN_DIFFUSE]
SWING_MODES = [SWING_ON, SWING_OFF]

# Base models support target temperature; Link models add fan and swing control.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
SUPPORT_FLAGS_LINK = SUPPORT_FLAGS | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
    """Set up Dyson climate from a config entry."""
    device = hass.data[DOMAIN][DATA_DEVICES][config_entry.entry_id]
    name = config_entry.data[CONF_NAME]
    # Pure Hot+Cool Link devices get the fan/swing capable entity; all other
    # devices use the base climate entity.
    if isinstance(device, DysonPureHotCoolLink):
        entity = DysonPureHotCoolLinkEntity(device, name)
    else:  # DysonPureHotCool
        entity = DysonPureHotCoolEntity(device, name)
    async_add_entities([entity])
class DysonClimateEntity(DysonEntity, ClimateEntity):
    """Dyson climate entity base class.

    Temperatures are stored by the device in Kelvin and converted to Celsius
    at the Home Assistant boundary.
    """

    @property
    def hvac_mode(self) -> str:
        """Return hvac operation."""
        if not self._device.is_on:
            return HVAC_MODE_OFF
        if self._device.heat_mode_is_on:
            return HVAC_MODE_HEAT
        return HVAC_MODE_COOL

    @property
    def hvac_modes(self) -> List[str]:
        """Return the list of available hvac operation modes."""
        return HVAC_MODES

    @property
    def hvac_action(self) -> str:
        """Return the current running hvac operation."""
        if not self._device.is_on:
            return CURRENT_HVAC_OFF
        if self._device.heat_mode_is_on:
            # Heat mode can be enabled while the heater itself is idle.
            if self._device.heat_status_is_on:
                return CURRENT_HVAC_HEAT
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_COOL

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def target_temperature(self) -> int:
        """Return the target temperature in Celsius (device stores Kelvin)."""
        return self._device.heat_target - 273

    @environmental_property
    def _current_temperature_kelvin(self) -> int:
        """Return the current temperature in kelvin."""
        return self._device.temperature

    @property
    def current_temperature(self) -> Optional[int]:
        """Return the current temperature in Celsius, or None if unavailable."""
        temperature_kelvin = self._current_temperature_kelvin
        # The environmental value can be a string placeholder (e.g. while the
        # sensor is unavailable) — report unknown in that case.
        if isinstance(temperature_kelvin, str):
            return None
        return float(f"{(temperature_kelvin - 273.15):.1f}")

    @environmental_property
    def current_humidity(self) -> int:
        """Return the current humidity."""
        return self._device.humidity

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return 1

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return 37

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        if target_temp is None:
            _LOGGER.error("Missing target temperature %s", kwargs)
            return
        _LOGGER.debug("Set %s temperature %s", self.name, target_temp)
        # Limit the target temperature into acceptable range.
        target_temp = min(self.max_temp, target_temp)
        target_temp = max(self.min_temp, target_temp)
        # NOTE(review): conversion uses 273 here but 273.15 when reading —
        # confirm which offset the device protocol expects.
        self._device.set_heat_target(target_temp + 273)

    def set_hvac_mode(self, hvac_mode: str):
        """Set new hvac mode."""
        _LOGGER.debug("Set %s heat mode %s", self.name, hvac_mode)
        if hvac_mode == HVAC_MODE_OFF:
            self._device.turn_off()
        elif not self._device.is_on:
            # Any non-off mode implies the device must be powered on first.
            self._device.turn_on()
        if hvac_mode == HVAC_MODE_HEAT:
            self._device.enable_heat_mode()
        elif hvac_mode == HVAC_MODE_COOL:
            self._device.disable_heat_mode()
class DysonPureHotCoolLinkEntity(DysonClimateEntity):
    """Dyson Pure Hot+Cool Link entity.

    Extends the base climate entity with fan (focus/diffuse) and swing
    (oscillation) control.
    """

    @property
    def fan_mode(self) -> str:
        """Return the fan setting."""
        if self._device.focus_mode:
            return FAN_FOCUS
        return FAN_DIFFUSE

    @property
    def fan_modes(self) -> List[str]:
        """Return the list of available fan modes."""
        return FAN_MODES

    @property
    def swing_mode(self) -> str:
        """Return the swing setting."""
        if self._device.oscillation:
            return SWING_ON
        return SWING_OFF

    @property
    def swing_modes(self) -> List[str]:
        """Return the list of available swing modes."""
        return SWING_MODES

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_FLAGS_LINK

    def set_fan_mode(self, fan_mode: str) -> None:
        """Set fan mode of the device."""
        _LOGGER.debug("Set %s focus mode %s", self.name, fan_mode)
        if fan_mode == FAN_FOCUS:
            self._device.enable_focus_mode()
        elif fan_mode == FAN_DIFFUSE:
            self._device.disable_focus_mode()

    def set_swing_mode(self, swing_mode: str) -> None:
        """Set swing mode of the device."""
        _LOGGER.debug("Set %s oscillation mode %s", self.name, swing_mode)
        if swing_mode == SWING_ON:
            self._device.enable_oscillation()
        elif swing_mode == SWING_OFF:
            self._device.disable_oscillation()
class DysonPureHotCoolEntity(DysonClimateEntity):
    """Dyson Pure Hot+Cool entity.

    Inherits all behaviour from DysonClimateEntity unchanged; unlike the Link
    variant, no fan-focus or swing handling is added here.
    """
| StarcoderdataPython |
28585 | <filename>apps/my_app/handlers.py
import datetime
import fastapi
import pymongo
import pymongo.errors
import pymongo.results
from apps.common.enums import CodeAudiences
from apps.common.handlers import PasswordsHandler, TokensHandler
from fastapi_mongodb.exceptions import HandlerException, RepositoryException
from fastapi_mongodb.handlers import BaseHandler, mongo_duplicate_key_error_handler
from fastapi_mongodb.pagination import Paginator
from fastapi_mongodb.projectors import BaseProjector
from fastapi_mongodb.repositories import BaseRepositoryConfig
from fastapi_mongodb.sorting import SortBuilder
from fastapi_mongodb.my_types import OID
from apps.users.models import UserModel
from apps.users.repositories import UserRepository
from apps.users.schemas import JWTPayloadSchema, JWTRefreshSchema, UserCreateSchema, UserLoginSchema, UserUpdateSchema
from apps.my_app.models import DeviceModel
from apps.my_app.repositories import DeviceRepository
from apps.my_app.schemas import DeviceCreateSchema
__all__ = ["DeviceHandler"]
class DeviceHandler(BaseHandler):
    """Request handler implementing device operations backed by MongoDB."""

    def __init__(self, request: fastapi.Request):
        super().__init__(request=request)
        # Repository encapsulating MongoDB access for the devices collection.
        self.device_repository = DeviceRepository()

    async def create_device(self, request: fastapi.Request, device: DeviceCreateSchema) -> dict:
        """Create new device.

        Returns a dict with the insert acknowledgement flag and the new
        document id. A duplicate name is translated into the project's
        duplicate-key error response (unique index on "name" is assumed —
        see the except branch).
        """
        device_model = DeviceModel(**device.dict(exclude_unset=True))
        try:
            result: pymongo.results.InsertOneResult = await self.device_repository.insert_one(
                document=device_model.to_db(),
                session=request.state.db_session,
            )
        except pymongo.errors.DuplicateKeyError as error:
            # Translate the raw Mongo error into the project's API error shape.
            mongo_duplicate_key_error_handler(model_name="Device", fields=["name"], error=error)
        else:
            return {"acknowledged": result.acknowledged, "inserted_id": result.inserted_id}
        #return {"acknowledged": "True", "inserted_id": "000000000000000000000000"}
5026515 | <gh_stars>1-10
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update personality command."""
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from broker.grntest import VerifyGrnsMixin
from broker.personalitytest import PersonalityTestMixin
class TestUpdatePersonality(VerifyGrnsMixin, PersonalityTestMixin,
TestBrokerCommand):
def test_100_update_capacity(self):
    # Change the VM host capacity function of the esx_cluster personality.
    command = ["update_personality", "--personality", "vulcan-10g-server-prod",
               "--archetype", "esx_cluster",
               "--vmhost_capacity_function", "{'memory': (memory - 1500) * 0.94}"] + self.valid_just_tcm
    self.noouttest(command)

def test_115_verify_update_capacity(self):
    # The new capacity function must show up in show_personality output.
    command = ["show_personality", "--personality", "vulcan-10g-server-prod",
               "--archetype", "esx_cluster"]
    out = self.commandtest(command)
    self.matchoutput(out,
                     "VM host capacity function: {'memory': (memory - 1500) * 0.94}",
                     command)
def test_120_update_basic_attributes(self):
    # Promote first so the update below applies to an unstaged personality.
    command = ["promote", "--personality", "utunused/dev",
               "--archetype=aquilon"]
    self.successtest(command)

    command = ["update_personality", "--personality", "utunused/dev",
               "--archetype=aquilon",
               "--cluster_required",
               "--noconfig_override",
               "--unstaged",
               "--comments", "New personality comments"]
    self.successtest(command)

def test_121_verify_updates(self):
    # All attributes set in test_120 must be visible.
    command = ["show_personality", "--personality=utunused/dev",
               "--archetype=aquilon"]
    out = self.commandtest(command)
    self.matchoutput(out, "Personality: utunused/dev Archetype: aquilon",
                     command)
    self.matchoutput(out, "Comments: New personality comments", command)
    self.matchoutput(out, "Requires clustered hosts", command)
    self.matchclean(out, "override", command)
    self.verifycatpersonality("aquilon", "utunused/dev")

def test_125_restore_utunused_dev(self):
    # Well, except the comments, which are removed
    command = ["update_personality", "--personality", "utunused/dev",
               "--archetype=aquilon",
               "--nocluster_required",
               "--config_override",
               "--comments", ""]
    self.successtest(command)

def test_126_verify_utunused_dev(self):
    # Verify the restore: no comments, no cluster requirement, override on.
    command = ["show_personality", "--personality=utunused/dev",
               "--archetype=aquilon"]
    out = self.commandtest(command)
    self.matchclean(out, "Comments", command)
    self.matchclean(out, "Requires clustered hosts", command)
    self.matchoutput(out, "Config override: enabled", command)
    self.verifycatpersonality("aquilon", "utunused/dev",
                              config_override=True)
def test_140_update_owner_grn(self):
    # Change the personality owner GRN; hosts inheriting it should follow.
    command = ["update_personality", "--personality", "compileserver",
               "--archetype", "aquilon", "--grn", "grn:/ms/ei/aquilon/ut2"]
    # Some hosts may emit warnings if 'aq make' was not run on them
    self.successtest(command)

def test_141_verify_show_personality(self):
    command = ["show_personality", "--personality", "compileserver"]
    out = self.commandtest(command)
    self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/ut2", command)

def test_141_verify_show_unittest02(self):
    # Different owner, should not be updated
    command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
    out = self.commandtest(command)
    self.matchoutput(out, "Personality: compileserver", command)
    self.searchoutput(out, r"^ Owned by GRN: grn:/ms/ei/aquilon/aqd", command)

def test_141_verify_show_unittest21(self):
    # Owner is the same as the personality - should be updated
    command = ["show_host", "--hostname", "unittest21.aqd-unittest.ms.com"]
    out = self.commandtest(command)
    self.matchoutput(out, "Personality: compileserver", command)
    self.searchoutput(out, r"^ Owned by GRN: grn:/ms/ei/aquilon/ut2", command)

def test_141_verify_cat_personality(self):
    # The compiled template must carry the new eon_id.
    command = ["cat", "--personality", "compileserver"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"/system/personality/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/ut2"], command)

def test_141_verify_cat_unittest02(self):
    # Different owner, should not be updated
    command = ["cat", "--hostname", "unittest02.one-nyp.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/aqd"], command)

def test_141_verify_cat_unittest20(self):
    # Inherited - should be updated
    command = ["cat", "--hostname", "unittest20.aqd-unittest.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/ut2"], command)

def test_141_verify_cat_unittest21(self):
    # Owner is the same as the personality - should be updated
    command = ["cat", "--hostname", "unittest21.aqd-unittest.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/ut2"], command)
def test_142_update_owner_grn_nohosts(self):
    # With --leave_existing the hosts keep their current owner GRN.
    command = ["update_personality", "--personality", "compileserver",
               "--archetype", "aquilon", "--grn", "grn:/ms/ei/aquilon/unittest",
               "--leave_existing"]
    self.statustest(command)

def test_143_verify_show_personality(self):
    command = ["show_personality", "--personality", "compileserver"]
    out = self.commandtest(command)
    self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest", command)

def test_143_verify_show_unittest02(self):
    # Host with its own GRN: unchanged.
    command = ["show_host", "--hostname", "unittest02.one-nyp.ms.com"]
    out = self.commandtest(command)
    self.matchoutput(out, "Personality: compileserver", command)
    self.searchoutput(out, r"^ Owned by GRN: grn:/ms/ei/aquilon/aqd", command)

def test_143_verify_show_unittest21(self):
    # Kept at the previous GRN because of --leave_existing.
    command = ["show_host", "--hostname", "unittest21.aqd-unittest.ms.com"]
    out = self.commandtest(command)
    self.matchoutput(out, "Personality: compileserver", command)
    self.searchoutput(out, r"^ Owned by GRN: grn:/ms/ei/aquilon/ut2", command)

def test_144_verify_cat_personality(self):
    # The personality template itself carries the new eon_id.
    command = ["cat", "--personality", "compileserver"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"/system/personality/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/unittest"], command)

def test_144_verify_cat_unittest02(self):
    # Different owner, should not be updated
    command = ["cat", "--hostname", "unittest02.one-nyp.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/aqd"], command)

def test_144_verify_cat_unittest20(self):
    # Inherited, should be updated
    command = ["cat", "--hostname", "unittest20.aqd-unittest.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/unittest"], command)

def test_144_verify_cat_unittest21(self):
    # Should not be updated due to --leave_existing
    command = ["cat", "--hostname", "unittest21.aqd-unittest.ms.com", "--data"]
    out = self.commandtest(command)
    self.searchoutput(out, r'"system/owner_eon_id" = %d;' %
                      self.grns["grn:/ms/ei/aquilon/ut2"], command)
def test_170_make_staged(self):
self.check_plenary_gone("aquilon", "personality",
"compileserver+next", "config")
self.noouttest(["update_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--staged"])
self.check_plenary_exists("aquilon", "personality",
"compileserver+next", "config")
def test_171_show_current(self):
command = ["show_personality", "--personality", "compileserver",
"--archetype", "aquilon"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: current", command)
    def test_171_cat_current(self):
        # The "current" stage plenary should still be cat-able after staging.
        self.verifycatpersonality("aquilon", "compileserver", stage="current")
def test_172_show_next(self):
command = ["show_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--personality_stage", "next"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: next", command)
    def test_172_cat_next(self):
        # The "next" stage plenary should be cat-able as well.
        self.verifycatpersonality("aquilon", "compileserver", stage="next")
def test_174_delete_next(self):
self.noouttest(["del_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--personality_stage", "next"])
def test_175_verify_next_gone(self):
command = ["show_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--personality_stage", "next"]
out = self.notfoundtest(command)
self.matchoutput(out, "Personality aquilon/compileserver does not have "
"stage next.", command)
self.check_plenary_gone("aquilon", "personality",
"compileserver+next", "config")
def test_176_create_next_again(self):
self.noouttest(["update_personality", "--personality", "compileserver",
"--archetype", "aquilon"])
def test_178_make_unstaged(self):
self.check_plenary_exists("aquilon", "personality",
"compileserver+next", "config")
self.noouttest(["update_personality", "--personality", "compileserver",
"--archetype", "aquilon", "--unstaged"])
self.check_plenary_gone("aquilon", "personality",
"compileserver+next", "config")
def test_179_verify_unstaged(self):
command = ["show_personality", "--personality", "compileserver",
"--archetype", "aquilon"]
out = self.commandtest(command)
self.matchclean(out, "Stage:", command)
    def test_179_cat_unstaged(self):
        # After unstaging, cat works without any stage qualifier.
        self.verifycatpersonality("aquilon", "compileserver")
def test_200_invalid_function(self):
""" Verify that the list of built-in functions is restricted """
command = ["update_personality", "--personality", "vulcan-10g-server-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "locals()"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "name 'locals' is not defined", command)
def test_200_invalid_type(self):
command = ["update_personality", "--personality", "vulcan-10g-server-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "memory - 100"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "The function should return a dictonary.", command)
def test_200_invalid_dict(self):
command = ["update_personality", "--personality", "vulcan-10g-server-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'memory': 'bar'}"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out,
"The function should return a dictionary with all "
"keys being strings, and all values being numbers.",
command)
def test_200_missing_memory(self):
command = ["update_personality", "--personality", "vulcan-10g-server-prod",
"--archetype", "esx_cluster",
"--vmhost_capacity_function", "{'foo': 5}"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out,
"The memory constraint is missing from the returned "
"dictionary.", command)
def test_200_update_cluster_inuse(self):
command = ["update_personality", "--personality=vulcan-10g-server-prod",
"--archetype=esx_cluster",
"--cluster"] + self.valid_just_tcm
out = self.badrequesttest(command)
self.matchoutput(out, "Personality esx_cluster/vulcan-10g-server-prod is in use", command)
def test_200_missing_personality(self):
command = ["update_personality", "--archetype", "aquilon",
"--personality", "personality-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out, "Personality personality-does-not-exist, "
"archetype aquilon not found.", command)
def test_200_missing_personality_stage(self):
command = ["update_personality", "--archetype", "aquilon",
"--personality", "nostage",
"--personality_stage", "previous"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality aquilon/nostage does not have stage "
"previous.",
command)
def test_200_change_environment(self):
command = ["update_personality", "--personality=utunused/dev",
"--archetype=aquilon", "--host_environment=infra"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Personality aquilon/utunused/dev already has "
"its environment set to dev, and cannot be updated.",
command)
if __name__ == '__main__':
    # Allow running this module's tests stand-alone.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestUpdatePersonality))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.