text stringlengths 38 1.54M |
|---|
import functools
from typing import Type
from ._Test import Test
from .. import AbstractTest
def ExceptionTest(*exceptions: Type[Exception]):
    """
    Decorator that specifies a test that should raise one
    of the given exception classes.
    :param exceptions: The exceptions, one of which should be raised.
    """
    def applicator(method):
        # Promote the plain method into a Test first
        test_method = Test(method)
        # Wrap it so that the expected exception is asserted on each call
        @functools.wraps(test_method)
        def when_called(test: AbstractTest):
            # assertRaises accepts a tuple of exception classes
            with test.assertRaises(exceptions):
                return test_method(test)
        return when_called
    return applicator
|
# Truth table: 17 rows x 5 columns; row 0 is the header, the rest start False.
t = [[False] * 5 for _ in range(17)]
t[0] = ['A', 'B', 'C', 'D', 'F']
# Column A (index 0): true for rows 8-16.
for row in range(8, 17):
    t[row][0] = True
# Column B (index 1): true in two runs of four rows, starting at 5 and 13.
for start in range(5, 14, 8):
    for offset in range(4):
        t[start + offset][1] = True
# Column D (index 3): true in pairs starting at every fourth row from 3.
for row in range(3, 17, 4):
    t[row][3] = True
    t[row + 1][3] = True
# Column F (index 4) = ((A and D) or D) and (B != A), evaluated per row.
for row in t:
    row[4] = ((row[0] and row[3] or row[3]) and (row[1] != row[0]))
for row in t:
    print(row)
|
# Environment variables that affect:
# KF_HOME - base folder for kungfu files
# KF_LOG_LEVEL - logging level
# KF_NO_EXT - disable extensions if set
import kungfu.command as kfc
from kungfu.command import __all__
def main():
    """Entry point: dispatch to the kungfu command-line framework."""
    kfc.execute()
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python2
# Flask with necessary methods for route handling and logging session data
from flask import Flask, render_template, url_for, request, redirect, flash, \
jsonify, session as login_session, make_response
# SQLAlchemy for configuring database schema and CRUD operations on the data
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem, User
# Oauth2 to create correct credentials for Google+ and Facebook
# registration and login
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
# Tools for extracting responses from API calls and decoding JSON objects into
# usable data
import httplib2
import json
import requests
# Generate random values for users' state keys
import random
import string
"""This is a multi-user restaurant app that utilizes flask, PostgresSQL, the
SQLAlchemy ORM and oauth2 for Google+ and Facebook account
authentication/authorization.
"""
# The Flask application instance for this module
app = Flask(__name__)
# Reference client_secrets.json objects as 'CLIENT_ID'
CLIENT_ID = json.loads(
    open('/var/www/html/qmenu/client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Quang's Restaurant Menu App"
# Bind database file and configure SQLAlchemy session
# NOTE(review): one module-level Session is shared by every request; plain
# SQLAlchemy sessions are not thread-safe — confirm the WSGI server runs
# this app single-threaded, or switch to scoped_session.
engine = create_engine('postgresql+psycopg2://postgres:password@localhost/restaurants')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# ===================================
# HELPER FUNCTIONS FOR ACCOUNT ACCESS
# ===================================
# 1. Create a new user
def createUser(login_session):
    """Create a User row from the session's OAuth profile data.

    :param login_session: Flask session holding 'username', 'email' and
        'picture' keys populated by an OAuth provider callback.
    :return: The id of the newly created user.
    """
    newUser = User(name=login_session['username'],
                   email=login_session['email'],
                   picture=login_session['picture'])
    session.add(newUser)
    session.commit()
    # After commit SQLAlchemy populates the generated primary key on the
    # instance itself, so the extra lookup query the original version ran
    # (which would also break if two rows shared an email) is unnecessary.
    return newUser.id
# 2. Grab user info
def getUserInfo(user_id):
    """Return the User row with the given id.

    :param user_id: Primary key of the user to fetch.
    :return: The matching User object; `.one()` raises if no row matches.
    """
    user = session.query(User).filter_by(id=user_id).one()
    return user
# 3. Check if user exists
def getUserID(email):
    """Return the user.id for *email*, or None if no such user exists.

    Email is the lookup key so the function works for both Google+ and
    Facebook accounts.

    :param email: Email address to look up.
    :return: The user's id, or None when the user is not registered.
    """
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. `.one()` raises when zero (or multiple) rows
        # match; treat any lookup failure as "user not registered".
        return None
# ======================
# LOGIN, SIGNUP, PROFILE
# ======================
@app.route('/login')
def showLogin():
    """Show the login page with a fresh anti-forgery state token.

    If a user is already logged in, respond with a 400 JSON error instead
    of issuing a new state token.
    """
    if 'username' not in login_session:
        # Random 32-char state key, checked later by the OAuth callbacks
        # to protect against cross-site request forgery.
        state = ''.join(random.choice(string.ascii_uppercase + string.digits)
                        for x in range(32))
        login_session['state'] = state
        return render_template('login.html', STATE=state)
    else:
        # FIX: 400 was previously passed to json.dumps() (as its skipkeys
        # argument) instead of make_response(), so the error was returned
        # with HTTP status 200. Also replaced py2-only xrange with range.
        response = make_response(json.dumps('Invalid request made.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Complete the Google+ OAuth flow for the POSTed one-time code.

    Validates the anti-forgery state token, exchanges the authorization
    code for credentials, verifies the access token against Google's
    tokeninfo endpoint, stores the user's profile data in the session and
    redirects to the profile page. Every error path returns a JSON
    response with an appropriate HTTP status.
    """
    # Validate state key (anti-CSRF: must match the value issued at login)
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # Obtain the one-time authorization code sent by the client
        code = request.data
        try:
            # Exchange authorization code for credentials object
            oauth_flow = flow_from_clientsecrets('/var/www/html/qmenu/client_secrets.json',
                                                 scope='')
            # 'postmessage' is the redirect URI for the one-time-code flow
            oauth_flow.redirect_uri = 'postmessage'
            credentials = oauth_flow.step2_exchange(code)
        # Handle any errors from flow exchange
        except FlowExchangeError:
            response = make_response(
                json.dumps('Failed to upgrade the authorization code.'), 401)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Verify the token with Google's tokeninfo endpoint
        access_token = credentials.access_token
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
               % access_token)
        h = httplib2.Http()
        result = json.loads(h.request(url, 'GET')[1])
        # Abort if the tokeninfo endpoint reported an error
        if result.get('error') is not None:
            response = make_response(json.dumps(result.get('error')), 500)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Verify that the token belongs to the user currently logging in
        gplus_id = credentials.id_token['sub']
        if result['user_id'] != gplus_id:
            response = make_response(
                json.dumps("Token's user ID doesn't match given user ID."),
                401)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Verify that the token was issued to this application
        if result['issued_to'] != CLIENT_ID:
            response = make_response(
                json.dumps("Token's client ID does not match the app's."), 401)
            print("Token's client ID does not match the app's.")
            response.headers['Content-Type'] = 'application/json'
            return response
        # Short-circuit if this user is already connected in this session
        stored_token = login_session.get('access_token')
        stored_gplus_id = login_session.get('gplus_id')
        if stored_token is not None and gplus_id == stored_gplus_id:
            response = make_response(
                json.dumps('Current user is already connected.'), 200)
            response.headers['Content-Type'] = 'application/json'
            return response
        # Store provider and token in the session for later use / logout
        login_session['provider'] = 'google'
        login_session['access_token'] = credentials.access_token
        login_session['gplus_id'] = gplus_id
        # Pull the user's profile from the userinfo endpoint
        userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
        params = {'access_token': access_token, 'alt': 'json'}
        answer = requests.get(userinfo_url, params=params)
        data = answer.json()
        # Store user info in the session
        login_session['username'] = data['name']
        login_session['picture'] = data['picture']
        login_session['email'] = data['email']
        # Create a local account on first login, then remember its id
        user_id = getUserID(login_session['email'])
        if user_id is None:
            user_id = createUser(login_session)
        login_session['user_id'] = user_id
        return redirect(url_for('viewProfile'))
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the current user's Google+ access token.

    Asks Google's revoke endpoint to invalidate the token stored in the
    session. Returns a JSON response in every case: 401 when no user is
    connected, 400 when revocation fails, 200 on success.
    """
    access_token = login_session.get('access_token')
    # Only disconnect a connected user
    if access_token is None:
        response = make_response(json.dumps('Current user not connected'), 401)
        # FIX: the header name was misspelled 'Content/Type'.
        response.headers['Content-Type'] = 'application/json'
        return response
    url = ('https://accounts.google.com/o/oauth2/revoke?token=%s'
           % access_token)
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    # Error: Google refused to revoke the token
    if result['status'] != "200":
        # FIX: 400 was previously passed to json.dumps() (skipkeys) instead
        # of make_response(), so the error went out with status 200.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
    # FIX: the success path previously fell off the end and returned None,
    # which makes Flask raise when this view is hit directly as a route.
    response = make_response(json.dumps('Successfully disconnected.'), 200)
    response.headers['Content-Type'] = 'application/json'
    return response
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
    """Complete the Facebook OAuth flow for the POSTed short-lived token.

    Validates the anti-forgery state token, exchanges the short-lived token
    for a long-lived one via the Graph API, pulls the user's name, email,
    id and profile picture, stores everything in the session and redirects
    to the profile page.
    """
    # Validate state key (protect against cross-site forgery attacks)
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # Obtain access_token for long-lived server token with GET method
        access_token = request.data
        # Pass client_secrets to verify server's identity
        app_id = json.loads(
            open('/var/www/html/qmenu/fb_client_secrets.json', 'r').read())['web']['app_id']
        app_secret = json.loads(
            open('/var/www/html/qmenu/fb_client_secrets.json', 'r').read())['web']['app_secret']
        # Exchange the short-lived token for a long-lived one
        url = ('https://graph.facebook.com/v2.9/oauth/access_token?grant_type'
               '=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_'
               'token=%s' % (app_id, app_secret, access_token))
        h = httplib2.Http()
        result = h.request(url, 'GET')[1]
        data = json.loads(result)
        # Extract access_token from response (kept in query-string form)
        token = "access_token=" + data['access_token']
        # If token works, we can use it to make API calls with this new token
        # And store the call result data into data
        url = ('https://graph.facebook.com/v2.9/me?%s&fields=name,id,email'
               % token)
        h = httplib2.Http()
        result = h.request(url, 'GET')[1]
        data = json.loads(result)
        # Store the user's profile data into our session
        login_session['provider'] = 'facebook'
        login_session['username'] = data["name"]
        login_session['email'] = data["email"]
        login_session['facebook_id'] = data["id"]
        # The token must be stored in the login_session to properly logout
        login_session['access_token'] = token
        # Facebook uses a separate API call to retrieve a profile picture
        url = ('https://graph.facebook.com/v2.9/me/picture?%s&redirect=0&'
               'height=200&width=200' % token)
        h = httplib2.Http()
        result = h.request(url, 'GET')[1]
        data = json.loads(result)
        # Store picture data into session
        login_session['picture'] = data["data"]["url"]
        # Create a local account on first login, then remember its id
        user_id = getUserID(login_session['email'])
        if user_id is None:
            user_id = createUser(login_session)
        login_session['user_id'] = user_id
        return redirect(url_for('viewProfile'))
@app.route('/fbdisconnect')
def fbdisconnect():
    """Disconnect a Facebook user by sending a DELETE request to the Graph
    API, revoking this app's permissions for the stored facebook_id.

    NOTE(review): this view returns None, so hitting /fbdisconnect directly
    as a route would make Flask raise; it appears intended to be called
    only from disconnect() — confirm with callers.
    """
    facebook_id = login_session['facebook_id']
    # The access token must also be included to successfully logout
    # (stored in "access_token=<value>" query-string form by fbconnect)
    access_token = login_session['access_token']
    url = ('https://graph.facebook.com/%s/permissions?access_token=%s'
           % (facebook_id, access_token))
    h = httplib2.Http()
    result = h.request(url, 'DELETE')[1]
@app.route('/disconnect')
def disconnect():
    """Provider-agnostic logout route.

    Dispatches to the Google+ or Facebook disconnect helper based on the
    'provider' stored in the session, then clears all session data shared
    by both providers. Returns a 400 JSON error when nobody is logged in.
    """
    if 'provider' in login_session:
        # If user signed in with Google
        if login_session['provider'] == 'google':
            # Run Google disconnect function
            gdisconnect()
            # Clear Google-exclusive session data
            del login_session['access_token']
            del login_session['gplus_id']
        # If user signed in with Facebook
        if login_session['provider'] == 'facebook':
            # Run Facebook disconnect function
            fbdisconnect()
            # Clear Facebook-exclusive session data
            del login_session['facebook_id']
        # Then clear session data that was stored for either provider
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        del login_session['user_id']
        del login_session['provider']
        flash('Successfully logged out. See you again soon!')
        return redirect(url_for('showRestaurants'))
    # Error if no data in session (user was not even logged in)
    else:
        # FIX: 400 was previously passed to json.dumps() (its skipkeys
        # argument) instead of make_response(), so the error went out
        # with HTTP status 200.
        response = make_response(json.dumps('Invalid request made.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
@app.route('/profile')
def viewProfile():
    """Show the logged-in user's profile page with the restaurants he or
    she has posted; redirect anonymous visitors to the login page.
    """
    # Guard clause: the profile is only available to authenticated users
    if 'username' not in login_session:
        flash('Please log in to view your profile!')
        return redirect(url_for('showLogin'))
    # Pull the current user and the restaurants belonging to them
    current_user = getUserInfo(login_session['user_id'])
    restaurants = session.query(Restaurant).filter_by(
        user_id=current_user.id).all()
    return render_template('profile.html', restaurants=restaurants)
# =============
# API ENDPOINTS
# =============
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
    """API endpoint listing every menu item of the given restaurant."""
    # .one() doubles as an existence check for the restaurant id
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    menu_items = session.query(MenuItem).filter_by(
        restaurant_id=restaurant_id).all()
    return jsonify(MenuItems=[item.serialize for item in menu_items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
    """API endpoint for a single menu item, looked up by its id."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    return jsonify(Menu_Item=item.serialize)
# API Endpoint for all restaurants
@app.route('/restaurant/JSON')
def restaurantsJSON():
    """API endpoint listing every restaurant saved in the database."""
    all_restaurants = session.query(Restaurant).all()
    serialized = [r.serialize for r in all_restaurants]
    return jsonify(restaurants=serialized)
# ==============
# REGULAR ROUTES
# ==============
@app.route('/')
@app.route('/restaurant/')
def showRestaurants():
    """Homepage listing all restaurants alphabetically.

    Logged-in users get a template with a link to post a new restaurant;
    everyone else gets the public template. Mapped to two routes.
    """
    restaurants = session.query(Restaurant).order_by(Restaurant.name).all()
    # Public view for anonymous visitors
    if 'username' not in login_session:
        return render_template('publicrestaurants.html',
                               restaurants=restaurants)
    # Logged-in view
    return render_template('restaurants.html', restaurants=restaurants)
@app.route('/restaurant/new/', methods=['GET', 'POST'])
def newRestaurant():
    """Form for logged-in users to post a new restaurant.

    GET renders the form; POST creates the restaurant and redirects to
    the homepage. Anonymous visitors are sent to the login page.
    """
    # Guard clause: creation requires an authenticated user
    if 'username' not in login_session:
        flash('You have to be logged in to do that!')
        return redirect('/login')
    if request.method == 'POST':
        created = Restaurant(name=request.form['name'],
                             user_id=login_session['user_id'])
        session.add(created)
        flash('New restaurant, "%s", successfully created!'
              % created.name)
        session.commit()
        return redirect(url_for('showRestaurants'))
    # GET: render the creation form
    return render_template('newRestaurant.html')
@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET', 'POST'])
def editRestaurant(restaurant_id):
    """Edit a restaurant's name; only allowed for its owner.

    GET renders the edit form; POST (with a non-empty name) applies and
    persists the rename. Non-owners receive a 400 JSON error; anonymous
    visitors are redirected to the login page.
    """
    # Reroute if not logged in
    if 'username' not in login_session:
        flash('You have to be logged in to do that!')
        return redirect('/login')
    else:
        # Restaurant to edit
        editedRestaurant = session.query(Restaurant).filter_by(
            id=restaurant_id).one()
        # Confirm user is owner of restaurant
        current_user = getUserInfo(login_session['user_id'])
        if current_user.id == editedRestaurant.user_id:
            # POST with a non-empty name
            if (request.method == 'POST' and request.form['name']):
                editedRestaurant.name = request.form['name']
                # FIX: the edit was never committed, so the rename was
                # silently lost when the session was discarded.
                session.add(editedRestaurant)
                session.commit()
                flash('Restaurant successfully edited to "%s"'
                      % editedRestaurant.name)
                return redirect(url_for('showRestaurants'))
            # GET (or empty name): render the edit form
            else:
                return render_template('editRestaurant.html',
                                       restaurant=editedRestaurant)
        # Error if not owner of restaurant
        else:
            # FIX: 400 was previously passed to json.dumps() (skipkeys)
            # instead of make_response(), so the error went out as 200.
            response = make_response(
                json.dumps('Malicious request detected. You are not authorized'
                           '. Your IP has been logged for security purposes.'),
                400)
            response.headers['Content-Type'] = 'application/json'
            return response
@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
    """Delete a restaurant; only allowed for its owner.

    GET renders a confirmation page; POST deletes the row and redirects to
    the homepage. Non-owners receive a 400 JSON error; anonymous visitors
    are redirected to the login page.
    """
    # Reroute if not logged in
    if 'username' not in login_session:
        flash('You have to be logged in to do that!')
        return redirect('/login')
    else:
        # Restaurant to delete
        restaurantToDelete = session.query(Restaurant).filter_by(
            id=restaurant_id).one()
        # Confirm user is owner of restaurant
        current_user = getUserInfo(login_session['user_id'])
        if current_user.id == restaurantToDelete.user_id:
            # POST: perform the deletion
            if request.method == 'POST':
                session.delete(restaurantToDelete)
                flash('Restaurant, "%s", successfully deleted!'
                      % restaurantToDelete.name)
                session.commit()
                # FIX: showRestaurants takes no arguments; the extraneous
                # restaurant_id kwarg only polluted the redirect URL's
                # query string.
                return redirect(url_for('showRestaurants'))
            # GET: render the confirmation page
            else:
                return render_template('deleteRestaurant.html',
                                       restaurant=restaurantToDelete)
        # Error if not owner of restaurant
        else:
            # FIX: 400 was previously passed to json.dumps() (skipkeys)
            # instead of make_response(), so the error went out as 200.
            response = make_response(
                json.dumps('Malicious request detected. You are not authorized'
                           '. Your IP has been logged for security purposes.'),
                400)
            response.headers['Content-Type'] = 'application/json'
            return response
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
    """A restaurant's main page listing its menu items.

    Owners get the editable template; logged-in non-owners and anonymous
    visitors get the public one. Mapped to two routes.
    """
    # Grab restaurant and its menu items
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    items = session.query(MenuItem).filter_by(
        restaurant_id=restaurant_id).all()
    # Anonymous visitor: public view without user context
    if 'username' not in login_session:
        return render_template('publicmenu.html', items=items,
                               restaurant=restaurant)
    current_user = getUserInfo(login_session['user_id'])
    # Owner: editable view
    if current_user.id == restaurant.user_id:
        return render_template('menu.html', items=items,
                               restaurant=restaurant)
    # Logged in but not the owner: public view with user context
    return render_template('publicmenu.html', items=items,
                           restaurant=restaurant,
                           user=current_user)
@app.route('/restaurant/<int:restaurant_id>/menu/new/',
           methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """Create a new menu item for a restaurant; owner-only.

    GET renders the form; POST inserts the item and redirects to the
    restaurant's menu. Non-owners receive a 400 JSON error; anonymous
    visitors are redirected to the login page.
    """
    # Reroute if not logged in
    if 'username' not in login_session:
        flash('You have to be logged in to do that!')
        return redirect('/login')
    else:
        # Grab restaurant
        restaurant = session.query(Restaurant).filter_by(
            id=restaurant_id).one()
        # Confirm user is owner of restaurant
        current_user = getUserInfo(login_session['user_id'])
        if current_user.id == restaurant.user_id:
            # POST: create and persist the new item
            if request.method == 'POST':
                newItem = MenuItem(name=request.form['name'],
                                   description=request.form['description'],
                                   price=request.form['price'],
                                   course=request.form['course'],
                                   restaurant_id=restaurant_id,
                                   user_id=login_session['user_id'])
                session.add(newItem)
                session.commit()
                flash('New menu item, "%s", successfully created!'
                      % newItem.name)
                return redirect(url_for('showMenu',
                                        restaurant_id=restaurant_id))
            # GET: render the creation form
            else:
                return render_template('newmenuitem.html',
                                       restaurant_id=restaurant_id)
        # Error if not owner of restaurant
        else:
            # FIX: 400 was previously passed to json.dumps() (skipkeys)
            # instead of make_response(), so the error went out as 200.
            response = make_response(
                json.dumps('Malicious request detected. You are not authorized'
                           '. Your IP has been logged for security purposes.'),
                400)
            response.headers['Content-Type'] = 'application/json'
            return response
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit',
           methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """Edit a menu item's fields; only the restaurant's owner may do so.

    GET renders the edit form; POST updates each non-empty submitted field
    and redirects to the menu. Non-owners receive a 400 JSON error;
    anonymous visitors are redirected to the login page.
    """
    # Reroute if not logged in
    if 'username' not in login_session:
        flash('You have to be logged in to do that!')
        return redirect('/login')
    else:
        # Grab edit item and its restaurant
        editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
        restaurant = session.query(Restaurant).filter_by(
            id=restaurant_id).one()
        # Confirm user is owner of restaurant
        current_user = getUserInfo(login_session['user_id'])
        if current_user.id == restaurant.user_id:
            # POST: apply only the fields the form actually filled in
            if request.method == 'POST':
                if request.form['name']:
                    editedItem.name = request.form['name']
                if request.form['description']:
                    editedItem.description = request.form['description']
                if request.form['price']:
                    editedItem.price = request.form['price']
                if request.form['course']:
                    editedItem.course = request.form['course']
                session.add(editedItem)
                session.commit()
                flash('Menu item successfully edited!')
                return redirect(url_for('showMenu',
                                        restaurant_id=restaurant_id))
            # GET: render the edit form
            else:
                return render_template('editmenuitem.html',
                                       restaurant_id=restaurant_id,
                                       menu_id=menu_id,
                                       item=editedItem)
        # Error if not owner of restaurant
        else:
            # FIX: 400 was previously passed to json.dumps() (skipkeys)
            # instead of make_response(), so the error went out as 200.
            response = make_response(
                json.dumps('Malicious request detected. You are not authorized'
                           '. Your IP has been logged for security purposes.'),
                400)
            response.headers['Content-Type'] = 'application/json'
            return response
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete',
           methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """Delete a menu item; only the restaurant's owner may do so.

    GET renders a confirmation page; POST deletes the row and redirects to
    the menu. Non-owners receive a 400 JSON error; anonymous visitors are
    redirected to the login page.
    """
    # Reroute if not logged in
    if 'username' not in login_session:
        flash('You have to be logged in to delete a menu item.')
        return redirect('/login')
    else:
        # Grab restaurant and item to delete
        restaurant = session.query(Restaurant).filter_by(
            id=restaurant_id).one()
        itemToDelete = session.query(MenuItem).filter_by(id=menu_id).one()
        # Confirm user is owner of restaurant
        current_user = getUserInfo(login_session['user_id'])
        if current_user.id == restaurant.user_id:
            # POST: perform the deletion
            if request.method == 'POST':
                session.delete(itemToDelete)
                session.commit()
                flash('Menu item successfully deleted!')
                return redirect(url_for('showMenu',
                                        restaurant_id=restaurant_id))
            # GET: render the confirmation page
            else:
                return render_template('deleteMenuItem.html',
                                       item=itemToDelete)
        # Error if not owner of restaurant
        else:
            # FIX: 400 was previously passed to json.dumps() (skipkeys)
            # instead of make_response(), so the error went out as 200.
            response = make_response(
                json.dumps('Malicious request detected. You are not authorized'
                           '. Your IP has been logged for security purposes.'),
                400)
            response.headers['Content-Type'] = 'application/json'
            return response
# Server configuration
if __name__ == '__main__':
    # NOTE(review): the hard-coded secret key and debug=True are only
    # acceptable for local development — confirm this block is never used
    # in the deployed (WSGI) configuration.
    app.secret_key = 'shhhhhh-secret'
    app.debug = True
    app.run()
|
import torch
import torch.nn as nn
import math
from .DCNv2.dcn_v2 import DCN_ID
class DenseBlock(torch.nn.Module):
    """Fully-connected block: Linear -> optional norm -> optional activation.

    norm may be 'batch', 'instance' or None; activation may be 'relu',
    'prelu', 'lrelu', 'tanh', 'sigmoid' or None. Passing None skips the
    corresponding stage in forward().
    """
    def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
        super(DenseBlock, self).__init__()
        self.fc = torch.nn.Linear(input_size, output_size, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm1d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm1d(output_size)
        self.activation = activation
        # Dispatch table replaces the original if/elif chain
        builders = {
            'relu': lambda: torch.nn.ReLU(True),
            'prelu': torch.nn.PReLU,
            'lrelu': lambda: torch.nn.LeakyReLU(0.2, True),
            'tanh': torch.nn.Tanh,
            'sigmoid': torch.nn.Sigmoid,
        }
        if self.activation in builders:
            self.act = builders[self.activation]()

    def forward(self, x):
        out = self.fc(x)
        if self.norm is not None:
            out = self.bn(out)
        return self.act(out) if self.activation is not None else out
class ConvBlock(torch.nn.Module):
    """Conv block: Conv2d -> optional norm -> optional activation.

    The convolution uses He (kaiming-normal) weight initialization and,
    when present, a zero-initialized bias. norm may be 'batch', 'instance'
    or None; activation one of 'relu'/'prelu'/'lrelu'/'tanh'/'sigmoid'/None.
    """
    def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        torch.nn.init.kaiming_normal_(self.conv.weight)
        if bias:
            self.conv.bias.data.zero_()
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        # Dispatch table replaces the original if/elif chain
        builders = {
            'relu': lambda: torch.nn.ReLU(True),
            'prelu': torch.nn.PReLU,
            'lrelu': lambda: torch.nn.LeakyReLU(0.2, True),
            'tanh': torch.nn.Tanh,
            'sigmoid': torch.nn.Sigmoid,
        }
        if self.activation in builders:
            self.act = builders[self.activation]()

    def forward(self, x):
        out = self.conv(x)
        if self.norm is not None:
            out = self.bn(out)
        return self.act(out) if self.activation is not None else out
class DeconvBlock(torch.nn.Module):
    """Transposed-conv block: ConvTranspose2d -> optional norm -> optional activation.

    Uses He (kaiming-normal) weight initialization and, when present, a
    zero-initialized bias. norm may be 'batch', 'instance' or None;
    activation one of 'relu'/'prelu'/'lrelu'/'tanh'/'sigmoid'/None.
    """
    def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        torch.nn.init.kaiming_normal_(self.deconv.weight)
        if bias:
            self.deconv.bias.data.zero_()
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        # Dispatch table replaces the original if/elif chain
        builders = {
            'relu': lambda: torch.nn.ReLU(True),
            'prelu': torch.nn.PReLU,
            'lrelu': lambda: torch.nn.LeakyReLU(0.2, True),
            'tanh': torch.nn.Tanh,
            'sigmoid': torch.nn.Sigmoid,
        }
        if self.activation in builders:
            self.act = builders[self.activation]()

    def forward(self, x):
        out = self.deconv(x)
        if self.norm is not None:
            out = self.bn(out)
        return self.act(out) if self.activation is not None else out
class ResnetBlock(torch.nn.Module):
    """Residual block: conv -> norm -> act -> conv -> norm, plus identity,
    then a final activation.

    Note that the single `bn` module is shared by both convolutions, as in
    the original implementation. norm may be 'batch', 'instance' or None;
    activation one of 'relu'/'prelu'/'lrelu'/'tanh'/'sigmoid'/None.
    """
    def __init__(self, num_filter, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch'):
        super(ResnetBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
        self.conv2 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(num_filter)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(num_filter)
        self.activation = activation
        # Dispatch table replaces the original if/elif chain
        builders = {
            'relu': lambda: torch.nn.ReLU(True),
            'prelu': torch.nn.PReLU,
            'lrelu': lambda: torch.nn.LeakyReLU(0.2, True),
            'tanh': torch.nn.Tanh,
            'sigmoid': torch.nn.Sigmoid,
        }
        if self.activation in builders:
            self.act = builders[self.activation]()

    def forward(self, x):
        out = self.conv1(x)
        if self.norm is not None:
            out = self.bn(out)
        if self.activation is not None:
            out = self.act(out)
        out = self.conv2(out)
        if self.norm is not None:
            out = self.bn(out)
        # Skip connection
        out = out + x
        if self.activation is not None:
            out = self.act(out)
        return out
class ResnetBlock_fixup_init(torch.nn.Module):
    """Residual block with Fixup-style initialization.

    conv1 is kaiming-initialized then scaled down by num_residual**-0.5;
    conv2 is zero-initialized so the block starts as the identity. Scalar
    biases are added before each conv/activation and the residual branch
    is rescaled by a learnable multiplier.

    NOTE(review): forward() references conv1_bias/act1_bias/... and
    multiplier, which are only created when bias=True; with bias=False and
    norm=None it would raise AttributeError — presumably bias=True is
    always used. Confirm with callers.
    """
    def __init__(self, num_filter, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch', num_residual=20):
        super(ResnetBlock_fixup_init, self).__init__()
        self.conv1 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=False)
        self.conv2 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding, bias=False)
        torch.nn.init.kaiming_normal_(self.conv1.weight)
        # FIX: the original loop did `param = param / ...`, which only
        # rebinds the loop variable and never touches the weights, so the
        # Fixup depth scaling was a no-op. Scale the tensors in place.
        with torch.no_grad():
            for param in self.conv1.parameters():
                param.div_(num_residual ** 0.5)
        self.conv2.weight.data.zero_()
        if bias:
            # Fixup's scalar biases, applied before each conv/activation,
            # plus the learnable scale on the residual branch
            self.conv1_bias = nn.Parameter(torch.zeros(1))
            self.conv2_bias = nn.Parameter(torch.zeros(1))
            self.act1_bias = nn.Parameter(torch.zeros(1))
            self.act2_bias = nn.Parameter(torch.zeros(1))
            self.multiplier = nn.Parameter(torch.ones(1))
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(num_filter)
        elif norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(num_filter)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        residual = x
        if self.norm is not None:
            out = self.bn(self.conv1(x))
        else:
            out = self.conv1(x + self.conv1_bias)
        if self.activation is not None:
            out = self.act(out + self.act1_bias)
        if self.norm is not None:
            out = self.bn(self.conv2(out))
        else:
            out = self.conv2(out + self.conv2_bias)
        # Scale the residual branch, add the identity, final activation
        out = out * self.multiplier
        out = torch.add(out, residual)
        if self.activation is not None:
            out = self.act(out + self.act2_bias)
        return out
class UpBlock(torch.nn.Module):
    """Back-projection up-sampling block (DBPN-style).

    forward: h0 = up(x); l0 = down(h0); h1 = up(l0 - x); returns h1 + h0.
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
        super(UpBlock, self).__init__()
        # FIX: `activation` used to be passed positionally into the
        # sub-blocks' `bias` slot, so a non-default activation was silently
        # ignored and `bias` was never forwarded. Keyword arguments forward
        # both correctly; default behavior is unchanged.
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, x):
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0
class UpBlockPix(torch.nn.Module):
    """Back-projection up-sampling block using pixel-shuffle upsamplers.

    forward: h0 = up(x); l0 = down(h0); h1 = up(l0 - x); returns h1 + h0.
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, scale=4, bias=True, activation='prelu', norm=None):
        super(UpBlockPix, self).__init__()
        self.up_conv1 = Upsampler(scale, num_filter)
        # FIX: `activation` used to be passed positionally into ConvBlock's
        # `bias` slot, so a non-default activation was silently ignored and
        # `bias` was never forwarded. Defaults are unchanged.
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = Upsampler(scale, num_filter)

    def forward(self, x):
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0
class D_UpBlock(torch.nn.Module):
    """Dense back-projection up-sampling block: a 1x1 conv fuses the
    concatenated features of `num_stages` earlier stages, then applies the
    UpBlock projection (up, down, up on the residual).
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None):
        super(D_UpBlock, self).__init__()
        # FIX: `activation` used to be passed positionally into the
        # sub-blocks' `bias` slot, so a non-default activation was silently
        # ignored and `bias` was never forwarded. Defaults are unchanged.
        self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, x):
        x = self.conv(x)
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0
class D_UpBlockPix(torch.nn.Module):
    """Dense back-projection up-sampling block using pixel-shuffle
    upsamplers; a 1x1 conv first fuses `num_stages` concatenated inputs.
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, scale=4, bias=True, activation='prelu', norm=None):
        super(D_UpBlockPix, self).__init__()
        # FIX: `activation` used to be passed positionally into ConvBlock's
        # `bias` slot, so a non-default activation was silently ignored and
        # `bias` was never forwarded. Defaults are unchanged.
        self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, bias=bias, activation=activation, norm=None)
        self.up_conv1 = Upsampler(scale, num_filter)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.up_conv3 = Upsampler(scale, num_filter)

    def forward(self, x):
        x = self.conv(x)
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0
class DownBlock(torch.nn.Module):
    """Back-projection down-sampling block (DBPN-style).

    forward: l0 = down(x); h0 = up(l0); l1 = down(h0 - x); returns l1 + l0.
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
        super(DownBlock, self).__init__()
        # FIX: `activation` used to be passed positionally into the
        # sub-blocks' `bias` slot, so a non-default activation was silently
        # ignored and `bias` was never forwarded. Defaults are unchanged.
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, x):
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0
class DownBlockPix(torch.nn.Module):
    """Back-projection down-sampling block using a pixel-shuffle upsampler
    for the intermediate up-projection step.
    """
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, scale=4, bias=True, activation='prelu', norm=None):
        super(DownBlockPix, self).__init__()
        # FIX: `activation` used to be passed positionally into ConvBlock's
        # `bias` slot, so a non-default activation was silently ignored and
        # `bias` was never forwarded. Defaults are unchanged.
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)
        self.down_conv2 = Upsampler(scale, num_filter)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, bias=bias, activation=activation, norm=None)

    def forward(self, x):
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0
class D_DownBlock(torch.nn.Module):
    """Dense down-projection block: a 1x1 conv fuses `num_stages` concatenated
    inputs, then down-project -> up-project -> residual down-project."""

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None):
        # NOTE(review): `bias` is accepted but never used; `activation` is in
        # the 6th positional slot of ConvBlock/DeconvBlock -- verify signature.
        super(D_DownBlock, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, activation, norm=None)
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None)

    def forward(self, x):
        x = self.conv(x)              # fuse stage outputs down to num_filter channels
        l0 = self.down_conv1(x)       # downsample
        h0 = self.down_conv2(l0)      # project back up
        l1 = self.down_conv3(h0 - x)  # downsample the residual
        return l1 + l0
class D_DownBlockPix(torch.nn.Module):
    """Dense down-projection block whose up-projection step is a pixel-shuffle
    Upsampler (despite the `down_conv2` name)."""

    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, scale=4, bias=True, activation='prelu', norm=None):
        # NOTE(review): `bias` is accepted but never used; `activation` is in
        # the 6th positional slot of ConvBlock -- verify signature.
        super(D_DownBlockPix, self).__init__()
        self.conv = ConvBlock(num_filter*num_stages, num_filter, 1, 1, 0, activation, norm=None)
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None)
        # Misleading name: this is the *upsampling* step of the projection.
        self.down_conv2 = Upsampler(scale,num_filter)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None)

    def forward(self, x):
        x = self.conv(x)              # fuse concatenated stage outputs
        l0 = self.down_conv1(x)       # downsample
        h0 = self.down_conv2(l0)      # pixel-shuffle back up
        l1 = self.down_conv3(h0 - x)  # downsample the residual
        return l1 + l0
class PSBlock(torch.nn.Module):
    """Sub-pixel (PixelShuffle) upsampling block.

    A conv expands channels by scale_factor**2, PixelShuffle rearranges them
    into a feature map scale_factor times larger spatially, then optional
    normalization ('batch' / 'instance') and activation are applied.
    """

    def __init__(self, input_size, output_size, scale_factor, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch'):
        super(PSBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size * scale_factor**2, kernel_size, stride, padding, bias=bias)
        self.ps = torch.nn.PixelShuffle(scale_factor)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        else:
            # Bug fix: any other non-None value used to leave self.bn undefined
            # while forward() still called it (the `norm is not None` check),
            # crashing with AttributeError.  Treat unsupported values as "no norm".
            self.norm = None

        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()
        else:
            # Same fix for unsupported activation strings: skip instead of crashing.
            self.activation = None

    def forward(self, x):
        # conv -> pixel shuffle, then optional norm and activation.
        out = self.ps(self.conv(x))
        if self.norm is not None:
            out = self.bn(out)
        if self.activation is not None:
            out = self.act(out)
        return out
class Upsampler(torch.nn.Module):
    """Pixel-shuffle upsampler: repeats (conv to 4x channels -> PixelShuffle(2))
    log2(scale) times, then applies an optional activation.

    NOTE(review): only power-of-two scales are handled, since the loop count
    is int(math.log(scale, 2)); e.g. scale=3 yields a single 2x stage.
    """

    def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
        super(Upsampler, self).__init__()
        modules = []
        # One (conv, PixelShuffle(2)) pair per factor of two in `scale`.
        for _ in range(int(math.log(scale, 2))):
            modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
            modules.append(torch.nn.PixelShuffle(2))
            if bn: modules.append(torch.nn.BatchNorm2d(n_feat))
            #modules.append(torch.nn.PReLU())
        self.up = torch.nn.Sequential(*modules)

        # Final activation applied after the whole upsampling stack.
        self.activation = act
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        out = self.up(x)
        # NOTE(review): an unsupported `act` string leaves self.act undefined
        # and this branch raises AttributeError -- confirm intended inputs.
        if self.activation is not None:
            out = self.act(out)
        return out
class Upsample2xBlock(torch.nn.Module):
    """2x upsampling block with a selectable strategy:
    'deconv' (transposed conv), 'ps' (sub-pixel conv), or 'rnc'
    (nearest-neighbor resize followed by a conv)."""

    def __init__(self, input_size, output_size, bias=True, upsample='deconv', activation='relu', norm='batch'):
        super(Upsample2xBlock, self).__init__()
        scale_factor = 2
        # 1. Deconvolution (Transposed convolution)
        if upsample == 'deconv':
            self.upsample = DeconvBlock(input_size, output_size,
                                        kernel_size=4, stride=2, padding=1,
                                        bias=bias, activation=activation, norm=norm)
        # 2. Sub-pixel convolution (Pixel shuffler)
        elif upsample == 'ps':
            self.upsample = PSBlock(input_size, output_size, scale_factor=scale_factor,
                                    bias=bias, activation=activation, norm=norm)
        # 3. Resize and Convolution
        elif upsample == 'rnc':
            self.upsample = torch.nn.Upsample(scale_factor=scale_factor, mode='nearest'),
        # NOTE(review): any other `upsample` value leaves self.upsample unset,
        # so forward() raises AttributeError.
        elif upsample == 'rnc':
            self.upsample = torch.nn.Sequential(
                torch.nn.Upsample(scale_factor=scale_factor, mode='nearest'),
                ConvBlock(input_size, output_size,
                          kernel_size=3, stride=1, padding=1,
                          bias=bias, activation=activation, norm=norm)
            )

    def forward(self, x):
        out = self.upsample(x)
        return out
class PyramidModule(torch.nn.Module):
    """Three-level residual pyramid: ResnetBlocks at full, 1/2 and 1/4
    resolution repeatedly exchange information via strided-conv downsampling
    and deconv upsampling; a final conv fuses the full-resolution stream."""

    def __init__(self, num_inchannels, activation='prelu'):
        super(PyramidModule, self).__init__()
        # Level 1: full resolution, 5 stages.
        self.l1_1 = ResnetBlock(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_2 = ResnetBlock(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_3 = ResnetBlock(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_4 = ResnetBlock(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_5 = ResnetBlock(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Level 2: half resolution, double channels, 4 stages.
        self.l2_1 = ResnetBlock(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_2 = ResnetBlock(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_3 = ResnetBlock(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_4 = ResnetBlock(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Level 3: quarter resolution, quadruple channels, 3 stages.
        self.l3_1 = ResnetBlock(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l3_2 = ResnetBlock(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l3_3 = ResnetBlock(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Scale transitions (shared across stages).
        self.down1 = ConvBlock(num_inchannels, num_inchannels*2, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.down2 = ConvBlock(num_inchannels*2, num_inchannels*4, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.up1 = DeconvBlock(num_inchannels*2, num_inchannels, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.up2 = DeconvBlock(num_inchannels*4, num_inchannels*2, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.final = ConvBlock(num_inchannels, num_inchannels, 3, 1, 1, bias=True, activation=activation, norm=None)

    def forward(self, x):
        # out{level}_{stage}: each stage mixes its own level with the
        # neighbouring levels' previous-stage outputs.
        out1_1 = self.l1_1(x)
        out2_1 = self.l2_1(self.down1(out1_1))
        out3_1 = self.l3_1(self.down2(out2_1))
        out1_2 = self.l1_2(out1_1 + self.up1(out2_1))
        out2_2 = self.l2_2(out2_1 + self.down1(out1_2) + self.up2(out3_1))
        out3_2 = self.l3_2(out3_1 + self.down2(out2_2))
        out1_3 = self.l1_3(out1_2 + self.up1(out2_2))
        out2_3 = self.l2_3(out2_2 + self.down1(out1_3) + self.up2(out3_2))
        out3_3 = self.l3_3(out3_2 + self.down2(out2_3))
        out1_4 = self.l1_4(out1_3 + self.up1(out2_3))
        out2_4 = self.l2_4(out2_3 + self.down1(out1_4) + self.up2(out3_3))
        out1_5 = self.l1_5(out1_4 + self.up1(out2_4))
        final = self.final(out1_5)
        return final
class PyramidModule_fixup_init(torch.nn.Module):
    """Same three-level residual pyramid as PyramidModule, but built from
    ResnetBlock_fixup_init stages (fixup weight initialization variant)."""

    def __init__(self, num_inchannels, activation='prelu'):
        super(PyramidModule_fixup_init, self).__init__()
        # Level 1: full resolution, 5 stages.
        self.l1_1 = ResnetBlock_fixup_init(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_2 = ResnetBlock_fixup_init(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_3 = ResnetBlock_fixup_init(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_4 = ResnetBlock_fixup_init(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l1_5 = ResnetBlock_fixup_init(num_inchannels, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Level 2: half resolution, double channels, 4 stages.
        self.l2_1 = ResnetBlock_fixup_init(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_2 = ResnetBlock_fixup_init(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_3 = ResnetBlock_fixup_init(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l2_4 = ResnetBlock_fixup_init(num_inchannels*2, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Level 3: quarter resolution, quadruple channels, 3 stages.
        self.l3_1 = ResnetBlock_fixup_init(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l3_2 = ResnetBlock_fixup_init(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        self.l3_3 = ResnetBlock_fixup_init(num_inchannels*4, kernel_size=3, stride=1, padding=1, bias=True, activation=activation, norm=None)
        # Scale transitions (shared across stages).
        self.down1 = ConvBlock(num_inchannels, num_inchannels*2, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.down2 = ConvBlock(num_inchannels*2, num_inchannels*4, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.up1 = DeconvBlock(num_inchannels*2, num_inchannels, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.up2 = DeconvBlock(num_inchannels*4, num_inchannels*2, 4, 2, 1, bias=True, activation=activation, norm=None)
        self.final = ConvBlock(num_inchannels, num_inchannels, 3, 1, 1, bias=True, activation=activation, norm=None)

    def forward(self, x):
        # Identical dataflow to PyramidModule.forward.
        out1_1 = self.l1_1(x)
        out2_1 = self.l2_1(self.down1(out1_1))
        out3_1 = self.l3_1(self.down2(out2_1))
        out1_2 = self.l1_2(out1_1 + self.up1(out2_1))
        out2_2 = self.l2_2(out2_1 + self.down1(out1_2) + self.up2(out3_1))
        out3_2 = self.l3_2(out3_1 + self.down2(out2_2))
        out1_3 = self.l1_3(out1_2 + self.up1(out2_2))
        out2_3 = self.l2_3(out2_2 + self.down1(out1_3) + self.up2(out3_2))
        out3_3 = self.l3_3(out3_2 + self.down2(out2_3))
        out1_4 = self.l1_4(out1_3 + self.up1(out2_3))
        out2_4 = self.l2_4(out2_3 + self.down1(out1_4) + self.up2(out3_3))
        out1_5 = self.l1_5(out1_4 + self.up1(out2_4))
        final = self.final(out1_5)
        return final
# borrow from https://github.com/xinntao/EDVR
class PCDAlignment(nn.Module):
    """Alignment module using Pyramid, Cascading and Deformable convolution
    (PCD). It is used in EDVR.
    Ref:
        EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
    Args:
        num_feat (int): Channel number of middle features. Default: 64.
        deformable_groups (int): Deformable groups. Defaults: 8.
        activation (str): 'relu', 'prelu' or 'lrelu'. Default: 'prelu'.
    """

    def __init__(self, num_feat=64, deformable_groups=8, activation='prelu'):
        super(PCDAlignment, self).__init__()
        # Pyramid has three levels:
        # L3: level 3, 1/4 spatial size
        # L2: level 2, 1/2 spatial size
        # L1: level 1, original spatial size
        self.conv_l2 = nn.Sequential(
            ConvBlock(num_feat, num_feat, kernel_size=3, stride=2, padding=1, norm=None, activation=activation),
            ConvBlock(num_feat, num_feat, kernel_size=3, stride=1, padding=1, norm=None, activation=activation),
        )
        self.conv_l3 = nn.Sequential(
            ConvBlock(num_feat, num_feat, kernel_size=3, stride=2, padding=1, norm=None, activation=activation),
            ConvBlock(num_feat, num_feat, kernel_size=3, stride=1, padding=1, norm=None, activation=activation),
        )
        self.offset_conv1 = nn.ModuleDict()
        self.offset_conv2 = nn.ModuleDict()
        self.offset_conv3 = nn.ModuleDict()
        self.dcn_pack = nn.ModuleDict()
        self.feat_conv = nn.ModuleDict()
        # Pyramids: build per-level offset/feature convs from coarsest (l3) up.
        for i in range(3, 0, -1):
            level = f'l{i}'
            self.offset_conv1[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1,
                                                 1)
            if i == 3:
                self.offset_conv2[level] = nn.Conv2d(num_feat, num_feat, 3, 1,
                                                     1)
            else:
                # Levels l1/l2 also consume the upsampled offset from below.
                self.offset_conv2[level] = nn.Conv2d(num_feat * 2, num_feat, 3,
                                                     1, 1)
                self.offset_conv3[level] = nn.Conv2d(num_feat, num_feat, 3, 1,
                                                     1)
            self.dcn_pack[level] = DCN_ID(num_feat, num_feat, num_feat, 3, 1, padding=1, deformable_groups=deformable_groups)
            if i < 3:
                self.feat_conv[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1,
                                                  1)
        # Cascading dcn
        self.cas_offset_conv1 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
        self.cas_offset_conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.cas_dcnpack = DCN_ID(num_feat, num_feat, num_feat, 3, 1, padding=1, deformable_groups=deformable_groups)
        self.upsample = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=False)
        if activation == 'relu':
            self.act = nn.ReLU(True)
        elif activation == 'prelu':
            self.act = nn.PReLU()
        elif activation == 'lrelu':
            # Bug fix: `inpcale=True` was a typo for `inplace=True` and raised
            # TypeError whenever activation='lrelu' was requested.
            self.act = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        else:
            raise NotImplementedError

    def forward(self, base_feat, neigbor_feat):
        """Align neighboring frame features to the reference frame features.
        Args:
            base_feat (Tensor): Reference (base) frame features, (b, c, h, w).
            neigbor_feat (Tensor): Neighboring frame features, (b, c, h, w).
        Returns:
            Tensor: Aligned features, (b, c, h, w).
        """
        # Build 3-level feature pyramids for both frames.
        nbr_feat_l = [neigbor_feat]
        ref_feat_l = [base_feat]
        nbr_feat_l.append(self.conv_l2(nbr_feat_l[0]))
        nbr_feat_l.append(self.conv_l3(nbr_feat_l[1]))
        ref_feat_l.append(self.conv_l2(ref_feat_l[0]))
        ref_feat_l.append(self.conv_l3(ref_feat_l[1]))
        # Pyramids: predict offsets coarse-to-fine, warping with deformable conv.
        upsampled_offset, upsampled_feat = None, None
        for i in range(3, 0, -1):
            level = f'l{i}'
            offset = torch.cat([nbr_feat_l[i - 1], ref_feat_l[i - 1]], dim=1)
            offset = self.act(self.offset_conv1[level](offset))
            if i == 3:
                offset = self.act(self.offset_conv2[level](offset))
            else:
                offset = self.act(self.offset_conv2[level](torch.cat(
                    [offset, upsampled_offset], dim=1)))
                offset = self.act(self.offset_conv3[level](offset))
            feat = self.dcn_pack[level](nbr_feat_l[i - 1], offset)
            if i < 3:
                feat = self.feat_conv[level](
                    torch.cat([feat, upsampled_feat], dim=1))
            if i > 1:
                feat = self.act(feat)
            if i > 1:  # upsample offset and features
                # x2: when we upsample the offset, we should also enlarge
                # the magnitude.
                upsampled_offset = self.upsample(offset) * 2
                upsampled_feat = self.upsample(feat)
        # Cascading: one more refinement pass at full resolution.
        offset = torch.cat([feat, ref_feat_l[0]], dim=1)
        offset = self.act(
            self.cas_offset_conv2(self.act(self.cas_offset_conv1(offset))))
        feat = self.act(self.cas_dcnpack(feat, offset))
        return feat
class TSAFusion(nn.Module):
    """Temporal Spatial Attention (TSA) fusion module.
    Temporal: Calculate the correlation between center frame and
        neighboring frames;
    Spatial: It has 3 pyramid levels, the attention is similar to SFT.
        (SFT: Recovering realistic texture in image super-resolution by deep
        spatial feature transform.)
    Args:
        num_feat (int): Channel number of middle features. Default: 64.
        num_frame (int): Number of frames. Default: 5.
        center_frame_idx (int): The index of center frame. Default: 2.
    """

    def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2):
        super(TSAFusion, self).__init__()
        self.center_frame_idx = center_frame_idx
        # temporal attention (before fusion conv)
        self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
        # spatial attention (after fusion conv)
        self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
        self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
        self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1)
        self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1)
        self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
        self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1)
        self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.upsample = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=False)

    def forward(self, aligned_feat):
        """
        Args:
            aligned_feat (Tensor): Aligned features with shape (b, t, c, h, w).
        Returns:
            Tensor: Features after TSA with the shape (b, c, h, w).
        """
        b, t, c, h, w = aligned_feat.size()
        # temporal attention: embed the center frame and every frame, then use
        # per-pixel channel-dot-product similarity as frame weights.
        embedding_ref = self.temporal_attn1(
            aligned_feat[:, self.center_frame_idx, :, :, :].clone())
        embedding = self.temporal_attn2(aligned_feat.view(-1, c, h, w))
        embedding = embedding.view(b, t, -1, h, w)  # (b, t, c, h, w)
        corr_l = []  # correlation list
        for i in range(t):
            emb_neighbor = embedding[:, i, :, :, :]
            corr = torch.sum(emb_neighbor * embedding_ref, 1)  # (b, h, w)
            corr_l.append(corr.unsqueeze(1))  # (b, 1, h, w)
        corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1))  # (b, t, h, w)
        corr_prob = corr_prob.unsqueeze(2).expand(b, t, c, h, w)
        corr_prob = corr_prob.contiguous().view(b, -1, h, w)  # (b, t*c, h, w)
        # Weight each frame's features by its temporal attention map.
        aligned_feat = aligned_feat.view(b, -1, h, w) * corr_prob
        # fusion
        feat = self.lrelu(self.feat_fusion(aligned_feat))
        # spatial attention: pooled statistics at the original scale ...
        attn = self.lrelu(self.spatial_attn1(aligned_feat))
        attn_max = self.max_pool(attn)
        attn_avg = self.avg_pool(attn)
        attn = self.lrelu(
            self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1)))
        # ... plus a coarser pyramid level that is upsampled back in.
        attn_level = self.lrelu(self.spatial_attn_l1(attn))
        attn_max = self.max_pool(attn_level)
        attn_avg = self.avg_pool(attn_level)
        attn_level = self.lrelu(
            self.spatial_attn_l2(torch.cat([attn_max, attn_avg], dim=1)))
        attn_level = self.lrelu(self.spatial_attn_l3(attn_level))
        attn_level = self.upsample(attn_level)
        attn = self.lrelu(self.spatial_attn3(attn)) + attn_level
        attn = self.lrelu(self.spatial_attn4(attn))
        attn = self.upsample(attn)
        attn = self.spatial_attn5(attn)
        # Additive branch computed from pre-sigmoid attention.
        attn_add = self.spatial_attn_add2(
            self.lrelu(self.spatial_attn_add1(attn)))
        attn = torch.sigmoid(attn)
        # after initialization, * 2 makes (attn * 2) to be close to 1.
        feat = feat * attn * 2 + attn_add
        return feat
from sqlmodel import Session, select
from warehouse import engine
from warehouse.models import Customer
from warehouse.ultis import update_attr
def get_all():
    """Fetch every Customer record from the warehouse database."""
    with Session(engine) as session:
        return session.exec(select(Customer)).fetchall()
def get_by_id(customer_id: int):
    """Return the Customer with the given primary key, or None if absent.

    Bug fix: the original took no id and ran an unfiltered select, so
    one_or_none() raised MultipleResultsFound as soon as the table held
    more than one row.  (Callers named get_by_id are expected to pass an id.)
    """
    with Session(engine) as session:
        statement = select(Customer).where(Customer.id == customer_id)
        return session.exec(statement).one_or_none()
def create(customer: Customer):
    """Persist a new Customer and return it with DB-generated fields loaded."""
    with Session(engine) as session:
        session.add(customer)
        session.commit()
        # Refresh so generated values (e.g. the primary key) are populated.
        session.refresh(customer)
        return customer
def delete(customer_id: int):
    """Delete the Customer with the given id; return True iff a row was removed."""
    with Session(engine) as session:
        found = session.exec(
            select(Customer).where(Customer.id == customer_id)
        ).one_or_none()
        if found is None:
            return False
        session.delete(found)
        session.commit()
        return True
def update(customer_id: int, customer_update):
    """Apply `customer_update`'s attributes to the stored Customer and return it.

    Returns None when no Customer with `customer_id` exists.

    Bug fix: the original forgot `.one_or_none()`, so `old_customer` was the
    raw Result object, and update_attr/session.add operated on that instead
    of a Customer row.
    """
    with Session(engine) as session:
        statement = select(Customer).where(Customer.id == customer_id)
        old_customer = session.exec(statement).one_or_none()
        if old_customer is None:
            return None
        update_attr(old_customer, customer_update)
        session.add(old_customer)
        session.commit()
        session.refresh(old_customer)
        return old_customer
|
# #自己组装一个类
# class A():
# pass
# def say(self):
# print("saying")
# say(9)
#
# A.say = say
# a = A()
# a.say()
# 自定义类
# def say(self):
# print("Saying ")
# def talk(self):
# print("talking")
# A = type("AName",(object,),{"class_say":say,"class_talk":talk})
# a = A()
#
# a.class_say()
# a.class_talk()
# A.__dict__
from types import MethodType
class A():
    """Empty class used to demonstrate attaching a method to an instance."""
    pass

def say(self):
    """Standalone function to be bound as an instance method."""
    print("Saying")

a = A()
# Bug fix: MethodType(say, A) bound the function to the *class* object, so
# `self` inside say() was the class A rather than the instance.  Bind to `a`.
a.say = MethodType(say, a)
a.say()
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: read jet-energy-resolution (JER) payloads and scale
# factors from a local SQLite conditions file and dump them via the reader
# analyzers.
process = cms.Process("JERDBLocalReader")

process.load('Configuration.StandardSequences.Services_cff')
process.load("JetMETCorrections.Modules.JetResolutionESProducer_cfi")

from CondCore.DBCommon.CondDBSetup_cfi import *

# A single empty event is enough: we only want to read the conditions DB.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.source = cms.Source("EmptySource")

# Serve the JER records from the local SQLite file listed in `connect`.
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
        CondDBSetup,
        toGet = cms.VPSet(
            # Resolution
            cms.PSet(
                record = cms.string('JetResolutionRcd'),
                tag    = cms.string('JetResolutionObject_Summer15_V0_MC_JER_AK4PFchs'),
                label  = cms.untracked.string('AK4PFchs')
                ),
            # Scale factors
            cms.PSet(
                record = cms.string('JetResolutionScaleFactorRcd'),
                tag    = cms.string('JetResolutionObject_Summer12_V1_MC_JER_SF_AK5PFchs'),
                label  = cms.untracked.string('AK5PFchs')
                ),
            ),
        connect = cms.string('sqlite:Summer15_V0_MC_JER.db')
        )

# Dump the resolution payload (AK4PFchs) to stdout and to a file.
process.demo1 = cms.EDAnalyzer('JetResolutionDBReader',
        era = cms.untracked.string('Summer15_V0_MC_JER'),
        label = cms.untracked.string('AK4PFchs'),
        dump = cms.untracked.bool(True),
        saveFile = cms.untracked.bool(True)
        )

# Dump the scale-factor payload (AK5PFchs).
process.demo2 = cms.EDAnalyzer('JetResolutionScaleFactorDBReader',
        era = cms.untracked.string('Summer12_V1_MC_JER_SF'),
        label = cms.untracked.string('AK5PFchs'),
        dump = cms.untracked.bool(True),
        saveFile = cms.untracked.bool(True)
        )

process.p = cms.Path(process.demo1 * process.demo2)
|
'''
Leia quatro valores inteiros A, B, C e D. A seguir, calcule e mostre a diferença do produto de A e B pelo produto de C e D
segundo a fórmula: DIFERENCA = (A * B - C * D).
Entrada: Saida:
5 DIFERENCA = -26
6
7
8
'''
# Read the four integer operands, one per line.
first = int(input())
second = int(input())
third = int(input())
fourth = int(input())
# DIFERENCA = (A * B - C * D)
difference = (first * second) - (third * fourth)
print("DIFERENCA =", difference)
import pytest
import numpy as np
import torch
from itertools import product
from ding.model import MAPPO
from ding.torch_utils import is_differentiable
B = 32
agent_obs_shape = [216, 265]
global_obs_shape = [264, 324]
agent_num = 8
action_shape = 14
args = list(product(*[agent_obs_shape, global_obs_shape]))
@pytest.mark.unittest
@pytest.mark.parametrize('agent_obs_shape, global_obs_shape', args)
class TestVAC:
    """Gradient-flow tests for the MAPPO multi-agent actor-critic model."""

    def output_check(self, model, outputs, action_shape):
        """Reduce `outputs` to a scalar loss and verify it is differentiable
        with respect to every parameter of `model`."""
        if isinstance(action_shape, tuple):
            loss = sum([t.sum() for t in outputs])
        elif np.isscalar(action_shape):
            loss = outputs.sum()
        else:
            # Bug fix: previously `loss` was left unbound for any other
            # action_shape, raising a confusing UnboundLocalError below.
            raise TypeError('unsupported action_shape: {!r}'.format(action_shape))
        is_differentiable(loss, model)

    def test_vac(self, agent_obs_shape, global_obs_shape):
        data = {
            'agent_state': torch.randn(B, agent_num, agent_obs_shape),
            'global_state': torch.randn(B, agent_num, global_obs_shape),
            'action_mask': torch.randint(0, 2, size=(B, agent_num, action_shape))
        }
        model = MAPPO(agent_obs_shape, global_obs_shape, action_shape, agent_num)
        # Run a single forward pass and reuse its outputs (the original ran
        # the model twice just to read 'logit' and 'value' separately).
        actor_critic_output = model(data, mode='compute_actor_critic')
        logit = actor_critic_output['logit']
        value = actor_critic_output['value']
        outputs = value.sum() + logit.sum()
        self.output_check(model, outputs, action_shape)
        for p in model.parameters():
            p.grad = None
        logit = model(data, mode='compute_actor')['logit']
        self.output_check(model.actor, logit, model.action_shape)
        for p in model.parameters():
            p.grad = None
        value = model(data, mode='compute_critic')['value']
        assert value.shape == (B, agent_num)
        self.output_check(model.critic, value, action_shape)
|
# Compute final course grades for students based on percentage scores using a function.
score = float(input("Enter the numerical score: "))
def letterGrade(score):
    """Map a numerical score (percentage) to a letter grade.

    Grade bands are 5 points wide from 95 downward; anything below 55 is F.
    """
    # Each elif only runs when the previous >= test failed, so the original
    # upper-bound checks (e.g. `score >= 90 and score < 95`) were redundant.
    if score >= 95:
        return 'A+'
    elif score >= 90:
        return 'A'
    elif score >= 85:
        return 'A-'
    elif score >= 80:
        return 'B+'
    elif score >= 75:
        return 'B'
    elif score >= 70:
        return 'B-'
    elif score >= 65:
        return 'C+'
    elif score >= 60:
        return 'C'
    elif score >= 55:
        return 'C-'
    else:
        return 'F'
grade = letterGrade(score)
print("The letter grade for %.2f percent is %s" % (score, grade))
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
# reverse() 함수를 사용하기 위해 임포트
from django.core.urlresolvers import reverse
# tagging를 사용하기 위한 임포트
from tagging.fields import TagField
# Create your models here.
@python_2_unicode_compatible
class Post(models.Model):
    """Blog post model with tagging and slug-based detail URLs."""
    title = models.CharField('TITLE', max_length=50)
    # allow_unicode=True enables non-ASCII (e.g. Korean) slugs.
    slug = models.SlugField('SLUG', unique=True, allow_unicode=True, help_text='one word for title alias.')
    description = models.CharField('DESCRIPTION', max_length=100, blank=True, help_text='simple description text.abs')
    content = models.TextField('CONTENT')
    create_date = models.DateTimeField('Create Date', auto_now_add=True)
    modify_date = models.DateTimeField('Modify Date', auto_now=True)
    tag = TagField()

    class Meta:
        verbose_name = 'post'
        verbose_name_plural = 'posts'
        db_table = 'my_post'
        ordering = ('-modify_date',)  # list views sort by modify_date, newest first

    def __str__(self):
        return self.title

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        # reverse() is Django's built-in URL-pattern resolver.
        return reverse('blog:post_detail', args=(self.slug,))

    def get_previous_post(self):
        # Uses Django's auto-generated get_previous_by_<field> helper.
        return self.get_previous_by_modify_date()

    def get_next_post(self):
        return self.get_next_by_modify_date()
|
from unittest import TestCase
from tcontrol import frequency
import tcontrol as tc
class TestFrequency(TestCase):
    """Tests for the tcontrol.frequency module."""

    def test_nyquist(self):
        # Smoke test: should complete without raising.
        frequency.nyquist(tc.tf([0.5], [1, 2, 1, 0.5]), plot=False)

    def test_bode(self):
        # Smoke tests for zpk- and tf-built systems.
        frequency.bode(tc.zpk([], [0, -1, -2], 2), plot=False)
        frequency.bode(tc.tf([1], [1, 1]), plot=False)

    def test_evalfr(self):
        tf = tc.tf([1, -1], [1, 1, 1])
        # Bug fix: the original computed `evalfr(...) - expected` and discarded
        # the result, so nothing was actually asserted.
        # G(s) = (s - 1) / (s^2 + s + 1), so G(1+1j) = (3 + 2j) / 13.
        result = frequency.evalfr(tf, 1 + 1j)
        self.assertTrue(abs(result - (0.23077 + 0.15385j)) < 1e-4)
        ss = tc.ss([[1, 2], [4, 0]], [[0], [1]], [[1, 1]], 0)
        # Smoke test for state-space evaluation.
        frequency.evalfr(ss, 1 + 1j)
|
"""
ipfjes - Our Opal Application
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ipfjes.settings")
from opal.core import application, menus
class Application(application.OpalApplication):
    """Opal application definition for the ipfjes study."""
    javascripts = [
        'js/ipfjes/routes.js',
        'js/ipfjes/occupational_history.js',
        'js/ipfjes/asbestos_exposure_history.js',
        'js/opal/controllers/discharge.js',
        'js/ipfjes/soc_code.js',
        # Uncomment this if you want to implement custom dynamic flows.
        # 'js/ipfjes/flow.js',
    ]
    default_episode_category = "ipfjes"
    styles = [
        'css/ipfjes.css'
    ]

    @classmethod
    def get_menu_items(klass, user=None):
        """Return the navigation menu items, adding the admin link for staff."""
        items = [
            menus.MenuItem(
                href="/pathway/#/new", activepattern="/pathway/#/new",
                icon="fa-plus", display="Add participant",
                index=0
            ),
            menus.MenuItem(
                href="/#/list/interviews/", activepattern="/#/list/interviews",
                icon="fa-phone", display="Interview list",
                index=2
            )
        ]
        # Bug fix: `user` defaults to None, so guard before reading is_staff
        # (previously this raised AttributeError for anonymous calls).
        if user is not None and user.is_staff:
            items.append(
                menus.MenuItem(
                    href="/admin/", icon="fa-cogs", display="Admin",
                    index=999
                )
            )
        return items
|
# loops man
'''
for variable in list_name:
# Do stuff!
A variable name follows the for keyword; it will be assigned the value of each list item in turn.
'''
'''
iteration:
for item in list:
print item
for i in range(len(list)):
print list[i]
for somthing
printing 'somthing',
puts ' ' between words and not down a line
'''
# FOR
# running on indexes:
n = [3, 5, 7]
# Same output as indexing with range(len(n)), iterating the values directly.
for value in n:
    print('i am runnig on the indexes of a list: ', value)
n = [3, 5, 7]
def total(numbers):
    """Return the sum of a sequence of numbers (0 for an empty sequence)."""
    # Idiom: the built-in sum() replaces the manual index loop.
    return sum(numbers)
# more for
hobbies = []
# Add your code below!
# Collect four hobbies from the user and show the resulting list.
for _ in range(4):
    hobbies.append(input('Enter hobbie: '))
print(hobbies)
# DOUBLE FOR
# make list out of 2 list of 2 lists
n = [[1, 2, 3], [4, 5, 6, 7, 8, 9]]
def flatten(lists):
    """Return a single list containing every element of the sub-lists, in order."""
    # Idiom: a nested comprehension replaces the append loop.
    return [item for sublist in lists for item in sublist]
# Bug fix: a Python-2 style `print` split over two lines printed only a blank
# line and silently discarded flatten's result; actually print the list.
print(flatten(n))
# While - counter loop
# while condition:
# do this
# condition += 1
# while True:
# do this
# while else: while true do this but else = while false do this
'''
from random import randint
# Generates a number from 1 through 10 inclusive
random_number = randint(1, 10)
guesses_left = 3
# Start your game!
while guesses_left > 0:
guess = raw_input('enter guess: ')
if guess == random_number:
print 'You Win!'
break
guesses_left -= 1
else:
print 'You lose.'
'''
# BREAK
# CONTINUE
# brings you back immediately to the beginning of the loop without executing the rest of what is written in the loop after continue
# PASS
# do nothing, move forward in the loop (if the pass is in an if statement it ignores the if statement)
def string_match(a, b):
    """Count the positions where `a` and `b` contain the same length-2 substring."""
    limit = min(len(a), len(b)) - 1
    return sum(1 for i in range(limit) if a[i:i + 2] == b[i:i + 2])
print(string_match('ama', 'amaxa'))
strs = ['gagag', 'cruel', 'no', 'ahahaha']
strs.append('lala')
print(strs)
def str_list_returner(list_of_str):
    """Return the strings whose first and last characters match.

    Bug fix: empty strings are now skipped; the original raised IndexError
    on `dstring[0]` for an empty string.
    """
    return [s for s in list_of_str if s and s[0] == s[-1]]
print(str_list_returner(strs))
# FOR ELSE
'''
just like with while, for loops may have an else associated with them.
In this case, the else statement is executed after the for, but only if the for ends normally—that is, not with a break.
'''
|
import requests
import json
from xlwt import *
# Fetch yesterday's Valentia observations and export them to a spreadsheet.
url = "https://prodapi.metweb.ie/observations/valentia/yesterday"
data = requests.get(url).json()

# Create the workbook and write the header row.
workbook = Workbook()
sheet = workbook.add_sheet('weather')
headers = ["name", "temperature", "weather", "wind speed", "date"]
for col, title in enumerate(headers):
    sheet.write(0, col, title)

# One spreadsheet row per observation, in API order.
fields = ["name", "temperature", "weatherDescription", "windSpeed", "date"]
for row, observation in enumerate(data, start=1):
    for col, key in enumerate(fields):
        sheet.write(row, col, observation[key])

workbook.save('weather.xls')
#save it to a file
#file = 'weatherReport.json'
#write JSON data
#f = open(file, 'w')
#json.dump(data, f, indent=4) |
from slackclient import SlackClient
import copy
import sys
import time
import os
import re
import importlib
import InitModule
import ColorPrint
import password_crypt
sys.path.insert(0, './modules/')
sys.path.insert(0, './common/')
# wantname = ["REGEXBOT","CustomResponse"]
wantname = ["FBTOSLACK"]
class Slack_RTM:
    """Slack real-time-messaging bot: loads the configured modules and
    forwards every relevant incoming RTM event to each module's main()."""

    def __init__(self):
        self.colorPrint = ColorPrint.setPrint("Root")
        self.colorPrint("Test Unit", wantname)
        # Discover available modules and gather/decrypt their credentials.
        modules = InitModule.modulesGet()
        privacy = password_crypt.logIn(InitModule.requiresGet(modules))
        # self.colorPrint("Secret",privacy,color="WARNING")
        # Keep only the modules selected in `wantname` (plus the unnamed base).
        modules = [i for i in modules if i['name'] in ["", *wantname]]
        self.modules, base = InitModule.initSet(privacy, modules)
        self.slack = SlackClient(base.get('TOKEN'))
        # Event types that are noise for this bot: neither logged nor dispatched.
        self.ignoretype = [
            'user_typing',
            'reconnect_url',
            'pref_change',
            'presence_change',
            'emoji_changed',
            'desktop_notification']

    def startRTM(self):
        """Connect to Slack RTM and loop forever, reading and dispatching events."""
        if self.slack.rtm_connect():
            self.colorPrint("Start")
            while True:
                data = self.slack.rtm_read()
                # Log every event except the ignored noise types.
                if data and data[0]['type'] not in self.ignoretype:
                    self.colorPrint("GET", data)
                if not data:
                    # No event pending: back off briefly to avoid busy-polling.
                    time.sleep(1)
                else:
                    self.commandSelect(data[0])
        else:
            self.colorPrint("Connect Error!", "Please Reconnect", color="ERR")

    def start(self):
        """Public entry point; currently just runs the RTM loop."""
        self.startRTM()

    def commandSelect(self, data):
        """Hand a deep copy of the event to every loaded module, so modules
        cannot mutate each other's view of the payload."""
        for mod in self.modules:
            mod.main(copy.deepcopy(data))
if __name__ == "__main__":
    # Construct the bot and enter its event loop.
    Slack_RTM().start()
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Chilled_Water_Pump_Differential_Pressure_Dead_Band_Setpoint import Chilled_Water_Pump_Differential_Pressure_Dead_Band_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.CWS_Chilled_Water_Differential_Pressure_Dead_Band_Setpoint import CWS_Chilled_Water_Differential_Pressure_Dead_Band_Setpoint
class CWS_Chilled_Water_Pump_Differential_Pressure_Dead_Band_Setpoint(Chilled_Water_Pump_Differential_Pressure_Dead_Band_Setpoint,CWS_Chilled_Water_Differential_Pressure_Dead_Band_Setpoint):
    """rdfalchemy binding for the Brick 1.0.2 point class of the same name;
    inherits from both parent setpoint classes (appears auto-generated)."""
    # RDF type URI for this class in the Brick 1.0.2 namespace.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').CWS_Chilled_Water_Pump_Differential_Pressure_Dead_Band_Setpoint
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# Build script: compiles HWorld.pyx into a C extension importable as `helloworld`.
# Run with: python setup.py build_ext --inplace
setup(
  cmdclass = {'build_ext': build_ext},
  # .so name, [.pyx files,.pyx files]
  ext_modules = [Extension("helloworld", ["HWorld.pyx"])]
)
|
#!/usr/bin/python3
# Lists all cities of a given state (argv[4]) from the given MySQL database.
if __name__ == "__main__":
    import MySQLdb
    from sys import argv, exit

    if len(argv) != 5:
        print("Usage: ./5.py <username> <password> <database> <search>")
        exit(1)

    usr, pwd, dbe = argv[1], argv[2], argv[3]
    # Truncate at the first single quote as a belt-and-braces guard;
    # the parameterized query below is the real injection protection.
    sch = argv[4].split("'")[0]

    try:
        database = MySQLdb.Connect(user=usr, passwd=pwd, db=dbe, port=3306)
    except Exception as err:
        print(err)
        exit(1)

    cursor = database.cursor()
    # %s placeholder lets the driver escape the state name safely.
    cursor.execute("""
                   SELECT cities.name FROM cities
                   JOIN states ON cities.state_id = states.id
                   WHERE states.name = %s
                   ORDER BY cities.id ASC
                   """, (sch,))
    print(", ".join([row[0] for row in cursor.fetchall()]))
    cursor.close()
    database.close()
|
import urllib
import re

# Python 2 scraper: for each language listed in good.txt (CSV rows of
# name,selfname,code,url), fetch its index page and extract the 66 Bible
# chapter links into chaps/<code>.
for lang in file("good.txt"):
    (name, selfname, code, url) = lang.strip().split(',')
    page = urllib.urlopen("http://watchtower.org" + url + "index.htm")
#    if page.getcode() != 200:
#        print "FAIL:", code
#    else:
    # Each match is (directory, display name) for one chapter link.
    chaps = re.findall(r'\b([a-z0-9]+)/chapter(?:s|_\d\d\d).htm">([^<]*)</?a', page.read())
    if len(chaps) != 66:
        # A complete Bible has 66 books; anything else means the page layout differs.
        print "INVALID CHAPS #:", code
    out = file("chaps/" + code, "w")
    for (chap, name) in chaps:
        # Collapse non-breaking spaces before writing "dir=name" lines.
        name = name.replace("&nbsp;", " ").strip()
        out.write(chap + "=" + name + "\n")
    print "OK:", code
|
from django.conf import settings
from rest_framework import serializers
from openbook_auth.models import User, UserProfile
from openbook_categories.models import Category
from openbook_categories.validators import category_name_exists
from openbook_common.models import Badge
from openbook_common.serializers_fields.community import IsCommunityReportedField, CommunityPostsCountField
from openbook_common.serializers_fields.request import RestrictedImageFileSizeField
from openbook_common.serializers_fields.user import IsFollowingField, AreNewPostNotificationsEnabledForUserField
from openbook_common.validators import hex_color_validator
from openbook_communities.models import Community, CommunityMembership
from openbook_communities.serializers_fields import IsInvitedField, \
IsCreatorField, RulesField, ModeratorsField, CommunityMembershipsField, IsFavoriteField, AdministratorsField, \
AreNewPostNotificationsEnabledForCommunityField
from openbook_communities.validators import community_name_characters_validator, community_name_exists
class GetCommunitySerializer(serializers.Serializer):
    """Validates the community name supplied to the retrieve-community endpoint."""
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class DeleteCommunitySerializer(serializers.Serializer):
    """Validates the community name supplied to the delete-community endpoint."""
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class UpdateCommunitySerializer(serializers.Serializer):
    """Validates a community-update request: an existing community name plus
    any subset of updatable attributes (all optional)."""
    # The name of the community to update
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           validators=[community_name_characters_validator, community_name_exists],
                                           required=True)
    type = serializers.ChoiceField(choices=Community.COMMUNITY_TYPES, required=False)
    # New name for the community; only character validity is checked here.
    name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                 validators=[community_name_characters_validator], required=False)
    title = serializers.CharField(max_length=settings.COMMUNITY_TITLE_MAX_LENGTH, required=False)
    description = serializers.CharField(max_length=settings.COMMUNITY_DESCRIPTION_MAX_LENGTH, required=False,
                                        allow_blank=True)
    rules = serializers.CharField(max_length=settings.COMMUNITY_RULES_MAX_LENGTH, required=False, allow_blank=True)
    user_adjective = serializers.CharField(max_length=settings.COMMUNITY_USER_ADJECTIVE_MAX_LENGTH, required=False,
                                           allow_blank=True)
    users_adjective = serializers.CharField(max_length=settings.COMMUNITY_USERS_ADJECTIVE_MAX_LENGTH, required=False,
                                            allow_blank=True)
    invites_enabled = serializers.BooleanField(required=False)
    categories = serializers.ListField(
        required=False,
        min_length=settings.COMMUNITY_CATEGORIES_MIN_AMOUNT,
        max_length=settings.COMMUNITY_CATEGORIES_MAX_AMOUNT,
        # NOTE(review): child max_length uses HASHTAG_NAME_MAX_LENGTH for
        # category names — confirm this is the intended setting.
        child=serializers.CharField(max_length=settings.HASHTAG_NAME_MAX_LENGTH, validators=[category_name_exists]),
    )
    color = serializers.CharField(max_length=settings.COLOR_ATTR_MAX_LENGTH, required=False,
                                  validators=[hex_color_validator])
class UpdateCommunityAvatarSerializer(serializers.Serializer):
    """Validates an avatar upload (size-limited image) for an existing community."""
    avatar = RestrictedImageFileSizeField(allow_empty_file=False, required=True,
                                          max_upload_size=settings.COMMUNITY_AVATAR_MAX_SIZE)
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class UpdateCommunityCoverSerializer(serializers.Serializer):
    """Validates a cover-image upload (size-limited image) for an existing community."""
    cover = RestrictedImageFileSizeField(allow_empty_file=False, required=True,
                                         max_upload_size=settings.COMMUNITY_COVER_MAX_SIZE)
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class FavoriteCommunitySerializer(serializers.Serializer):
    """Validates the community name for favorite/unfavorite requests."""
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class TopPostCommunityExclusionSerializer(serializers.Serializer):
    """Validates the community name for excluding a community from top posts."""
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class GetCommunityCommunityCategorySerializer(serializers.ModelSerializer):
    """Read-only representation of a Category nested in community responses."""
    class Meta:
        model = Category
        fields = (
            'id',
            'name',
            'title',
            'color'
        )
class GetCommunityStaffUserBadgeSerializer(serializers.ModelSerializer):
    """Read-only badge info shown on community staff profiles."""
    class Meta:
        model = Badge
        fields = (
            'keyword',
            'keyword_description'
        )
class GetCommunityModeratorProfileSerializer(serializers.ModelSerializer):
    """Profile (avatar, name, badges) for a community staff member."""
    badges = GetCommunityStaffUserBadgeSerializer(many=True)

    class Meta:
        model = UserProfile
        fields = (
            'avatar',
            'name',
            'badges'
        )
class GetCommunityStaffUserSerializer(serializers.ModelSerializer):
    """Staff user (moderator/administrator) with profile and per-viewer
    follow/notification state."""
    profile = GetCommunityModeratorProfileSerializer(many=False)
    is_following = IsFollowingField()
    are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForUserField()

    class Meta:
        model = User
        fields = (
            'id',
            'username',
            'profile',
            'is_following',
            'are_new_post_notifications_enabled',
        )
class GetCommunityCommunityMembershipSerializer(serializers.ModelSerializer):
    """Membership record linking a user to a community, with role flags."""
    class Meta:
        model = CommunityMembership
        fields = (
            'id',
            'user_id',
            'community_id',
            'is_administrator',
            'is_moderator',
        )
class GetCommunityCommunitySerializer(serializers.ModelSerializer):
    """Full community representation for the retrieve-community response,
    including per-viewer state (invited/creator/favorite/reported) and
    nested staff and membership data."""
    categories = GetCommunityCommunityCategorySerializer(many=True)
    is_invited = IsInvitedField()
    are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForCommunityField()
    is_creator = IsCreatorField()
    is_favorite = IsFavoriteField()
    is_reported = IsCommunityReportedField()
    moderators = ModeratorsField(moderator_serializer=GetCommunityStaffUserSerializer)
    administrators = AdministratorsField(administrator_serializer=GetCommunityStaffUserSerializer)
    memberships = CommunityMembershipsField(community_membership_serializer=GetCommunityCommunityMembershipSerializer)
    rules = RulesField()

    class Meta:
        model = Community
        fields = (
            'id',
            'title',
            'name',
            'avatar',
            'cover',
            'members_count',
            'color',
            'description',
            'rules',
            'user_adjective',
            'users_adjective',
            'categories',
            'moderators',
            'administrators',
            'type',
            'invites_enabled',
            'is_invited',
            'are_new_post_notifications_enabled',
            'is_creator',
            'is_favorite',
            'is_reported',
            'memberships',
        )
class LegacyGetCommunityCommunitySerializer(serializers.ModelSerializer):
    """Legacy variant of GetCommunityCommunitySerializer kept for older
    clients; the only difference is the extra `posts_count` field."""
    categories = GetCommunityCommunityCategorySerializer(many=True)
    is_invited = IsInvitedField()
    are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForCommunityField()
    is_creator = IsCreatorField()
    is_favorite = IsFavoriteField()
    is_reported = IsCommunityReportedField()
    posts_count = CommunityPostsCountField()
    moderators = ModeratorsField(moderator_serializer=GetCommunityStaffUserSerializer)
    administrators = AdministratorsField(administrator_serializer=GetCommunityStaffUserSerializer)
    memberships = CommunityMembershipsField(community_membership_serializer=GetCommunityCommunityMembershipSerializer)
    rules = RulesField()

    class Meta:
        model = Community
        fields = (
            'id',
            'title',
            'name',
            'avatar',
            'cover',
            'members_count',
            'posts_count',
            'color',
            'description',
            'rules',
            'user_adjective',
            'users_adjective',
            'categories',
            'moderators',
            'administrators',
            'type',
            'invites_enabled',
            'is_invited',
            'are_new_post_notifications_enabled',
            'is_creator',
            'is_favorite',
            'is_reported',
            'memberships',
        )
class CommunityAvatarCommunitySerializer(serializers.ModelSerializer):
    """Minimal community payload returned after an avatar update."""
    class Meta:
        model = Community
        fields = (
            'id',
            'name',
            'avatar',
        )
class CommunityCoverCommunitySerializer(serializers.ModelSerializer):
    """Minimal community payload returned after a cover update."""
    class Meta:
        model = Community
        fields = (
            'id',
            'name',
            'cover',
        )
class FavoriteCommunityCommunitySerializer(serializers.ModelSerializer):
    """Minimal payload confirming the viewer's favorite state for a community."""
    is_favorite = IsFavoriteField()

    class Meta:
        model = Community
        fields = (
            'id',
            'is_favorite',
        )
class SubscribeToCommunityNotificationsSerializer(serializers.Serializer):
    """Validates the community name for notification subscribe/unsubscribe.

    `required=True` is DRF's default, but it is stated explicitly here for
    consistency with every other community-name serializer in this module.
    """
    community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
                                           allow_blank=False,
                                           required=True,
                                           validators=[community_name_characters_validator, community_name_exists])
class SubscribeToCommunityNotificationsCommunitySerializer(serializers.ModelSerializer):
    """Minimal payload confirming the viewer's notification state for a community."""
    are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForCommunityField()

    class Meta:
        model = Community
        fields = (
            'id',
            'are_new_post_notifications_enabled',
        )
|
from gensim.models import Word2Vec

# Load a trained gensim Word2Vec model and re-export its vectors in the
# plain word2vec text format (e.g. for the TensorFlow Embedding Projector).
path = "mat2vec/training/models/"
model = "processedCorpusSG100"
#model = "FlowModelNew"
print(path+model+'\n')
w2v_model = Word2Vec.load(path+model)
w2v_model.wv.save_word2vec_format(path+'CompleteProjectorModel')
|
# Print the first n rows of Pascal's triangle (starting from the [1, 1] row,
# matching the original script's output for n = 1..3).
#
# Fixes: the original printed l2 twice for n == 2 (instead of l1 then l2)
# and silently printed nothing for n > 3; rows are now generated for any n.
n = int(input("# of levels?: "))
row = [1, 1]
for _ in range(n):
    print(row)
    # Next row: 1, pairwise sums of the current row, 1.
    row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]
import time
import sys
import psutil
import threading
sys.path.append("../lib/")
import server as SC
import responses as RE
# Service table: [name, TCP port, canned-response handler] for each emulated
# service (honeypot-style listeners).
services = [["HTTP80", 80, RE.DLink_200],
            ["TR069", 7547, RE.generic],
            ["TOMCAT", 8080, RE.DLink_200],
            ["WSD", 5358, RE.generic],
            ["P500", 500, RE.generic]]
# pidSafe is currently unused; process maps service name -> its Thread.
pidSafe = {}
process = {}
def stop():
    """Signal every service thread to stop (via its do_run flag) and join it."""
    for service in process:
        print "Stopping process %s" % service
        # The server loop is expected to poll do_run; setting it False ends the loop.
        process[service].do_run = False
        process[service].join()
        print process[service].isAlive()
def start():
du_run = True
for service in services:
print "Starting Service %s ..." % service[0]
this = threading.Thread(target=SC.run, args=(service[0],service[1],service[2]))
this.start()
process.update({service[0]: this})
while (True):
start()
print str(process)
print "Kill all processes? (Y/N) "
keystroke = raw_input()
if keystroke is "Y" or keystroke is "y":
stop()
sys.exit()
|
# programmers lv2 다리를 지나는 트럭
# https://programmers.co.kr/learn/courses/30/lessons/42583
def solution(bridge_length, max_weight, truck_weights):
    """Return the total seconds for all trucks to cross the bridge.

    The bridge holds `bridge_length` unit slots and at most `max_weight`
    total truck weight; each second every truck advances one slot and at
    most one new truck may enter.

    :param bridge_length: number of unit-length slots on the bridge
    :param max_weight: maximum total weight allowed on the bridge
    :param truck_weights: truck weights in crossing order
    :return: elapsed time until the last truck has left the bridge

    Fixes: `deque` was used without being imported (NameError at call time);
    the caller's `truck_weights` list was reversed in place (now copied).
    """
    from collections import deque

    # Each slot holds the weight currently occupying it; popleft() is the
    # truck (or empty slot) leaving the bridge this second.
    bridge = deque([0] * bridge_length, maxlen=bridge_length)
    bridge_current_weight = 0
    time = 0
    # Reverse a copy so pop() yields trucks in arrival order without
    # mutating the caller's list.
    truck_weights = truck_weights[::-1]
    while truck_weights:
        time += 1
        next_truck = bridge.popleft()
        bridge_current_weight -= next_truck
        if bridge_current_weight + truck_weights[-1] > max_weight:
            # Next truck would overload the bridge; advance with an empty slot.
            bridge.append(0)
        else:
            truck = truck_weights.pop()
            bridge.append(truck)
            bridge_current_weight += truck
    # All trucks have entered; run the clock until the bridge empties.
    while bridge_current_weight > 0:
        time += 1
        next_truck = bridge.popleft()
        bridge_current_weight -= next_truck
    return time
def findDecision(obj):
    """Classify a weather observation as 'Yes'/'No' (play or not).

    obj is [Outlook, Temp., Humidity, Wind]; Temp. is never consulted.
    Decision tree (depth 2): Outlook first, then Humidity under Sunny and
    Wind under Rain; any unrecognised value falls through to the majority
    answer of that branch.
    """
    outlook = obj[0]
    humidity = obj[2]
    wind = obj[3]

    if outlook == 'Sunny':
        # High humidity is the only 'No' case under Sunny.
        return 'No' if humidity == 'High' else 'Yes'
    if outlook == 'Rain':
        # Weak wind is the only 'Yes' case under Rain.
        return 'Yes' if wind == 'Weak' else 'No'
    # Overcast and any unknown outlook both answer 'Yes'.
    return 'Yes'
|
import sys
from jinja2 import Template
from utils import LogTable
class TableRenderer:
    """Renders a LogTable into a LaTeX file via the log_template.tex Jinja2 template."""

    def __init__(self, table):
        # The table object exposed to the template as `table`.
        self.table = table

    def render(self, output_name):
        """Fill log_template.tex with self.table and write the result to output_name."""
        with open('log_template.tex') as inf:
            template = Template(inf.read())
        with open(output_name, 'w') as outf:
            outf.write(template.render(table=self.table))
if __name__ == '__main__':
    # CLI: <precision> <digits> <per_page> <output.tex>
    precision = int(sys.argv[1])
    digits = int(sys.argv[2])
    per_page = int(sys.argv[3])
    output_filename = sys.argv[4]

    # Table covers [10^(precision-1), 10^precision), i.e. all numbers with
    # `precision` significant digits.
    start = 10 ** (precision - 1)
    end = start * 10
    table = LogTable(start, end, digits, per_page)
    renderer = TableRenderer(table)
    renderer.render(output_filename)
|
import logging
import logging.handlers

# Package-wide logger: first component of the dotted module name.
logger = logging.getLogger(__name__.split('.')[0])


def setup_logging(syslog=False, debug=False):
    """Configure the package logger.

    Always attaches a stderr StreamHandler; optionally also a syslog handler.

    :param syslog: additionally send records to the local syslog daemon
                   via the /dev/log UNIX socket (Linux only)
    :param debug: log at DEBUG level instead of the default INFO

    Fix: removed a leftover debug `print(logger.name)` call.
    """
    formatter = logging.Formatter('[%(levelname)s] %(asctime)s - %(message)s')
    if debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Set log level to debug")
    else:
        logger.setLevel(logging.INFO)
    streamh = logging.StreamHandler()
    streamh.setFormatter(formatter)
    logger.addHandler(streamh)
    if syslog:
        syslogh = logging.handlers.SysLogHandler(address='/dev/log')
        syslogh.setFormatter(formatter)
        logger.addHandler(syslogh)
from django.db.models import Q
from rcreg_project.settings import (CUSTOM_SITE_ADMIN_EMAIL,
RC_GENERAL_ADMIN_EMAIL)
def all_challenge_notifications(request):
    """user notified via sidebar if:
    user has unaccepted or unsubmitted challenges, or
    submission window is closed due to too many challenges captained by user.

    Context processor: returns counts (or None) for the sidebar badges.
    """
    from django.conf import settings  # avoid circular import
    from scheduler.models import Challenge, Roster  # avoid circular import

    user = request.user
    NOTIFY_PENDING_CHALLENGES = None
    NOTIFY_UNACCEPTED_CHALLENGES = None
    ALL_CHALLENGE_NOTIFY = 0

    # NOTE(review): is_authenticated() called as a method — pre-Django-1.10 style.
    if user.is_authenticated():
        try:
            registrant_upcoming_con = (
                user.get_registrant_for_most_upcoming_con())
            if not Challenge.objects.submission_full(
                    registrant_upcoming_con.con):
                con = registrant_upcoming_con.con
                if con.can_submit_chlg():
                    # Note: Games will not be notified if submission is closed,
                    # but submisison button won'tbe disabled.
                    my_rosters = list(Roster.objects.filter(
                        captain=registrant_upcoming_con))
                    # Challenges involving my rosters that were never submitted.
                    NOTIFY_PENDING_CHALLENGES = len(list(
                        Challenge.objects.filter(Q(roster1__in=my_rosters) |
                        Q(roster2__in=my_rosters)).filter(submitted_on=None)))
                    # Challenges I captain where either side hasn't accepted yet.
                    NOTIFY_UNACCEPTED_CHALLENGES = len(list(
                        Challenge.objects.filter(Q(
                            roster1__captain=registrant_upcoming_con) |
                            Q(roster2__captain=registrant_upcoming_con)).
                            filter(Q(captain1accepted=False) |
                            Q(captain2accepted=False))))
        except:
            # Deliberate best-effort: e.g. user may have no registrant for the
            # upcoming con; the sidebar simply shows no notifications.
            pass

    if NOTIFY_PENDING_CHALLENGES:
        ALL_CHALLENGE_NOTIFY += NOTIFY_PENDING_CHALLENGES
    if NOTIFY_UNACCEPTED_CHALLENGES:
        ALL_CHALLENGE_NOTIFY += NOTIFY_UNACCEPTED_CHALLENGES
    if ALL_CHALLENGE_NOTIFY <= 0:
        # Templates treat None as "render no badge".
        ALL_CHALLENGE_NOTIFY = None

    return {'NOTIFY_PENDING_CHALLENGES': NOTIFY_PENDING_CHALLENGES,
            'NOTIFY_UNACCEPTED_CHALLENGES': NOTIFY_UNACCEPTED_CHALLENGES,
            'ALL_CHALLENGE_NOTIFY': ALL_CHALLENGE_NOTIFY
            }
def get_rc_admin_email(request):
    """Gets RC_GENERAL_ADMIN_EMAIL from settings, used in templates."""
    return {'RC_GENERAL_ADMIN_EMAIL': RC_GENERAL_ADMIN_EMAIL}
def get_site_admin_email(request):
    """Gets CUSTOM_SITE_ADMIN_EMAIL from settings, used in templates."""
    return {'CUSTOM_SITE_ADMIN_EMAIL': CUSTOM_SITE_ADMIN_EMAIL}
def get_upcoming_con_context(request):
    """Context processor: exposes the most upcoming con's start year/month."""
    from con_event.models import Con  # avoid circular import
    upcoming_con_context = Con.objects.most_upcoming()
    return {'up_con_year': upcoming_con_context.start.year,
            'up_con_month': upcoming_con_context.start.month
            }
def upcoming_days(request):
    """Gets most upcoming con, then days in that con.
    Returns list of Date objects.
    """
    from con_event.models import Con  # avoid circular import
    upcoming = Con.objects.most_upcoming()
    upcoming_days = upcoming.get_date_range()
    return {'upcoming_days': upcoming_days}
|
import cProfile
import pstats
from io import StringIO
import re
def profile(func):
    """A decorator that uses cProfile to profile a function.

    Prints cumulative-time stats to stdout after each call and passes the
    wrapped function's return value through unchanged.

    Fix: added functools.wraps so the wrapper keeps the profiled function's
    __name__/__doc__ (the original wrapper masked them).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        ret = func(*args, **kwargs)
        pr.disable()
        s = StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats()
        print(s.getvalue())
        return ret
    return wrapper
@profile
def loop(count):
    """Benchmark body for the profiler: builds a list comprehension `count` times.

    The commented-out lines are alternative workloads (append vs. regex)
    kept for quick A/B profiling comparisons.
    """
    # result = []
    # space_pattern = re.compile(r"  ")
    for i in range(count):
        # result.append(i)
        st = "  faei  jflaie  fealeij  f"
        # re.sub(r"  ", "", st)
        # space_pattern.sub("", st)
        a = [x for x in range(100)]

loop(1000000)
|
#!/usr/bin/python
import mkit.inference.ip_to_asn as ip2asn
import socket
import socket
import alexa
import json
from networkx.readwrite import json_graph
import networkx as nx
import pdb
from graph_tool.all import *
import os
import settings
# Number of top eyeball ASes to consider overall.
EYEBALL_THRES = 500

# Load every per-ASN graph file from the final graph directory into memory,
# keyed by the destination ASN (taken from the file name).
files = [ x for x in os.listdir( settings.GRAPH_DIR_FINAL ) \
              if os.path.isfile( os.path.join( settings.GRAPH_DIR_FINAL, x ) ) ]
files = [ os.path.join( settings.GRAPH_DIR_FINAL, f ) for f in files ]
all_graphs = {}
for f in files:
    asn = f.split( '/' )[ -1 ].split('.')[0]
    print "COMBINED graph for ASN", asn
    gr = load_graph(f, fmt="gt")
    # Normalize the graph: drop duplicate edges and self loops.
    remove_parallel_edges(gr)
    remove_self_loops(gr)
    overall_origins = {}
    all_graphs[int(asn)] = gr
def path_in_cache( src, dst ):
    """True if the cached graph for destination AS `dst` contains source AS `src`.

    Uses the module-level `all_graphs` map; vertex lookup is by the graph's
    `asn` vertex property.
    """
    if dst in all_graphs:
        gr = all_graphs[dst]
        src = find_vertex(gr, gr.vp.asn, src)
        if src:
            return True
    return False
# Parse the AS-population dump ("data/aspop"): each "[...]" record carries
# rank, AS identifier, country code and user count for one eyeball AS.
# Build the global top-eyeball list and a per-country list (max 20 ASes each).
#
# Fixes: removed a leftover `pdb.set_trace()` that halted every unattended
# run, and deleted a large region of dead commented-out analysis code.
src_ases_by_country = {}
top_eyeballs = []
f = open("data/aspop")
entries = list()
for table in f:
    records = table.split("[")
    for record in records:
        record = record.split("]")[0]
        entry = dict()
        try:
            entry["rank"] = int(record.split(",")[0])
            entry["as"] = record.split(",")[1].strip("\"")
            entry["country"] = ((record.split(",")[3]).split("=")[2]).split("\\")[0]
            entry["users"] = int(record.split(",")[4])
            if entry["rank"] > 5000:
                # Only the 5000 highest-ranked eyeball ASes are of interest.
                continue
            entries.append(entry)
        except (IndexError, ValueError):
            # Malformed record fragment; skip it.
            continue
f.close()
for entry in entries:
    if len(top_eyeballs) < EYEBALL_THRES:
        top_eyeballs.append(int(entry['as'].split('AS')[-1]))
    if entry['country'] not in src_ases_by_country:
        src_ases_by_country[entry['country']] = []
    if len(src_ases_by_country[entry['country']]) >= 20:
        # Cap each country at its 20 highest-ranked eyeball ASes.
        continue
    src_ases_by_country[entry['country']].append(int(entry['as'].split('AS')[-1]))
CONTENT_THRES = 10
websites = alexa.top_list(CONTENT_THRES)
content_asns = []
for w in websites:
try:
asn = int(ip2asn.ip2asn_bgp(socket.gethostbyname(w[1])))
content_asns.append(asn)
except:
print w
continue
#src_ases_by_country_new = {}
#for cc in src_ases_by_country:
# if len(src_ases_by_country[cc]) > 10:
# src_ases_by_country_new[cc] = src_ases_by_country[cc]
#src_ases_by_country = src_ases_by_country_new
print "getting overall coverage"
forward_count_overall = 0
rev_count_overall = 0
for src_eyeball in top_eyeballs:
for dst_content in content_asns:
if path_in_cache(src_eyeball, dst_content):
forward_count_overall += 1
print forward_count_overall
print forward_count_overall*1.0/(len(top_eyeballs)*len(content_asns))
country_coverage = {}
for code, eyeballs in src_ases_by_country.iteritems():
eyeballs = eyeballs[:EYEBALL_THRES]
forward_count = 0
for src in eyeballs:
for dst in content_asns:
if path_in_cache(src, dst):
forward_count += 1
for_fraction = forward_count*1.0/(len(eyeballs)*len(content_asns))
country_coverage[code] = for_fraction
with open("cipollino-verify/pc_coverage_country_all_alexa_fw_only_%s" % CONTENT_THRES, "w") as fi:
json.dump(country_coverage, fi)
CONTENT_THRES = 20
websites = alexa.top_list(CONTENT_THRES)
content_asns = []
for w in websites:
try:
asn = int(ip2asn.ip2asn_bgp(socket.gethostbyname(w[1])))
content_asns.append(asn)
except:
print w
continue
print "getting overall coverage"
forward_count_overall = 0
rev_count_overall = 0
for src_eyeball in top_eyeballs:
for dst_content in content_asns:
if path_in_cache(src_eyeball, dst_content):
forward_count_overall += 1
print forward_count_overall
print forward_count_overall*1.0/(len(top_eyeballs)*len(content_asns))
country_coverage = {}
for code, eyeballs in src_ases_by_country.iteritems():
eyeballs = eyeballs[:EYEBALL_THRES]
forward_count = 0
for src in eyeballs:
for dst in content_asns:
if path_in_cache(src, dst):
forward_count += 1
for_fraction = forward_count*1.0/(len(eyeballs)*len(content_asns))
country_coverage[code] = for_fraction
with open("cipollino-verify/pc_coverage_country_all_alexa_fw_only_%s" % CONTENT_THRES, "w") as fi:
json.dump(country_coverage, fi)
CONTENT_THRES = 30
websites = alexa.top_list(CONTENT_THRES)
content_asns = []
for w in websites:
try:
asn = int(ip2asn.ip2asn_bgp(socket.gethostbyname(w[1])))
content_asns.append(asn)
except:
print w
continue
print "getting overall coverage"
forward_count_overall = 0
rev_count_overall = 0
for src_eyeball in top_eyeballs:
for dst_content in content_asns:
if path_in_cache(src_eyeball, dst_content):
forward_count_overall += 1
print forward_count_overall
print forward_count_overall*1.0/(len(top_eyeballs)*len(content_asns))
country_coverage = {}
for code, eyeballs in src_ases_by_country.iteritems():
eyeballs = eyeballs[:EYEBALL_THRES]
forward_count = 0
for src in eyeballs:
for dst in content_asns:
if path_in_cache(src, dst):
forward_count += 1
for_fraction = forward_count*1.0/(len(eyeballs)*len(content_asns))
country_coverage[code] = for_fraction
with open("cipollino-verify/pc_coverage_country_all_alexa_fw_only_%s" % CONTENT_THRES, "w") as fi:
json.dump(country_coverage, fi)
CONTENT_THRES = 40
websites = alexa.top_list(CONTENT_THRES)
content_asns = []
for w in websites:
try:
asn = int(ip2asn.ip2asn_bgp(socket.gethostbyname(w[1])))
content_asns.append(asn)
except:
print w
continue
print "getting overall coverage"
forward_count_overall = 0
rev_count_overall = 0
for src_eyeball in top_eyeballs:
for dst_content in content_asns:
if path_in_cache(src_eyeball, dst_content):
forward_count_overall += 1
print forward_count_overall
print CONTENT_THRES, forward_count_overall*1.0/(len(top_eyeballs)*len(content_asns))
country_coverage = {}
for code, eyeballs in src_ases_by_country.iteritems():
eyeballs = eyeballs[:EYEBALL_THRES]
forward_count = 0
for src in eyeballs:
for dst in content_asns:
if path_in_cache(src, dst):
forward_count += 1
for_fraction = forward_count*1.0/(len(eyeballs)*len(content_asns))
country_coverage[code] = for_fraction
with open("cipollino-verify/pc_coverage_country_all_alexa_fw_only_%s" % CONTENT_THRES, "w") as fi:
json.dump(country_coverage, fi)
CONTENT_THRES = 50
websites = alexa.top_list(CONTENT_THRES)
content_asns = []
for w in websites:
try:
asn = int(ip2asn.ip2asn_bgp(socket.gethostbyname(w[1])))
content_asns.append(asn)
except:
print w
continue
print "getting overall coverage"
forward_count_overall = 0
rev_count_overall = 0
for src_eyeball in top_eyeballs:
for dst_content in content_asns:
if path_in_cache(src_eyeball, dst_content):
forward_count_overall += 1
print forward_count_overall
print CONTENT_THRES, forward_count_overall*1.0/(len(top_eyeballs)*len(content_asns))
country_coverage = {}
for code, eyeballs in src_ases_by_country.iteritems():
eyeballs = eyeballs[:EYEBALL_THRES]
forward_count = 0
for src in eyeballs:
for dst in content_asns:
if path_in_cache(src, dst):
forward_count += 1
for_fraction = forward_count*1.0/(len(eyeballs)*len(content_asns))
country_coverage[code] = for_fraction
with open("cipollino-verify/pc_coverage_country_all_alexa_fw_only_%s" % CONTENT_THRES, "w") as fi:
json.dump(country_coverage, fi)
pdb.set_trace()
|
# Slice syntax demo: [<start>:<stop>:<step>]
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[:])
print(test_list[::])
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[:4])
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[2:])
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[3:5])
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[-1:])  # last element only
print(test_list[:-1])  # everything except the last element
print(test_list[::-1])  # all elements in reverse order
test_list = ['https', 'www', 'python', 'izm', 'com']
print(test_list[:999])  # out-of-range stop is clamped, no IndexError
test_list = ['https', 'www', 'python', 'izm', 'com']
# Slice assignment accepts any iterable, here a tuple.
test_list[1:3] = ('WWW', 'PYTHON')
print(test_list)
# Read an n x n matrix (first line: n; then n comma-separated rows) and print
# both diagonals with their sums.
matrix = [[int(n) for n in input().split(", ")] for _ in range(int(input()))]
# Primary diagonal: top-left to bottom-right.
primary_diagonal = [matrix[r][r] for r in range(len(matrix))]
# Secondary diagonal: top-right to bottom-left.
secondary_diagonal = [matrix[r][len(matrix[r]) - r - 1] for r in range(len(matrix))]
print(f"First diagonal: {', '.join([str(n) for n in primary_diagonal])}. Sum: {sum(primary_diagonal)}")
print(f"Second diagonal: {', '.join([str(n) for n in secondary_diagonal])}. Sum: {sum(secondary_diagonal)}")
|
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
# Package metadata for the speed-tracker distribution.
setup(name='speed-tracker',
      version='1.0',
      description='Check and save internet speed',
      author='Kyle Ramey',
      author_email='hello@kyleramey.dev',
      url='https://kyleramey.dev',
      packages=find_packages(),
      install_requires=['speedtest-cli', 'aiohttp', 'google-api-python-client', 'google-auth-httplib2', 'google-auth-oauthlib', 'python-dotenv']
      )
|
from dataprocess import *
import matplotlib.pyplot as plt
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import cufflinks as cf
# Load the visa-application dataset and derive the aggregates used below.
df_certified, totaldf, Companyd, jobs, sorted_Company_count, sorted_Site_count = load_data_company('totalnew.csv')
new_input_new, new_list_name = data_for_bar(totaldf)
Salary, Salary_dict = Salary_Statistic(df_certified)
accecpted,denied,jobs_name_list,top_com_list = data_best_company_for_top10jobs(jobs,totaldf,df_certified)
################Jobs distribution plot for top 5 company#######################
# NOTE(review): hard-coded plotly API credential — move to env vars/config.
plotly.tools.set_credentials_file(username='Lanceljc', api_key='9om7ohKNz7yvpYZ7ehlJ')
cf.set_config_file(offline=False, world_readable=True, theme='ggplot')
# `pd` is presumably provided by `from dataprocess import *` — verify.
df = pd.DataFrame(new_input_new, columns=new_list_name)
df.iplot(kind='barh',barmode='stack', bargap=.1)
#################plot for distribution of Salary###############################
import matplotlib.mlab as mlab  # NOTE(review): imported but unused below
x = list(Salary)
num_bins = 40
plt.figure(figsize=(20,10))
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, facecolor='orange', alpha=0.5)
# add a 'best fit' line
plt.xlabel('Annual Salary',fontsize=20)
plt.ylabel('Counts',fontsize=20)
plt.title(r'Distribution of Salary',fontsize=32)
# Dashed vertical line at the mean salary.
plt.axvline(sum(x)/len(x), color='k', linestyle='dashed', linewidth=1)
plt.xlim((0, 430000))
plt.text(sum(x)/len(x) + sum(x)/len(x)/10,
         sum(x)/len(x) - sum(x)/len(x)/10,
         'Mean: {:.2f}'.format(sum(x)/len(x)))
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
####################Plot of best company for top 10 jobs#######################
import plotly.graph_objs as go
N = len(accecpted)
x = jobs_name_list[:N]#np.linspace(1, N, N)
# `np` is presumably provided by `from dataprocess import *` — verify.
y = np.array(accecpted)
x2 = top_com_list
y2 = np.array(denied)
# Label each bar as "<job>/// <top company>".
for i in range(N):
    x[i]=x[i]+'/// '+top_com_list[i]
df = pd.DataFrame({'x': x, 'y': y, 'y2':y2})
df.head()
# Stacked bars: accepted (y) on top of denied (y2) per job/company label.
data = [
    go.Bar(
        x=df['x'], # assign x as the dataframe column 'x'
        y=df['y']
    ),
    go.Bar(
        x=df['x'],
        y=df['y2']
    )
]
layout = go.Layout(
    barmode='stack',
    title='Stacked Bar with Pandas'
)
fig = go.Figure(data=data, layout=layout)
# IPython notebook
py.plot(fig)#, filename='pandas-bar-chart-layout')
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 21 14:51:19 2016
@author: caoxiang
"""
import numpy as np
import pandas as pd
# Weighted blend of several model submissions (XGBoost variants + Keras nets)
# into a single ensemble prediction file.
result1 = pd.read_csv('submission_10fold-average-xgb_fairobj_1130.892212_2016-12-12-10-50.csv')
result1_1133 = pd.read_csv('submission_5fold-average-xgb_1133.827593_2016-12-12-01-30.csv')
result1_1112 = pd.read_csv('submission_1fold-average-xgb__2016-11-26-21-18.csv')
result2 = pd.read_csv('submission_keras_5flolds_7bags_100epochs_1132.85267681_2016-12-12-08-51.csv')
#result1 = pd.read_csv('submission_1fold-average-xgb__2016-11-26-21-18.csv')
result3 = pd.read_csv('submission_keras_5flolds_1bags_100epochs_1139.02321274_2016-12-11-21-52.csv')
result4 = pd.read_csv('submission_keras_4flolds_4bags_20epochs_1136.67827642_2016-10-27-19-31.csv')
result_id = result1['id'].values
# Weights sum to 1.0: 0.25 + 0.15 + 0.15 + 0.25 + 0.25; the two Keras models
# inside the 4th term are themselves bag-weighted 7:1.
result_pred = (0.25 * result1['loss'].values
               + 0.15 * result1_1133['loss'].values
               + 0.15 * result1_1112['loss'].values
               + 0.25* (7*result2['loss'].values + result3['loss'].values)/8
               + 0.25* result4['loss'].values)
df = pd.DataFrame({'id': result_id, 'loss': result_pred})
df.to_csv('xgboost_nn_ensemble7.csv', index = False)
# -*- coding: utf-8 -*-
#
# License:
#
# Copyright (c) 2013 AlienVault
# All rights reserved.
#
# This package is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 dated June, 1991.
# You may not use, modify or distribute this program under any other version
# of the GNU General Public License.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
#
# On Debian GNU/Linux systems, the complete text of the GNU General
# Public License can be found in `/usr/share/common-licenses/GPL-2'.
#
# Otherwise you can read it here: http://www.gnu.org/licenses/gpl-2.0.txt
#
from augeas import Augeas
from utils import is_ipv4
from netinterfaces import get_network_interfaces
from configparsererror import AVConfigParserErrors
class SysConfig (object):
    def __init__ (self, system_ip = None, system_id = None, system_type = None):
        """
        Initialize this object with non system related data, like the OSSIM administration IP address.
        """
        # Keep the admin IP only if it is a valid dotted-quad IPv4 string.
        self.__system_ip = system_ip if is_ipv4(system_ip) else None
        self.__system_id = system_id
        self.__system_type = system_type
        # Augeas handle used for all config-file reads/writes.
        self.__augeas = Augeas()
        # Changes staged by set_* methods but not yet saved via apply_changes().
        self.__pending = {}
        # System data
        self.__net_ifaces = {}
        self.__hosts_entries = {}
        # Initialize pure system data.
        self.__reload_config__ ()
#
# Public methods
#
    def is_pending (self):
        """
        Are there pending changes?
        """
        # True when at least one change has been staged but not applied.
        return self.__pending != {}
    def get_pending (self):
        """
        Get which changes are pending
        """
        return self.__pending
    def get_pending_str (self):
        """
        Same as get_pending(), but in human format.
        """
        data = ''
        # One "[key]\nvalue" section per staged change (Python 2 iteration).
        for key, value in self.__pending.iteritems():
            data += '\n[%s]\n%s' % (key, value)
        return data
    def apply_changes (self):
        """
        Apply pending changes and reload configuration.

        Returns AVConfigParserErrors.ALL_OK on success (or when nothing is
        pending); a CANNOT_SAVE_SYSCONFIG error message if Augeas fails to
        write the files.
        """
        if not self.is_pending():
            return AVConfigParserErrors.ALL_OK
        try:
            # Flush all staged Augeas tree changes to the real config files.
            self.__augeas.save()
        except IOError, msg:
            return AVConfigParserErrors.get_error_msg(AVConfigParserErrors.CANNOT_SAVE_SYSCONFIG, str(msg))
        # Saved successfully: clear staged changes and re-read system state.
        self.__pending = {}
        self.__reload_config__ ()
        return AVConfigParserErrors.ALL_OK
### Related to /etc/network/interfaces
def get_net_iface_config_all (self, include_unconfigured = True, include_lo = False):
"""
Return a dict with all network interface configurations, in the form {'iface name': 'configuration parameters'}
"""
net_ifaces = self.__net_ifaces
if not include_unconfigured:
net_ifaces = dict([(x, y) for (x, y) in net_ifaces.items() if y['address'] != ''])
if not include_lo:
net_ifaces = dict([(x, y) for (x, y) in net_ifaces.items() if x != 'lo'])
return net_ifaces
def get_net_iface_config (self, iface):
"""
Return a dict with the network interface name 'iface' as key, and its configuration attributes as values.
"""
return {iface: self.__net_ifaces.get(iface)}
def set_net_iface_config (self, iface, address = None, netmask = None, gateway = None, \
dns_search= None, dns_nameservers = None, \
broadcast = None, network = None, \
is_new = True):
"""
Set the network configuration for the interface 'iface'.
"""
iface_path_list = self.__augeas.match("/files/etc/network/interfaces/iface[. = '%s']" % iface)
if iface_path_list == []:
if is_new:
self.__augeas.set("/files/etc/network/interfaces/iface[last() + 1]", iface)
self.__augeas.set("/files/etc/network/interfaces/auto[last() + 1]/1", iface)
iface_path = "/files/etc/network/interfaces/iface[last()]"
self.__augeas.set(iface_path + '/family', 'inet')
self.__augeas.set(iface_path + '/method', 'static')
self.__pending['%s family' % iface] = 'inet'
self.__pending['%s method' % iface] = 'static'
else:
return AVConfigParserErrors.get_error_msg(AVConfigParserErrors.NETWORK_INTERFACE_DOWN, additional_message=str(iface))
else:
iface_path = iface_path_list[0]
if address != None:
self.__augeas.set(iface_path + '/address', address)
self.__pending['%s address' % iface] = address
if netmask != None:
self.__augeas.set(iface_path + '/netmask', netmask)
self.__pending['%s netmask' % iface] = netmask
if gateway != None:
self.__augeas.set(iface_path + '/gateway', gateway)
self.__pending['%s gateway' % iface] = gateway
if dns_search != None:
self__augeas.set(iface_path + '/dns-search', dns_search)
self.__pending['%s domain' % iface] = dns_search
if dns_nameservers != None:
self.__augeas.set(iface_path + '/dns-nameservers', dns_nameservers)
self.__pending['%s nameserver(s)' % iface] = dns_nameservers
if broadcast != None:
self.__augeas.set(iface_path + '/broadcast', broadcast)
self.__pending['%s broadcast' % iface] = broadcast
if network != None:
self.__augeas.set(iface_path + '/network', network)
self.__pending['%s network' % iface] = network
return AVConfigParserErrors.ALL_OK
### Related to /etc/hosts
def get_hosts_config_all (self):
"""
Return a dict with all entries in /etc/hosts, in the form {'entry': 'configuration parameters'}
"""
return self.__hosts_entries
def get_hosts_config (self, entry):
"""
Return a dict with the /etc/hosts entry 'entry' as key, and its configuration attributes as values.
"""
return {str(entry): self.__hosts_entries.get(str(entry))}
def set_hosts_config (self, entry = "2", \
ipaddr = None, canonical = None, aliases = [], \
is_new = True):
"""
Set the configuracion for a /etc/hosts entry.
ToDo: be able to set new values.
"""
hosts_entry_path = "/files/etc/hosts/%s" % entry
hosts_entry_list = self.__augeas.match(hosts_entry_path)
if hosts_entry_list == []:
return AVConfigParserErrors.get_error_msg(AVConfigParserErrors.HOSTS_ENTRY_NOT_FOUND, additional_message=str(entry))
if ipaddr != None:
self.__augeas.set(hosts_entry_path + '/ipaddr', ipaddr)
self.__pending['host %s address' % entry] = ipaddr
if canonical != None:
self.__augeas.set(hosts_entry_path + '/canonical', canonical)
self.__pending['host %s canonical name' % entry] = canonical
if aliases != []:
for counter, alias in enumerate(aliases, start = 1):
self.__augeas.set(hosts_entry_path + '/alias[%d]' % counter, alias)
self.__pending['host %s alias[%d]' % (entry, counter)] = alias
return AVConfigParserErrors.ALL_OK
#
# Private methods
#
def __get_net_iface_config_all__ (self):
"""
Return a dict with all the network interface names as keys, and their configuration attributes as values.
"""
# Get all the configured and unconfigured interfaces
configured_ifaces = self.__augeas.match("/files/etc/network/interfaces/iface[*]")
all_ifaces = get_network_interfaces()
# Build the response dictionary.
response = {}
for iface_path in configured_ifaces:
name = self.__augeas.get(iface_path)
address = self.__augeas.get("%s/address" % iface_path)
netmask = self.__augeas.get("%s/netmask" % iface_path)
gateway = self.__augeas.get("%s/gateway" % iface_path)
dns_search = self.__augeas.get("%s/dns-search" % iface_path)
dns_nameservers = self.__augeas.get("%s/dns-nameservers" % iface_path)
broadcast = self.__augeas.get("%s/broadcast" % iface_path)
network = self.__augeas.get("%s/network" % iface_path)
response[name] = {'address': address if address != None else '',
'netmask': netmask if netmask != None else '',
'gateway': gateway if gateway != None else '',
'dns_search': dns_search if dns_search != None else '',
'dns_nameservers': dns_nameservers if dns_nameservers != None else '',
'broadcast': broadcast if broadcast != None else '',
'network': network if network != None else ''
}
for iface in all_ifaces:
if iface.name not in response.keys():
response[iface.name] = {'address': '', 'netmask': '', 'gateway': '', 'dns_search': '', 'dns_nameservers': '', 'broadcast': '', 'network': ''}
return response
def __get_hosts_config_all__ (self):
"""
Return a dict with all the entries in /etc/hosts as keys, and their configuration attributes as values.
"""
# Get all the configured and unconfigured interfaces
configured_hosts = self.__augeas.match("/files/etc/hosts/*")
# Build the response dictionary.
response = {}
for counter, entry_path in enumerate(configured_hosts, start = 1):
ipaddr = self.__augeas.get("%s/ipaddr" % entry_path)
canonical = self.__augeas.get("%s/canonical" % entry_path)
if self.__augeas.match("%s/alias" % entry_path) != None:
aliases = [self.__augeas.get(x) for x in self.__augeas.match("%s/alias" % entry_path)]
else:
aliases = []
response[str(counter)] = {'ipaddr': ipaddr if ipaddr != None else '',
'canonical': canonical if canonical != None else '',
'aliases': aliases
}
return response
def __reload_config__ (self):
self.__net_ifaces = self.__get_net_iface_config_all__ ()
self.__hosts_entries = self.__get_hosts_config_all__ ()
|
import discord
import json
import random
from discord.ext import commands #算是一種導入 class 或 function 的方法
import os
# Read the bot configuration (token etc.) from the settings file.
with open('setting.json', mode='r', encoding='utf-8') as config_file:
    settings = json.load(config_file)

# Every command must be prefixed with the '~' character.
bot = commands.Bot(command_prefix='~')


@bot.event
async def on_ready():
    """Print a status line once the bot has connected."""
    print(">> Bot is online <<")


@bot.command()
async def load(ctx, extension):
    """Load a cog from the cmds folder at runtime."""
    bot.load_extension(f'cmds.{extension}')
    await ctx.channel.send(f'Loaded {extension} done.')


@bot.command()
async def unload(ctx, extension):
    """Unload a previously loaded cog."""
    bot.unload_extension(f'cmds.{extension}')
    await ctx.channel.send(f'unLoaded {extension} done.')


@bot.command()
async def reload(ctx, extension):
    """Reload a cog in place."""
    bot.reload_extension(f'cmds.{extension}')
    await ctx.channel.send(f'reLoaded {extension} done.')


# Load every .py file in the cmds folder as an extension on startup.
for filename in os.listdir('./cmds'):
    if filename.endswith('.py'):
        bot.load_extension(f'cmds.{filename[:-3]}')

if __name__ == '__main__':
    bot.run(settings['TOKEN'])
'''
Problem 5 - Rope
Make use of the modified splay tree from the previous problem
# This works, but since it reuses code from the previous example it could definitely be sped up, and the process function could be condensed
'''
import sys
from collections import deque
# Vertex of a splay tree
#################################################################################################### splay tree implementation
class Vertex:
    """A splay-tree node keyed by position, storing a single character."""

    def __init__(self, key, letter, left, right, parent):
        # Assign each field individually for readability.
        self.key = key
        self.letter = letter
        self.left = left
        self.right = right
        self.parent = parent
def update(v):
    """Re-point the parent links of v's children back at v (no-op on None)."""
    if v is None:
        return
    for child in (v.left, v.right):
        if child is not None:
            child.parent = v
def smallRotation(v):
    # Single (zig) rotation: lift v one level, demoting its parent.
    # Pointer updates are order-sensitive; kept exactly as written.
    parent = v.parent
    if parent == None:
        return
    grandparent = v.parent.parent
    if parent.left == v:
        # v is a left child: right-rotate around parent.
        m = v.right
        v.right = parent
        parent.left = m
    else:
        # v is a right child: left-rotate around parent.
        m = v.left
        v.left = parent
        parent.right = m
    # Repair the children's parent pointers, then re-attach v to grandparent.
    update(parent)
    update(v)
    v.parent = grandparent
    if grandparent != None:
        if grandparent.left == parent:
            grandparent.left = v
        else:
            grandparent.right = v
def bigRotation(v):
    """Perform one double rotation (zig-zig or zig-zag) on v."""
    # Zig-zig when v and its parent are children on the same side.
    same_side = (v.parent.left is v) == (v.parent.parent.left is v.parent)
    if same_side:
        # Zig-zig: rotate the parent up first, then v.
        smallRotation(v.parent)
        smallRotation(v)
    else:
        # Zig-zag: rotate v twice.
        smallRotation(v)
        smallRotation(v)
# Makes splay of the given vertex and makes
# it the new root.
def splay(v):
if v == None:
return None
while v.parent != None:
if v.parent.parent == None:
smallRotation(v)
break
bigRotation(v)
return v
# Searches for the given key in the tree with the given root
# and calls splay for the deepest visited node after that.
# Returns pair of the result and the new root.
# If found, result is a pointer to the node with the given key.
# Otherwise, result is a pointer to the node with the smallest
# bigger key (next value in the order).
# If the key is bigger than all keys in the tree,
# then result is None.
def find(root, key):
v = root
last = root
next = None
while v != None:
if v.key >= key and (next == None or v.key < next.key):
next = v
last = v
if v.key == key:
break
if v.key < key:
v = v.right
else:
v = v.left
root = splay(last)
return (next, root)
def split(root, key):
    """Split the tree into (keys < key, keys >= key) and return both roots."""
    (successor, root) = find(root, key)
    if successor is None:
        # Every key is smaller than 'key': the whole tree goes to the left.
        return (root, None)
    # Splay the successor up; everything to its left is strictly smaller.
    right = splay(successor)
    left = right.left
    right.left = None
    if left is not None:
        left.parent = None
    update(left)
    update(right)
    return (left, right)
def merge(left, right):
    """Concatenate two trees; every key in left precedes every key in right."""
    if left is None:
        return right
    if right is None:
        return left
    # Splay the minimum of the right tree to its root, then hang left under it.
    smallest = right
    while smallest.left is not None:
        smallest = smallest.left
    smallest = splay(smallest)
    smallest.left = left
    update(smallest)
    return smallest
# Global root of the splay tree shared with the Rope class below.
root = None
################################################################
# Code that uses splay tree to solve the problem
class Rope:
    """String stored in a splay tree keyed by character position.

    process(i, j, k) cuts the substring at positions [i, j] and re-inserts
    it after position k; result() reads the whole string back in order.
    Operates on the module-level 'root'.
    """

    def __init__(self, s):
        global root
        self.s = s
        prev = None  # left Node
        p = None  # parent Node
        # Build a fully left-degenerate chain directly instead of inserting;
        # later splays rebalance it anyway.
        for i, c in enumerate(s):  # apparently faster to not use insert here but create manually, tree will look like 0-0-0-0-0-0-0
            cur = Vertex(i, c, prev, None, None)
            if prev:
                prev.parent = cur
            prev = cur
        root = cur

    def result(self, q, node):
        # In-order traversal: append each node's letter to the accumulator q.
        if node:
            q = self.result(q, node.left)
            q=q+node.letter
            q = self.result(q, node.right)
        return q

    def updateNode(self, top, i):
        # BFS over the subtree rooted at 'top', shifting every key by i.
        q = deque()
        q.append(top)
        while q:
            deq = q.popleft()
            deq.key = deq.key + i
            if deq.left:
                q.append(deq.left)
            if deq.right:
                q.append(deq.right)
        return top

    def process(self, i, j, k):
        # Cut [i, j] and paste it after position k.
        # NOTE(review): the key-shift arithmetic (k-j, j-k, i-k) and the
        # k == j adjustment are order-sensitive -- kept exactly as-is.
        global root
        (left, middle) = split(root, i) #split into elements that will be left of first index of current string
        (middle, right) = split(middle, j+1) #
        middle = self.updateNode(middle, k-j)
        if k==j:
            k+=1
        if k > j:
            (right, rightright) = split(right, k+1)
            right = self.updateNode(right,j-k)
            m1 = merge(left,right)
            m2 = merge(middle, rightright)
            root = merge(m1, m2)
        if k < i:
            if k>0: #could remove this but saves having to do a split, may have to move this
                (leftleft, left) = split(left, k)
            else:
                leftleft = None
            left = self.updateNode(left,i-k)
            m1 = merge(leftleft,middle)
            m2 = merge(left, right)
            root = merge(m1, m2)
# Read the initial string, then apply the cut-and-paste queries from stdin.
rope = Rope(sys.stdin.readline().strip())
query_count = int(sys.stdin.readline())
for _ in range(query_count):
    i, j, k = map(int, sys.stdin.readline().split())
    rope.process(i, j, k)
# Print the final string via an in-order traversal from the global root.
print(rope.result("", root))
import numpy as np
import os
from utils import *
# Collect per-experiment validation jaccard/dice histories into one wide
# table and dump it to a summary CSV.
root_path = "/data/share/frame_border_detection_db_v6/results/experiments_20200404_higher_features"
experiment_list = os.listdir(root_path)
experiment_list = [name for name in experiment_list if os.path.isdir(os.path.join(root_path, name))]
# Accumulator seeded with a 51x2 zero block that is sliced away again below.
loss_history_all = np.zeros((51, 2))
for exp in experiment_list:
    experiment_path = os.path.join(root_path, exp)
    metrics_log_path = os.path.join(experiment_path, "metrics_history.log")
    entries_np = csvReader(metrics_log_path)
    # Columns 10:12 presumably hold val_jacc / val_dice -- TODO confirm layout.
    loss_history = np.round(entries_np[:, 10:12].astype('float32'), 5)
    header = np.array([[exp + "_val_jacc", exp + "_val_dice"]])
    print(header.shape)
    print(loss_history.shape)
    if(len(loss_history) != len(loss_history_all)):
        # Pad shorter runs with zeros so every experiment has the same row count.
        tmp = np.zeros((loss_history_all.shape[0] - 1, loss_history.shape[1]))
        #print(tmp.shape)
        #print(tmp)
        tmp[:loss_history.shape[0]] = loss_history
        loss_history = tmp
        #print(loss_history.shape)
        #print(tmp.shape)
        #print(tmp)
        #print(loss_history.shape)
        #exit()
    loss_history = np.concatenate((header, loss_history), axis=0)
    print(loss_history.shape)
    loss_history_all = np.concatenate((loss_history_all, loss_history), axis=1)
    print(loss_history.shape)
    print(loss_history_all.shape)
# Drop the initial zero columns used to seed the accumulator.
# NOTE(review): indentation reconstructed from a flattened listing -- this
# slice must run once, after the loop; inside the loop it would discard the
# columns of earlier experiments. Confirm against the original file.
loss_history_all = loss_history_all[:, 2:]
print("----------------------------------")
#print(experiment_path)
#print(loss_history)
print(loss_history_all)
# Write the table row by row to the summary CSV.
for i in range(0, len(loss_history_all)):
    entry = loss_history_all[i].tolist()
    csvWriter(dst_folder="../", name="summary_hf_dice.csv", entries_list=entry)
    #exit()
import os
import torch
import pickle
import numpy as np
from mmcnn import MMCNN
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
# Restrict this process to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class Util():
    """Training / evaluation helpers for the MMCNN EEG classifier."""

    def __init__(self):
        # Track the key metrics of each training run.
        self.train_loss_log = []
        self.val_loss_log = []
        self.val_acc_log = []

    def train(self, model, device, train_loader, optimizer, epoch):
        """Run one training epoch and append the accumulated loss to the log."""
        criterion = nn.CrossEntropyLoss()
        model.train()
        train_loss = 0.0
        for step, (data, label) in enumerate(train_loader):
            train_data, train_label = data.to(device), label.to(device)
            optimizer.zero_grad()
            out = model(train_data)
            loss = criterion(out, train_label)
            loss.backward()
            optimizer.step()
            # NOTE(review): divides by each batch's size, so this accumulates
            # a per-sample loss across batches -- confirm that is intended.
            train_loss += loss.data / len(train_data)
        self.train_loss_log.append(float(train_loss.data))

    def val(self, model, device, test_loader, epoch):
        """Evaluate on the validation loader; returns (val_loss, val_acc)."""
        model.eval()
        predict = []
        label = []
        val_loss = 0.0
        criterion = nn.CrossEntropyLoss()
        with torch.no_grad():
            for step, (data, labels) in enumerate(test_loader):
                val_data, val_label = data.to(device), labels.to(device)
                out = model(val_data)
                loss = criterion(out, val_label)
                val_loss += loss.data / len(val_data) # len(val_data)
                # Class prediction = argmax over logits.
                output = torch.max(out.to('cpu'), 1)[1].numpy().tolist()
                val_label = val_label.to('cpu').numpy().tolist()
                predict = predict + output
                label = label + val_label
        val_acc = accuracy_score(predict, label)
        self.val_loss_log.append(float(val_loss.data))
        self.val_acc_log.append(float(val_acc))
        print("Epoch {} Average Validation loss: {} Validation acc: {}".format(
            epoch, val_loss, val_acc))
        return val_loss, val_acc

    def test(self, model, device, test_loader):
        """Run the final evaluation; returns (accuracy, f1, confusion_matrix)."""
        model.eval()
        predict = []
        label = []
        with torch.no_grad():
            for step, (data, labels) in enumerate(test_loader):
                test_data, test_label = data.to(device), labels.to(device)
                # NOTE(review): unsqueeze(1) makes labels column vectors, so
                # tolist() yields nested lists here (unlike val()) -- confirm.
                test_label = test_label.unsqueeze(1)
                out = model(test_data)
                output = torch.max(out.to('cpu'), 1)[1].numpy().tolist()
                test_label = test_label.to('cpu').numpy().tolist()
                predict = predict + output
                label = label + test_label
        accuracy = accuracy_score(predict, label)
        f1 = f1_score(label, predict, average='micro')
        maxtrix = confusion_matrix(label, predict)
        # sensitivity = tp / (tp + fn)
        # specificity = tn / (fp + tn)
        print("实验准确率为: {} F1: {}".format(accuracy, f1))
        return accuracy, f1, maxtrix

    def setup_seed(self, seed):
        '''
        Fix the random quantities of the training process (reproducibility);
        a None seed leaves all RNGs untouched.
        '''
        if seed == None:
            pass
        else:
            torch.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            np.random.seed(seed)
            # random.seed(seed)
            torch.backends.cudnn.deterministic = True

    def get_dataloader(self, Input_pickle):
        """Load (train, test) tensors from a pickle and wrap them in loaders."""
        with open(Input_pickle, 'rb') as f:
            train_data, train_label, test_data, test_label = pickle.load(f)
        train_data = torch.tensor(train_data, dtype=torch.float)
        test_data = torch.tensor(test_data, dtype=torch.float)
        print("train_data.shape: {} test_data.shape: {}".format(
            train_data.shape, test_data.shape))
        train_label = torch.tensor(train_label, dtype=torch.long)
        test_label = torch.tensor(test_label, dtype=torch.long)
        # Build the dataloaders.
        train_data = TensorDataset(train_data, train_label)
        test_data = TensorDataset(test_data, test_label)
        train_loader = DataLoader(train_data, shuffle=True,
                                  batch_size=64)
        test_loader = DataLoader(test_data, shuffle=True,
                                 batch_size=64)
        return train_loader, test_loader

    def run(self, input_pickle, lr, epochs, early_stop, seed):
        """Full train/validate loop with early stopping on validation accuracy."""
        self.setup_seed(seed)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        train_loader, test_loader = self.get_dataloader(input_pickle)
        model = MMCNN(channels=62).to(device)
        best_acc = 0
        best_loss = float('inf')
        plateau_period = 0
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.04)
        # Used to reduce the learning rate when validation loss plateaus.
        scheduler = ReduceLROnPlateau(optimizer, mode='min',
                                      patience=10, verbose=True,
                                      min_lr=0.000001, factor=0.5)
        for epoch in range(1, epochs + 1):
            self.train(model, device, train_loader, optimizer, epoch)
            val_loss, val_acc = self.val(model, device, test_loader, epoch)
            scheduler.step(val_loss)
            if best_acc < val_acc:
                best_acc = val_acc
                best_loss = val_loss
                plateau_period = 0
                torch.save(model, 'best.pth')
            elif best_acc >= val_acc:
                plateau_period += 1
            # Stop once validation has not improved for `early_stop`
            # consecutive epochs, then evaluate the best checkpoint.
            if plateau_period >= early_stop:
                model = torch.load('best.pth')
                accuracy, F1, maxtrix = self.test(model, device, test_loader)
                torch.save(model, 'best.pth')
                print('''\n Epoch {} >>>>>>>>> Best Validation loss: {} Validation Acc: {}
                Test Acc: {} \n Confusion Matrix: \n {} <<<<<<<<<'''.format(epoch, best_loss, best_acc, accuracy, maxtrix))
                break
        # return accuracy, F1, sen, spe
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 09:51:20 2018
@author: philippe
example of usage: python SC_bempp.py 450 meshes/cylinders_coarse.msh
"""
import bempp.api
from bempp.api.operators.potential import helmholtz as helmholtz_potential
import numpy as np
import time
import matplotlib.cm as cm
from matplotlib.patches import Circle
from matplotlib import pyplot as plt
import sys
import os
from scipy.sparse.linalg import gmres
# Importing bempp can be slow; signal that startup finished.
print("modules imported !")
def pressure_db(u, p0):
    """Convert a (complex) pressure field to dB relative to reference p0."""
    intensity_ratio = np.abs(u) ** 2 / p0 ** 2
    return 10 * np.log10(intensity_ratio)
class gmres_counter(object):
    """Callback object for scipy's gmres that records the residual history.

    Attributes:
        niter: number of iterations observed so far.
        residuals: list of residual norms passed by the solver.
    """

    def __init__(self, disp=True):
        self._disp = disp
        self.niter = 0
        self.residuals = []

    def __call__(self, rk=None):
        # Invoked by gmres once per iteration with the current residual.
        self.niter += 1
        self.residuals.append(rk)
        if self._disp:
            print('iteration %3i: residual = %s' % (self.niter, str(rk)))
def run_bempp_sc(freq,meshfile):
    """Solve exterior Helmholtz scattering on the given surface mesh.

    Assembles a Burton-Miller BEM formulation with an OSRC-NtD regularizer,
    solves it with GMRES, then evaluates and (optionally) plots the total
    pressure field on a polar circle and a planar grid.

    :param freq: frequency in Hz (string or float)
    :param meshfile: path to a Gmsh .msh surface mesh
    """
    ################# define parameters ###################
    freq = float(freq)
    omega = 2*np.pi*freq
    c = 343.0  # speed of sound (m/s)
    k = omega/c # wavenumber
    d = np.array([0., 1., 0]) # direction of the incident plane wave
    d /= np.linalg.norm(d)
    Ampli = 1.0  # source amplitude
    p0 = 2e-5  # reference pressure (Pa) for the dB conversion
    z_cut = 0.0  # z-plane in which the field maps are evaluated
    tol_gmres = 1e-5
    vis_figs = 1 # set 1 to visualize the results with matplotlib
    # location of the source
    xs = 0.6
    ys = -0.5
    zs = 0.5
    source = "monopole" # monopole or plane wave
    ############### define input pressure ################
    def u_inc(x):
        # Incident field evaluated at points x (3 x N array).
        if source == "plane_wave":
            return np.exp(1j * k * (d[0]*x[0] + d[1]*x[1] + d[2]*x[2]))
        elif source == "monopole":
            r = np.sqrt((x[0]-xs)**2+(x[1]-ys)**2+(x[2]-zs)**2)
            return Ampli*np.exp(1j * k * r)/r
        else:
            raise ValueError('not implemented yet')
    def dirichlet_fun(x, n, domain_index, result):
        # Plane-wave trace on the boundary.
        result[0] = np.exp(1j * k * np.dot(x, d))
    def neumann_fun(x, n, domain_index, result):
        # Plane-wave normal derivative on the boundary.
        result[0] = 1j * k * np.dot(n, d) * np.exp(1j * k * np.dot(x, d))
    def dirichlet_fun_monopole(x, n, domain_index, result):
        # Monopole trace on the boundary.
        r = np.sqrt((x[0]-xs)**2+(x[1]-ys)**2+(x[2]-zs)**2)
        result[0] = Ampli*np.exp(1j * k * r)/r
    def neumann_fun_monopole(x, n, domain_index, result):
        # Normal derivative of the monopole field on the boundary.
        r = np.sqrt((x[0]-xs)**2+(x[1]-ys)**2+(x[2]-zs)**2)
        result[0]= Ampli/r*(1j*k-1/r)*np.exp(1j*k*r)* (((x[0]-xs)*n[0] + (x[1]-ys)*n[1] + (x[2]-zs)*n[2])/r)
    ################# load mesh #########################################
    grid = bempp.api.import_grid(meshfile)
    # Continuous piecewise-linear (P1) boundary element space.
    space = bempp.api.function_space(grid, "P", 1)
    print("Mesh successfully loaded !")
    print("The space has {0} dofs".format(space.global_dof_count))
    ################# define BEM formulation ###################################
    identity = bempp.api.operators.boundary.sparse.identity(
        space, space, space)
    dlp = bempp.api.operators.boundary.helmholtz.double_layer(
        space, space, space, k)
    hyp = bempp.api.operators.boundary.helmholtz.hypersingular(
        space, space, space, k, use_slp=True)
    # OSRC NtD operator used to regularize the Burton-Miller formulation.
    ntd = bempp.api.operators.boundary.helmholtz.osrc_ntd(space, k)
    burton_miller = .5 * identity - dlp - ntd * hyp
    if source == "plane_wave":
        dirichlet_grid_fun = bempp.api.GridFunction(space, fun=dirichlet_fun)
        neumann_grid_fun = bempp.api.GridFunction(space, fun=neumann_fun)
        rhs_fun = dirichlet_grid_fun - ntd * neumann_grid_fun
    elif source == "monopole":
        dirichlet_grid_fun_monopole = bempp.api.GridFunction(space, fun=dirichlet_fun_monopole)
        neumann_grid_fun_monopole = bempp.api.GridFunction(space, fun=neumann_fun_monopole)
        rhs_fun = dirichlet_grid_fun_monopole - ntd * neumann_grid_fun_monopole
    else:
        raise ValueError('not implemented yet')
    # bem assembling
    print("Assembling BEM operator...")
    t = time.time()
    discrete_op = burton_miller.strong_form()
    coeffs = rhs_fun.coefficients
    elapsed = time.time() - t
    print("Bem operator assembled in %1.1f sec" %elapsed)
    # solve linear system
    t = time.time()
    counter = gmres_counter()
    # NOTE(review): 'restrt' is the legacy SciPy spelling of the restart
    # parameter -- confirm it matches the installed SciPy version.
    x, info = gmres(discrete_op, coeffs,x0 = coeffs, maxiter = 200,restrt=100,tol = tol_gmres,callback=counter)
    elapsed = time.time() - t
    print("Gmres solving time: %1.1f sec" %elapsed)
    It = counter.niter
    Residuals = np.asarray(counter.residuals)
    # NOTE(review): GridFunction is usually constructed from the function
    # space, not the discrete operator -- confirm this first argument.
    total_field = bempp.api.GridFunction(discrete_op, coefficients=x)
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Post processing %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    ############ compute results on a polar grid ###########
    theta = np.linspace(0, 2 * np.pi, 600)
    R = 1.45  # radius of the evaluation circle
    xR = 0.6
    yR = 0.45
    points = np.array([xR + R*np.cos(theta), yR + R*np.sin(theta), z_cut*np.ones(len(theta))])
    slp_pot_polar = helmholtz_potential.double_layer(
        space, points, k)
    # Total field = incident + scattered.
    res_polar = u_inc(points) + slp_pot_polar.evaluate(total_field)
    u_polar = np.abs(res_polar.flat)
    u_in = u_inc(points)
    if vis_figs == 1:
        plt.figure(figsize=(10, 8))
        plt.polar(theta, u_polar,"r-",linewidth=2.0)
        plt.polar(theta, np.abs(u_in),"g-.",linewidth=2.0)
    ############ compute results on grid #################
    t = time.time()
    print("Computing the external field...")
    Nx = 100
    Ny = 100
    xmin, xmax, ymin, ymax = [-0.5, 1.7,-0.5,2.2]# limits of the pressure map #0.5
    plot_grid = np.mgrid[xmin:xmax:Nx*1j , ymin:ymax:Ny*1j] #* 1j]
    points_grid = np.vstack((plot_grid[0].ravel(),
                             plot_grid[1].ravel(),
                             z_cut*np.ones(plot_grid[0].size)))
    u_evaluated = np.zeros(points_grid.shape[1], dtype=np.complex128)
    u_evaluated[:] = np.nan
    x, y, z = points_grid
    slp_pot = helmholtz_potential.double_layer(
        space, points_grid, k)
    res = u_inc(points_grid) + slp_pot.evaluate(total_field)
    u_evaluated = res.reshape((Nx, Ny))
    elapsed = time.time() - t
    print("Time: %1.1f sec" %elapsed)
    ############ plot results on grid #################
    if vis_figs == 1:
        fig,ax = plt.subplots(1,figsize=(10, 8))
        ax.set_aspect('equal')
        cmap = cm.magma
        mini = 35  # dB colour scale lower bound
        maxi = 110  # dB colour scale upper bound
        levels = np.linspace(mini,maxi,24) #maxi-mini+1
        Z = pressure_db(u_evaluated,p0)
        p = ax.contourf(x.reshape((Nx, Ny)), y.reshape((Nx, Ny)), Z, levels,
                        cmap=cm.get_cmap(cmap, len(levels)))
        p2 = ax.contour(x.reshape((Nx, Ny)), y.reshape((Nx, Ny)), Z, p.levels, colors='white',linewidths=0.5,linestyles='solid',alpha=0.4)
        # Overlay the scatterer cross-sections: a 5x4 lattice of cylinders.
        a = 0.3 # lattice constant
        x_coor = np.array([0.0,a,2*a,3*a,4*a])
        x_coor = np.repeat(x_coor,4)
        y_coor = np.array([0.0,0.3,0.6,0.9])
        y_coor = np.tile(y_coor,5)
        R_circ = 0.1  # cylinder radius
        for i in range(len(x_coor)):
            ax.add_patch(Circle((x_coor[i],y_coor[i]),R_circ*(1+0.01),color="white"))
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_title("Total Pressure map, solution in plane z=%1.1f" %z_cut)
        cbar = fig.colorbar(p)
        cbar.set_label('Pressure (dB)', rotation=90)
        plt.show()
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if __name__ == "__main__":
    # 'resource' is POSIX-only; used to report peak memory at the end.
    import resource
    if len(sys.argv) > 2:
        print("***************************************************************")
        print("************* bempp for high frequency scattering *************")
        print("***************************************************************")
        print("Running case " + str(sys.argv[2]) )
        print("Running frequency " + str(sys.argv[1]) + " Hz")
        run_bempp_sc(sys.argv[1],sys.argv[2])
        print("frequency " +str(sys.argv[1]) + " Hz finished")
        # ru_maxrss is in kilobytes on Linux, hence the /1000 below.
        used_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        print("used memory: " +str(used_mem/1000)+ " Mb")
        print("---------------------------------------------------------------")
        print("---------------------------------------------------------------")
    else:
        # NOTE(review): sys.exit() already raises SystemExit, so the 'raise'
        # is redundant (sys.exit never returns) -- harmless but odd.
        raise sys.exit("usage: python " +str(sys.argv[0])+ " <frequency> <path_to_the_mesh_file.msh>")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# mcp3008_lm35.py - read an LM35 on CH0 of an MCP3008 on a Raspberry Pi
# mostly nicked from
# http://jeremyblythe.blogspot.ca/2012/09/raspberry-pi-hardware-spi-analog-inputs.html
import spidev
import time
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import GPIO_PORT
# Open the SPI device (bus, chip-select 0).
# NOTE(review): GPIO_PORT.ANALOG_OUTPUT is used here as the SPI bus number
# AND below as the ADC channel -- confirm both uses are intentional.
spi = spidev.SpiDev()
spi.open(GPIO_PORT.ANALOG_OUTPUT, 0)
def readadc(adcnum):
    # read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7);
    # returns -1 for an out-of-range channel number.
    if not 0 <= adcnum <= 7:
        return -1
    # Command bytes: start bit, then single-ended mode + channel in the top
    # nibble. '+' binds tighter than '<<', so this is (8 + adcnum) << 4.
    reply = spi.xfer2([1, 8 + adcnum << 4, 0])
    # The 10-bit result spans the low 2 bits of byte 1 and all of byte 2.
    return ((reply[1] & 3) << 8) + reply[2]
# Sample the configured channel and convert the raw 10-bit reading to volts.
value = readadc(GPIO_PORT.ANALOG_OUTPUT)
# assumes a 4.7 V ADC reference -- TODO confirm (3.3 V is typical on a Pi)
volts = (value * 4.7) / 1024
# LM35 outputs 10 mV per degree Celsius.
temperature = volts / (10.0 / 1000)
print ("Temperature: %4.1f °C" % (temperature))
|
__author__ = 'lukas'
from sklearn.metrics import accuracy_score
def optimize_clustering(sub_dir, inputs):
    # Run an iterated feature-selection + clustering + SVM classification for
    # one subject and write the result as a tab-separated CSV.
    # NOTE(review): Python 2 code; svm, cPickle, h5py, np, sss, glob etc. are
    # imported only inside the __main__ block below, so this function works
    # only when the module is executed as a script -- confirm.
    #
    # :param sub_dir: (header_path, data_path) tuple for one subject
    # :param inputs: dict with iterations, n_test, fs_method, fs_arg,
    #                cluster_min
    iterations = inputs['iterations']
    n_test = inputs['n_test']
    fs_method = inputs['fs_method']
    fs_arg = inputs['fs_arg']
    cluster_min = inputs['cluster_min']
    print "performing analysis with zval = %f and cluster_min = %i" % (fs_arg, cluster_min)
    # Definition of classifier
    clf = svm.SVC(kernel='linear')
    # Unpacking subject-data into header and actual data
    header_path, data_path = sub_dir
    mvp = cPickle.load(open(header_path))
    mvp.data = h5py.File(data_path, 'r')['data'][:]
    # Containers for classification data, tracked over iterations
    conf_mat = np.zeros((mvp.n_class, mvp.n_class))
    clusters = np.zeros(iterations)
    n_features = np.sum(mvp.mask_index)
    fs_data = {'count': np.zeros(n_features),
               'score': np.zeros(n_features)}
    vox_score = np.zeros(n_features)
    vox_idx = np.zeros(vox_score.shape)
    cluster_cleanup = False
    # Stratified shuffle-split cross-validation folds.
    folds = sss(mvp.num_labels, iterations, n_test * mvp.n_class,
                random_state=0)
    skip = 0
    accuracy = np.zeros(iterations)
    for i, (train_idx, test_idx) in enumerate(folds):
        print "iteration %i" % (i+1)
        # Index data (X) and labels (y)
        train_data = mvp.data[train_idx, :]
        test_data = mvp.data[test_idx, :]
        train_labels = np.asarray(mvp.num_labels)[train_idx]
        test_labels = np.asarray(mvp.num_labels)[test_idx]
        selector = fs_method(fs_arg)
        selector.fit(train_data, train_labels)
        # Cluster feature selection, if specified
        if np.sum(selector.idx) == 0:
            # No feature survived selection: abort this parameter setting.
            final_score = 0
            clusters = 0
            fs_success = False
            cluster_success = False
            skip = 1
            break
        test_demean_clust = False
        inpt = {'mvp': mvp, 'train_data': train_data,
                'test_data': test_data, 'fs_arg': fs_arg,
                'cluster_min': cluster_min, 'selector': selector,
                'fs_data': fs_data, 'vox_idx': vox_idx,
                'cluster_cleanup': cluster_cleanup,
                'train_labels': train_labels, 'test_demean_clust': test_demean_clust}
        # Cluster data & return averaged (if not cluster_cleanup) ftrs
        output = clustercorrect_feature_selection(**inpt)
        train_data, test_data, cl_idx, fs_data, vox_idx = output
        if train_data.shape[1] == 0:
            # Feature selection worked but clustering removed everything.
            final_score = 0
            clusters = 0
            fs_success = True
            cluster_success = False
            skip = 1
            break
        clusters[i] = train_data.shape[1]
        clf.fit(train_data, train_labels)
        test_pred = clf.predict(test_data)
        accuracy[i] = accuracy_score(test_labels, test_pred)
    if skip == 0:
        final_score = np.mean(accuracy)
        fs_success = True
        cluster_success = True
    # Write out classification results as pandas dataframe
    df = {'sub_name': mvp.subject_name,
          'zval': fs_arg,
          'cluster_min': cluster_min,
          'n_clust': np.round(np.mean(clusters), 3),
          'cluster_success': cluster_success,
          'fs_success': fs_success,
          'score': np.round(final_score, 3)}
    df = pd.DataFrame(df, index=[0])
    fn = opj(os.getcwd(),'opt_clust', 'results_z%f_cmin%i_%s.csv' % (fs_arg, cluster_min, mvp.subject_name))
    with open(fn, 'w') as f:
        df.to_csv(f, header=True, sep='\t', index=False)
if __name__ == '__main__':
    # Imports live here (not at module top) because optimize_clustering is
    # only ever run through this script entry point.
    import sys
    import time
    import pandas as pd
    from os.path import join as opj
    from sklearn.cross_validation import StratifiedShuffleSplit as sss
    import psutil
    import numpy as np
    sys.path.append('/home/c6386806/LOCAL/Analysis_scripts')
    from joblib import Parallel, delayed
    from modules.glm2mvpa import MVPHeader
    # NOTE(review): star-import presumably provides os, glob, svm, cPickle,
    # h5py, clustercorrect_feature_selection, SelectAboveZvalue -- confirm.
    from modules.main_classify import *
    # Information about which data to use
    home = os.path.expanduser('~')
    feat_dir = opj(home, 'DecodingEmotions')
    ROI_dir = opj(home, 'ROIs')
    os.chdir(feat_dir)
    identifier = 'merged'
    mvp_dir = opj(os.getcwd(), 'mvp_mats')
    header_dirs = sorted(glob.glob(opj(mvp_dir, '*%s*cPickle' % identifier)))
    data_dirs = sorted(glob.glob(opj(mvp_dir, '*%s*hdf5' % identifier)))
    subject_dirs = zip(header_dirs, data_dirs)
    # Parameters for classification
    inputs = {}
    inputs['iterations'] = 250
    inputs['n_test'] = 4
    inputs['fs_method'] = SelectAboveZvalue
    debug = False
    # One worker per subject unless debugging.
    n_proc = 1 if debug else len(subject_dirs)
    # Grid-search over z-value threshold and minimum cluster size.
    for fs_arg in np.arange(1, 3, 0.1):
        inputs['fs_arg'] = fs_arg
        for cluster_min in np.arange(10, 310, 10):
            inputs['cluster_min'] = cluster_min
            Parallel(n_jobs=n_proc) \
                (delayed(optimize_clustering)(sub_dir, inputs) for sub_dir in subject_dirs)
|
import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import operators.blurs as blurs
from operators.operator import OperatorPlusNoise
from utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGrad
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# Command-line options for the equilibrium deblurring training run.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
# Anderson-acceleration fixed-point solver settings.
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.9)
# Step-LR schedule: multiply lr by lr_gamma every sched_step epochs.
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_blur_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 3
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
# NOTE(review): hard-coded; the parsed --etainit flag is never used.
initial_eta = 0.2
initial_data_points = 10000
# point this towards your celeba files
data_location = "/share/data/vision-greg2/mixpatch/img_align_celeba/"
# Gaussian blur forward model and additive noise level.
kernel_size = 5
kernel_sigma = 5.0
noise_sigma = 1e-2
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
# Pretrained denoiser checkpoint used to warm-start the learned component.
load_location = "/share/data/willett-group/users/gilton/denoisers/celeba_denoiser_normunet_3.ckpt"
# Probe the first six CUDA device indices; get_device_properties raises
# AssertionError for indices that do not exist.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# DataParallel is only worthwhile with more than one visible GPU.
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
# Images are resized to 128x128 and normalized to [-1, 1] per channel.
transform = transforms.Compose(
    [
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
)
celeba_train_size = 162770
total_data = initial_data_points
# Train on a random 10k-image subset of the CelebA training split.
total_indices = random.sample(range(celeba_train_size), k=total_data)
initial_indices = total_indices
dataset = CelebaTrainingDatasetSubset(data_location, subset_indices=initial_indices, transform=transform)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True,
)
test_dataset = CelebaTestDataset(data_location, transform=transform)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,
)
### Set up solver and problem setting
# Measurement process: Gaussian blur followed by additive noise.
forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                      n_channels=3, n_spatial_dimensions=2).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# Separate (identical) blur instance used inside the solver's iterations.
internal_forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                               n_channels=3, n_spatial_dimensions=2).to(device=device)
# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
# Warm-start the learned component from a pretrained denoiser, if present.
if os.path.exists(load_location):
    if torch.cuda.is_available():
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    learned_component.load_state_dict(saved_dict['solver_state_dict'])
# learned_component = Autoencoder()
solver = EquilibriumProxGrad(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                             eta=initial_eta, minval=-1, maxval = 1)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
# NOTE(review): this discards the epoch read from the denoiser checkpoint
# above; training restarts at 0 unless the save_location load below
# overrides it -- presumably intentional (denoiser is weights-only).
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume a previous training run from save_location, if one exists.
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # NOTE(review): optimizer state is deliberately not restored here.
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')
# Anderson-accelerated fixed-point iteration drives the deep equilibrium model.
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-5)
# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)
# Do train
refactor_equilibrium_training.train_solver_precond1(
    single_iterate_solver=solver, train_dataloader=dataloader, test_dataloader=test_dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=noise_sigma,
    precond_iterates=60)
|
import scratch
import serial
import sys
import time
# Python 2 script: bridges Scratch broadcasts to a Colias robot over serial.
ScratchConnect = False
ColiasConnect = False
port = '/dev/rfcomm0'
# Initialise Colias Connection: loop until the serial port opens AND the
# robot answers its motor-initialisation commands.
while ColiasConnect == False:
    print '-------------------------------------------------------\nAttempting to connect to port: %s' % port
    # Connect to default serial port for Bluetooth module
    try:
        ser = serial.Serial(
            port=port,
            baudrate=115200,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS
        )
        ColiasConnect = True
        print 'Port connection succesfull.\n'
    except:
        ColiasConnect = False
        print 'Error connecting to port.\n'
    if ColiasConnect == True:
        print'Attempting to initialise Colias'
        # Initialise motor-driver settings and test the communication channel
        # (MD2..MD5 are presumably per-motor speed presets -- confirm with the
        # Colias serial protocol).
        try:
            time.sleep(3)
            ser.write('MD2 30 30' + '\r\n')
            time.sleep(1)
            ser.write('MD3 50 50' + '\r\n')
            time.sleep(1)
            ser.write('MD4 33 38' + '\r\n')
            time.sleep(1)
            ser.write('MD5 35 35' + '\r\n')
            print 'Colias initialised'
            break
        except:
            print 'initialisation failed, Ensure Colias is switched on\n'
            ColiasConnect = False
    # On any failure, let the user pick another port (or quit).
    print 'Enter new port, leave blank to retry default port, type \'wired\' to try default wired port, type \'exit\' to Quit'
    userinput = raw_input()
    if userinput == '':
        port = '/dev/rfcomm0'
    elif userinput == 'wired':
        port = '/dev/ttyUSB0'
    elif userinput == 'exit':
        sys.exit(0)
    else:
        port = userinput
#Initialise Scratch Connection
# Retry connecting to the local Scratch remote-sensor socket every 5 s.
while ScratchConnect == False:
    try:
        s = scratch.Scratch()
        ScratchConnect = True
        print 'Scratch Connected'
    except:
        print 'Error Connecting to Scratch, Ensure Remote Sensor Connections are enabled'
        time.sleep(5)
#Detect Broadcasts
def listen():
    """Yield messages from the Scratch connection until it drops.

    Fixes two defects in the original:
    * the exception class was misspelled (``scratch.ScrachError``), so the
      handler itself raised AttributeError instead of catching the error;
    * ``raise StopIteration`` inside a generator is a RuntimeError under
      PEP 479 -- a plain ``return`` ends iteration cleanly on both
      Python 2 and 3.
    """
    while True:
        try:
            yield s.receive()
        except scratch.ScratchError:
            return
# Main loop: translate Scratch broadcasts into Colias motor commands.
if ColiasConnect == True and ScratchConnect == True:
    print 'Interface Initialised, listening for Scratch'
    for msg in listen():
        # throttle handling to one message per 0.1 s to prevent traffic
        time.sleep(0.1)
        if msg[0] == 'broadcast':
            if msg[1] == 'Foward':
                print 'foward message received'
                ser.write('MF' + '\r\n')
            elif msg[1] == 'Left':
                print 'Left message received'
                ser.write('ML' + '\r\n')
            elif msg[1] == 'Right':
                print 'Right message received'
                ser.write('MR' + '\r\n')
            elif msg[1] == 'Backward':
                print 'Backward message received'
                ser.write('MB' + '\r\n')
            elif msg[1] == 'Stop':
                print 'Stop message received'
                ser.write('MS' + '\r\n')
            elif msg[1] == 'Exit':
                # NOTE(review): bare `False` is a no-op; this branch was
                # presumably meant to break out of the loop -- confirm.
                False
        elif msg [0] == 'sensor-update':
            print 'sensor broadcast received: %s' % msg[1]
else:
    print 'Connections could not be initialised, Exiting interface'
    sys.exit(0)
|
import requests
import json
from enum import Enum
import config
# import boto3
# from botocore.config import Config
# iot_client = boto3.client('iot')
# iot = boto3.client(
# "iot",
# config=Config(
# retries={
# 'max_attempts': 7,
# 'mode': 'standard'
# }
# )
# )
# iot_data = boto3.client(
# "iot-data",
# config=Config(
# retries={
# 'max_attempts': 7,
# 'mode': 'standard'
# }
# )
# )
class AssetType(Enum):
    """Enumerates the asset kinds that can be requested for download."""
    PGCONNECT_ANDROID_APP = "PGCONNECT_ANDROID_APP"
    TRADE_FAIR_DEMO_APP = "TRADE_FAIR_DEMO_APP"
    INSIGHT_MOBILE_APP = "INSIGHT_MOBILE_APP"
    MARK_FIRMWARE = "MARK_FIRMWARE"
    # NB: member name and wire value intentionally differ for this entry.
    GATEWAY1_APPLICATION = "GATEWAY1_APPLICATION_FIRMWARE"
    INSIGHT_MOBILE_IOS_SDK = "INSIGHT_MOBILE_IOS_SDK"
class InsightBE():
    """Thin REST client for the Insight backend.

    Authenticates against AWS Cognito on construction; every request then
    carries the resulting id token in its ``Authorization`` header.  The
    duplicated POST/validate/parse sequence of the original update methods
    is factored into ``_post_json`` (behavior unchanged).
    """

    def __init__(self):
        # After this assignment the instance attribute shadows the
        # staticmethod of the same name (original behavior, kept as-is).
        self.auth_token = self.auth_token()

    # ---- internal helpers -------------------------------------------------

    def _gateway_endpoint(self, gw: str) -> str:
        """Return the per-gateway REST endpoint URL for *gw*."""
        return "{}/{}/gateways/{}".format(
            config.BASE_URL, config.CUSTOMER_ID, gw)

    def _post_json(self, end_point: str, request: dict) -> dict:
        """POST *request* as JSON, validate the response, return parsed body.

        Raises AssertionError on a non-200 / non-JSON response.
        """
        resp = requests.post(url=end_point, headers=self.post_headers(),
                             data=json.dumps(request))
        assert resp.status_code == 200
        assert resp.ok
        assert resp.headers["content-type"] == "application/json"
        return json.loads(resp.text)

    # ---- public API --------------------------------------------------------

    def get_gateway_info(self, gw: str):
        """Fetch and return the backend record for gateway *gw*."""
        resp = requests.get(url=self._gateway_endpoint(gw),
                            headers=self.get_headers())
        assert resp.status_code == 200
        assert resp.ok
        assert resp.headers["content-type"] == "application/json"
        return json.loads(resp.text)

    def update_gw_config(self, gw: str, url: str):
        """Point gateway *gw* at the configuration file at *url*."""
        request = {
            "mark_fw": "",
            "gateway_fw": "",
            "gateway_config": url
        }
        return self._post_json(self._gateway_endpoint(gw), request)

    def update_mark_be(self, gw: str, url: str):
        """Set the MARK firmware URL for *gw* through the backend."""
        request = {
            "mark_fw": url,
            "gateway_fw": "",
            "gateway_config": ""
        }
        return self._post_json(self._gateway_endpoint(gw), request)

    def update_mark(self, gw: str, url: str):
        """Write the MARK firmware URL straight into the device shadow.

        NOTE(review): ``iot_data`` exists only in the commented-out boto3
        setup at module top; calling this raises NameError until those
        clients are re-enabled.
        """
        iot_data.update_thing_shadow(
            thingName=gw,
            payload=json.dumps(
                {
                    "state": {
                        "desired": {
                            "mark_fw": url,
                            "gateway_fw": "",
                            "gateway_config": ""
                        }
                    }
                }
            ),
        )

    def update_gw_be(self, gw: str, url: str):
        """Set the gateway firmware URL for *gw* through the backend."""
        # NB: unlike the other update_* payloads this one deliberately
        # omits the "gateway_config" key (kept identical to the original).
        request = {
            "gateway_fw": url,
            "mark_fw": ""
        }
        return self._post_json(self._gateway_endpoint(gw), request)

    def update_gw(self, gw: str, url: str):
        """Write the gateway firmware URL straight into the device shadow.

        NOTE(review): same ``iot_data`` NameError caveat as update_mark().
        """
        iot_data.update_thing_shadow(
            thingName=gw,
            payload=json.dumps(
                {
                    "state": {
                        "desired": {
                            "gateway_fw": url,
                            "mark_fw": ""
                        }
                    }
                }
            ),
        )

    def get_url(self, asset: AssetType, version: str):
        """Request a pre-signed download URL for *asset* at *version*."""
        request = {
            "eula_version": "1.0.0",
            "eula_checksum": "FDABC1234",
            "agreed_by": "test@test.com",
            "asset_type": asset.value,
            "asset_version": version
        }
        end_point = "{}/{}/generate-download-url".format(
            config.BASE_URL, config.CUSTOMER_ID)
        return self._post_json(end_point, request)["asset_download_url"]

    def get_headers(self):
        """Headers for GET requests (auth only)."""
        return {
            "Authorization": self.auth_token,
        }

    def post_headers(self):
        """Headers for JSON POST requests."""
        return {
            "Authorization": self.auth_token,
            "Content-Type": "application/json",
        }

    @staticmethod
    def auth_token():
        """Authenticate against Cognito and return the id token."""
        # Additional headers.
        headers = {
            "Content-Type": "application/x-amz-json-1.1",
            "x-amz-target": "AWSCognitoIdentityProviderService.InitiateAuth",
        }
        # Body
        payload = {
            "AuthFlow": "USER_PASSWORD_AUTH",
            "ClientId": config.COGNITO_CLIENT_ID,
            "AuthParameters": {
                "USERNAME": config.AUTH_USERNAME,
                "PASSWORD": config.AUTH_PASSWORD,
            },
        }
        # convert dict to json by json.dumps() for body data.
        resp = requests.post(
            config.AUTH_URL, headers=headers, data=json.dumps(payload))
        # Validate response headers and body contents, e.g. status code.
        assert resp.status_code == 200
        resp_body = resp.json()
        # access_token = resp_body["AuthenticationResult"]["AccessToken"]
        return resp_body["AuthenticationResult"]["IdToken"]
|
from tkinter import *
from tkinter import filedialog as fd
from pathlib import Path
# --- module-level GUI state ---------------------------------------------
root = Tk()
# StringVars backing the two file-name labels shown in the window.
file1 = StringVar()
file1.set("File1")
file2 = StringVar()
file2.set("File2")
# Plain-string copies of the chosen file names (set by the button callbacks).
filename1 = ""
filename2 = ""
root.geometry('400x400')
topFrame = Frame(root)
topFrame.pack()
bottomFrame = Frame(root)
bottomFrame.pack(side=BOTTOM)
def buttonClick1():
    """Prompt for the first sequence file; show and store its base name.

    Bug fix: the original declared/assigned the module-level ``filename2``
    (a copy-paste slip from buttonClick2), so ``filename1`` was never
    updated.  It now writes ``filename1``.
    """
    global filename1
    sequence = fd.askopenfilename()
    path = Path(sequence)
    file1.set(path.name)
    filename1 = path.name
def buttonClick2():
    """Prompt for the second sequence file; show and store its base name."""
    global filename2
    chosen = Path(fd.askopenfilename())
    file2.set(chosen.name)
    filename2 = chosen.name
# Build the widget tree: two choose-file buttons with their labels, plus
# the (not yet wired) Run MSA button.
chooseFileButton1 = Button(topFrame, command=buttonClick1, text = "Choose file")
chooseFileButton1.pack()
labelFile1 = Label(topFrame, textvariable=file1)
labelFile1.pack()
chooseFileButton2 = Button(topFrame, command=buttonClick2,text = "Choose file")
chooseFileButton2.pack()
labelFile2 = Label(topFrame, textvariable=file2)
labelFile2.pack()
runMSA = Button(topFrame, text = "Run MSA")  # NOTE(review): no command bound yet
runMSA.pack()
root.mainloop()
# Runs only after the window is closed.
print(filename2)
print(type(filename2)) |
"""Script to take an excel worlist generated by LIMS at ALS and convert it into a filterable and more condensed
excel sheet for ease of use"""
from tkinter import filedialog
from tkinter import *
import xlrd, re, xlsxwriter, tkinter as tk
#regex setup to recognize different portions of a samples input data
# Raw strings silence Python's invalid-escape-sequence warnings (a future
# SyntaxError); every pattern value is identical to the original.
regexDict = {'sampleRegex': re.compile(r'^L\d\d\d\d\d\d\d-\d|^WG\d\d\d\d\d\d\d\d-\d'),   # LIMS sample ids
             'workgroupRegex': re.compile(r"^WG\d\d\d\d\d\d\d$|WG\d\d\d\d\d\d\d'"),      # workgroup ids (maybe quote-terminated)
             'productCodeRegex': re.compile(r'MS(-\w+)?-VA$|-ICP-'),
             'conductivityRegex': re.compile(r'Conductivity'),
             'turbidityRegex': re.compile(r'Turbidity'),
             'dateRegex': re.compile(r'^20\d\d-\d\d-\d\d'),
             'priorityRegex': re.compile(r'^E(\d)?$|^EP?$|^P(\w)?$'),                    # rush/priority flags
             'matrixRegex': re.compile(r'Water|Tissue|Soil|Seawater|Dustfall|Urine|Filter|Food|Bulk'),
             'rawRegex': re.compile(r'M\d\d\d|NP\d\d\d'),
             'statusRegex': re.compile(r'AVAL|NEED|REDO|WIP|APPR|CALC'),
             'unitRegex': re.compile(r'\d+ \w+'),
             'chlorideRegex': re.compile(r'Chloride'),
             'excelDateRegex': re.compile(r'\d\d\d\d\d\.(\d+)?')}                        # 5-digit Excel date serials
# Column positions of client fields within an extracted sample row.
clientNameIndex = 1
clientIdIndex = 3
accManagerIndex = 2
#extract needed data from excel sheet and save each sample as a list
def extractData(sheetName, arrayName, worklist):
    """Collect raw cell values for every sample in *sheetName* into *arrayName*.

    A sample starts at any row whose first cell matches sampleRegex and runs
    until the next sample row (or the end of the sheet).  Bare 5-digit float
    cells are treated as Excel date serials and converted via the workbook's
    date mode; all other non-empty cells are appended verbatim.

    Fixes an inconsistency in the original, whose inner helper read the
    module-level global ``sheet`` instead of the ``sheetName`` parameter
    (always the same object at the only call site, so behavior is unchanged,
    but the function now works for any sheet passed in).
    """
    sampleIndex = []
    def extractRows(startIndex, endIndex):
        # Flatten every non-empty cell of rows [startIndex, endIndex) into info.
        for z in range(startIndex, endIndex):
            for x in range(0, sheetName.ncols):
                if sheetName.cell_type(z, x) != xlrd.XL_CELL_EMPTY:
                    if regexDict['excelDateRegex'].search(str(sheetName.cell(z, x))) and not re.search(' ', str(sheetName.cell(z, x))):
                        info.append(str(xlrd.xldate_as_datetime(float(sheetName.cell(z, x).value), worklist.datemode)))
                    else:
                        info.append(sheetName.cell(z, x).value)
    # Index every row that begins a new sample.
    for i in range(sheetName.nrows):
        if regexDict['sampleRegex'].search(sheetName.cell(i, 0).value):
            sampleIndex.append(i)
    # Slice the sheet into per-sample row ranges; each becomes one flat list.
    for i in range(0, len(sampleIndex)):
        info = []
        if i < len(sampleIndex) - 1:
            extractRows(sampleIndex[i], sampleIndex[i + 1])
        else:
            extractRows(sampleIndex[i], sheetName.nrows)
        arrayName.append(info)
#format the basic sample lists to make sure that each item is in the correct index
def formatData(arrayNameIn, arrayNameOut):
    """Normalize raw sample lists into fixed-position rows for writeFile.

    Output row layout (by index): 0 sample id, 1 client name, 2 acc manager,
    3 client id, 4 priority, 5 product code, 6 matrix, 7 status, 8 due date,
    9 workgroup, 10 conductivity, 11 chloride, 12 turbidity, 13 raw location.
    A sample with several product codes produces one output row per code.
    """
    # NOTE(review): `priority` is a module-level flag shared with regexAdd;
    # it is reset per sample below, so the global is effectively a scratch var.
    global priority
    def regexAdd(regexIn):
        # Append the current cell if it matches regexIn; flag priority hits.
        global priority
        if regexDict[regexIn].search(str(arrayNameIn[i][z])):
            info.append(arrayNameIn[i][z])
        if regexIn == 'priorityRegex' and regexDict[regexIn].search(str(arrayNameIn[i][z])):
            priority = True
    def indexAdd(indexIn):
        # Append the cell at a fixed column position.
        if z == indexIn:
            info.append(arrayNameIn[i][z])
    def regexValAdd(regexIn):
        # Append the value after "key: value"; conductivity overwrites any
        # earlier conductivity entry instead of appending a duplicate.
        if regexDict[regexIn].search(str(arrayNameIn[i][z])):
            if (regexIn == 'conductivityRegex' and re.search('uS/cm', str(info))):
                d = re.search(':', str(arrayNameIn[i][z]))
                info[len(info) - 1] = arrayNameIn[i][z][d.start() + 2:]
            else:
                d = re.search(':', str(arrayNameIn[i][z]))
                info.append(arrayNameIn[i][z][d.start() + 2:])
    for i in range(0, len(arrayNameIn)):
        info = []
        priority = False
        # First pass: pull the positional and pattern-matched fields.
        for z in range(0, len(arrayNameIn[i])):
            regexAdd('sampleRegex')
            indexAdd(clientNameIndex)
            indexAdd(accManagerIndex)
            indexAdd(clientIdIndex)
            regexAdd('priorityRegex')
            regexValAdd('conductivityRegex')
            regexValAdd('turbidityRegex')
            regexValAdd('chlorideRegex')
            if re.search('Comments', str(arrayNameIn[i][z])):
                info.append(arrayNameIn[i][z+1])
        # Pad out missing optional fields so later indices line up.
        if priority is False:
            info.insert(4, '')
        if not re.search('uS/cm', str(info)):
            info.insert(5, '')
        if not re.search('mg/L|mg/kg', str(info)):
            info.insert(6, '')
        if not re.search('NTU', str(info)):
            info.insert(7, '')
        copyNum = 0
        productCodes = []
        matrix = []
        status = []
        dates = []
        wgCount = 0
        workGroups = []
        # Second pass: collect the per-product-code parallel lists.
        for z in range(0, len(arrayNameIn[i])):
            if regexDict['productCodeRegex'].search(str(arrayNameIn[i][z])):
                productCodes.append(arrayNameIn[i][z])
                continue
            if regexDict['matrixRegex'].search(str(arrayNameIn[i][z])):
                matrix.append(arrayNameIn[i][z])
                continue
            if regexDict['statusRegex'].search(str(arrayNameIn[i][z])):
                status.append(arrayNameIn[i][z])
                continue
            if regexDict['dateRegex'].search(str(arrayNameIn[i][z])):
                dates.append(arrayNameIn[i][z])
                continue
            if regexDict['workgroupRegex'].search(str(arrayNameIn[i][z])):
                workGroups.append(arrayNameIn[i][z])
        # Emit one output row per product code, reusing the shared prefix.
        while copyNum < len(productCodes):
            if copyNum > 0:
                info = info.copy()
                del info[5:10]
                del info[-2]
            info.insert(5, productCodes[copyNum])
            info.insert(6, matrix[copyNum])
            info.insert(7, status[copyNum])
            # dates alternate received/due -- take the due date (odd index).
            info.insert(8, dates[copyNum * 2 + 1])
            wgpresent = False
            # Match a workgroup to this product code by its proximity in the
            # raw row (5 or 6 cells after the code).
            for z in range(0, len(arrayNameIn[i]) - 6):
                if re.search(productCodes[copyNum], arrayNameIn[i][z]) and wgCount < len(workGroups):
                    if re.search(workGroups[wgCount], arrayNameIn[i][z + 5]) or re.search(workGroups[wgCount], arrayNameIn[i][z + 6]):
                        info.insert(9, workGroups[wgCount])
                        wgCount += 1
                        wgpresent = True
            if wgpresent == False:
                info.insert(9, '')
            if len(info) < 13:
                info.append('')
            # Raw location: D###/T### codes for -D-/-T- product variants.
            if re.search('-D-', str(info)) and re.search('D\d\d\d', str(arrayNameIn[i])):
                d = re.search('D\d\d\d', str(arrayNameIn[i]))
                info.insert(13, str(arrayNameIn[i])[d.start():d.start() + 4])
            else:
                if re.search('-T-', str(info)) and re.search('T\d\d\d', str(arrayNameIn[i])):
                    d = re.search('T\d\d\d', str(arrayNameIn[i]))
                    info.insert(13, str(arrayNameIn[i])[d.start():d.start() + 4])
                else:
                    info.insert(13, '')
            arrayNameOut.append(info)
            copyNum += 1
#write the new excel file
def writeFile(newFileName, arrayName):
    """Write the formatted sample rows to ``newFileName + '.xlsx'``.

    Produces a protected, filterable 'Worklist' sheet; column 14 gets a
    suggested dilution derived from matrix, blank status, problem-client
    membership and the conductivity reading.
    """
    workbook = xlsxwriter.Workbook(newFileName + '.xlsx')
    unlocked = workbook.add_format({'locked': False})
    allInfo = workbook.add_worksheet('Worklist')
    allInfo.set_column('A:XDF', None, unlocked)
    # formats for different cell types
    boldFormat = workbook.add_format({'bold':True, 'bottom':True, 'locked':True})
    problemFormat = workbook.add_format({'bg_color':'yellow', 'bottom':True, 'locked':True})
    screenFormat = workbook.add_format({'bg_color':'black', 'font_color':'white', 'bottom':True, 'locked':True})
    standardFormat = workbook.add_format({'bottom':True, 'locked':True})
    twoFormat = workbook.add_format({'bg_color':'yellow', 'bottom':True, 'locked':True})
    fiveFormat = workbook.add_format({'bg_color':'blue', 'font_color':'white', 'bottom':True, 'locked':True})
    tenFormat = workbook.add_format({'bg_color':'purple', 'font_color':'white', 'bottom':True, 'locked':True})
    # list of problem clients to recognize and automatically list as screen
    problemClients = {'aecom':re.compile('aecom', re.I),
                      'alexco':re.compile('alexco', re.I), 'indequim':re.compile('indequim', re.I), 'ch2m hill':re.compile('ch2m hill', re.I),
                      'city of kelowna':re.compile('city of kelowna', re.I), 'core labs':re.compile('core labs', re.I), 'corix utilities':re.compile('corix utilities', re.I),
                      'diand':re.compile('diand', re.I), 'diane c&w':re.compile('diane(.*)c&w', re.I), 'district of nanaimo':re.compile('district of nanaimo', re.I),
                      'fmf cape breton':re.compile('fmf cape breton', re.I), 'fuchs lubricants':re.compile('fuchs lubricants', re.I), 'funsin':re.compile('funsin', re.I),
                      'gibraltar mines':re.compile('gibraltar mines', re.I), 'goldcorp':re.compile('goldcorp', re.I), 'golder':re.compile('golder(.*)edmonton|golder(.*)yellowknife', re.I),
                      'hcell':re.compile('hcell', re.I), 'highland':re.compile('highland valley copper', re.I), 'huckleberry mine':re.compile('huckleberry mine', re.I),
                      'km canada marine tlp':re.compile('km canada marine tlp', re.I), 'lorax':re.compile('lorax', re.I), 'minera penasquito':re.compile('minera penasquito', re.I),
                      'miramar':re.compile('miramar', re.I), 'nautilus':re.compile('nautilus', re.I), 'tetra tech':re.compile('tetra tech', re.I),
                      'thomson creek':re.compile('thomson creek', re.I), 'tuprag':re.compile('tuprag', re.I), 'wood environment':re.compile('wood environment', re.I)}
    def fillWorksheetTitles(worksheetName):
        # Header row, bold and locked.
        titles = [
            'Sample ID', 'Client Name', 'Acc Manager', 'Client ID', 'Priority', 'Product Code', 'Matrix', 'Status',
            'Due Date', 'Workgroup', 'Conductivity', 'Chloride', 'Turbidity', 'Raw Location', 'Suggested Dilution', 'Comments', 'Notes']
        col = 0
        for title in titles:
            worksheetName.write(0, col, title, boldFormat)
            col += 1
    fillWorksheetTitles(allInfo)
    allInfo.protect('', {'autofilter':True, 'sort':True})
    row = 1
    for i in range(0, len(arrayName)):
        problem = False
        conductivity = 'screen'  # stays 'screen' unless a numeric reading is parsed
        blank = False
        col = 0
        water = False
        for z in range(0, len(arrayName[i])):
            if z == 1:
                # Client name column: highlight known problem clients.
                for client in problemClients:
                    if problemClients[client].search(arrayName[i][z]):
                        problem = True
                if problem == True:
                    allInfo.write(row, col, arrayName[i][z], problemFormat)
                else:
                    allInfo.write(row, col, arrayName[i][z], standardFormat)
            else:
                if z == 3 and re.search('BLANK|^FB$|^TB$', arrayName[i][z]):
                    blank = True
                    allInfo.write(row, col, arrayName[i][z], standardFormat)
                else:
                    if z == 4:
                        # NOTE(review): the fall-through below also writes
                        # this cell with standardFormat, overwriting the
                        # problemFormat highlight -- confirm intent.
                        if not arrayName[i][z] == '':
                            allInfo.write(row, col, arrayName[i][z], problemFormat)
                    if z == 6:
                        if arrayName[i][z] == 'Water':
                            water = True
                        allInfo.write(row, col, arrayName[i][z], standardFormat)
                    else:
                        if z == 10 and re.search('uS/cm', str(arrayName[i][z])):
                            # Parse the numeric conductivity (commas stripped).
                            tempStr = str(arrayName[i][z]).replace(',', '')
                            temp = re.findall('\d+', tempStr)
                            if len(temp) > 0:
                                conductivity = float(temp[0])
                            allInfo.write(row, col, arrayName[i][z], standardFormat)
                        else:
                            if z == 14:
                                # Shift comments one column right; col 14 is
                                # reserved for the suggested dilution.
                                allInfo.write(row, col + 1, arrayName[i][z], standardFormat)
                            else:
                                allInfo.write(row, col, arrayName[i][z], standardFormat)
            col += 1
        # Suggested dilution (column 14), water samples only.
        if water == True:
            if blank is True or re.search('<', arrayName[i][10]) and re.search('<', arrayName[i][12]):
                allInfo.write(row, 14, 'Blank', standardFormat)
            else:
                if problem is True or conductivity == 'screen' or conductivity >= 20000:
                    allInfo.write(row, 14, 'Screen/History', screenFormat)
                else:
                    if conductivity >= 10000:
                        allInfo.write(row, 14, 'X10', tenFormat)
                    else:
                        if conductivity >= 4000:
                            allInfo.write(row, 14, 'X5', fiveFormat)
                        else:
                            if conductivity >= 2000:
                                allInfo.write(row, 14, 'X2', twoFormat)
                            else:
                                allInfo.write(row, 14, 'Straight', standardFormat)
        else:
            allInfo.write(row, 14, '', standardFormat)
        row += 1
    # Final cosmetics: filter row, frozen header, column widths.
    allInfo.autofilter(0, 0, 0, len(arrayName[0]) + 1)
    allInfo.freeze_panes(1, 0)
    allInfo.set_column(0, 0, 11)
    allInfo.set_column(1, 1, 20)
    allInfo.set_column(2, 2, 14)
    allInfo.set_column(3, 3, 20)
    allInfo.set_column(4, 4, 10)
    allInfo.set_column(5, 5, 20)
    allInfo.set_column(8, 8, 20)
    allInfo.set_column(9, 9, 13)
    allInfo.set_column(10, 10, 13)
    allInfo.set_column(11, 12, 10)
    allInfo.set_column(13, 13, 12)
    allInfo.set_column(14, 14, 17)
    allInfo.set_column(15, 15, 60)
    allInfo.set_column(16, 16, 30, unlocked)
    workbook.close()
#browse for existing file in the system
def browsefunc():
    """Ask for the input workbook and show its path in the GUI."""
    global fileName
    chosen = filedialog.askopenfilename()
    fileName = chosen
    label.configure(text=chosen)
#browse for previous extracted file to import comments
def browsefunc2():
    """Ask for a previously exported worklist (for comment carry-over)."""
    global priorFile
    global priorList
    chosen = filedialog.askopenfilename()
    priorList = chosen
    label3.configure(text=chosen)
    priorFile = True
#convert the full file
def convertFile():
    """Run the full extract -> format -> (carry comments) -> write pipeline.

    Reads the workbook chosen via browsefunc, optionally imports comments
    from a prior worklist chosen via browsefunc2, asks for an output folder,
    then resets the module-level buffers for the next run.
    """
    global outputName
    global rawInfo
    global sampleInfo
    global sheet
    global worklist
    worklist = xlrd.open_workbook(fileName)
    sheet = worklist.sheet_by_index(0)
    extractData(sheet, rawInfo, worklist)
    formatData(rawInfo, sampleInfo)
    # Carry notes over from a previously exported worklist, if one was chosen.
    if priorFile == True:
        commentList = xlrd.open_workbook(priorList)
        commentSheet = commentList.sheet_by_index(0)
        commentExtraction(commentSheet, sampleInfo)
    outputName = filedialog.askdirectory() + '/' + txt.get()
    writeFile(outputName, sampleInfo)
    worklist.release_resources()
    # Reset the shared buffers so a second conversion starts clean.
    rawInfo = []
    sampleInfo = []
#extract comments from previously extracted file
def commentExtraction(sheetName, arrayName):
    """Carry user notes from a previously generated worklist into *arrayName*.

    Rows of *sheetName* whose Comments column (15) is non-empty contribute a
    (sample id, product code, note) triple taken from columns 0, 5 and 16;
    every entry of *arrayName* matching on both id and product code gets the
    note appended in place.
    """
    carried = []
    for rowIdx in range(1, sheetName.nrows):
        # xlrd renders empty cells as "empty:..." in their str() form.
        if re.search('empty', str(sheetName.cell(rowIdx, 15))):
            continue
        carried.append((sheetName.cell(rowIdx, 0).value,
                        sheetName.cell(rowIdx, 5).value,
                        sheetName.cell(rowIdx, 16).value))
    for sample in arrayName:
        for sample_id, product, note in carried:
            if sample[0] == sample_id and sample[5] == product:
                sample.append(note)
#set up and run the gui for the program
# --- module-level state shared by the callbacks above ---------------------
rawInfo = []
sampleInfo = []
worklist = None
sheet = None
fileName = None
outputName = None
priorList = None
priorFile = False
folderSelected = None  # NOTE(review): never assigned elsewhere in this file
priority = False       # scratch flag used by formatData/regexAdd
# Build the converter window.
win = tk.Tk()
win.title('Worklist Converter')
label = tk.Label(win, text='File path')
browse1 = tk.Button(win, text='Choose Input File', command=browsefunc)
label3 = tk.Label(win, text='Previous Worklist')
browse2 = tk.Button(win, text='Select Previous Worklist', command=browsefunc2)
label2 = tk.Label(win, text='Output file name (no extension)')
txt = Entry(win, width=20)
goButton = tk.Button(win, text='Convert', command=convertFile)
label.pack()
browse1.pack(pady=10)
label3.pack()
browse2.pack(pady=10)
label2.pack()
txt.pack(pady=10)
goButton.pack()
win.mainloop() |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, getopt
from multiprocessing import Pipe
import random
from operator import itemgetter
import itertools
import json
import threading
import copy
from protocols import *
def runAg(agent, connection, protocol, known, pattern, absint, learn):
    """Thread target: run one side of an interaction and report its outcome.

    `verbose` is a module-level flag; `absint` is mutated in place by the
    agent as the shared abstract-interaction trace.
    """
    result = agent.interact(connection, protocol, known, pattern, absint, learn)
    if verbose:
        print "outcome {}".format(result)
    return
def start_interaction(agent1, agent2, prot1, prot2, k_1, k_2, pattern, learn=1):
    """Run one interaction between *agent1* and *agent2* on two threads.

    Each agent receives one end of a Pipe; both append to the shared
    abstract-interaction trace, which is returned once both threads join.
    """
    conn_a, conn_b = Pipe()
    result_1 = []  # kept for parity with the original (currently unused)
    result_2 = []
    trace = []  # shared abstract-interaction record, filled by both agents
    workers = [
        threading.Thread(target=runAg, args=(agent1, conn_a, prot1, k_1, pattern, trace, learn)),
        threading.Thread(target=runAg, args=(agent2, conn_b, prot2, k_2, pattern, trace, learn)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return trace
class Protocol():
    """ A protocol is composed of a set of label ids divided into four groups
    (st, co, nc, re) and two dictionaries: Dependencies and Text (the
    natural-language rendering of each label).
    """
    def __init__(self, id, labels, text, deps, language):
        self.id = id
        self.labels = labels            # 4-tuple/list of label-id groups
        self.dependencies = deps        # label -> labels it requires first
        self.text = text                # label -> sequence of words
        self.lang = language
    def get_labels(self):
        # All labels, in group order st + co + nc + re.
        return self.labels[0]+self.labels[1]+self.labels[2]+self.labels[3]
    def get_requirements(self):
        # Labels that carry requirements (everything but the first group).
        return self.labels[1]+self.labels[2]+self.labels[3]
    def is_possible(self, label, interaction):
        """ A label is possible if it has not been said
        and its requirements are complete
        """
        if not label in self.get_labels():
            return False
        dependencies = self.dependencies[label]
        # Possible iff no dependency is still missing from the interaction.
        return not [d for d in dependencies if not d in interaction]
    def get_possibilities(self, interaction, restrict):
        # NOTE(review): `restrict` is accepted but unused here -- confirm
        # whether filtering by it was intended.
        return [l for l in self.get_labels() if not l in interaction and self.is_possible(l, interaction)]
    def get_vocabulary(self):
        # Collect the distinct words across all label texts (debug prints kept).
        labels = self.get_labels()
        print "labels {}".format(labels)
        voc = []
        for l in labels:
            print l
            print self.text[l]
            text = self.text[l]
            for w in text:
                if w not in voc:
                    voc.append(w)
        print "vocabhere : {}".format(voc)
        return voc
class Agent():
""" A basic agent"""
    def __init__(self, id):
        self.id = id                    # 0 or 1; identifies turn ownership in patterns
        self.alignment = {}             # foreign word -> {local word: weight}
        self.interloc = 1-id            # id of the other agent
        self.mappings_made = {}         # sentence-level mappings from the last interaction
        self.just_failed = False        # True after one failed turn; two in a row aborts
        self.frequency_map = {}         # foreign word -> observed count
        self.own_frequency = {}         # local word -> count over played protocols
        self.played = []                # ids of protocols already seen
def __str__(self):
return str(self.id)
def __repr__(self):
return str(self.id)
def update_own_freq(self, protocol):
for label in protocol.get_labels():
t = protocol.text[label]
for w in t:
if w in self.own_frequency:
self.own_frequency[w] += 1
else:
self.own_frequency[w] = 1
    def interact(self, connection, protocol, known, pattern, absint, learn):
        """Start an interaction with the agent on the other end of `connection`.

        `pattern` is a sequence of agent ids giving the turn order: on our
        turns we utter a label's text, on the other agent's turns we receive
        and interpret.  Returns 2 on success, 0 after two consecutive
        failures.  `absint` is appended to in place as the shared trace.
        """
        if not protocol.id in self.played:
            self.update_own_freq(protocol)
        interaction = []
        # Labels whose meaning we were not given up front.
        unknown = [l for l in protocol.get_labels() if not l in known]
        bound = len(pattern)
        self.mappings_made = {}
        for t in pattern:
            if t==self.id:
                # --- our turn: choose and send an utterance ---
                if verbose:
                    print ""
                    print "I am {} and i am a sender with just failed {}".format(self.id, self.just_failed)
                utterance = 'none'
                label = self.choose_utterance(protocol, known, interaction)
                if not label:
                    # Nothing sayable: signal failure; a second consecutive
                    # failure (ours or theirs) aborts the interaction.
                    connection.send('failed')
                    if verbose:
                        print "failed by sender"
                    if self.just_failed:
                        # self.punish()
                        return 0
                    else:
                        self.just_failed = True
                        continue
                else:
                    self.just_failed = False
                    utterance = protocol.text[label]
                    interaction.append(label)
                    absint.append((self.id,label))
                    connection.send(utterance)
                    if verbose:
                        print "Agent {} says {} : {}".format(self.id, label, utterance)
                    # Wait for the receiver's acknowledgement.
                    conf = connection.recv()
                    if conf == 'failed':
                        if self.just_failed:
                            # self.punish()
                            return 0
                        else:
                            self.just_failed = True
                            continue
            else:
                # --- their turn: receive and interpret ---
                received = connection.recv()
                if verbose:
                    print ""
                    print "I am {} and i received {} with just failed {}".format(self.id, received, self.just_failed)
                if received == 'failed' or received == 'none':
                    if self.just_failed:
                        # self.punish()
                        return 0
                    else:
                        self.just_failed = True
                        continue
                else:
                    interpretation = self.choose_interpretation(protocol, unknown, interaction, received, learn)
                    if interpretation == None or interpretation == 0:
                        if verbose:
                            print "Failed to interpret"
                        connection.send('failed')
                        if self.just_failed:
                            # self.punish()
                            return 0
                        else:
                            self.just_failed = True
                            continue
                    else:
                        self.just_failed = False
                        if verbose:
                            print "Agent {} interprets {}".format(self.id, interpretation)
                        # Remember the sentence-level mapping we committed to.
                        self.mappings_made[tuple(received)]= protocol.text[interpretation]
                        interaction.append(interpretation)
                        if verbose:
                            print "interaction: {}".format(interaction)
                        connection.send('ok')
        self.played.append(protocol.id)
        return 2
    def assign_alignment(self, alg):
        # Install `alg` (foreign word -> {local word: weight}) as the alignment.
        self.alignment = alg
def choose_utterance(self, protocol, known, interaction):
poss = protocol.get_possibilities(interaction, known)
if poss:
# return the text
return random.choice(poss)
return None
    def get_comb_values(self, words, received):
        """Computes the mapping degree of two sentences.

        Scores every permutation of the longer sentence against the shorter
        one, summing frequency-normalised alignment weights per word pair.
        Returns (comb_values, val) where comb_values maps each permutation
        to its score and val is True when `words` was the shorter sentence.
        """
        if len(words)<len(received):
            short = words
            long = received
        else:
            short = received
            long = words
        comb_values = {}
        # if verbose:
        #     print ""
        #     print "Comb values"
        for p in itertools.permutations(list(long), len(short)):
            value = 0
            for i in range(len(short)):
                # Orient the pair so `local` is always our own word.
                if len(words)<len(received):
                    local = short[i]
                    foreign = p[i]
                else:
                    local = p[i]
                    foreign = short[i]
                if foreign in self.alignment:
                    if local in self.alignment[foreign].keys():
                        # print local
                        # print foreign
                        # print 1
                        # print self.alignment[foreign][local]
                        # print 2
                        # print self.own_frequency[local]
                        # print 3
                        # print foreign
                        # print self.frequency_map
                        # print self.frequency_map[foreign]
                        # Positional displacement beyond 2 (computed but not
                        # used by the active scoring line below).
                        dist = abs(i-long.index(p[i]))
                        if dist>2:
                            tm = dist-2
                        else:
                            tm = 0
                        # value += self.alignment[foreign][local]/((float(self.own_frequency[local])+float(self.frequency_map[foreign]))+abs(i-long.index(p[i])))
                        # value += self.alignment[foreign][local]/((float(self.own_frequency[local])+float(self.frequency_map[foreign]))+tm)
                        value += self.alignment[foreign][local]/((float(self.own_frequency[local])+float(self.frequency_map[foreign])))
                        # value += self.alignment[foreign][local]/((float(self.own_frequency[local])+float(self.frequency_map[foreign]))) - tm/1000
                        # value += self.alignment[foreign][local]/100
                    else:
                        value += 0
            if not short:
                comb_values[p] = 0
            else:
                # Small penalty for length mismatch between the sentences.
                comb_values[p] = value - 0.005 * abs(len(received)-len(words))
            # if verbose:
            #     print "{}: {} {}".format(long, p, comb_values[p])
        val = False
        if len(words)<len(received):
            val = True
        return comb_values, val
def normalize(self):
for rec in self.alignment.keys():
sumV = sum(self.alignment[rec].values())
if not sumV==0:
for k in self.alignment[rec].keys():
self.alignment[rec][k] = self.alignment[rec][k] / sumV
    def comb_update(self, protocol, received, possibilities, learn):
        """ Updates values for possible mappings and retrieves max.

        For every candidate interpretation in `possibilities`, scores all
        word-level alignments against `received` (via get_comb_values),
        records the best (score, permutation) per candidate in `values`, and,
        when `learn` is truthy, merges the best per-word updates into
        self.alignment.  Returns the `values` dict.
        """
        values = {}
        upd = {}
        # NOTE(review): `if 1:` looks like a leftover debug switch; kept.
        if 1:
            # Count each received word's frequency once per protocol.
            for w in received:
                if not protocol.id in self.played:
                    if not w in self.frequency_map.keys():
                        self.frequency_map[w] = 1
                    else:
                        self.frequency_map[w] += 1
        for pos in possibilities:
            # Sentinel: any real score beats -20.
            values[pos] = (-20, None)
            # updated = []
            comb_values, val = self.get_comb_values(protocol.text[pos], received)
            for c in comb_values.keys():
                # c is always the LONG one
                value = comb_values[c]
                if value > values[pos][0]:
                    values[pos] = (value, c)
                # Orient (foreign, local) depending on which side was longer.
                if val:
                    foreign = c
                    local = protocol.text[pos]
                else:
                    foreign = received
                    local = c
                for i in range(len(foreign)):
                    update = value + 0.1
                    # Keep the maximum update seen for each (foreign, local)
                    # word pair.
                    if not foreign[i] in upd.keys() or not local[i] in upd[foreign[i]].keys():
                        if not foreign[i] in upd.keys():
                            # upd[foreign[i]] = {}
                            upd[foreign[i]] = {}
                        # if not local[i] in upd[foreign[i]].keys():
                        # upd[foreign[i]][local[i]] = [0,0]
                        upd[foreign[i]][local[i]] = update
                        # upd[foreign[i]][local[i]][0] += update
                        # upd[foreign[i]][local[i]][1] +=1
                    else:
                        if update>upd[foreign[i]][local[i]]:
                            upd[foreign[i]][local[i]] = update
        if learn:
            # Fold the collected updates into the persistent alignment table.
            for k in upd.keys():
                if not k in self.alignment:
                    self.alignment[k] = {}
                for kk in upd[k].keys():
                    if kk in self.alignment[k].keys() and self.alignment[k][kk]>0:
                        self.alignment[k][kk] += upd[k][kk]
                        # self.alignment[k][kk] += upd[k][kk][0]/upd[k][kk][1]
                        # self.alignment[k][kk] += (upd[k][kk])/self.alignment[k][kk]
                    else:
                        self.alignment[k][kk] = upd[k][kk]
                        # self.alignment[k][kk] = upd[k][kk][0]/upd[k][kk][1]
        return values
    def choose_interpretation(self, protocol, restrict, interaction, received, learn):
        """ Choose the interpretation for a message and perform the updates.

        Returns the chosen label (the possibility with the highest comb_update
        score), or 0 when no interpretation is currently possible.
        """
        #received is a set of words
        # NOTE(review): `for i in [0]:` executes exactly once; looks like a
        # leftover from an experiment loop -- kept unchanged.
        for i in [0]:
            for w in received:
                # Make sure every received word has an alignment row.
                if not w in self.alignment:
                    self.alignment[w] = {}
        possibilities = protocol.get_possibilities(interaction, restrict)
        if not possibilities:
            return 0
        values = self.comb_update(protocol, received, possibilities, learn)
        # Pick the possibility whose best word-alignment score is largest.
        chosen = max(possibilities, key=lambda x : values[x][0])
        if verbose:
            print "-------------------"
            print "received {}".format(received)
            print possibilities
            print "interpretation possibilities"
            for p in possibilities:
                print (p, protocol.text[p], values[p])
        if verbose and not chosen==None:
            print "interpretation chosen: {}".format(protocol.text[chosen])
        return chosen
def isSuccess(absint, alg_st, prot_es):
    """Returns True if the interaction is successful:
    - all labels are said
    - all dependencies are ok

    absint: abstract interaction, a list of (speaker-flag, label) pairs where
    flag 1 marks labels that must be translated via alg_st.
    alg_st: english->spanish label alignment; prot_es: the Spanish protocol.
    """
    esint = []
    # Rebuild the interaction in Spanish label space.
    for i in absint:
        if i[0]==1:
            esint.extend([l for l in alg_st[i[1]] if unicode(l) in prot_es.get_labels()])
        else:
            esint.append(i[1])
    # Success requires every Spanish label to have been said...
    suc = (not [u for u in prot_es.get_labels() if str(u) not in esint])
    if verbose:
        print "alg {}".format(alg_st)
        print "labels es {}".format(prot_es.get_labels())
        print "esint {}".format(esint)
        print "suc 1 : {}".format(suc)
        print "diffs 1: {}".format([i for i in prot_es.get_labels() if not i in esint])
        print "diffs 2: {}".format([i for i in esint if not i in prot_es.get_labels()])
    # ...and every label to respect its protocol dependencies in order.
    for i in range(len(esint)):
        suc = suc and prot_es.is_possible(esint[i], esint[:i])
    return suc
############################## PROTOCOL BUILDING #####################################
def build_protocol_fl(prot, ltext, ddep, bound):
    """ Build protocol from a freeling text and dependencies.

    prot: protocol identifier; ltext: freeling entries of the form
    (language, category, label, sentences); ddep: label -> dependency list;
    bound: maximum number of words kept per label text.
    """
    # Sort labels into the four category buckets: st / co / nc / re.
    category_slot = {'st': 0, 'co': 1, 'nc': 2, 're': 3}
    labels = ([], [], [], [])
    for entry in ltext:
        lang = entry[0]
        if entry[1] in category_slot:
            labels[category_slot[entry[1]]].append(entry[2])
    # Keep at most `bound` words of the first sentence of each label.
    text = {}
    for entry in ltext:
        words = [token[1] for token in entry[3][0]]
        text[entry[2]] = words[:bound]
    # Labels without an entry in ddep get an empty dependency list.
    dependencies = {entry[2]: ddep.get(entry[2], []) for entry in ltext}
    return Protocol(prot, labels, text, dependencies, lang)
def read_dependencies_dict():
    """Load the label-dependency mapping from data/dependencies.json."""
    with open('data/dependencies.json') as handle:
        return json.load(handle)
############################## ALIGNMENT METHODS #####################################
def read_alignment_req():
    """Parse the alignment file and return an english-spanish alignment.

    Each non-empty line of data/semantic_alg.txt holds three whitespace
    separated fields: spanish word, english word, confidence (ignored).
    Returns {english word: [spanish words]} with unicode keys/values.
    """
    with open('data/semantic_alg.txt') as f:
        content = f.readlines()
        f.close()
    # remove whitespace characters like `\n` at the end of each line
    content = [x.strip() for x in content]
    alignment = {}
    for l in content:
        if l:
            es, en, conf = l.split()
            # NOTE: relies on Python 2 semantics where u'x' == 'x', so the
            # str membership test matches the unicode keys stored below.
            if not en in alignment.keys():
                alignment[unicode(en)] = [unicode(es)]
            else:
                alignment[unicode(en)].append(unicode(es))
    # print alignment
    return alignment
def find_alignment_st(prot_en, prot_es):
    """Method to find the true alignment
    between the st labels of two protocols.

    Builds the dependency chain over the 'st' labels of each protocol,
    linearises both chains, and pairs them positionally.
    Returns {english st label: spanish st label}.
    """
    # NOTE(review): en_dep/es_dep are assigned but never used below.
    en_dep = prot_en.dependencies
    es_dep = prot_es.dependencies
    relevant_en = []
    relevant_es = []
    # Collect [predecessor, successor] pairs restricted to st labels
    # (labels[0] holds the 'st' category).
    for lan in ((prot_en, relevant_en),(prot_es, relevant_es)):
        prot = lan[0]
        relevant = lan[1]
        for d1 in prot.dependencies.keys():
            if d1 in prot.labels[0]:
                for d2 in prot.dependencies[d1]:
                    if d2 in prot.labels[0]:
                        relevant.append([d2,d1])
    sorted_es = []
    sorted_en = []
    # Linearise each chain: find the unique head and tail, then follow links.
    # NOTE: `sorted` and `next` shadow builtins; kept unchanged.
    for lan in ((relevant_en, sorted_en), (relevant_es, sorted_es)):
        relevant = lan[0]
        sorted = lan[1]
        first = [p for p in relevant if not [q for q in relevant if p[0]==q[1]]][0]
        last = [p for p in relevant if not [q for q in relevant if p[1]==q[0]]][0][1]
        sorted.extend(first)
        while not sorted[-1]==last:
            next = [p for p in relevant if sorted[-1]==p[0]][0]
            sorted.append(next[1])
    # Positional pairing of the two linearised chains.
    alg = {sorted_en[i] : sorted_es[i] for i in range(len(sorted_en))}
    return alg
def best_maps_dict(alignment):
    """Gets only the mappings with best value, as a dictionary.

    For each key of `alignment` (a dict of dicts of scores), keep the list of
    inner keys that reach the maximum score; an empty inner dict yields [].
    """
    best = {}
    for key, scores in alignment.items():
        winners = []
        if scores:
            top = max(scores.values())
            winners = [candidate for candidate, score in scores.items() if score == top]
        best[key] = winners
    return best
############################## EXPERIMENT #####################################
def get_en_trans(alg, l):
    """Return the first English key of `alg` whose translation list contains
    the label `l`, or False when no key matches."""
    for en, translations in alg.items():
        if l in translations:
            return en
    return False
def interact(learning, alg_req, prot_en, prot_es, a_es, a_en):
    """Run one interaction between the two agents over aligned protocols.

    learning: truthy for a training interaction (agents update alignments).
    alg_req: english->spanish alignment for the requirement labels.
    prot_en, prot_es: the same protocol in english/spanish.
    a_es, a_en: the two Agent instances.
    Returns 1 on success (per isSuccess), 0 otherwise.
    """
    if verbose:
        # NOTE(review): 'prot' and 'h' are not defined in this scope; these
        # prints would raise NameError if verbose were ever enabled -- confirm.
        print "Protocol {}".format(prot)
        if learning:
            print "interaction {}".format(h)
        else:
            print "test interaction {}".format(h)
    # Build the protocols
    # Build the labels alignment
    alg_st = find_alignment_st(prot_en, prot_es) # steps alignment
    # requirements alignment
    alg = {k : [alg_st[k]] for k in alg_st}
    for l in prot_en.get_requirements():
        alg[l] = alg_req[l]
    # Divide known labels for each agent
    k_en = random.sample(prot_en.get_labels(), len(prot_en.get_labels())/2)
    k_es = [alg[w] for w in prot_en.get_labels() if not w in k_en]
    # Build the interaction patterns
    patterns = [[0,1] for p in range(len(prot_en.get_labels()))]
    pattern = [e for l in patterns for e in l]
    #Start the interaction
    absint = start_interaction(a_en,a_es, prot_en, prot_es, k_en, k_es, pattern,learn=learning)
    if isSuccess(absint, alg, prot_es):
        if verbose:
            print "success"
        return 1
    else:
        if verbose:
            print "not success"
        return 0
def experiment(inters, test, protocols, test_prot, name):
    """Run `inters` training interactions followed by `test` test interactions.

    protocols / test_prot: protocol id lists for the two phases.
    name: experiment label (currently unused inside the function).
    Returns the success rate over the test phase (0 when test == 0).
    """
    # get the text
    with open('data/clean-labels-freeling-dedup.json') as data_file:
        text = json.load(data_file)
        data_file.close()
    # get the dependencies
    dependencies = read_dependencies_dict()
    # Word bound per label text passed to build_protocol_fl.
    leng = 6
    # create agents
    a_es = Agent(0)
    a_en = Agent(1)
    successes = []
    alg_req = read_alignment_req()
    # Training phase: agents learn (learning=1), results are discarded.
    for h in range(inters):
        prot = protocols[h]
        prot_en = build_protocol_fl(prot, text[prot][0], dependencies[prot][0], leng)
        prot_es = build_protocol_fl(prot, text[prot][1], dependencies[prot][1], leng)
        interact(1, alg_req, prot_en, prot_es, a_es, a_en)
    # now the test phase
    print ""
    print "TEST PHASE"
    print ""
    successes = []
    for h in range(test):
        prot = test_prot[h]
        prot_en = build_protocol_fl(prot, text[prot][0], dependencies[prot][0], leng)
        prot_es = build_protocol_fl(prot, text[prot][1], dependencies[prot][1], leng)
        success = interact(0, alg_req, prot_en, prot_es, a_es, a_en)
        successes.append(success)
    if test==0:
        succrate = 0
    else:
        succrate = sum(successes)/float(test)
    print "successes rate: {}".format(succrate)
    return succrate
def main(argv):
name = 'test'
training = 100
test = 100
i = 1
try:
opts, args = getopt.getopt(argv,"t:p:r:",["training=", "precision=", "protocols="])
except getopt.GetoptError:
print '-t number of training interactions \n -r protocol set'
sys.exit(2)
for opt, arg in opts:
if not arg=='':
if opt == '-h':
print '-t number of training interactions \n -r protocol set'
sys.exit()
if opt in ("-t", "--training"):
training = int(arg)
if opt in ("-r", "--protocols"):
if not i in [0,1,2,3,4]:
print 'The protocol must be between 0 and 4 (inclusive)'
sys.exit(2)
i = int(arg)
protocols = [prots0,prots1,prots2,prots3,prots4]
test_protocols = [test_prots0,test_prots1,test_prots2,test_prots3,test_prots4]
res = experiment(training, test, protocols[i], test_protocols[i], 'test')
# Disable verbose tracing by default for all functions above.
# NOTE(review): `global` at module level is a no-op statement; kept as-is.
global verbose
verbose = 0
if __name__ == "__main__":
    main(sys.argv[1:])
# Challenge: http://pastebin.com/MvLSdU2A
from math import factorial
def nChooseK(n, k):
    """Binomial coefficient C(n, k) = n! / (k! * (n-k)!)."""
    return factorial(n) // (factorial(k) * factorial(n - k))
class BSTNode():
    """A single node of the binary search tree: a value plus two child links."""
    def __init__(self, value):
        # Children start empty; _insert fills them in later.
        self.left = None
        self.right = None
        self.value = value
class BunnyBST():
    """Binary search tree that counts how many insertion orders rebuild it."""
    def __init__(self, seq):
        """Build the tree by inserting the values of (non-empty) seq in order."""
        self.root = BSTNode(seq[0])
        for val in seq[1:]:
            self._insert(self.root, val)
    def countSequences(self):
        """Return the number of sequences that result in the same tree"""
        return self._perms(self.root)
    def _insert(self, node, value):
        """Insert a new BSTNode into the BST.

        NOTE: comparisons are mirrored relative to a textbook BST (larger
        values go left); the sequence count only depends on tree shape, so
        the mirrored convention is self-consistent.
        """
        if node.value < value:
            if node.left is None:
                node.left = BSTNode(value)
            else:
                self._insert(node.left, value)
        else:
            if node.right is None:
                node.right = BSTNode(value)
            else:
                self._insert(node.right, value)
    def _perms(self, n):
        """Recursively calc the number of permutations for each sub tree"""
        return self._size_and_perms(n)[1]
    def _size_and_perms(self, node):
        """Return (subtree size, permutation count) in one bottom-up pass.

        Replaces the previous per-level _count() calls, which re-walked each
        subtree at every recursion level and made the whole count O(n^2);
        this single pass is O(n).
        """
        if not node:
            return 0, 1
        left_size, left_perms = self._size_and_perms(node.left)
        right_size, right_perms = self._size_and_perms(node.right)
        # Interleave the left and right insertion orders in every valid way.
        perms = nChooseK(left_size + right_size, left_size) * left_perms * right_perms
        return left_size + right_size + 1, perms
    def _count(self, node):
        """Return the sub tree size including self"""
        if not node: return 0
        return self._count(node.left) + self._count(node.right) + 1
def answer(seq):
    """Count the insertion sequences that build the same BST as seq does."""
    return BunnyBST(seq).countSequences()
import json
import uuid
from typing import Any, Dict, Callable
import pika
from Core import settings
from Core.Tools.Misc.ObjectSerializers import object_to_json
class RabbitMqAdapter:
    """Thin wrapper around pika for publishing to and consuming from RabbitMQ.

    Publishing opens a fresh blocking connection per message; consuming keeps
    a long-lived channel created by register_consumer().
    """
    @classmethod
    def serialize_message(cls, data: Any) -> Dict:
        """Serialize an arbitrary message object into a JSON-compatible dict."""
        return object_to_json(data)
    def __init__(
        self,
        config: settings.RabbitMq,
        exchange: settings.Exchange,
        secondary_exchange: settings.Exchange,
        prefetch_count: int = 50,
    ) -> None:
        """Store exchange settings and pre-build the pika connection parameters.

        config : broker host/port/vhost/credentials/heartbeat settings
        exchange : primary exchange (inbound + outbound queues)
        secondary_exchange : alternate exchange used by publish(on_secondary=True)
        prefetch_count : per-consumer unacknowledged-message limit
        """
        self._config = config
        self._exchange = exchange
        self._secondary_exchange = secondary_exchange
        self._prefetch_count = prefetch_count
        self._callback = None
        self._channel = None
        self.consumer_started = False
        self._connection_parameters = pika.ConnectionParameters(
            host=self._config.hostname,
            port=self._config.port,
            virtual_host=self._config.virtual_host,
            credentials=(
                pika.credentials.PlainCredentials(
                    username=self._config.username, password=self._config.password, erase_on_connect=False
                )
            ),
            heartbeat=self._config.heartbeat,
            blocked_connection_timeout=self._config.connection_timeout,
        )
    def publish(self, message_body: Any, on_secondary: bool = False) -> None:
        """Publish message_body as JSON to the primary or secondary exchange.

        Assumes message_body exposes a `message_type` attribute used for the
        AMQP `type` property.
        """
        if on_secondary:
            exchange_name = self._secondary_exchange.name
            outbound_key = self._secondary_exchange.outbound_queue.key
        else:
            exchange_name = self._exchange.name
            outbound_key = self._exchange.outbound_queue.key
        # A short-lived connection per publish keeps producers stateless; both
        # context managers close cleanly on exit.
        with pika.BlockingConnection(self._connection_parameters) as connection:
            with connection.channel() as channel:
                channel.basic_publish(
                    exchange_name,
                    outbound_key,
                    json.dumps(RabbitMqAdapter.serialize_message(message_body)),
                    properties=pika.BasicProperties(
                        type=message_body.message_type,
                        message_id=str(uuid.uuid4()),
                        priority=1,
                        content_type="application/json",
                        delivery_mode=2,  # persistent delivery
                    ),
                )
    def register_callback(self, callback: Callable) -> "RabbitMqAdapter":
        """callback(ch, method, properties, body)"""
        self._callback = callback
        return self
    def register_consumer(self, consumer_tag: str) -> "RabbitMqAdapter":
        """Open a consuming channel on the inbound queue using the registered callback.

        Raises ValueError when register_callback() was not called first.
        """
        if not self._callback:
            raise ValueError("No callback provided. Try registering a callback first.")
        # NOTE(review): the connection object is not stored; the channel keeps
        # it reachable, but there is no explicit close path -- confirm lifecycle.
        connection = pika.BlockingConnection(self._connection_parameters)
        self._channel = connection.channel()
        self._channel.basic_qos(prefetch_count=self._prefetch_count)
        self._channel.basic_consume(
            self._exchange.inbound_queue.name, self._callback, auto_ack=False, consumer_tag=consumer_tag
        )
        return self
    def start_consuming(self) -> None:
        """Block the current thread and dispatch inbound messages to the callback."""
        if not self._channel:
            raise ValueError("No channel was created. Try creating a channel and registering a consumer first.")
        self.consumer_started = True
        self._channel.start_consuming()
|
import torch.nn as nn
from .. model import Model
from ... modules.inplace_clip import InplaceClip
class VGG7(Model):
    """waifu2x 7-layer VGG-style model: 1x scale, 7-pixel border offset."""
    name = "waifu2x.vgg_7"
    def __init__(self, in_channels=3, out_channels=3, **kwargs):
        super(VGG7, self).__init__(VGG7.name, in_channels=in_channels,
                                   out_channels=out_channels, scale=1, offset=7)
        self.register_kwargs({"in_channels": in_channels, "out_channels": out_channels})
        # Six 3x3 valid convolutions with LeakyReLU, then a projection conv
        # and an in-place clip of the output to [0, 1].
        channel_plan = [(in_channels, 32), (32, 32), (32, 64),
                        (64, 64), (64, 128), (128, 128)]
        layers = []
        for n_in, n_out in channel_plan:
            layers.append(nn.Conv2d(n_in, n_out, 3, 1, 0))
            layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(nn.Conv2d(128, out_channels, 3, 1, 0))
        layers.append(InplaceClip(0, 1))
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        return self.net(x)
if __name__ == "__main__":
    # Smoke test: build the registered model, run a dummy 256x256 batch
    # through it on the GPU, and print the output shape plus model metadata.
    import torch
    from nunif.models import create_model
    device = "cuda:0"
    model = create_model(VGG7.name, in_channels=3, out_channels=3).to(device)
    print(model)
    x = torch.zeros((1, 3, 256, 256)).to(device)
    with torch.no_grad():
        z = model(x)
    print(z.shape)
    print(model.name, model.offset, model.scale)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 21:42:39 2019
@author: Henri_2
"""
from music21 import converter, instrument, note, chord, stream
def create_midi(prediction_output):
    """Convert the output from the prediction to notes and create a midi file
    from the notes.

    prediction_output: iterable of (duration, pitch) pairs where pitch is
    either a note name or a dot-separated string of integer pitches (a chord).
    Writes the result to 'test_output.mid'.

    NOTE: the docstring used to sit below the print call, making it a no-op
    string expression instead of the function's __doc__.
    """
    print('creating midi-file')
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        try:
            # pattern is a chord (dot-separated ints, or a single digit)
            if ('.' in pattern[1]) or pattern[1].isdigit():
                notes_in_chord = pattern[1].split('.')
                notes = []
                for current_note in notes_in_chord:
                    new_note = note.Note(int(current_note))
                    new_note.duration.type = pattern[0]
                    new_note.storedInstrument = instrument.Piano()
                    notes.append(new_note)
                new_chord = chord.Chord(notes)
                new_chord.offset = offset
                output_notes.append(new_chord)
            # pattern is a note
            else:
                new_note = note.Note(pattern[1])
                new_note.offset = offset
                new_note.duration.type = pattern[0]
                new_note.storedInstrument = instrument.Piano()
                output_notes.append(new_note)
        except Exception:
            # Best-effort decode: skip anything music21 cannot interpret.
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            continue
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output.mid')
import json
import pytest
from quetz_content_trust import db_models
@pytest.fixture
def trust_roles():
    """Filenames of the trust-role metadata files uploaded by package_files."""
    return ["root.json"]
@pytest.fixture
def package_files(pkgstore, channel, trust_roles):
    """Upload every trust-role file into the channel's linux-64 subdirectory."""
    pkgstore.create_channel(channel.name)
    for filename in trust_roles:
        with open(filename, 'rb') as fid:
            content = fid.read()
        # Store each file under its own name; the loop variable was
        # previously unused and a literal placeholder path was uploaded.
        pkgstore.add_file(content, channel.name, f"linux-64/{filename}")
@pytest.mark.parametrize(
    "logged,channel_role,expected_status",
    [
        (False, None, 401),
        (True, None, 403),
        (True, "member", 403),
        (True, "maintainer", 403),
        (True, "owner", 201),
    ],
)
def test_post_root_role_permissions(
    client, channel, root_role_file, logged, channel_role, expected_status
):
    """Check post of root role requires 'owner' channel permissions"""
    # Use a context manager so the uploaded file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
def test_post_root_role(client, channel, db, root_role_file, offline_keys):
    """Check database keys/delegations/roles after posting root role"""
    # Close the uploaded file deterministically instead of leaking the handle.
    with open(root_role_file, 'rb') as role_file:
        client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    # Check keys: root + key_mgr signing keys must both be present.
    assert db.query(db_models.SigningKey).count() == 2
    root_key = (
        db.query(db_models.SigningKey)
        .join(db_models.RoleDelegation.keys)
        .filter(db_models.RoleDelegation.type == "root")
        .one()
    )
    assert root_key.public_key == offline_keys["root"][0]
    key_mgr_key = (
        db.query(db_models.SigningKey)
        .join(db_models.RoleDelegation.keys)
        .filter(db_models.RoleDelegation.type == "key_mgr")
        .one()
    )
    assert key_mgr_key.public_key == offline_keys["key_mgr"][0]
    # Check delegations
    assert db.query(db_models.RoleDelegation).count() == 2
    root_delegation = (
        db.query(db_models.RoleDelegation)
        .filter(db_models.RoleDelegation.type == "root")
        .one()
    )
    assert root_delegation.channel == channel.name
    assert len(root_delegation.keys) == 1
    assert len(root_delegation.consumers) == 1
    key_mgr_delegation = (
        db.query(db_models.RoleDelegation)
        .filter(db_models.RoleDelegation.type == "key_mgr")
        .one()
    )
    assert key_mgr_delegation.channel == channel.name
    assert len(key_mgr_delegation.keys) == 1
    assert len(key_mgr_delegation.consumers) == 0
    root_role = db.query(db_models.ContentTrustRole).one()
    assert root_delegation.issuer == root_role
    assert key_mgr_delegation.issuer == root_role
    # Check roles: only the root role itself exists so far.
    assert db.query(db_models.ContentTrustRole).count() == 1
    assert root_role.channel == channel.name
    assert len(root_role.delegations) == 2
    assert root_role.delegations[0] == key_mgr_delegation
    assert root_role.delegations[1] == root_delegation
    assert root_role.delegator == root_delegation
def test_delegation_cascade_deletion(client, channel, db, root_role_file):
    """Check cascade deletion"""
    # Close the uploaded file deterministically instead of leaking the handle.
    with open(root_role_file, 'rb') as role_file:
        client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    root_delegation = (
        db.query(db_models.RoleDelegation)
        .filter(db_models.RoleDelegation.type == "root")
        .one()
    )
    # Check cascade delete: delegations and roles go, signing keys remain.
    db.delete(root_delegation)
    assert db.query(db_models.RoleDelegation).count() == 0
    assert db.query(db_models.ContentTrustRole).count() == 0
    assert db.query(db_models.SigningKey).count() == 2
def test_overwrite_root_role(client, root_role_file, channel):
    """Check overwriting 'root' role is not permitted"""
    # First upload succeeds; files are opened via context managers so the
    # handles are not leaked.
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == 201
    # Second upload of the same role must be rejected with a conflict.
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == 409
@pytest.mark.parametrize(
    "logged,channel_role,expected_status",
    [
        (False, None, 401),
        (True, None, 403),
        (True, "member", 200),
        (True, "maintainer", 200),
        (True, "owner", 200),
    ],
)
def test_get_root_role(client, channel, logged, channel_role, expected_status):
    """Check get 'root' role requires 'member' channel permissions"""
    # Any authenticated member (or above) may read the channel roles.
    response = client.get(f"/api/content-trust/{channel.name}/roles")
    assert response.status_code == expected_status
@pytest.mark.parametrize(
    "logged,channel_role,expected_status",
    [
        (False, None, 401),
        (True, None, 403),
        (True, "member", 403),
        (True, "maintainer", 403),
        (True, "owner", 201),
    ],
)
def test_post_key_mgr_role(
    client,
    channel,
    root_role_file,
    key_mgr_role_file,
    logged,
    channel_role,
    expected_status,
):
    """Check posting 'key_mgr' role requires 'owner' channel permissions"""
    # Files are opened via context managers so the handles are not leaked.
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
    with open(key_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=key_mgr",
            files={"file": (key_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
@pytest.mark.parametrize(
    "logged,channel_role,expected_status",
    [
        (False, None, 401),
        (True, None, 403),
        (True, "member", 403),
        (True, "maintainer", 403),
        (True, "owner", 400),
    ],
)
def test_post_key_mgr_role_wo_delegation(
    client, channel, key_mgr_role_file, logged, channel_role, expected_status
):
    """Check posting 'key_mgr' role requires delegation from 'root' role"""
    # File is opened via a context manager so the handle is not leaked.
    with open(key_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=key_mgr",
            files={"file": (key_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
def test_get_new_key(client):
    """Check get a new key pair"""
    response = client.get("/api/content-trust/new-key")
    assert response.status_code == 200
    key = response.json()
    # Both the key type and the signature scheme must be ed25519.
    for field in ("keytype", "scheme"):
        assert field in key and key[field] == "ed25519"
    # 32-byte public key, hex-encoded.
    assert "keyval" in key and len(key["keyval"]) == 64
@pytest.mark.parametrize(
    "logged,channel_role,expected_status",
    [
        (False, None, 401),
        (True, None, 403),
        (True, "member", 403),
        (True, "maintainer", 403),
        (True, "owner", 201),
    ],
)
def test_post_pkg_mgr_role(
    client,
    channel,
    root_role_file,
    key_mgr_role_file,
    pkg_mgr_role_file,
    logged,
    channel_role,
    expected_status,
):
    """Check posting 'pkg_mgr' role requires delegation from 'key_mgr' role"""
    # Files are opened via context managers so the handles are not leaked.
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
    with open(key_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=key_mgr",
            files={"file": (key_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
    with open(pkg_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=pkg_mgr",
            files={"file": (pkg_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
@pytest.mark.parametrize(
    "logged,channel_role,delegators_status,expected_status",
    [
        (False, None, 401, 401),
        (True, None, 403, 403),
        (True, "member", 403, 403),
        (True, "maintainer", 403, 403),
        (True, "owner", 201, 400),
    ],
)
def test_post_pkg_mgr_role_wo_delegation(
    client,
    channel,
    root_role_file,
    key_mgr_role_file,
    pkg_mgr_role_file,
    logged,
    channel_role,
    delegators_status,
    expected_status,
):
    """Check posting 'pkg_mgr' role requires delegation from 'key_mgr' role"""
    # Files are opened via context managers so the handles are not leaked.
    with open(pkg_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=pkg_mgr",
            files={"file": (pkg_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
    with open(root_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=root",
            files={"file": (root_role_file.name, role_file)},
        )
    assert response.status_code == delegators_status
    # Still rejected: key_mgr was never posted, so no delegation exists.
    with open(pkg_mgr_role_file, 'rb') as role_file:
        response = client.post(
            f"/api/content-trust/{channel.name}/roles?type=pkg_mgr",
            files={"file": (pkg_mgr_role_file.name, role_file)},
        )
    assert response.status_code == expected_status
def test_post_index_signed_repodata(config, channel, signed_package, signing_key):
    """Check that the generated repodata.json carries exactly one valid-shaped
    signature for the signed package under the signing key."""
    pkgstore = config.get_package_store()
    signed_repodata = json.load(
        pkgstore.serve_path(channel.name, "linux-64/repodata.json")
    )
    public_key = signing_key.public_key
    assert "signatures" in signed_repodata
    assert signed_package in signed_repodata["signatures"]
    assert public_key in signed_repodata["signatures"][signed_package]
    assert len(signed_repodata["signatures"][signed_package]) == 1
    assert "signature" in signed_repodata["signatures"][signed_package][public_key]
    # 64-byte ed25519 signature, hex-encoded.
    assert (
        len(signed_repodata["signatures"][signed_package][public_key]["signature"])
        == 128
    )
|
#!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 17-7-28 上午10:45
"""
|
"""
Classes for calculationg different energy terms
"""
import numpy as np
class Hamiltonian():
    """
    Parent class for different types of interactions.

    Assumptions and rules used in interaction construction:
    1) Interaction contribution to the Hamiltonian can be described as
       H = sum_i (gamma(a_i)*Q_i)
    2) Each term in the sum linearly depends on a single parameter
    3) Number of terms is less or equal than number of different parameters

    Should be able to do the following:
    1) Calculate an array, that contains Q_i for all i
    2) Parse parameter file and retrieve corresponding parameter for each Q_i.
    3) For each of the parameter, calculate a Hamiltonian derivative with
       respect to this parameter.
    """
    def __init__(self):
        """
        Initialize the interaction and pass all the constant parameters
        needed for calculation.

        BUG FIX: this used to `return 0`; in Python 3 an __init__ returning a
        non-None value raises TypeError at instantiation.
        """
        pass
    @staticmethod
    def get_tanh_well(r, eta, r_min, r_max):
        """
        Calculate Tanh well: a smooth indicator that is ~1 for
        r_min < r < r_max and ~0 outside, with wall steepness eta.
        """
        return 0.25*(1+np.tanh(eta*(r-r_min)))*(1+np.tanh(eta*(r_max-r)))
    def load_paramters(self, parameter_file):
        """
        Load paramters from parameter_file.
        (Name kept as-is -- subclasses override this exact spelling.)
        """
        return 0
    def _calculate_Q(self, **kwargs):
        """
        Calculate Q for each term of the Hamiltonian
        """
        return 0
    def calculate_derivatives(self):
        """
        Calculate dH/d(parameter) for each parameter (subclass hook).
        """
    def get_unique_parameters(self):
        """
        Get parameters. Order should be the same as in return of
        calculate_derivatives
        """
        return 0
    def get_all_parameters(self):
        """
        Returns parameters and their types, in the same order as Q
        """
    def get_H(self):
        """
        Calculate H.
        """
class AWSEMDirectInteraction(Hamiltonian):
    """
    Is responsible for direct interaction potential.

    The direct term sums a tanh contact well over all residue pairs that are
    more than `separation` apart in sequence, weighted by residue-type-pair
    parameters gamma.
    """
    def __init__(self,
                 n_residues,
                 lambda_direct=4.184,
                 r_min_I=0.45,
                 r_max_I=0.65,
                 eta=50,
                 separation=9):
        """
        Set up the direct-contact term for a chain of n_residues residues.

        lambda_direct : overall energy scale (presumably kJ/mol, 4.184 = 1 kcal
                        -- see the burial class comment; confirm)
        r_min_I, r_max_I : inner/outer edges of contact well I
        eta : steepness of the tanh well walls
        separation : minimum sequence separation |j-i| for a pair to count
        """
        self.type = 'awsem_direct'
        self.lambda_direct = lambda_direct
        self.r_min_I = r_min_I
        self.r_max_I = r_max_I
        self.eta = eta
        # Here, determine a mask wich defines,
        # which indexes are neded for calculation
        counter = 0
        mask_indexes = []
        residue_pairs = []
        for i in range(n_residues-1):
            for j in range(i+1, n_residues):
                if j-i > separation:
                    mask_indexes.append(counter)
                    residue_pairs.append([i,j])
                counter += 1
        # self.mask indexes into the flat list of all i<j pairs, keeping only
        # pairs beyond the sequence-separation cutoff.
        self.mask = np.array(mask_indexes, dtype=int)
        self.residue_pairs = residue_pairs
    def _calculate_Q(self,
                     input,
                     input_type ='distances'
                     ):
        """
        Calculate Q values for the direct potential.

        Arguments:

        input : numpy array
              Input data: per-frame pair distances when input_type is
              'distances', or precomputed tanh-well values when input_type is
              'tanh_well'.  Columns are assumed to cover all i<j residue
              pairs (the mask selects the relevant ones) -- TODO confirm.
        """
        masked_input = input[:, self.mask]
        if input_type == 'distances':
            q_direct = -1*self.lambda_direct*self.get_tanh_well(masked_input,
                                                                self.eta,
                                                                self. r_min_I,
                                                                self.r_max_I)
        elif input_type == 'tanh_well':
            q_direct = -self.lambda_direct*masked_input
        # NOTE(review): for any other input_type, q_direct is never bound and
        # the next line raises NameError -- confirm intended input types.
        self.q = q_direct
        return q_direct
    def load_paramters(self, parameter_file):
        """
        Load parameters and determine their corresponding types.

        Reads the first column of the first 210 rows (one gamma per unordered
        residue-type pair: 20*21/2 = 210) and builds the matching list of
        frozenset pair types.
        """
        gamma_se_map_1_letter = { 'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4,
                                  'Q': 5, 'E': 6, 'G': 7, 'H': 8, 'I': 9,
                                  'L': 10, 'K': 11, 'M': 12, 'F': 13, 'P': 14,
                                  'S': 15, 'T': 16, 'W': 17, 'Y': 18, 'V': 19}
        ndx_2_letter = {value : key for key, value in gamma_se_map_1_letter.items() }
        types = []
        data = np.loadtxt(parameter_file)
        gamma_direct = data[:210,0]
        self.gamma = gamma_direct
        for i in range(20):
            for j in range(i, 20):
                # NOTE: `type` shadows the builtin; kept unchanged.
                type=frozenset([ndx_2_letter[i],ndx_2_letter[j]])
                types.append(type)
        self.types = types
        return 0
    def map_types_to_pairs(self, sequence):
        """
        Create a mapping between types of parameters and residue pairs, that
        contribute to the H. As an outcome, creates a dictionary. Keys of the
        dictionary - frozen sets representing all the pair types. Values - list of integers -
        indexes of pairs in self.pairs, that corresponds to the key type
        """
        type_to_pair = { type: [] for type in self.types} # just 210 types
        for ndx, pair in enumerate(self.residue_pairs):
            pair_type = frozenset([sequence[pair[0]], sequence[pair[1]]])
            type_to_pair[pair_type].append(ndx)
        return type_to_pair
    def precompute_data(self, input, input_type):
        """
        Calculate values, that are used repeatedly for different calculations
        """
        self.q = self._calculate_Q(input, input_type=input_type)
    def calculate_derivatives(self, sequence, input=None, input_type='distances'):
        """
        Calculate derivatives with respect of parameters
        of each type.

        Returns an (n_frames, 210) array: dH/d(gamma of a pair type) is the
        sum of Q over all residue pairs of that type.
        """
        if not hasattr(self, 'q'):
            self.q = self._calculate_Q(input, input_type=input_type)
        #Getting mapping
        types_to_pair = self.map_types_to_pairs(sequence)
        derivatives = [] # At the end, derivatives should be a matrix
        for pair_type in self.types:
            fragment = np.sum(self.q[:, types_to_pair[pair_type]], axis=1)
            derivatives.append(fragment)
        derivatives = np.array(derivatives).T
        return(derivatives)
    def get_parameters(self):
        """Return the 210 direct-contact gamma parameters."""
        return self.gamma
    def get_n_params(self):
        """Return the number of direct-contact parameters."""
        return len(self.gamma)
class AWSEMBurialInteraction(Hamiltonian):
    """
    Is responsible for the burial interaction potential.

    Each residue contributes through three tanh wells over its local density
    (low / medium / high burial), each weighted by a per-residue-type gamma.
    """
    def __init__(self,
                 n_residues,
                 lambda_burial=4.184, # will yeild energy in kJ/mol
                 eta_burial=4.0,
                 rho_I_limits = [0.0, 3.0],
                 rho_II_limits = [3.0, 6.0],
                 rho_III_limits = [6.0, 9.0]
                 ):
        """
        Set up the burial term.

        lambda_burial : overall energy scale (kJ/mol)
        eta_burial : steepness of the density tanh wells
        rho_*_limits : [min, max] density limits of the three burial classes
        """
        self.type = 'awsem_burial'
        self.lambda_burial = lambda_burial
        self.eta_burial = eta_burial
        self.rho_I_limits = rho_I_limits
        self.rho_II_limits = rho_II_limits
        self.rho_III_limits = rho_III_limits
    def _burial_q(self, densities, rho_limits):
        """
        Calculate part, that does not depend on parameter
        for a particular range of q_values
        """
        rho_min, rho_max = rho_limits
        term = np.tanh(self.eta_burial*(densities-rho_min))
        term += np.tanh(self.eta_burial*(rho_max - densities))
        return -0.5*self.lambda_burial*term
    def _calculate_Q(self,
                     densities
                     ):
        """
        Calculate Q values for burial potential.

        Arguments:

        densities : numpy array
                  Per-frame, per-residue local densities.
        Stores and returns the Q arrays for the three burial classes.
        """
        self.q_I = self._burial_q(densities, self.rho_I_limits)
        self.q_II = self._burial_q(densities, self.rho_II_limits)
        self.q_III = self._burial_q(densities, self.rho_III_limits)
        return self.q_I, self.q_II, self.q_III
    def load_paramters(self, parameter_file):
        """
        Load parameters and determine their corresponding types
        (one parameter row per amino-acid type, 20 types total).
        """
        data = np.loadtxt(parameter_file)
        gamma_se_map_1_letter = { 'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4,
                                  'Q': 5, 'E': 6, 'G': 7, 'H': 8, 'I': 9,
                                  'L': 10, 'K': 11, 'M': 12, 'F': 13, 'P': 14,
                                  'S': 15, 'T': 16, 'W': 17, 'Y': 18, 'V': 19}
        ndx_2_letter = {value : key for key, value in gamma_se_map_1_letter.items() }
        # NOTE(review): `data` above is loaded and then the file is read again
        # here; the first load is unused -- confirm.
        self.gamma = np.loadtxt(parameter_file)
        self.types = [ndx_2_letter[i] for i in range(20)]
        return 0
    def map_types_to_residues(self, sequence):
        """
        Create a mapping between types of parameters and aminoacid residue, that
        contribute to the H. As an outcome, creates a dictionary. Keys of the
        dictionary - 1-letter aminoacid type. Values - list of integers -
        indexes of residues that corresponds to the key type
        """
        type_to_res = { type: [] for type in self.types} # 20 residue types
        for ndx, residue in enumerate(sequence):
            type_to_res[residue].append(ndx)
        return type_to_res
    def precompute_data(self, densities):
        """
        Calculate values, that are used repeatedly for different calculations
        """
        self._calculate_Q(densities)
    def calculate_derivatives(self, sequence, densities=None):
        """
        Calculate derivatives with respect of parameters
        of each type.

        Returns an (n_frames, 60) array: one block of 20 residue-type columns
        per burial class (I, II, III).
        """
        if not (hasattr(self, 'q_I') and hasattr(self, 'q_II') and hasattr(self, 'q_III')):
            self._calculate_Q(densities)
        #Getting mapping
        types_to_res = self.map_types_to_residues(sequence)
        n_params_per_type = len(self.gamma)
        n_params = 3*n_params_per_type
        n_frames = len(self.q_I)
        derivatives = np.zeros((n_frames, n_params))
        # Dirivatives will contain 3 blocks: for 1, 2, 3 density conditions
        # than a block for protein-mediated contacts
        for ndx, res_type in enumerate(self.types):
            fragment_I = np.sum(self.q_I[:, types_to_res[res_type]], axis=1)
            fragment_II = np.sum(self.q_II[:, types_to_res[res_type]], axis=1)
            fragment_III = np.sum(self.q_III[:, types_to_res[res_type]], axis=1)
            derivatives[:,ndx] = fragment_I
            derivatives[:,ndx+n_params_per_type] = fragment_II
            derivatives[:,ndx+2*n_params_per_type] = fragment_III
        return(derivatives)
    def get_parameters(self):
        """Return the burial gammas flattened column-major (class I, II, III blocks)."""
        return self.gamma.flatten('F')
    def get_n_params(self):
        """Return the total number of burial parameters (3 classes x residue types)."""
        return 3*self.gamma.shape[0]
class AWSEMMediatedInteraction(Hamiltonian):
    """
    Water/protein-mediated interaction potential.

    NOTE(review): the original docstring said "direct interaction"; the code
    below computes the AWSEM mediated term (distance tanh well II weighted by
    the density-dependent sigma_water / sigma_prot switches). Code untouched.
    """
    def __init__(self,
                 n_residues,
                 lambda_mediated=4.184,
                 rho_0 = 2.6,
                 r_min_II=0.65,
                 r_max_II=0.95,
                 eta_sigma =7.0,
                 eta=50,
                 separation=9,
                 density_separation=2):
        """Store constants and build the residue-pair mask.

        n_residues : number of residues in the chain.
        separation : minimum sequence separation |j - i| (exclusive) for a
            pair to contribute.
        NOTE(review): `density_separation` is accepted but never used in this
        class -- presumably kept for signature symmetry; confirm before removal.
        """
        # 4.184 suggests a kcal<->kJ conversion factor -- TODO confirm units.
        self.lambda_mediated = lambda_mediated
        # Boundaries of the distance "well II" switching function.
        self.r_min_II = r_min_II
        self.r_max_II = r_max_II
        # Steepness of the density switch (eq. 12) and of the distance well.
        self.eta_sigma = eta_sigma
        self.eta = eta
        # Local-density threshold between water- and protein-mediated regimes.
        self.rho_0 = rho_0
        self.type = 'awsem_mediated'
        # Build a mask selecting, out of the flat upper-triangle pair
        # enumeration (row by row), only pairs separated in sequence by more
        # than `separation` residues; remember those pairs' residue indexes.
        counter = 0
        mask_indexes = []
        residue_pairs = []
        for i in range(n_residues-1):
            for j in range(i+1, n_residues):
                if j-i > separation:
                    mask_indexes.append(counter)
                    residue_pairs.append([i,j])
                counter += 1
        self.mask = np.array(mask_indexes, dtype=int)
        self.residue_pairs = residue_pairs
    def _calculate_sigma(self, densities):
        """
        Compute sigma_water and sigma_prot switching functions
        (Equation 12 from AWSEM_MD support info).

        densities : 2D numpy array of floats
            size NxM, N - number of frames, M - number of particles.
            Contains local density for each Calpha bead in each frame.

        Returns (sigma_water, sigma_prot), each (n_frames, n_pairs);
        the two arrays sum to 1 elementwise.
        """
        n_frames = densities.shape[0]
        n_pairs = len(self.residue_pairs)
        sigma_water = np.zeros((n_frames, n_pairs))
        # 1 - tanh(eta_sigma * (rho - rho_0)): ~2 at low density, ~0 at high.
        multiplier = 1 - np.tanh(self.eta_sigma*(densities-self.rho_0))
        for ndx, pair in enumerate(self.residue_pairs):
            # 0.25 normalizes the product of two factors that each peak at 2.
            sigma_water_fragment = 0.25*np.multiply(multiplier[:,pair[0]],multiplier[:,pair[1]])
            sigma_water[:,ndx] = sigma_water_fragment
        sigma_prot = 1 - sigma_water
        return sigma_water, sigma_prot
    def _calculate_Q(self,
                     distances,
                     densities
                     ):
        """
        Calculate per-pair Q values for the mediated potential and cache them
        on self.q_water / self.q_prot.

        Arguments:
        distances, densities : numpy arrays
            per-frame pair distances and per-bead local densities.
        """
        masked_distances = distances[:, self.mask]
        # 1) Calculate tanh well II (inherited helper; see base class)
        tanh_II = self.get_tanh_well(masked_distances,
                                     self.eta,
                                     self.r_min_II,
                                     self.r_max_II)
        # 2) Calculate sigma ij (water), eq 12 in the SI; sigma_prot = 1 - sigma_water
        sigma_water, sigma_prot = self._calculate_sigma(densities)
        q_water = -1.0*self.lambda_mediated*tanh_II*sigma_water
        q_prot = -1.0*self.lambda_mediated*tanh_II*sigma_prot
        self.q_water = q_water
        self.q_prot = q_prot
        return self.q_water, self.q_prot
    def load_paramters(self, parameter_file):
        """
        Load mediated-contact parameters and build the 210 pair types.

        NOTE(review): method name is misspelled ("paramters") consistently
        across this module's Hamiltonians; kept for interface compatibility.
        """
        gamma_se_map_1_letter = { 'A': 0, 'R': 1, 'N': 2, 'D': 3, 'C': 4,
                                  'Q': 5, 'E': 6, 'G': 7, 'H': 8, 'I': 9,
                                  'L': 10, 'K': 11, 'M': 12, 'F': 13, 'P': 14,
                                  'S': 15, 'T': 16, 'W': 17, 'Y': 18, 'V': 19}
        ndx_2_letter = {value : key for key, value in gamma_se_map_1_letter.items() }
        types = []
        data = np.loadtxt(parameter_file)
        # Mediated gammas start at row 210, columns appear to be
        # [protein, water] -- TODO confirm against the parameter-file format.
        self.gamma_mediated_water = data[210:,1]
        self.gamma_mediated_prot = data[210:,0]
        # Enumerate the 210 unordered residue-type pairs as frozensets.
        for i in range(20):
            for j in range(i, 20):
                type=frozenset([ndx_2_letter[i],ndx_2_letter[j]])
                types.append(type)
        self.types = types
        return 0
    def map_types_to_pairs(self, sequence):
        """
        Create a mapping between types of parameters and residue pairs, that
        contribute to the H. As an outcome, creates a dictionary. Keys of the
        dictionary - frozen sets representing all the pair types. Values - list of integers -
        indexes of pairs in self.residue_pairs, that corresponds to the key type.
        """
        type_to_pair = { type: [] for type in self.types} # all 210 pair types
        for ndx, pair in enumerate(self.residue_pairs):
            pair_type = frozenset([sequence[pair[0]], sequence[pair[1]]])
            type_to_pair[pair_type].append(ndx)
        return type_to_pair
    def precompute_data(self, distances, densities):
        """
        Calculate values, that are used repeatedly for different calculations
        (caches self.q_water / self.q_prot).
        """
        self._calculate_Q(distances, densities)
    def calculate_derivatives(self, sequence, distances=None, densities=None):
        """
        Calculate per-frame derivatives of H with respect to the parameters
        of each pair type.
        """
        # Lazily compute the Q arrays if precompute_data() was never called.
        if not (hasattr(self, 'q_water') and hasattr(self, 'q_prot')):
            self._calculate_Q(distances, densities)
        #Getting mapping
        types_to_pair = self.map_types_to_pairs(sequence)
        n_params_water = len(self.gamma_mediated_water)
        n_params_prot = len(self.gamma_mediated_prot)
        n_params = n_params_water + n_params_prot
        n_frames = len(self.q_water)
        derivatives = np.zeros((n_frames, n_params))
        # Derivatives contain first a block for water-mediated contacts,
        # then a block for protein-mediated contacts.
        for ndx, pair_type in enumerate(self.types):
            fragment_water = np.sum(self.q_water[:, types_to_pair[pair_type]], axis=1)
            derivatives[:,ndx] = fragment_water
            fragment_prot = np.sum(self.q_prot[:, types_to_pair[pair_type]], axis=1)
            derivatives[:,ndx+n_params_water] = fragment_prot
        return(derivatives)
    def get_parameters(self):
        """Return water-mediated then protein-mediated gammas as one flat array."""
        return np.concatenate([self.gamma_mediated_water, self.gamma_mediated_prot])
    def get_n_params(self):
        """Total number of adjustable parameters (water + protein blocks)."""
        return len(self.gamma_mediated_water) + len(self.gamma_mediated_prot)
class SBMNonbondedInteraction(Hamiltonian):
    """
    Structure-based model (SBM) nonbonded interaction potential.

    Pairwise Gaussian / Gaussian-tanh wells are evaluated only for the
    residue pairs listed in the parameter-description file.
    """
    def __init__(self, n_residues, params_description_file):
        """Load the pair description and build the distance-column mask.

        Assumes the model-loading stage computed one distance per residue
        pair, enumerated over the upper triangle row by row.
        """
        self.load_parameter_description(params_description_file)
        self.n_residues = n_residues
        self.type = 'sbm_nonbonded'
        counter = 0
        mask_indexes = []
        for i in range(n_residues-1):
            for j in range(i+1, n_residues):
                # keep only the pairs present in the description file
                if frozenset((i,j)) in self.pairs:
                    mask_indexes.append(counter)
                counter += 1
        self.mask = np.array(mask_indexes, dtype=int)
    def load_parameter_description(self, file):
        """Parse the description file into pair sets and per-pair parameters.

        Columns: atom_i atom_j ndx type sigma r0 sigma_tg (atoms are 1-based).
        """
        description = np.genfromtxt(file,
                                    dtype=None,
                                    unpack=True,
                                    encoding=None,
                                    names=['atom_i', 'atom_j', 'ndx', 'type', 'sigma', 'r0', 'sigma_tg'])
        # convert 1-based atom indexes to 0-based residue pairs
        self.pairs = [frozenset((i[0]-1, i[1]-1)) for i in zip(description['atom_i'], description['atom_j'])]
        self.pair_types = description['type']
        self.sigma = description['sigma']
        self.r0 = description['r0']
        self.sigma_tg = description['sigma_tg']
        return
    def calculate_lj12gaussian(self, distance, r0, sigma_g):
        """Attractive Gaussian well centered at r0 with width sigma_g."""
        return -1.0*np.exp(-(distance - r0)**2/(2*sigma_g**2))
    def calculate_lj12gaussiantanh(self, distance, r0, sigma_t):
        """Smooth tanh step: ~1 below r0, ~0 above, switching width sigma_t."""
        return 0.5*(np.tanh((r0-distance + sigma_t)/sigma_t) + 1)
    def _calculate_Q(self,
                     distances,
                     ):
        """Evaluate the per-pair well value for every frame and cache it.

        Arguments:
        distances : numpy array
            (n_frames, n_all_pairs) distances; only the masked columns
            (pairs named in the description file) are used.
        Returns self.q with shape (n_frames, n_masked_pairs).
        """
        # dispatch table: description "type" column -> well function
        type_dict = {'LJ12GAUSSIAN' : self.calculate_lj12gaussian,
                     'LJ12GAUSSIANTANH' : self.calculate_lj12gaussiantanh}
        masked_distances = distances[:, self.mask]
        q = np.zeros(masked_distances.shape)
        # renamed loop variable from `type` to avoid shadowing the builtin
        for ndx, pair_type in enumerate(self.pair_types):
            distance = masked_distances[:, ndx]
            r0 = self.r0[ndx]
            sigma_tg = self.sigma_tg[ndx]
            q[:, ndx] = type_dict[pair_type](distance, r0, sigma_tg)
        self.q = q
        return self.q
    def load_paramters(self, parameter_file):
        """Load per-pair strength parameters.

        NOTE(review): the method name is misspelled ("paramters") but matches
        the sibling Hamiltonians in this module, so it is kept as-is.
        """
        self.params = np.loadtxt(parameter_file)
        return 0
    def precompute_data(self, distances):
        """Cache self.q, which is reused across derivative calculations."""
        self._calculate_Q(distances)
    def calculate_derivatives(self, distances=None, fraction=None):
        """Return dH/dparam per frame: the well values, optionally rescaled.

        fraction, when given, element-wise multiplies q (e.g. reweighting).
        """
        if not hasattr(self, 'q'):
            self._calculate_Q(distances)
        if fraction is None:
            return self.q
        # fix: removed stray debug print that polluted stdout on every call
        return np.multiply(self.q, fraction)
    def get_parameters(self):
        """Return the flat parameter array loaded by load_paramters()."""
        return self.params
    def get_n_params(self):
        """Number of adjustable parameters."""
        return len(self.params)
|
class Solution(object):
    def findPoisonedDuration(self, timeSeries, duration):
        """Total poisoned time for attacks at timeSeries, each lasting duration.

        :type timeSeries: List[int]
        :type duration: int
        :rtype: int
        """
        if not timeSeries:
            return 0
        # The final attack always contributes a full duration; every earlier
        # attack contributes the gap to the next one, capped at duration
        # (overlapping windows do not double-count).
        total = duration
        for prev, curr in zip(timeSeries, timeSeries[1:]):
            total += min(curr - prev, duration)
        return total
def main():
    """Run a few sanity checks of Solution.findPoisonedDuration."""
    s = Solution()
    # fix: use the parenthesized print form, valid under both Python 2
    # (prints the parenthesized expression) and Python 3 (print function);
    # the bare `print expr` statements were Python-2-only syntax.
    print(s.findPoisonedDuration([1, 4], 2))    # expect 4
    print(s.findPoisonedDuration([1, 2], 2))    # expect 3
    print(s.findPoisonedDuration([1, 2, 3, 4, 5, 6, 7, 8, 9], 1))  # expect 9
if __name__ == "__main__":
    main()
from unittest.mock import Mock, patch
from django.contrib import admin
from datetime import date
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from mep.accounts.models import Account, Subscription
from mep.books.models import Creator, CreatorType, Work
from mep.people.admin import PersonAdmin, PersonTypeListFilter
from mep.people.models import Person, PastPersonSlug
class TestPersonAdmin(TestCase):
    """Tests for PersonAdmin: merge action, CSV export, and past-slug display."""
    fixtures = ['sample_people']
    def test_merge_people(self):
        """merge_people should 303-redirect to the merge view and stash filters."""
        mockrequest = Mock()
        test_ids = ['5', '33', '101']
        # a dictionary mimes the request pattern of access
        mockrequest.session = {}
        mockrequest.POST.getlist.return_value = test_ids
        # code uses the built in methods of a dict, so making GET an
        # actual dict as it is for a request
        mockrequest.GET = {}
        resp = PersonAdmin(Person, Mock()).merge_people(mockrequest, Mock())
        assert isinstance(resp, HttpResponseRedirect)
        assert resp.status_code == 303
        assert resp['location'].startswith(reverse('people:merge'))
        assert resp['location'].endswith('?ids=%s' % ','.join(test_ids))
        # key should be set, but it should be an empty string
        assert 'people_merge_filter' in mockrequest.session
        assert not mockrequest.session['people_merge_filter']
        # Now add some values to be set as a query string on session
        mockrequest.GET = {'p': '3', 'filter': 'foo'}
        resp = PersonAdmin(Person, Mock()).merge_people(mockrequest, Mock())
        assert isinstance(resp, HttpResponseRedirect)
        assert resp.status_code == 303
        assert resp['location'].startswith(reverse('people:merge'))
        assert resp['location'].endswith('?ids=%s' % ','.join(test_ids))
        # key should be set and have a urlencoded string
        assert 'people_merge_filter' in mockrequest.session
        # test agnostic as to order since the querystring
        # works either way
        assert mockrequest.session['people_merge_filter'] in \
            ['p=3&filter=foo', 'filter=foo&p=3']
    def test_tabulate_queryset(self):
        """tabulate_queryset rows should include each person's fields and methods."""
        person_admin = PersonAdmin(model=Person, admin_site=admin.site)
        people = Person.objects.order_by('id').all()
        # create at least one subscription so that the subscription_list
        # test is meaningful
        account = people[0].account_set.first()
        Subscription.objects.create(
            start_date=date(1955, 1, 6),
            end_date=date(1955, 1, 8),
            account=account
        )
        # test that tabular data matches queryset data
        for person, person_data in zip(people, person_admin.tabulate_queryset(people)):
            # test some properties
            assert person.name in person_data
            assert person.mep_id in person_data
            assert person.updated_at in person_data
            # test some methods
            assert person.is_creator() in person_data
            assert person.has_account() in person_data
            assert person.admin_url() in person_data
            assert person.subscription_dates() in person_data
    @patch('mep.people.admin.export_to_csv_response')
    def test_export_csv(self, mock_export_to_csv_response):
        """export_to_csv should tabulate the right queryset and emit a CSV response."""
        person_admin = PersonAdmin(model=Person, admin_site=admin.site)
        with patch.object(person_admin, 'tabulate_queryset') as tabulate_queryset:
            # if no queryset provided, should use default queryset
            people = person_admin.get_queryset(Mock())
            person_admin.export_to_csv(Mock())
            # NOTE(review): Mock.called_once_with is not a real assertion --
            # it auto-creates a truthy child mock, so these checks always
            # pass. Consider assert_called_once_with; left as-is to avoid
            # changing what the suite actually enforces.
            assert tabulate_queryset.called_once_with(people)
            # otherwise should respect the provided queryset
            # fix: was Person.objects.all()[:0], an *empty* slice -- a
            # one-person queryset was clearly intended here
            first_person = Person.objects.all()[:1]
            person_admin.export_to_csv(Mock(), first_person)
            assert tabulate_queryset.called_once_with(first_person)
            export_args, export_kwargs = mock_export_to_csv_response.call_args
            # first arg is filename
            csvfilename = export_args[0]
            assert csvfilename.endswith('.csv')
            assert csvfilename.startswith('mep-people')
            # should include current date
            assert now().strftime('%Y%m%d') in csvfilename
            headers = export_args[1]
            # should use verbose name from db model field
            assert 'MEP id' in headers
            # or verbose name for property
            assert 'Admin Link' in headers
            # or title case for property with no verbose name
            assert 'Is Creator' in headers
    def test_past_slugs_list(self):
        """past_slugs_list should render a comma-separated list of old slugs."""
        person_admin = PersonAdmin(model=Person, admin_site=admin.site)
        person = Person.objects.order_by('id').first()
        # no object = no error but no value
        assert not person_admin.past_slugs_list()
        # empty string for person with no past slugs
        assert person_admin.past_slugs_list(person) == ''
        # add slugs
        old_slugs = ['old-slug1', 'old-slug2', 'snail']
        for slug in old_slugs:
            PastPersonSlug.objects.create(person=person, slug=slug)
        assert person_admin.past_slugs_list(person) == ', '.join(old_slugs)
class TestPersonTypeListFilter(TestCase):
    def test_queryset(self):
        """The filter should partition people into member / creator / uncategorized."""
        # test people: one member, one creator-with-account, one uncategorized
        humperdinck = Person(name='Humperdinck', slug='humperdinck')
        engelbert = Person(name='Engelbert', slug='engelbert')
        foo = Person(name='Foo', slug='foo')
        for person in (humperdinck, engelbert, foo):
            person.save()
        # give humperdinck and engelbert library accounts
        for person in (humperdinck, engelbert):
            account = Account.objects.create()
            account.persons.add(person)
            account.save()
        # make engelbert the creator of a work
        work = Work(title='Le foo et le bar', year=1916, mep_id='lfelb')
        work.save()
        ctype = CreatorType(1, order=1)
        ctype.save()
        Creator(creator_type=ctype, person=engelbert, work=work).save()
        # sanity check our person types outside the admin
        assert humperdinck.has_account()
        assert engelbert.has_account()
        assert not foo.has_account()
        assert engelbert.is_creator()
        assert not humperdinck.is_creator()
        assert not foo.is_creator()

        def filtered(person_type):
            # run the admin list filter over all people
            list_filter = PersonTypeListFilter(
                None, {'person_type': person_type}, Person, PersonAdmin)
            return list_filter.queryset(None, Person.objects.all())

        # members: people with accounts
        qs = filtered('member')
        assert humperdinck in qs
        assert engelbert in qs
        assert foo not in qs
        # creators only
        qs = filtered('creator')
        assert engelbert in qs
        assert humperdinck not in qs
        assert foo not in qs
        # uncategorized: neither member nor creator
        qs = filtered('uncategorized')
        assert foo in qs
        assert engelbert not in qs
        assert humperdinck not in qs
|
"""
This file contains all of the sqlite functions for scenes
"""
def init_scene(scene_db, name):
    """Create a fresh scene database with its four tables and metadata row."""
    import uuid

    from src.praxxis.sqlite import connection

    conn = connection.create_connection(scene_db)
    cur = conn.cursor()
    scene_id = str(uuid.uuid4())
    # schema for a brand-new scene database
    ddl_statements = (
        'CREATE TABLE "SceneMetadata" (ID TEXT PRIMARY KEY, Ended INTEGER, Scene TEXT)',
        'CREATE TABLE "NotebookList" (ID INTEGER PRIMARY KEY AUTOINCREMENT, Notebook TEXT, ' \
        'Library TEXT, Path TEXT, RawUrl TEXT) ',
        'CREATE TABLE "Parameters" (Parameter TEXT PRIMARY KEY, Value TEXT)',
        'CREATE TABLE "History" (Timestamp STRING, Notebook TEXT, Library TEXT, OutputPath TEXT)',
    )
    for statement in ddl_statements:
        cur.execute(statement)
    # record the scene's id and name; Ended starts at 0 (active)
    cur.execute('insert into "SceneMetadata"(ID, Ended, Scene) values(?, 0, ?)',
                (scene_id, name))
    conn.commit()
    conn.close()
def check_ended(history_db, scene, conn, cur):
    """Raise if *scene* is missing or already ended; return its Ended row.

    Raises SceneNotFoundError when no history row exists and
    EndEndedSceneError when the scene's Ended flag is set.
    (history_db and conn are unused but kept for caller compatibility.)
    """
    from src.praxxis.util import error

    cur.execute('SELECT Ended from "SceneHistory" WHERE Scene = ?', (scene,))
    row = cur.fetchone()
    if row is None:
        raise error.SceneNotFoundError(scene)
    if row[0]:
        raise error.EndEndedSceneError(scene)
    return row
def check_scene_ended(history_db, scene):
    """Raise SceneNotFoundError / SceneEndedError if *scene* is absent or ended."""
    from src.praxxis.sqlite import connection
    from src.praxxis.util import error

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('SELECT Ended from "SceneHistory" WHERE Scene = ?', (scene,))
    rows = cur.fetchall()
    conn.close()
    if not rows:
        raise error.SceneNotFoundError(scene)
    if rows[0][0]:
        raise error.SceneEndedError(scene)
def update_current_scene(history_db, scene):
    """Record *scene* as the newest active scene in the history db."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    # a fresh row with Ended = 0 makes this the most recent active scene
    cur.execute('INSERT INTO "SceneHistory"(Scene, Ended) VALUES(?, 0)', (scene,))
    conn.commit()
    conn.close()
def get_current_scene(history_db):
    """Return the most recently activated scene that has not ended.

    NOTE(review): raises IndexError when no active scene exists -- callers
    appear to guarantee at least one.
    """
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('SELECT Scene FROM "SceneHistory" WHERE Ended != 1 ORDER BY ID DESC LIMIT 0, 1')
    rows = cur.fetchall()
    conn.close()
    return rows[0][0]
def delete_scene(history_db, scene):
    """Delete *scene* from the history db.

    Already-ended scenes may still be deleted; deleting the last remaining
    active scene raises LastActiveSceneError, and a missing scene raises
    SceneNotFoundError (propagated from check_ended).
    """
    import itertools

    from src.praxxis.sqlite import connection
    from src.praxxis.util import error

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    try:
        check_ended(history_db, scene, conn, cur)
    except error.EndEndedSceneError:
        # an ended scene is still deletable
        pass
    active_scenes = get_active_scenes(history_db)
    if len(active_scenes) <= 1 and scene in list(itertools.chain(*active_scenes)):
        raise error.LastActiveSceneError(scene)
    cur.execute('DELETE FROM "SceneHistory" WHERE Scene = ?', (scene,))
    conn.commit()
    conn.close()
    return 0
def end_scene(current_scene_db, scene):
    """Set the Ended flag for *scene* in its own scene database."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(current_scene_db)
    cur = conn.cursor()
    cur.execute('UPDATE "SceneMetadata" SET Ended = 1 WHERE Scene = ?', (scene,))
    conn.commit()
    conn.close()
def mark_ended_scene(history_db, scene):
    """Flag *scene* as ended in the history db.

    check_ended propagates SceneNotFoundError / EndEndedSceneError; ending
    the last remaining active scene raises LastActiveSceneError.
    """
    import itertools

    from src.praxxis.sqlite import connection
    from src.praxxis.util import error

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    # raises if the scene is missing or has already ended
    check_ended(history_db, scene, conn, cur)
    active_scenes = get_active_scenes(history_db)
    if len(active_scenes) <= 1 and scene in list(itertools.chain(*active_scenes)):
        raise error.LastActiveSceneError(scene)
    cur.execute('UPDATE "SceneHistory" SET Ended = 1 WHERE Scene = ?', (scene,))
    conn.commit()
    conn.close()
    return 0
def mark_resumed_scene(history_db, scene):
    """Clear the Ended flag for *scene* in the history db."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('UPDATE "SceneHistory" SET Ended = 0 WHERE Scene = ?', (scene,))
    conn.commit()
    conn.close()
def resume_scene(scene_db, scene):
    """Clear the Ended flag for *scene* in its own scene database."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(scene_db)
    cur = conn.cursor()
    cur.execute('UPDATE "SceneMetadata" SET Ended = 0 WHERE Scene = ?', (scene,))
    conn.commit()
    conn.close()
def get_active_scenes(history_db):
    """Return distinct active (Ended = 0) scene names as a list of 1-tuples."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('SELECT DISTINCT Scene from "SceneHistory" WHERE Ended = 0')
    result = cur.fetchall()
    conn.close()
    return result
def get_ended_scenes(history_db):
    """Return distinct ended (Ended = 1) scene names as a list of 1-tuples."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('SELECT DISTINCT Scene from "SceneHistory" WHERE Ended = 1')
    result = cur.fetchall()
    conn.close()
    return result
def add_to_scene_history(current_scene_db, timestamp, notebook, library, outputpath):
    """Append one notebook run to the scene's History table."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(current_scene_db)
    cur = conn.cursor()
    cur.execute('INSERT INTO "History"(Timestamp, Notebook, Library, OutputPath) VALUES (?,?,?, ?)',
                (timestamp, notebook, library, outputpath))
    conn.commit()
    conn.close()
def get_notebook_history(current_scene_db):
    """Return every History row for the scene, oldest first."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(current_scene_db)
    cur = conn.cursor()
    cur.execute('SELECT * FROM "History" ORDER BY Timestamp')
    conn.commit()  # kept for parity with siblings; a SELECT has nothing to commit
    result = cur.fetchall()
    conn.close()
    return result
def get_recent_history(db_file, seq_length):
    """Return the last *seq_length* (Notebook, OutputPath) rows, oldest first."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(db_file)
    cur = conn.cursor()
    # inner query grabs the newest rows; outer query restores chronological order
    query = 'SELECT Notebook, OutputPath FROM (SELECT * FROM "History" ORDER BY Timestamp DESC LIMIT ?) ' \
            'ORDER BY Timestamp ASC '
    cur.execute(query, (seq_length,))
    conn.commit()
    result = cur.fetchall()
    conn.close()
    return result
def dump_scene_list(history_db):
    """Delete every SceneList row and reset its AUTOINCREMENT counter."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.execute('DELETE FROM "SceneList"')
    # reset the sequence so new IDs start from 1 again
    cur.execute("UPDATE SQLITE_SEQUENCE SET SEQ=0 WHERE NAME='SceneList'")
    conn.commit()
    conn.close()
def write_scene_list(history_db, scene_list):
    """Bulk-insert scene rows; *scene_list* is an iterable of 1-tuples."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    cur.executemany('INSERT INTO "SceneList" (Scene) VALUES (?)', scene_list)
    conn.commit()
    conn.close()
def get_scene_by_ord(history_db, ordinal):
    """Return the scene at 1-based position *ordinal* in the SceneList table.

    Raises SceneNotFoundError if the list has fewer than *ordinal* entries.
    """
    from src.praxxis.sqlite import connection
    from src.praxxis.util import error
    conn = connection.create_connection(history_db)
    cur = conn.cursor()
    # LIMIT <offset>, <count>: fetch exactly one row at the requested offset.
    # (Previously count was `ordinal`, fetching up to `ordinal` rows only to
    # discard all but the first; the result is identical.)
    get_scene = 'SELECT Scene FROM "SceneList" ORDER BY ID LIMIT ?, ?'
    cur.execute(get_scene, (ordinal-1, 1))
    conn.commit()
    rows = cur.fetchall()
    conn.close()
    if rows == []:
        raise error.SceneNotFoundError(ordinal)
    return rows[0][0]
def clear_history(current_scene_db):
    """Remove every row from the scene's History table."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(current_scene_db)
    cur = conn.cursor()
    cur.execute('DELETE FROM "History"')
    conn.commit()
    conn.close()
def get_notebook_path(current_scene_db, notebook_name):
    """Return the (Path,) row for *notebook_name*, or None if not listed.

    Fix: the NotebookList table (see init_scene) has columns Notebook,
    Library, Path and RawUrl -- there is no "Data" column, so the previous
    `WHERE Data = ?` query could never succeed. The lookup key is the
    notebook name, stored in the Notebook column.
    """
    from src.praxxis.sqlite import connection
    conn = connection.create_connection(current_scene_db)
    cur = conn.cursor()
    get_path = 'SELECT Path FROM "NotebookList" WHERE Notebook = ?'
    cur.execute(get_path, (notebook_name,))
    conn.commit()
    path = cur.fetchone()
    conn.close()
    return path
|
# import the necessary packages
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras import backend as bk
class LeNet_small6:
    """A tiny LeNet variant: two 5x5 conv/pool stages and one 20-unit FC layer."""

    @staticmethod
    def build(width, height, depth, classes, weightsPath=None):
        """Construct the network; optionally load pre-trained weights from weightsPath."""
        # channels-first ordering (Theano-style), matching the input_shape below
        bk.set_image_dim_ordering('th')
        layers = [
            # first CONV => RELU => POOL stage
            Convolution2D(5, (5, 5), padding="same",
                          input_shape=(depth, height, width)),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            # second CONV => RELU => POOL stage
            Convolution2D(5, (5, 5), padding="same"),
            Activation("relu"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            # FC => RELU
            Flatten(),
            Dense(20),
            Activation("relu"),
            # softmax classifier
            Dense(classes),
            Activation("softmax"),
        ]
        model = Sequential()
        for layer in layers:
            model.add(layer)
        # if a weights path is supplied (indicating the model was
        # pre-trained), load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)
        return model
# (keras) matt@x1c:~/git/notebook/lenet-mnist$ python lenet.py -s 1 -w small6_weights.hdf5
# Using Theano backend.
# /home/matt/.virtualenvs/keras/local/lib/python2.7/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
# "This module will be removed in 0.20.", DeprecationWarning)
# [INFO] downloading MNIST...
# [INFO] compiling model...
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# conv2d_1 (Conv2D) (None, 5, 28, 28) 130
# _________________________________________________________________
# activation_1 (Activation) (None, 5, 28, 28) 0
# _________________________________________________________________
# max_pooling2d_1 (MaxPooling2 (None, 5, 14, 14) 0
# _________________________________________________________________
# conv2d_2 (Conv2D) (None, 5, 14, 14) 630
# _________________________________________________________________
# activation_2 (Activation) (None, 5, 14, 14) 0
# _________________________________________________________________
# max_pooling2d_2 (MaxPooling2 (None, 5, 7, 7) 0
# _________________________________________________________________
# flatten_1 (Flatten) (None, 245) 0
# _________________________________________________________________
# dense_1 (Dense) (None, 20) 4920
# _________________________________________________________________
# activation_3 (Activation) (None, 20) 0
# _________________________________________________________________
# dense_2 (Dense) (None, 10) 210
# _________________________________________________________________
# activation_4 (Activation) (None, 10) 0
# =================================================================
# Total params: 5,890
# Trainable params: 5,890
# Non-trainable params: 0
# _________________________________________________________________
# [INFO] training...
# Epoch 1/5
# 46900/46900 [==============================] - 31s - loss: 1.9977 - acc: 0.3690
# Epoch 2/5
# 46900/46900 [==============================] - 31s - loss: 0.5848 - acc: 0.8241
# Epoch 3/5
# 46900/46900 [==============================] - 31s - loss: 0.3450 - acc: 0.8953
# Epoch 4/5
# 46900/46900 [==============================] - 31s - loss: 0.2795 - acc: 0.9145
# Epoch 5/5
# 46900/46900 [==============================] - 31s - loss: 0.2423 - acc: 0.9259
# [INFO] evaluating...
# 22912/23100 [============================>.] - ETA: 0s[INFO] accuracy: 92.62%
# [INFO] dumping weights to file...
# [INFO] Predicted: 6, Actual: 6
# [INFO] Predicted: 1, Actual: 1
# [INFO] Predicted: 4, Actual: 4
# [INFO] Predicted: 6, Actual: 6
# [INFO] Predicted: 2, Actual: 2
# [INFO] Predicted: 9, Actual: 9
# [INFO] Predicted: 7, Actual: 7
# [INFO] Predicted: 2, Actual: 2
# [INFO] Predicted: 9, Actual: 9
# [INFO] Predicted: 2, Actual: 2
|
# -*- coding: utf-8 -*-
# Auto-generated metadata for the Tencent Cloud "domain" API, version
# 2018-08-08: maps each action name to its parameter list and description.
# NOTE: the description strings are runtime data (CLI help text), so they
# are kept verbatim in their original language.
DESC = "domain-2018-08-08"
INFO = {
    "DescribeDomainPriceList": {
        "params": [
            {
                "name": "TldList",
                "desc": "查询价格的后缀列表。默认则为全部后缀"
            },
            {
                "name": "Year",
                "desc": "查询购买的年份,默认会列出所有年份的价格"
            },
            {
                "name": "Operation",
                "desc": "域名的购买类型:new 新购,renew 续费,redem 赎回,tran 转入"
            }
        ],
        "desc": "按照域名后缀获取对应的价格列表"
    },
    "CheckDomain": {
        "params": [
            {
                "name": "DomainName",
                "desc": "所查询域名名称"
            },
            {
                "name": "Period",
                "desc": "年限"
            }
        ],
        "desc": "检查域名是否可以注册"
    }
}
# Copyright 2019 Ondrej Skopek.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List
import pytest
import mt.mvae.utils as utils
from mt.mvae.components import Component, EuclideanComponent, HyperbolicComponent, SphericalComponent
from mt.mvae.components import StereographicallyProjectedSphereComponent, PoincareComponent
from mt.mvae.sampling import WrappedNormalProcedure as wn
from mt.mvae.sampling import EuclideanNormalProcedure as en
from mt.mvae.sampling import SphericalVmfProcedure as svmf
from mt.mvae.sampling import ProjectedSphericalVmfProcedure as pvmf
from mt.mvae.sampling import RiemannianNormalProcedure as rn
def component_type_and_dim_equal(xs: List[Component], ys: List[Component]) -> None:  # type: ignore
    """Assert two component lists match pairwise in dim, type, and curvature flag."""
    assert len(xs) == len(ys)
    for left, right in zip(xs, ys):
        assert left.dim == right.dim
        assert type(left) == type(right)
        assert left.fixed_curvature == right.fixed_curvature
        # Euclidean components never have trainable curvature, so skip them.
        for comp in (left, right):
            if not isinstance(comp, EuclideanComponent):
                comp.init_layers(10, True)
                assert comp.manifold.curvature.requires_grad == (not comp.fixed_curvature)
@pytest.mark.parametrize("fixed_curvature", [True, False])
def test_parse_components_empty(fixed_curvature: bool) -> None:
    """Blank specifications parse to an empty component list."""
    for spec in ("", " "):
        assert utils.parse_components(spec, fixed_curvature) == []
@pytest.mark.parametrize("fc", [True, False])
def test_parse_components_basic(fc: bool) -> None:
    """Single-component specs map to the right component class and dimension."""
    cases = [
        ("d2", [StereographicallyProjectedSphereComponent(2, fc, pvmf)]),
        ("p2", [PoincareComponent(2, fc, rn)]),
        ("e1", [EuclideanComponent(1, fc, en)]),
        ("e2", [EuclideanComponent(2, fc, en)]),
        # a leading count repeats the component
        ("3e2", [EuclideanComponent(2, fc, en) for _ in range(3)]),
        ("s2", [SphericalComponent(2, fc, svmf)]),
        ("h2", [HyperbolicComponent(2, fc, wn)]),
        ("1h2", [HyperbolicComponent(2, fc, wn)]),
    ]
    for spec, expected in cases:
        component_type_and_dim_equal(utils.parse_components(spec, fc), expected)
@pytest.mark.parametrize("fixed_curvature", [True, False])
def test_canonical_name(fixed_curvature: bool) -> None:
    """canonical_name sorts and groups components into a normalized spec string."""
    def canon(spec: str) -> str:
        return utils.canonical_name(utils.parse_components(spec, fixed_curvature))

    assert canon("3d2") == "3d2"
    assert canon("3h3,2s2,1e1,e2") == "e1,e2,3h3,2s2"
    # already-canonical names round-trip unchanged
    for model in ["e1", "e1,e2,s3", "10e2,2h2,4s32"]:
        assert canon(model) == model
@pytest.mark.parametrize("fc", [True, False])
def test_parse_components_products(fc: bool) -> None:
    """Comma-separated specs parse into ordered product-space component lists."""
    cases = [
        ("e1,e2",
         [EuclideanComponent(1, fc, en), EuclideanComponent(2, fc, en)]),
        ("e1,s2,e2",
         [EuclideanComponent(1, fc, en),
          SphericalComponent(2, fc, svmf),
          EuclideanComponent(2, fc, en)]),
        ("h3,s2,e1",
         [HyperbolicComponent(3, fc, wn),
          SphericalComponent(2, fc, svmf),
          EuclideanComponent(1, fc, en)]),
        # counts expand in place, preserving order
        ("3h3,2s2,e1",
         [HyperbolicComponent(3, fc, wn) for _ in range(3)]
         + [SphericalComponent(2, fc, svmf) for _ in range(2)]
         + [EuclideanComponent(1, fc, en)]),
    ]
    for spec, expected in cases:
        component_type_and_dim_equal(utils.parse_components(spec, fc), expected)
def test_linear_betas_incr() -> None:
    """Betas ramp linearly from 1 to 2 over 30 steps, then stay flat at 2."""
    betas = utils.linear_betas(1.0, 2.0, 30, 100)
    assert len(betas) == 100
    assert betas[0] == 1.0
    assert betas[1] > 1.0
    assert betas[28] < 2.0
    assert all(beta == 2.0 for beta in betas[29:])
def test_linear_betas_decr() -> None:
    """Betas ramp linearly from 2 down to 1 over 30 steps, then stay at 1."""
    betas = utils.linear_betas(2.0, 1.0, 30, 100)
    assert len(betas) == 100
    assert betas[0] == 2.0
    assert betas[1] < 2.0
    assert betas[28] > 1.0
    assert all(beta == 1.0 for beta in betas[29:])
|
#
# Data for analyzing causality.
# By Nick Cortale
#
# Paper:
# Detecting Causality in Complex Ecosystems
# George Sugihara et al. 2012
#
# Thanks to Kenneth Ells and Dylan McNamara
#
import numpy as np
from numpy import genfromtxt
from scipy import integrate
def coupled_logistic(rx1, rx2, b12, b21, ts_length, random_start=False):
    """Generate a bidirectionally coupled pair of logistic maps.

    Parameters
    ----------
    rx1 : float
        Growth parameter controlling the chaotic behavior of the x1 series.
    rx2 : float
        Growth parameter controlling the chaotic behavior of the x2 series.
    b12 : float
        Coupling strength of x1 onto x2.
    b21 : float
        Coupling strength of x2 onto x1.
    ts_length : int
        Number of samples to generate.
    random_start : bool
        If True, draw the initial conditions at random; otherwise use the
        fixed values 0.2 and 0.4 (initial conditions after McCracken, 2014).

    Returns
    -------
    x1 : 1d array
        Array of length (ts_length,) holding the x1 series.
    x2 : 1d array
        Array of length (ts_length,) holding the x2 series.
    """
    x1 = np.zeros(ts_length)
    x2 = np.zeros(ts_length)
    if random_start:
        x1[0] = .15 + .1 * np.random.rand()
        x2[0] = .35 + .1 * np.random.rand()
    else:
        x1[0], x2[0] = 0.2, 0.4
    # Each update of x2 uses the *previous* x1 value, matching the paper.
    for t in range(ts_length - 1):
        x1[t + 1] = x1[t] * (rx1 - rx1 * x1[t] - b21 * x2[t])
        x2[t + 1] = x2[t] * (rx2 - rx2 * x2[t] - b12 * x1[t])
    return x1, x2
def driven_rand_logistic(rx2, b12, ts_length, random_start=False):
    """Logistic map x2 forced by a uniform-random series x1.

    Parameters
    ----------
    rx2 : float
        Growth parameter controlling the chaotic behavior of the x2 series.
    b12 : float
        Coupling strength of the random forcing x1 onto x2.
    ts_length : int
        Number of samples to generate.
    random_start : bool
        If True, draw x2's initial condition at random; otherwise use 0.4.

    Returns
    -------
    x1 : array
        Uniform random forcing series in [0, 0.4), length (ts_length,).
    x2 : array
        Driven logistic series, length (ts_length,).
    """
    x1 = np.random.rand(ts_length) * .4
    x2 = np.zeros(ts_length)
    x2[0] = .35 + .1 * np.random.rand() if random_start else 0.4
    for t in range(ts_length - 1):
        x2[t + 1] = x2[t] * (rx2 - rx2 * x2[t] - b12 * x1[t])
    return x1, x2
def driving_sin(rx2, b12, ts_length, random_start=False):
    """Logistic map x2 driven by a sine wave x1.

    Parameters
    ----------
    rx2 : float
        Growth parameter controlling the chaotic behavior of the x2 series.
    b12 : float
        Coupling strength of the sine forcing x1 onto x2.
    ts_length : int
        Number of samples to generate.
    random_start : bool
        If True, draw x2's initial condition at random; otherwise use 0.4.

    Returns
    -------
    x1 : array
        Sine forcing, amplitude 0.4, 50 full periods over the series.
    x2 : array
        Driven logistic series, length (ts_length,).
    """
    x1 = np.sin(np.linspace(0, 100 * np.pi, ts_length)) * .4
    x2 = np.zeros(ts_length)
    x2[0] = .35 + .1 * np.random.rand() if random_start else 0.4
    for t in range(ts_length - 1):
        x2[t + 1] = x2[t] * (rx2 - rx2 * x2[t] - b12 * x1[t])
    return x1, x2
def lagged_coupled_logistic(rx1, rx2, b12, b21, ts_length, random_start=False):
    """Coupled logistic map where x1 is driven by random lags of x2.

    Parameters
    ----------
    rx1 : float
        Growth parameter controlling the chaotic behavior of the x1 series.
    rx2 : float
        Growth parameter controlling the chaotic behavior of the x2 series.
    b12 : float
        Coupling strength of x1 onto x2.
    b21 : float
        Coupling strength of (lagged) x2 onto x1.
    ts_length : int
        Number of samples to generate.
    random_start : bool
        If True, draw the initial conditions at random; otherwise use the
        fixed values 0.2 and 0.4 (initial conditions after McCracken, 2014).

    Returns
    -------
    x1 : array
        Array of length (ts_length,) holding the x1 series.
    x2 : array
        Array of length (ts_length,) holding the x2 series.

    Notes
    -----
    Bug fix: the original wrapped the lagged lookup in a bare ``except:``,
    but NumPy resolves negative indices by wrapping to the (still-zero)
    tail of ``x2``, so the exception never fired and the first few steps
    were silently driven by zeros.  The lag is now applied only once
    enough history exists; before that the undelayed value ``x2[i]`` is
    used, which is what the fallback branch intended.
    """
    x1 = np.zeros(ts_length)
    x2 = np.zeros(ts_length)
    if random_start:
        x1[0] = .15 + .1*np.random.rand()
        x2[0] = .35 + .1 *np.random.rand()
    else:
        x1[0] = 0.2
        x2[0] = 0.4
    for i in range(ts_length-1):
        lag = np.random.randint(1, 10)
        # Fall back to the undelayed value until enough history exists.
        driver = x2[i - lag] if i - lag >= 0 else x2[i]
        x1[i+1] = x1[i] * (rx1 - rx1 * x1[i] - b21 * driver)
        x2[i+1] = x2[i] * (rx2 - rx2 * x2[i] - b12 * x1[i])
    return x1,x2
def lorenz(sz=10000, noise=0, max_t=100.):
    """Integrate the Lorenz system and optionally add uniform noise.

    Parameters
    ----------
    sz : int
        Number of time samples to produce.
    noise : float
        Amplitude of uniform noise added to the integrated solution.
    max_t : float
        Length of the time interval [0, max_t] to integrate over.

    Returns
    -------
    X : 2D array
        Solutions of shape (sz, 3); columns are X, Y, Z.
    """
    def deriv(state, t0, sigma=10., beta=8. / 3, rho=28.0):
        # Classic Lorenz right-hand side.
        x, y, z = state
        return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]

    t = np.linspace(0, max_t, sz)
    X = integrate.odeint(deriv, [1, 1, 1], t)
    return X + noise * np.random.rand(sz, 3)
|
from django.urls import path
from App_Shop import views
# Namespace used when reversing URLs, e.g. "App_Shop:home_product".
app_name='App_Shop'
urlpatterns = [
    # NOTE(review): endpoint name 'catagory' (sic) may be referenced from
    # templates via {% url %}, so the typo is kept to avoid breaking them.
    path('create_title/',views.create_category,name='catagory'),
    path('add_product/<int:pk>/',views.create_product,name='add_product'),
    path('add_another_product/',views.create_another_product,name='add_another_product'),
    # Product list at the app root; detail pages keyed by primary key.
    path('',views.Home.as_view(),name='home_product'),
    path('product_details/<int:pk>/',views.ProductDetail.as_view(),name='product_detail'),
]
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Purpose: helpers for WeChat (weixin) server signature checking, message
# XML parsing, and message/event dispatch.
# Created: 2017-12-30
# -------------------------------------------------------------------------
import hashlib
try:
import threading
except ImportError: # pragma: no cover
threading = None
from random import choice
from weixin.messages import MESSAGE_TYPES, UnknownMessage
from weixin.events import EVENT_TYPES, SubscribeScanEvent, SubscribeEvent, SUBSCRIBE_QRSCENE
try:
import xml.etree.cElementTree as Et
except ImportError:
import xml.etree.ElementTree as Et
def check_signature(token, timestamp, nonce, signature):
    """Validate a WeChat server signature.

    WeChat signs requests by sorting (token, timestamp, nonce)
    lexicographically, concatenating them, and hashing with SHA-1.

    :param token: the public account token
    :param timestamp: request timestamp string
    :param nonce: random string from the request
    :param signature: the encrypted signature WeChat sent
    :return: True if the locally computed digest equals ``signature``,
        False otherwise
    """
    joined = "".join(sorted([token, timestamp, nonce]))
    # Python 3 requires encoding the string before hashing.
    digest = hashlib.sha1(joined.encode('utf-8')).hexdigest()
    return digest == signature
def msgtodict(msg_xml):
    """Parse a WeChat message XML document into a flat dict.

    The message XML's outermost element directly contains the fields,
    so each direct child becomes one dictionary entry.

    :param msg_xml: raw XML text of the message
    :return: dict mapping each child tag name to its text content
    """
    root = Et.fromstring(msg_xml)
    return {node.tag: node.text for node in root}
def rand_str(length, dictionary):
    """Build a random string of the given length.

    :param length: number of characters to draw
    :param dictionary: sequence of candidate characters
    :return: a string of ``length`` characters drawn with replacement
        from ``dictionary``
    """
    chars = [choice(dictionary) for _ in range(length)]
    return ''.join(chars)
class Activator:
    """Reflection helper: build an instance from a dotted class path."""

    @staticmethod
    def new_instance(class_name, *args, **kwargs):
        """Dynamically instantiate the class named by ``class_name``.

        :param class_name: fully qualified class name, e.g.
            ``"weixin.message.TextInputMessage"``
        :param args: positional arguments for the class constructor
        :param kwargs: keyword arguments for the class constructor
        :return: a new instance of the resolved class

        Example::

            dic = {"ToUserName": "2323-232323", "MsgType": "text"}
            msg = Activator.new_instance("weixin.message.TextInputMessage", dic)
        """
        module_path, _, cls_name = class_name.rpartition('.')
        # fromlist makes __import__ return the leaf module, not the package.
        module = __import__(module_path, globals(), locals(), [cls_name])
        return getattr(module, cls_name)(*args, **kwargs)
class deprecated(object):
    """
    Decorator that prints a one-time warning when the wrapped function
    is first called.

    >>> @deprecated()
    ... def f():
    ...     pass
    >>> f()
    f is deprecated.
    """
    def _wrapper(self, *args, **kwargs):
        # Only warn on the very first invocation.
        self.count += 1
        if self.count == 1:
            print(self.func.__name__, 'is deprecated.')
        return self.func(*args, **kwargs)
    def __call__(self, func):
        # Decorator protocol: remember the target and reset the counter.
        # NOTE(review): the wrapper is not wrapped with functools.wraps,
        # so the decorated function loses __name__/__doc__ -- confirm
        # that is acceptable to callers.
        self.func = func
        self.count = 0
        return self._wrapper
class Lockable(object):
    """Mixin providing an optional re-entrant lock around shared state.

    When the ``threading`` module is unavailable (module-level import
    fallback sets it to None), the lock is None and both methods are
    no-ops.
    """

    def __init__(self):
        self.lock = threading.RLock() if threading else None

    def acquire_lock(self):
        """
        Acquire the instance lock for serializing access to shared data.
        This should be released with release_lock().
        """
        if self.lock is not None:
            self.lock.acquire()

    def release_lock(self):
        """
        Release the lock acquired by calling acquire_lock().
        """
        if self.lock is not None:
            self.lock.release()
def parse_message(dic):
    """
    Resolve a parsed WeChat message dict into a concrete message instance.

    :param dic: dict of message fields (as produced by msgtodict)
    :return: an instance of the matching message class; events are
        delegated to parse_event, and unknown message types fall back
        to UnknownMessage
    """
    message_type = dic['MsgType'].lower()
    if message_type == 'event':
        return parse_event(dic)
    message_class = MESSAGE_TYPES.get(message_type, UnknownMessage)
    return message_class(dic)
def parse_event(dic):
    """
    Resolve a WeChat event dict into a concrete event instance.

    Subscribe events whose EventKey carries the QR-scene prefix (the user
    subscribed by scanning a QR code) become SubscribeScanEvent; plain
    subscriptions become SubscribeEvent.

    :param dic: dict of event fields; ``dic['MsgType']`` is 'event'
    :return: an instance of the matching event class, or UnknownMessage
        for unrecognized events
    """
    # assert (dic['MsgType'].lower() == 'event')
    event = dic["Event"].lower()
    if event == "subscribe":
        eventkey = dic.get("EventKey", "")
        if eventkey.startswith(SUBSCRIBE_QRSCENE):
            return SubscribeScanEvent(dic)
        return SubscribeEvent(dic)
    # Bug fix: the lookup previously used dic['MsgType'].lower(), which is
    # always 'event' here, so every non-subscribe event resolved to the
    # same entry.  EVENT_TYPES is keyed by the event name, so look up the
    # already-lowercased Event field instead.
    event_class = EVENT_TYPES.get(event, UnknownMessage)
    return event_class(dic)
|
import numpy as np
import theano
import theano.tensor as T
import sys, random
from theano_util import *
class MemNN:
    # Single-hop memory network trained with a margin ranking loss,
    # implemented in Theano.  NOTE: this is Python 2 code (print
    # statements, xrange); it will not run under Python 3 as-is.
    def __init__(self, n_words=1000, n_embedding=100, lr=0.01, margin=0.1, n_epochs=100):
        # Hyperparameters; n_D is the bag-of-words feature size (question
        # words occupy [0, n_words), statement words [n_words, 2*n_words)).
        self.n_embedding = n_embedding
        self.lr = lr
        self.margin = margin
        self.n_epochs = n_epochs
        self.n_words = n_words
        self.n_D = 2 * self.n_words
        self.n_embedding = n_embedding
        # Symbolic inputs: question, correct/incorrect supporting facts.
        phi_x = T.vector('phi_x')
        phi_f1 = T.vector('phi_f1')
        phi_f1bar = T.vector('phi_f1bar')
        # Supporting memories
        phi_m0 = T.vector('phi_m0')
        # True word
        phi_r = T.vector('phi_r')
        # False words
        phi_rbar = T.vector('phi_rbar')
        # Embedding matrices for the output (O) and response (R) scorers.
        self.U_O = init_shared_normal(self.n_embedding, self.n_D, 0.01)
        self.U_R = init_shared_normal(self.n_embedding, self.n_D, 0.01)
        cost = self.calc_cost(phi_x, phi_f1, phi_f1bar, phi_m0, phi_r, phi_rbar)
        params = [self.U_O, self.U_R]
        gradient = T.grad(cost, params)
        # Plain SGD updates.
        updates=[]
        for param, gparam in zip(params, gradient):
            updates.append((param, param - gparam * self.lr))
        # NOTE(review): train_function is compiled with SIX inputs, but
        # train() below calls it with only three -- this will raise at
        # runtime.  Confirm which signature is intended.
        self.train_function = theano.function(inputs = [phi_x, phi_f1, phi_f1bar, phi_m0, phi_r, phi_rbar],
                                              outputs = cost,
                                              updates = updates)
        phi_f = T.vector('phi_f')
        score_o = self.calc_score_o(phi_x, phi_f)
        self.predict_function_o = theano.function(inputs = [phi_x, phi_f], outputs = score_o)
        score_r = self.calc_score_r(phi_x, phi_f)
        self.predict_function_r = theano.function(inputs = [phi_x, phi_f], outputs = score_r)
    def calc_score(self, phi_x, phi_y, U):
        # Bilinear score: <U phi_x, U phi_y>.
        #return T.dot(T.dot(phi_x.T, self.U_O.T), T.dot(self.U_O, phi_y))
        return T.dot(U.dot(phi_x), U.dot(phi_y))
    def calc_score_o(self, phi_x, phi_y):
        # Score against the output embedding U_O.
        return self.calc_score(phi_x, phi_y, self.U_O)
    def calc_score_r(self, phi_x, phi_y):
        # Score against the response embedding U_R.
        return self.calc_score(phi_x, phi_y, self.U_R)
    def calc_cost(self, phi_x, phi_f1, phi_f1bar, phi_m0, phi_r, phi_rbar):
        # Margin ranking loss over both hops: the correct fact/answer must
        # out-score the sampled negative by at least self.margin.
        correct_score1 = self.calc_score_o(phi_x, phi_f1)
        false_score1 = self.calc_score_o(phi_x, phi_f1bar)
        correct_score2 = self.calc_score_r(phi_x + phi_m0, phi_r)
        false_score2 = self.calc_score_r(phi_x + phi_m0, phi_rbar)
        cost = (
            T.maximum(0, self.margin - correct_score1 + false_score1) +
            T.maximum(0, self.margin - correct_score2 + false_score2)
        )
        return cost
    def train(self, dataset_bow, questions, num_words):
        # Questions are tuples: (article_no, line_no, question_bow, ?, correct_stmt).
        for epoch in xrange(self.n_epochs):
            costs = []
            random.shuffle(questions)
            for i, question in enumerate(questions):
                article_no = question[0]
                line_no = question[1]
                question_phi = question[2]
                correct_stmt = question[4]
                # Sample a negative statement from the same article.
                # NOTE(review): this comprehension reuses the name `i`,
                # clobbering the enumerate index (a Python 2 scoping leak).
                seq = [i for i in range(line_no)]
                del seq[correct_stmt]
                false_stmt = random.choice(seq)
                #print article_no, line_no, correct_stmt, false_stmt
                phi_x = np.zeros((self.n_D,))
                phi_x[:num_words] = question_phi
                phi_f1 = np.zeros((self.n_D,))
                phi_f1[num_words:2*num_words] = dataset_bow[article_no][correct_stmt]
                phi_f1bar = np.zeros((self.n_D,))
                phi_f1bar[num_words:2*num_words] = dataset_bow[article_no][false_stmt]
                if article_no == 0 and line_no == 2:
                    # NOTE(review): self.predict_function is never defined;
                    # only predict_function_o / predict_function_r exist.
                    corr_score = self.predict_function(phi_x, phi_f1)
                    fals_score = self.predict_function(phi_x, phi_f1bar)
                    print "[BEFORE] corr score: %f, false score: %f" % (corr_score, fals_score)
                # NOTE(review): train_function expects six inputs (see
                # __init__) but is called with three here.
                cost = self.train_function(phi_x, phi_f1, phi_f1bar)
                costs.append(cost)
                if article_no == 0 and line_no == 2:
                    corr_score = self.predict_function(phi_x, phi_f1)
                    fals_score = self.predict_function(phi_x, phi_f1bar)
                    print "[ AFTER] corr score: %f, false score: %f" % (corr_score, fals_score)
            if epoch % 100 == 0:
                # print 'Epoch %i/%i' % (epoch + 1, self.n_epochs), np.mean(costs)
                sys.stdout.flush()
                # print np.mean(costs), np.mean(self.U_O.get_value()), np.max(self.U_O.get_value()), np.min(self.U_O.get_value())
    def predict(self, dataset, questions):
        # Pick, for each question, the statement with the highest score.
        correct_answers = 0
        wrong_answers = 0
        for i, question in enumerate(questions):
            article_no = question[0]
            line_no = question[1]
            question_phi = question[2]
            correct_stmt = question[4]
            phi_x = np.zeros((self.n_D,))
            # NOTE(review): num_words here is the script-level global set in
            # __main__, not a parameter or attribute -- fragile; confirm.
            phi_x[:num_words] = question_phi
            answer = -1
            max_score = -99999
            for i in range(line_no):
                phi_f = np.zeros((self.n_D,))
                phi_f[num_words:2*num_words] = dataset[article_no][i]
                #print phi_x, phi_f
                # NOTE(review): self.predict_function is never defined (see
                # train) -- likely predict_function_o was intended.
                score = self.predict_function(phi_x, phi_f)
                if answer == -1 or score > max_score:
                    max_score = score
                    answer = i
            if answer == correct_stmt:
                correct_answers += 1
            else:
                wrong_answers += 1
        print '%d correct, %d wrong' % (correct_answers, wrong_answers)
if __name__ == "__main__":
    # Usage: python <script> <training_dataset>; the test set path is
    # inferred by swapping 'train' -> 'test'.
    training_dataset = sys.argv[1]
    test_dataset = training_dataset.replace('train', 'test')
    dataset, questions, word_to_id, num_words = parse_dataset(training_dataset)
    # Bug fix: MemNN.__init__ takes no word_to_id keyword (it raised a
    # TypeError), and train() requires the vocabulary size as its third
    # argument.
    memNN = MemNN(n_words=num_words, n_embedding=100, lr=0.01, n_epochs=10, margin=1.0)
    memNN.train(dataset, questions, num_words)
    # Reuse the training vocabulary when featurizing the test set.
    test_dataset, test_questions, _, _ = parse_dataset(test_dataset, word_id=num_words, word_to_id=word_to_id, update_word_ids=False)
    memNN.predict(test_dataset, test_questions)
|
#!usr/bin/python
# Project Euler 3 target and the running list of known primes.
given = 600851475143
primes = [2]

def nextPrime():
    """Append the next prime number to the global ``primes`` list.

    Trial-divides candidates above primes[-1] by the known primes; the
    list always contains every prime up to primes[-1], so that suffices.
    """
    candidate = primes[-1] + 1
    while True:
        if all(candidate % p != 0 for p in primes):
            primes.append(candidate)
            return
        candidate += 1
def hPrime(n):
    """Return the largest prime factor of ``n`` (n > 1).

    Divides out each known prime in turn, extending the module-level
    ``primes`` list via nextPrime() on demand; returns the prime that
    reduces ``n`` to 1.
    """
    idx = 0
    while True:
        while n % primes[idx] == 0:
            n //= primes[idx]
            if n == 1:
                return primes[idx]
        if primes[idx] == primes[-1]:
            nextPrime()
        idx += 1
print hPrime(given)
|
from flask import render_template, request, redirect, url_for
from flask_script import Manager
from mainapp import app
from mainapp.views import user_v, logger_v
from models.user import db, User
from utils import cache
# Hook function: runs before every request.
@app.before_request
def check_login():
    """Redirect anonymous users to the login page before each request.

    The login page and the log endpoints are exempt; every other path
    requires a 'token' cookie that resolves to a user id in the cache.
    Returning None lets the request proceed.
    """
    app.logger.info(request.path + '被访问了')
    exempt_paths = ['/user/login', '/log', '/selflog']
    if request.path in exempt_paths:
        return
    # Check that the request carries a token and that it is still valid.
    token = request.cookies.get('token')
    if not token:
        return redirect(url_for('userBlue.login'))
    if not cache.get_user_id(token):
        return redirect(url_for('userBlue.login'))
@app.route('/create_db')
def create_database():
    # Dev helper: create every table defined on the SQLAlchemy models.
    db.create_all()
    return "创建数据库中的所有模型表成功"
@app.route('/drop_db')
def drop_database():
    # Dev helper: drop every table -- destructive, do not expose in prod.
    db.drop_all()
    return "删除数据库中的所有模型表成功"
@app.route('/selflog')
def selflog():
    # Placeholder endpoint; exempted from the login check above.
    return "自己的日志文件喔"
@app.route('/')
def index():
    """Render the home page with the logged-in user (if any).

    check_login normally guarantees a valid token here, but guard against
    a missing/stale token anyway: the original crashed on int(None) when
    the cookie was absent or the cached session had expired.
    """
    token = request.cookies.get('token')
    user_id = cache.get_user_id(token) if token else None
    user = User.query.get(int(user_id)) if user_id else None
    return render_template('index.html', user=user)
if __name__ == '__main__':
    # Register the blueprints on the app.
    app.register_blueprint(user_v.blue, url_prefix='/user')
    app.register_blueprint(logger_v.blue)
    # Bind the SQLAlchemy instance to this app.
    db.init_app(app)
    # NOTE(review): blueprint registration and db init happen only when
    # run as a script; under a WSGI runner they would be skipped -- confirm.
    manager = Manager(app)
    manager.run()
|
#!/usr/bin/python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
from pytube import YouTube
import moviepy.editor as mp
import os
from flask import Flask, jsonify, request
import socket
import pprint as p
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = ""
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
app = Flask(__name__)
@app.route("/")
def baixa_musica():
    # HTTP entry point: search for and download the song named in the JSON
    # body (key 'musica').  Always answers {'teste': 'ok'}, even when the
    # download fails -- errors are only printed.
    try:
        youtube_search(request.json)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
    return jsonify({'teste':'ok'})
def youtube_search(options):
    # Search YouTube for options['musica'], download the top video hit, and
    # extract its audio next to the downloaded .mp4.
    # NOTE(review): if the search returns no items, `search_result` below is
    # referenced before assignment (it leaks out of the for loop) -- NameError.
    # NOTE(review): the download folder is hard-coded, and subclip(0, 50)
    # keeps only the first 50 seconds of audio -- confirm both are intended.
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY)
    # Append 'audio' to bias the search toward audio-only uploads.
    musica = options['musica'] + ' audio'
    #musica = 'kane brown Like a Rodeo audio'
    search_response = youtube.search().list(
        q=musica,
        part="id,snippet",
        maxResults=1
    ).execute()
    videos = []
    for search_result in search_response.get("items", []):
        if search_result["id"]["kind"] == "youtube#video":
            videos.append("%s - https://www.youtube.com/watch?v=%s" % (search_result["snippet"]["title"],
                search_result["id"]["videoId"]))
    # elif search_result["id"]["kind"] == "youtube#playlist":
    #   playlists.append("%s (%s)" % (search_result["snippet"]["title"],
    #                                 search_result["id"]["playlistId"]))
    try:
        # Download the last matched video, then convert its audio to .mp3
        # (the output name swaps the trailing '4' of '.mp4' for '3').
        YouTube('http://youtube.com/watch?v=' + search_result["id"]["videoId"]).streams.first().download()
        pasta = '/home/washington/PycharmProjects/Youtube/'
        caminhos = [os.path.join(pasta, nome) for nome in os.listdir(pasta)]
        arquivos = [arq for arq in caminhos if os.path.isfile(arq)]
        musica_mp4 = [arq for arq in arquivos if arq.lower().endswith(".mp4")]
        print(musica_mp4)
        musica_mp3 = mp.VideoFileClip(musica_mp4[0]).subclip(0, 50)
        print(musica_mp3)
        musica_mp3.audio.write_audiofile(musica_mp4[0][:-1]+'3')
    except HttpError as e:
        # NOTE(review): pytube/moviepy failures are not HttpError and will
        # propagate out of this handler -- confirm intended.
        print(e)
if __name__ == "__main__":
    # NOTE(review): binds to a hard-coded LAN address; make host/port
    # configurable (env var or CLI flag) before deploying.
    #ip_host = socket.gethostbyname(socket.gethostname())
    # argparser.add_argument("--q", help="Search term", default="eminem the real slim shady audio")
    # argparser.add_argument("--max-results", help="Max results", default=1)
    # args = argparser.parse_args()
    app.run(host='192.168.2.75', port=5000)
    # try:
    #   youtube_search(args)
    # except HttpError as e:
    #   print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
import collections.abc
from datetime import datetime
import math
import os
from itertools import takewhile
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
import torch
from torch.utils.tensorboard import SummaryWriter
from fpua.models.fetchers import single_input_single_output
from fpua.models.forwarders import basic_forward
def train_single_epoch(model, data_loader, optimizer, criterion, device, clip_gradient_at=5.0,
                       fetch_model_data=single_input_single_output, feed_model_data=basic_forward, loss_names=None,
                       log_interval=25, mtll_model=None, num_main_losses=None, **kwargs):
    """General training function to train a PyTorch model for a single epoch.

    Arg(s):
        model - PyTorch model.
        data_loader - Batch generator for model training.
        optimizer - Model optimizer.
        criterion - Specific loss function for the given model. This function receives as input the output of
            model and the ground-truth target, and returns a list of batch losses (for multi-loss models). Even if
            the model has a single loss, the return value of criterion must be a list containing this single loss.
        device - Which device to use for model training. Either cuda or cpu.
        clip_gradient_at - If nonzero clips the norm of the gradient vector at the specified value. The gradient
            vector is a vector obtained by concatenating all parameters of the model.
        fetch_model_data - Function to fetch the input and output tensors for the model.
        feed_model_data - Function to feed the input tensors to the model.
        loss_names - Names for the individual losses output by criterion. If None, the losses are named loss_1,
            loss_2, ....
        log_interval - Print training statistics every log_interval batches.
        mtll_model - Optional multi-task loss weighting module; when given, the raw criterion losses are
            passed through it before summing, and it is trained alongside model.
        num_main_losses - When logging, only the last num_main_losses entries of the loss list are reported
            (auxiliary losses come first in the list). If None, all losses are reported.
        **kwargs - Any extra parameter that needs to be passed to the feed_model_data of a model.
    """
    model.train()
    if mtll_model is not None:
        mtll_model.train()
    # Default loss names cover up to 100 losses.
    loss_names = loss_names if loss_names is not None else ['loss_' + str(n) for n in range(1, 101)]
    num_examples = len(data_loader.dataset)
    for batch_idx, dataset in enumerate(data_loader):
        data, target = fetch_model_data(dataset, device=device)
        optimizer.zero_grad()
        output = feed_model_data(model, data, **kwargs)
        losses = criterion(output, target, reduction='mean')
        if mtll_model is not None:
            losses = mtll_model(losses)
        # Optimize the (possibly weighted) sum of all losses.
        loss = sum(losses)
        loss.backward()
        if clip_gradient_at:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=clip_gradient_at)
        optimizer.step()
        log_now, is_last_batch = (batch_idx % log_interval) == 0, batch_idx == (len(data_loader) - 1)
        if log_now or is_last_batch:
            # Report only the main (non-auxiliary) losses.
            num_main_losses = num_main_losses if num_main_losses is not None else len(losses)
            loss = sum(losses[-num_main_losses:])
            batch_initial_example_idx = min((batch_idx + 1) * data_loader.batch_size, num_examples)
            epoch_progress = 100 * (batch_idx + 1) / len(data_loader)
            print(f'(Train) Batch [{batch_initial_example_idx:6d}/{num_examples:6d} ({epoch_progress:3.0f}%)] ',
                  f'Loss: {loss.item(): 8.4f}', end='')
            for loss_name, single_loss in zip(loss_names[-num_main_losses:], losses[-num_main_losses:]):
                print(f' {loss_name}: {single_loss: 6.4f}', end='')
            print()
def train(model, train_loader, optimizer, criterion, epochs, device, clip_gradient_at=5.0,
          fetch_model_data=single_input_single_output, feed_model_data=basic_forward, loss_names=None,
          val_loader=None, early_stopping=False, initial_epoch=1, mtll_model=None, print_raw_losses=False,
          evaluate_train_like_test=False, num_main_losses=None, **kwargs):
    """General training function to train a PyTorch model.

    If validation data is not given, the returned checkpoint is the one obtained after training the model for the
    specified number of epochs, regardless of the final training loss. If validation data is given, the checkpoint
    returned is the one with the lowest validation loss, which could have been obtained in some epoch before the
    last one.

    Arg(s):
        model - PyTorch model.
        train_loader - Batch generator for model training.
        optimizer - Model optimizer.
        criterion - Specific loss function for the given model. This function receives as input the output of
            model and the ground-truth target, and returns a list of batch losses (for multi-loss models). Even if
            the model has a single loss, the return value of criterion must be a list containing this single loss.
        epochs - Maximum number of epochs for model training.
        device - Which device to use for model training. Either cuda or cpu.
        clip_gradient_at - If nonzero clips the norm of the gradient vector at the specified value. The gradient
            vector is a vector obtained by concatenating all parameters of the model.
        fetch_model_data - Function to fetch the input and output tensors for the model.
        feed_model_data - Function to feed the input tensors to the model.
        loss_names - Names for the individual losses output by criterion. If None, the losses are named loss_1,
            loss_2, ....
        val_loader - Batch generator for model validation.
        early_stopping - TO DO.
        initial_epoch - Epoch number to start counting from (useful when resuming training).
        mtll_model - Optional multi-task loss weighting module trained alongside model; when given, both
            weighted and raw (unweighted) losses are tracked.
        print_raw_losses - Also log the unweighted losses when an mtll_model is used.
        evaluate_train_like_test - Re-evaluate the training set in eval mode (teacher forcing off) so that
            train and validation losses are comparable.
        num_main_losses - Number of trailing (non-auxiliary) losses to report and to sum for checkpoint
            selection.
        **kwargs - Any extra parameters to be passed during training (e.g. temperature, slope,
            teacher_prob/teacher_schedule, tensorboard_log_dir, test_mode); some are annealed each epoch
            by _update_kwargs.

    Returns:
        A dictionary containing the history of train losses, the model's weights and associated epoch, and if
        val_loader is specified, the history of validation losses as well.
    """
    log_dir = kwargs.get('tensorboard_log_dir', None)
    writer = SummaryWriter(log_dir) if log_dir is not None else None
    checkpoint = {}
    train_losses, val_losses, train_raw_losses, val_raw_losses = [], [], [], []
    val_loss = float('Inf')
    for epoch in range(initial_epoch, epochs + initial_epoch):
        # Train
        # test_mode is popped so that it is not forwarded to the training
        # forward pass; it is restored before validation below.
        test_mode = kwargs.pop('test_mode', None)
        print(f'\nEpoch: [{epoch:4d}/{epochs + initial_epoch - 1:4d}]')
        train_single_epoch(model, data_loader=train_loader, optimizer=optimizer, criterion=criterion,
                           device=device, clip_gradient_at=clip_gradient_at, fetch_model_data=fetch_model_data,
                           feed_model_data=feed_model_data, loss_names=loss_names, log_interval=25,
                           mtll_model=mtll_model, num_main_losses=num_main_losses, **kwargs)
        if evaluate_train_like_test:
            # Evaluate the train set with inference-time settings (teacher
            # forcing disabled) so the numbers are comparable to validation.
            temperature, slope = kwargs.get('temperature', 1.0), kwargs.get('slope', 1.0)
            teacher_prob = 0.0 if kwargs.get('teacher_prob') is not None else None
            current_train_loss, current_train_losses, current_train_raw_loss, current_train_raw_losses = \
                test(model, data_loader=train_loader, criterion=criterion,
                     device=device, fetch_model_data=fetch_model_data,
                     feed_model_data=feed_model_data, loss_names=loss_names,
                     test_set_name='Train', mtll_model=mtll_model,
                     print_raw_losses=print_raw_losses, num_main_losses=num_main_losses,
                     temperature=temperature, slope=slope, teacher_prob=teacher_prob)
        else:
            current_train_loss, current_train_losses, current_train_raw_loss, current_train_raw_losses = \
                test(model, data_loader=train_loader, criterion=criterion,
                     device=device, fetch_model_data=fetch_model_data,
                     feed_model_data=feed_model_data, loss_names=loss_names,
                     test_set_name='Train', mtll_model=mtll_model,
                     print_raw_losses=print_raw_losses, num_main_losses=num_main_losses, **kwargs)
        train_losses.append([current_train_loss, current_train_losses])
        if mtll_model is not None:
            train_raw_losses.append([current_train_raw_loss, current_train_raw_losses])
        num_main_losses = num_main_losses if num_main_losses is not None else len(current_train_losses)
        if writer is not None:
            # TensorBoard: weighted losses under .../train_mtll when an
            # mtll model is in play, plus raw losses and learned weights.
            base_str = 'Loss/train_mtll/' if mtll_model is not None else 'Loss/train/'
            for loss_name, loss in zip(loss_names[-num_main_losses:], current_train_losses):
                writer.add_scalar(base_str + loss_name, loss, epoch)
            writer.add_scalar(base_str + 'total', current_train_loss, epoch)
            if mtll_model is not None:
                loss_weights = mtll_model.get_weights()
                for loss_name, raw_loss, loss_weight in zip(loss_names[-num_main_losses:],
                                                            current_train_raw_losses, loss_weights):
                    writer.add_scalar('Loss/train/' + loss_name, raw_loss, epoch)
                    writer.add_scalar('Loss/mtll_weight/' + loss_name, loss_weight, epoch)
                writer.add_scalar('Loss/train/total', current_train_raw_loss, epoch)
        # Validate
        kwargs['test_mode'] = test_mode
        if val_loader is not None:
            # Validation always runs with teacher forcing disabled.
            temperature, slope = kwargs.get('temperature', 1.0), kwargs.get('slope', 1.0)
            teacher_prob = 0.0 if kwargs.get('teacher_prob') is not None else None
            test_mode = kwargs.get('test_mode', False)
            current_val_loss, current_val_losses, current_val_raw_loss, current_val_raw_losses = \
                test(model, data_loader=val_loader, criterion=criterion,
                     device=device, fetch_model_data=fetch_model_data,
                     feed_model_data=feed_model_data, loss_names=loss_names,
                     test_set_name='Validation', mtll_model=mtll_model,
                     print_raw_losses=print_raw_losses, num_main_losses=num_main_losses,
                     temperature=temperature, slope=slope, teacher_prob=teacher_prob, test_mode=test_mode)
            val_losses.append([current_val_loss, current_val_losses])
            if mtll_model is not None:
                val_raw_losses.append([current_val_raw_loss, current_val_raw_losses])
            if writer is not None:
                base_str = 'Loss/val_mtll/' if mtll_model is not None else 'Loss/val/'
                for loss_name, loss in zip(loss_names[-num_main_losses:], current_val_losses):
                    writer.add_scalar(base_str + loss_name, loss, epoch)
                writer.add_scalar(base_str + 'total', current_val_loss, epoch)
                if mtll_model is not None:
                    for loss_name, raw_loss in zip(loss_names[-num_main_losses:], current_val_raw_losses):
                        writer.add_scalar('Loss/val/' + loss_name, raw_loss, epoch)
                    writer.add_scalar('Loss/val/total', current_val_raw_loss, epoch)
            # Keep the checkpoint with the best (lowest) validation loss.
            if current_val_loss < val_loss:
                val_loss = current_val_loss
                checkpoint['epoch'] = epoch
                checkpoint['model_state_dict'] = model.state_dict()
                if mtll_model is not None:
                    checkpoint['mtll_model_state_dict'] = mtll_model.state_dict()
        else:
            # No validation set: the checkpoint is simply the latest epoch.
            checkpoint['epoch'] = epoch
            checkpoint['model_state_dict'] = model.state_dict()
            if mtll_model is not None:
                checkpoint['mtll_model_state_dict'] = mtll_model.state_dict()
        # Anneal temperature/slope/teacher_prob for the next epoch.
        _update_kwargs(kwargs, epoch, epochs)
    checkpoint['train_losses'] = train_losses
    checkpoint['val_losses'] = val_losses
    checkpoint['train_raw_losses'] = train_raw_losses
    checkpoint['val_raw_losses'] = val_raw_losses
    if 'temperature' in kwargs:
        checkpoint['temperature'] = kwargs['temperature']
    if 'slope' in kwargs:
        checkpoint['slope'] = kwargs['slope']
    if writer is not None:
        writer.close()
    return checkpoint
def _update_kwargs(kwargs, epoch, epochs):
    """Anneal scheduled hyperparameters in-place after each epoch.

    temperature decays by 10% per epoch (floored at 0.1), slope grows
    linearly with the epoch (capped at 5.0), and teacher_prob follows the
    configured teacher-forcing schedule.
    """
    if 'temperature' in kwargs:
        kwargs['temperature'] = max(0.1, kwargs['temperature'] * 0.90)
    if 'slope' in kwargs:
        kwargs['slope'] = min(5.0, 1.0 + 0.04 * epoch)
    schedule = kwargs.get('teacher_schedule')
    if schedule is None:
        return
    # Since we update teacher_prob after the first epoch, the probability
    # for the first epoch is whatever the caller passed to train().
    if schedule == 'linear':
        # Some teacher help in the first half of training, none afterwards.
        prob = max(0, 1 - (2 / epochs) * epoch)
    elif schedule == 'exponential':
        prob = 0.9 ** epoch
    elif schedule == 'inverse_sigmoid':
        prob = 10 / (10 + math.exp(3 * epoch / 10))
    elif schedule == 'random':
        prob = 0.5
    elif schedule == 'always':
        prob = 1.0
    else:
        prob = 0.0
    kwargs['teacher_prob'] = prob
def test(model, data_loader, criterion, device, fetch_model_data=single_input_single_output,
         feed_model_data=basic_forward, loss_names=None, test_set_name='Test', mtll_model=None,
         print_raw_losses=False, num_main_losses=None, **kwargs):
    """General testing function to test a PyTorch model.

    Arg(s):
        model - PyTorch model.
        data_loader - Batch generator for model testing.
        criterion - Specific loss function for the given model. This function receives as input the output of
            model and the ground-truth target, and returns a list of batch losses (for multi-loss models). Even if
            the model has a single loss, the return value of criterion must be a list containing this single loss.
        device - Which device to use for model testing. Either cuda or cpu.
        fetch_model_data - Function to fetch the input and output tensors for the model.
        feed_model_data - Function to feed the input tensors to the model.
        loss_names - Names for the individual losses output by criterion. If None, the losses are named loss_1,
            loss_2, ....
        test_set_name - Optional name given to the set being evaluated. Useful for logging purposes.
        mtll_model - Optional multi-task loss weighting module; when given, both the weighted and the raw
            (unweighted) losses are accumulated.
        print_raw_losses - Also log the raw losses when an mtll_model is used.
        num_main_losses - The final test loss is the sum of all non-auxiliary losses. Auxiliary losses should be in
            the beginning of the output list.
        **kwargs - Any extra parameters that need to be passed to the feed_model_data function.

    Returns:
        Tuple (total loss, per-loss list, total raw loss or None, raw per-loss list or None); the raw
        entries are None unless an mtll_model was given.
    """
    model.eval()
    if mtll_model is not None:
        mtll_model.eval()
    test_raw_losses = None
    test_losses = None
    with torch.no_grad():
        for dataset in data_loader:
            data, target = fetch_model_data(dataset, device=device)
            output = feed_model_data(model, data, **kwargs)
            raw_losses = criterion(output, target, reduction='mean')
            if mtll_model is not None:
                # Accumulate raw losses separately from the weighted ones.
                if test_raw_losses is None:
                    test_raw_losses = [raw_loss.item() for raw_loss in raw_losses]
                else:
                    test_raw_losses = [test_raw_loss + raw_loss.item()
                                       for test_raw_loss, raw_loss in zip(test_raw_losses, raw_losses)]
                losses = mtll_model(raw_losses)
            else:
                losses = raw_losses
            if test_losses is None:
                test_losses = [loss.item() for loss in losses]
            else:
                test_losses = [test_loss + loss.item() for test_loss, loss in zip(test_losses, losses)]
    # Average over batches and keep only the main (non-auxiliary) losses.
    num_main_losses = num_main_losses if num_main_losses is not None else len(test_losses)
    test_losses = [test_loss / len(data_loader) for test_loss in test_losses][-num_main_losses:]
    total_test_loss = sum(test_losses)
    name_fmt_str = '({})'
    loss_fmt_str = 'Loss: {: 7.4f}'
    print(name_fmt_str.format(test_set_name).rjust(12, ' '), loss_fmt_str.format(total_test_loss), end='')
    loss_names = loss_names[-num_main_losses:] if loss_names is not None else ['loss_' + str(n) for n in range(1, 101)]
    for loss_name, loss in zip(loss_names, test_losses):
        print(' ', loss_name + ':', '{: 6.4f}'.format(loss), end='')
    print()
    total_test_raw_loss = None
    if test_raw_losses is not None:
        test_raw_losses = [test_raw_loss / len(data_loader) for test_raw_loss in test_raw_losses][-num_main_losses:]
        total_test_raw_loss = sum(test_raw_losses)
        if print_raw_losses:
            # Second log line: the unweighted losses for comparison.
            name_fmt_str = '({})'
            loss_fmt_str = 'Loss: {: 7.4f}'
            print(name_fmt_str.format(test_set_name).rjust(12, ' '), loss_fmt_str.format(total_test_raw_loss), end='')
            loss_names = loss_names[-num_main_losses:] if loss_names is not None else ['loss_' + str(n)
                                                                                      for n in range(1, 101)]
            for loss_name, raw_loss in zip(loss_names, test_raw_losses):
                print(' ', loss_name + ':', '{: 6.4f}'.format(raw_loss), end='')
            print()
    return total_test_loss, test_losses, total_test_raw_loss, test_raw_losses
def normalise(x, strategy='standard', with_centering=True, quantile_range=(25.0, 75.0), scaler=None):
    """Normalise an n-dimensional array feature-wise over its last axis.

    Arg(s):
        x - n-dimensional ndarray to normalise.
        strategy - One of standard, min_max, or robust; ignored when scaler is given.
        with_centering - Passed through to the scaler factory.
        quantile_range - Passed through to the scaler factory (robust strategy only).
        scaler - If given, applied as-is instead of fitting a new scaler from strategy.
    Returns:
        The normalised input tensor (original shape preserved) and the scaler used.
    """
    original_shape = x.shape
    # Scalers operate on 2-D data: flatten everything but the feature axis.
    flat = x.reshape(-1, original_shape[-1])
    if scaler is None:
        scaler = select_scaler(strategy, with_centering=with_centering, quantile_range=quantile_range)
        flat = scaler.fit_transform(flat)
    else:
        flat = scaler.transform(flat)
    return flat.reshape(*original_shape), scaler
def nan_to_value(x, value, inplace=True):
    """Replace every NaN entry of x with value.

    Arg(s):
        x - An ndarray.
        value - Replacement for the NaN entries.
        inplace - If True, modify x itself; otherwise operate on a copy.
    Returns:
        The array (x itself, or its copy) with NaNs replaced by value.
    """
    target = x if inplace else np.copy(x)
    nan_mask = np.isnan(target)
    target[nan_mask] = value
    return target
def select_scaler(strategy, with_centering=True, quantile_range=(25.0, 75.0)):
    """Returns an instance of a *Scaler, selected according to input strategy.

    Arg(s):
        strategy - One of standard, min_max, or robust.
        with_centering - If True, subtract the mean of the data from the data in case strategy is standard, or
            subtract the median of the data from the data in case strategy is robust.
        quantile_range - In case the strategy is robust, divide the data by this quantile range.
    Returns:
        An instance of a *Scaler. * is one of Standard, MinMax, or Robust.
    Raises:
        AssertionError if strategy is not one of the supported names.
    """
    assert strategy in {'standard', 'min_max', 'robust'}, 'strategy must be one of: standard, min_max, or robust.'
    # Map strategies to factories so only the requested scaler is instantiated
    # (previously all three were eagerly constructed just to return one).
    factories = {'standard': lambda: StandardScaler(with_mean=with_centering),
                 'min_max': MinMaxScaler,
                 'robust': lambda: RobustScaler(with_centering=with_centering, quantile_range=quantile_range),
                 }
    return factories[strategy]()
def numpy_to_torch(*arrays, device='cpu'):
    """Convert any number of numpy arrays to PyTorch tensors on the given device, preserving order."""
    tensors = []
    for array in arrays:
        tensors.append(torch.from_numpy(array).to(device))
    return tensors
def save_checkpoint(log_dir, checkpoint, checkpoint_name=None, include_timestamp=True):
    """Save a model checkpoint to disk as a .tar file.

    Arg(s):
        log_dir - Directory in which to write the checkpoint file.
        checkpoint - A dictionary containing the model checkpoint and other metadata such as data scalers and
            model creation arguments.
        checkpoint_name - If given, use that as the file name to save. Otherwise, the file name is 'checkpoint'.
        include_timestamp - If True, prefix the file name with the current UTC timestamp.
    """
    base_name = checkpoint_name if checkpoint_name is not None else 'checkpoint'
    if include_timestamp:
        stamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S')
        base_name = stamp + '_' + base_name
    destination = os.path.join(log_dir, base_name + '.tar')
    torch.save(checkpoint, destination)
    print('log files written to %s' % destination)
def grab_subset(*args, n=5):
    """Take the first n examples from each tensor in args.

    We assume that the first dimension of each tensor is the batch dimension.

    Arg(s):
        args - Tensors (or any sliceable sequences) to subset.
        n - Number of leading examples to keep from each.
    Returns:
        A list holding the first n entries of every input, in the same order.
    """
    return list(map(lambda tensor: tensor[:n], args))
def create_alias(hidden_size, epochs, batch_size, input_seq_len, output_seq_len, length_activation,
                 learning_rate=None, transition_learning_rate=None, nc=None, embedding_size=None,
                 loss_weights=None, teacher_schedule=None, validation_data=None, l2_reg=0.0,
                 multi_task_loss_learner=False, num_layers=1, normalisation=None, quantile_range=None,
                 optimizer=None, input_normalisation=None, obs_at_least_k_percent=None, share_encoder_decoder=False,
                 share_embeddings=False, share_predictions=False, disable_parent_input=None, disable_encoder_loss=None,
                 embedding_nonlinearity=None, mask_softmax=False, positional_embedding=False,
                 add_skip_connection=False, weight_initialisation='pytorch', clip_gradient_at=5.0,
                 use_plain_gru_cell=None, disable_transition_layer=False, use_hmgruv2_cell=False,
                 disable_gradient_from_child=False, use_lstm_cell=False, weight_decay_decoder_only=False,
                 pretrain_coarse=0, model_v2=False, model_v3=False, do_not_reset_after_flush=False,
                 always_include_parent_state=False, with_final_action=False,
                 baseline_type=None, action_level=None, test_mode=False, input_soft_parent=False):
    """Build a human-readable checkpoint name that encodes the experiment configuration.

    Each hyper-parameter that is set (or differs from its default) appends a short tag
    (e.g. '_lr1e-03', '_mtll', '_use-lstm'), so checkpoint file names are self-describing
    and unique per configuration. Options left at their defaults contribute nothing,
    keeping the name as short as possible.

    Returns:
        The assembled checkpoint name string.
    """
    # Core architecture: hidden size(s), epochs, batch size.
    checkpoint_name = 'hs'
    if isinstance(hidden_size, collections.abc.Sequence):
        for hs in hidden_size:
            checkpoint_name += '-' + str(hs)
    else:
        checkpoint_name += str(hidden_size)
    checkpoint_name += '_' + str(epochs) + 'e_' + 'bs' + str(batch_size)
    # Length-activation tag; a pair with two identical entries collapses to one.
    if isinstance(length_activation, collections.abc.Sequence) and not isinstance(length_activation, str):
        if length_activation[0] == length_activation[1]:
            length_activation = length_activation[:1]
        checkpoint_name += '_act'
        for la in length_activation:
            checkpoint_name += '-' + la
    else:
        checkpoint_name += '_act-' + length_activation
    if pretrain_coarse:
        checkpoint_name += '_pce' + str(pretrain_coarse)
    if num_layers:
        checkpoint_name += '_h' + str(num_layers)
    # Optimisation hyper-parameters.
    if learning_rate is not None:
        checkpoint_name += '_lr' + '{:.0e}'.format(learning_rate)
    if transition_learning_rate is not None:
        checkpoint_name += '_tlr' + '{:.0e}'.format(transition_learning_rate)
    if optimizer is not None:
        checkpoint_name += '_opt-' + str(optimizer)
    # Weight-sharing flags.
    if share_embeddings:
        checkpoint_name += '_sh-emb'
    if share_encoder_decoder:
        checkpoint_name += '_sh-ed'
    if share_predictions:
        checkpoint_name += '_sh-pred'
    # Embedding configuration; nested sequences are summarised by their max size.
    if embedding_size is not None:
        checkpoint_name += '_es'
        if isinstance(embedding_size, collections.abc.Sequence):
            for es in embedding_size:
                if isinstance(es, collections.abc.Sequence):
                    checkpoint_name += '-' + str(max(es))
                else:
                    checkpoint_name += '-' + str(es)
        else:
            checkpoint_name += str(embedding_size)
    if embedding_nonlinearity is not None:
        checkpoint_name += '_' + embedding_nonlinearity
    if positional_embedding:
        checkpoint_name += '_pos-emb'
    # Regularisation.
    if l2_reg:
        checkpoint_name += '_l2r' + '{:.0e}'.format(l2_reg)
        if weight_decay_decoder_only:
            checkpoint_name += '-dec'
    if teacher_schedule is not None:
        checkpoint_name += '_ts' + teacher_schedule
    if multi_task_loss_learner:
        checkpoint_name += '_mtll'
    # Input/output normalisation strategies (robust includes its quantile range).
    if input_normalisation is not None:
        checkpoint_name += '_istd-' + str(input_normalisation)
        if input_normalisation == 'robust' and quantile_range is not None:
            for qr in quantile_range:
                checkpoint_name += '-' + str(qr)
    if normalisation is not None:
        if normalisation == 'robust':
            checkpoint_name += '_robust'
            if quantile_range is not None:
                for qr in quantile_range:
                    checkpoint_name += '-' + str(qr)
        elif normalisation == 'min_max':
            checkpoint_name += '_min-max'
        elif normalisation == 'standard':
            checkpoint_name += '_std'
    # Architecture ablation switches.
    if disable_parent_input is not None:
        if disable_parent_input:
            checkpoint_name += '_rm-par-inp'
    if disable_encoder_loss is not None:
        if disable_encoder_loss:
            checkpoint_name += '_rm-enc-loss'
    if mask_softmax:
        checkpoint_name += '_mask-sm'
    if add_skip_connection:
        checkpoint_name += '_skip-con'
    if weight_initialisation != 'pytorch':
        checkpoint_name += '_winit-' + weight_initialisation
    if clip_gradient_at != 5.0:
        checkpoint_name += '_gclip-' + str(clip_gradient_at)
    # Recurrent cell choice (mutually exclusive tags).
    if use_plain_gru_cell is not None and use_plain_gru_cell:
        checkpoint_name += '_use-gru'
    elif use_hmgruv2_cell:
        checkpoint_name += '_use-hmgruv2'
    elif use_lstm_cell:
        checkpoint_name += '_use-lstm'
    if disable_transition_layer:
        checkpoint_name += '_rm-tl'
    if disable_gradient_from_child:
        checkpoint_name += '_rm-childgrad'
    # Model variants and behaviour flags.
    if model_v2:
        checkpoint_name += '_mv2'
    elif model_v3:
        checkpoint_name += '_mv3'
    if input_soft_parent:
        checkpoint_name += '_isp'
    if do_not_reset_after_flush:
        checkpoint_name += '_nrh'
    if always_include_parent_state:
        checkpoint_name += '_aips'
    if with_final_action:
        checkpoint_name += '_wfa'
    if baseline_type is not None:
        checkpoint_name += '_bt' + str(baseline_type)
        if baseline_type == 0 and action_level is not None:
            checkpoint_name += action_level
    if test_mode:
        checkpoint_name += '_tm'
    if loss_weights is not None:
        checkpoint_name += '_lw'
        for loss_w in loss_weights:
            checkpoint_name += '-' + str(loss_w)
    # Data configuration: sequence lengths, number of classes, filtering, validation.
    checkpoint_name += '_isq' + str(input_seq_len) + '_osq' + str(output_seq_len)
    if nc is not None:
        checkpoint_name += '_nc' + str(nc)
    if obs_at_least_k_percent is not None:
        checkpoint_name += '_atleast' + str(obs_at_least_k_percent)
    if validation_data is not None:
        checkpoint_name += '_with-val'
    return checkpoint_name
def extract_info_from_str(training_data):
    """Extract metadata fields encoded in a dataset file name / identifier string.

    Arg(s):
        training_data - String such as 'data_avg12_nc48_fa1_atleast25.txt'. Each field value
            is the run of characters following its marker, up to the next '_' or '.'.
    Returns:
        Tuple (avg_num_actions_per_video, nc, fa, obs_at_least); each entry is the extracted
        string, or None when its marker is absent.
    """
    def _field(marker):
        # Return the substring following marker (up to the next '_' or '.'), or None if absent.
        idx = training_data.find(marker)
        if idx == -1:
            return None
        tail = training_data[idx + len(marker):]
        return ''.join(takewhile(lambda c: c not in {'_', '.'}, tail))

    # Same four lookups as before, now without the fourfold duplicated find/takewhile logic.
    avg_num_actions_per_video = _field('avg')
    nc = _field('nc')
    fa = _field('_fa')
    obs_at_least = _field('_atleast')
    return avg_num_actions_per_video, nc, fa, obs_at_least
def one_hot_to_index(x):
    """Convert one-hot vectors (last axis) to class indices, propagating all-NaN rows as NaN.

    Arg(s):
        x - ndarray of shape (..., num_categories). Rows that are entirely NaN are treated
            as missing observations and map to NaN in the output.
    Returns:
        ndarray of shape (..., 1) holding the argmax index per row, or NaN for missing rows.
    """
    original_shape = x.shape
    x = x.reshape(-1, original_shape[-1])
    # A row whose nansum is 0.0 is taken to be all-NaN (missing); mark it with NaN so the
    # multiplication below NaNs out its index.
    na_indices = np.nansum(x, axis=-1, keepdims=True)
    na_indices[na_indices == 0.0] = np.nan
    # Bug fix: use inplace=False so the caller's array is not mutated through the reshape
    # view (the previous inplace=True default wrote -1.0 into the input's NaN entries).
    x = nan_to_value(x, value=-1.0, inplace=False)
    x_new = np.nanargmax(x, axis=-1).reshape(-1, 1)
    x_new = x_new * na_indices
    x_new = x_new.reshape(*original_shape[:-1], 1)
    return x_new
def num_workers_from_batch_size(batch_size):
    """Select number of workers for PyTorch's DataLoader class according to the batch size.

    Arg(s):
        batch_size - Batch size for model training.
    Returns:
        The number of workers for PyTorch's DataLoader class according to the input batch size.
    """
    # Guard-clause ladder over the same thresholds as before: <=64 -> 0, <=512 -> 2,
    # <2048 -> 4, otherwise 8.
    if batch_size <= 64:
        return 0
    if batch_size <= 512:
        return 2
    if batch_size < 2048:
        return 4
    return 8
def set_initial_teacher_prob(teacher_schedule):
    """Set initial probability for teacher forcing depending on the schedule.

    Arg(s):
        teacher_schedule - String containing the name of the schedule.
    Returns:
        The initial probability according to the input schedule (1.0 for unknown schedules).
    """
    initial_probs = {'inverse_sigmoid': 0.9, 'random': 0.5}
    return initial_probs.get(teacher_schedule, 1.0)
def maybe_denormalise(y, scaler):
    """Undo normalisation on y with scaler, if one is given; shape is preserved either way."""
    original_shape = y.shape
    # Scalers operate on 2-D data: flatten everything but the feature axis.
    flat = y.reshape(-1, original_shape[-1])
    if scaler is not None:
        flat = scaler.inverse_transform(flat)
    return flat.reshape(*original_shape)
def logit2one_hot(y_dec_logits):
    """Translate a tensor of logits to a one-hot representation.

    Given a tensor of shape (batch_size, num_categories) containing the logits of each
    category, create a one-hot tensor of the same shape with a 1.0 at each row's argmax.

    Arg(s):
        y_dec_logits - Tensor of shape (batch_size, num_categories).
    Returns:
        A one-hot representation of y_dec_logits.
    """
    winners = y_dec_logits.argmax(dim=-1).long()
    one_hot = torch.zeros_like(y_dec_logits)
    one_hot[torch.arange(one_hot.size(0)), winners] = 1.0
    return one_hot
|
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import argparse
import subprocess
def main(args):
    """Rank training reactions by hidden-state similarity to one query reaction.

    Loads the query's saved hidden-state array, averages it across tokens, scores every
    training example by normalised dot product or inverse euclidean distance, prints the
    top-10 matches (looked up in the untokenized dataset file via sed), and optionally
    saves a histogram of all similarity scores.
    """
    input = args.input  # NOTE(review): shadows the builtin input(); rename if this code is touched again
    N = args.size  # size of USPTO-15k training set = 9236
    scores = np.empty(N)
    X = np.load(args.dir+'hidden_states_b' + str(input) + '.npy')
    X = np.mean(X, axis=0)  # average across all tokens to return vector of size 256
    if args.type=='dotproduct': # prenormalize
        X = X / np.linalg.norm(X)
    for i in tqdm(range(N)):
        Y = np.load(args.dir+'hidden_states_b' + str(i) + '.npy')
        Y = np.mean(Y, axis=0)
        if args.type == 'dotproduct':
            Y = Y / np.linalg.norm(Y)
            scores[i] = np.vdot(X, Y)  # cosine similarity (both vectors are unit-norm)
        else:
            dist = np.linalg.norm(X - Y)
            scores[i]=1/(1+dist)  # map distance to a (0, 1] similarity score
    inds = np.argpartition(scores, -10)[-10:]  # index of top-10 scoring reactions (should contain original)
    print('\nOriginal reaction:')
    # NOTE(review): assumes the top-scoring entry is the query itself — confirm.
    # sed 'Nq;d' prints line N of the dataset file; +1 converts 0-based index to 1-based line.
    bashCommand = "sed '" + str(np.argmax(scores) + 1) + "q;d' "+args.dataset
    # NOTE(review): shell=True with an interpolated path — args.dataset must be trusted input.
    subprocess.call(bashCommand, shell=True)
    print('\nMost similar reactions:')
    for ind in inds[np.argsort(scores[inds])]:  # iterate the top-10 in ascending score order
        bashCommand = "sed '" + str(ind + 1) + "q;d' "+args.dataset
        subprocess.call(bashCommand, shell=True)
        print('score = {:.3f}'.format(scores[ind]))
    if args.plot:
        plt.hist(scores, bins=1000)
        plt.ylabel('Frequency')
        if args.type=='dotproduct':
            plt.xlabel('normalized dot-product')
            plt.savefig('dot_'+str(input)+'_hist.png')
        else:
            plt.xlabel('1/1+euc-dist')
            plt.savefig('dist_'+str(input)+'_hist.png')
if __name__ == '__main__':
    # Command-line interface: which hidden state to query, how many training examples to
    # scan, the similarity metric, and where the saved arrays / raw dataset live.
    parser = argparse.ArgumentParser()
    parser.add_argument('-input', type=int, default=481,
                        help='Index of hiddenstate to compare against.')
    parser.add_argument('-size', type=int, default=9236,
                        help='Size of training set to search through.')
    parser.add_argument('-type', type=str, default='dist',
                        help='Method for calculating similarity', choices=['dist', 'dotproduct'])
    parser.add_argument('-dir', type=str, default='../../data/data/USPTO_15k/hidden_states/',
                        help='Location of saved hidden state .npy arrays.')
    parser.add_argument('-dataset', type=str, default='USPTO-15k.txt',
                        help='Location of untokenized reaction dataset for extraction of reactions.')
    parser.add_argument('-plot', action='store_true',
                        help='Whether or not to plot histogram of similarity scores.')
    args = parser.parse_args()
    main(args)
|
from rest_framework import serializers
from .models import Personas
def validar_edad(source):
    """Field validator: reject ages greater than 100.

    Raises serializers.ValidationError when source > 100; returning without raising
    (as DRF validators do) means the value is accepted. The previous
    if/pass/else/raise structure and trailing dead `pass` are replaced by a guard clause.
    """
    if source > 100:
        raise serializers.ValidationError("No hay nadie mayor a 100")
class PeopleGetName(serializers.Serializer):
    """Minimal serializer exposing only a person's name (for name-only lookups/responses)."""
    nombre = serializers.CharField(max_length=100)
class PersonasCreationSerlializer(serializers.Serializer) :
    """Serializer for creating Personas records.

    NOTE(review): the class name misspells 'Serializer'; renaming would break importers,
    so it is kept as-is.
    """
    nombre = serializers.CharField(max_length=100)
    # Age is validated by validar_edad (values above 100 are rejected).
    edad = serializers.IntegerField(validators=[validar_edad])
    sexo = serializers.CharField(max_length=5)
    tipo_de_persona = serializers.CharField(max_length=50)

    def create(self, validated_data):
        """Persist a new Personas row from the validated payload."""
        return Personas.objects.create(**validated_data)
class PersonasSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every field of Personas (read/list endpoints)."""
    class Meta:
        model = Personas
        fields = '__all__'
        # fields = ['nombre','edad','sexo','tipo_de_persona','id']
class PersonasModifieSerliazer(serializers.Serializer) :
    """Serializer exposing only tipo_de_persona, for partial updates of that field.

    NOTE(review): the class name misspells 'Serializer'; kept to avoid breaking importers.
    """
    tipo_de_persona = serializers.CharField(max_length=50)
    # def create(self, validated_data):
    #     return Personas.objects.create(**validated_data)
    # def update(self, instance, validated_data):
    #     instance.tipo_de_persona = validated_data.get('tipo_de_persona', instance.tipo_de_persona)
    #     return instance
# class PersonasSerializer(serializers.ModelSerializer):
# class Meta:
# model = Personas
# # fields = '__all__'
# fields = ('nombre','edad','sexo','tipo_de_persona')
|
import sys
from magma import *
from mantle.xilinx.spartan6.RAM import RAM128
from loam.shields.megawing import MegaWing
# Wire a 128-entry RAM, initialised to the alternating bit pattern 0,1,0,1,..., so that
# the board's seven switches form the read address and LED 0 shows the addressed bit.
megawing = MegaWing()
megawing.Clock.on()
megawing.Switch.on(7)  # enable 7 switch inputs (the RAM address width)
megawing.LED.on(1)  # enable a single LED output
main = megawing.main()
ram = RAM128(64*[0,1])  # 128 init bits: 64 repetitions of [0, 1]
ADDR = main.SWITCH[0:7]  # 7-bit address taken from the switches
# NOTE(review): assumes RAM128's call signature is (address, data_in, write_enable),
# i.e. data-in 0 and write-enable 0 make this a read-only ROM — confirm against mantle docs.
wire(ram(ADDR, 0, 0), main.LED[0])
compile(sys.argv[1], main)  # emit output named by the first command-line argument
|
from unittest.mock import Mock, patch, PropertyMock
from scripts.generate_hamilton_input_UPL import GenerateHamiltonInputUPL
from tests.test_common import TestEPP, NamedMock
def fake_all_inputs1(unique=False, resolve=False):
    """Return a tuple of 2 mocked artifacts with container names and locations defined as a tuple. The first mocked
    artifact is assigned a name as this is used in one of the error messages during testing"""
    return (
        NamedMock(real_name='artifact_input1', id='ai1', type='Analyte', container=NamedMock(real_name='Name1'),
                  location=('ContainerVariable1', 'A:1')),
        Mock(id='ai2', type='Analyte', container=(NamedMock(real_name='Name2')),
             location=('ContainerVariable1', 'A:1'))
    )
def fake_all_inputs2(unique=False, resolve=False):
    """Return a tuple of 10 mocked artifacts, each in its own container 'Name<i>' at well A:1.

    Ten distinct input containers is one more than the script allows, which lets tests
    exercise the too-many-containers error path.
    """
    return tuple(
        Mock(id='ai{}'.format(i), type='Analyte',
             container=NamedMock(real_name='Name{}'.format(i)),
             location=('ContainerVariable1', 'A:1'))
        for i in range(1, 11)
    )
def fake_outputs_per_input1(inputid, Analyte=False):
    # outputs_per_input is a list of all of the outputs per input obtained from the process by searching with input id
    # the outputs should have the container name and the location defined.
    # All outputs share one container ('OutputName1'); note that ai9 and ai10 both land in
    # well A:2, mimicking the real per-input lookup shape used by the happy-path tests.
    outputs = {
        'ai1': [Mock(id='ao1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'A:1'))],
        'ai2': [Mock(id='ao1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'B:1'))],
        'ai3': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'C:1'))],
        'ai4': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'D:1'))],
        'ai5': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'E:1'))],
        'ai6': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'F:1'))],
        'ai7': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'G:1'))],
        'ai8': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'H:1'))],
        'ai9': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'A:2'))],
        'ai10': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'A:2'))]
    }
    return outputs[inputid]
def fake_outputs_per_input2(inputid, Analyte=False):
    # outputs_per_input is a list of all of the outputs per input obtained from the process by searching with input id
    # the outputs should have the container name and the location defined. Need to test what happens if two outputs
    # per input present: ai1 deliberately maps to two output artifacts to drive the error path.
    outputs = {
        'ai1': [
            Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'A:1')),
            Mock(id='bo2', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'B:1'))
        ],
        'ai2': [
            Mock(id='ao1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'B:1'))
        ]
    }
    return outputs[inputid]
def fake_outputs_per_input3(inputid, Analyte=False):
    # outputs_per_input is a list of all of the outputs per input obtained from the process by searching with input id
    # the outputs should have the container name and the location defined. Need to test what happens if amongst the
    # outputs for different inputs there are >1 output containers (OutputName1 vs OutputName2).
    outputs = {
        'ai1': [Mock(id='bo1', container=NamedMock(real_name='OutputName1'), location=('ContainerVariable1', 'A:1'))],
        'ai2': [Mock(id='bo2', container=NamedMock(real_name='OutputName2'), location=('ContainerVariable1', 'A:1'))]
    }
    return outputs[inputid]
class TestGenerateHamiltonInputUPL(TestEPP):
    """Tests for the Hamilton UPL input-file generator EPP script.

    Each patched_processN swaps the EPP's `process` property for a Mock wired with one of
    the fixture combinations above, so the script under test sees controlled inputs,
    outputs and step UDFs without talking to a LIMS.
    """
    def setUp(self):
        # Step-level UDFs every scenario shares.
        step_udfs = {
            'DNA Volume (uL)': '200',
        }
        # Happy path: 2 inputs, 1 output each, single output container.
        self.patched_process1 = patch.object(
            GenerateHamiltonInputUPL,
            'process', new_callable=PropertyMock(return_value=Mock(all_inputs=fake_all_inputs1, udf=step_udfs,
                                                                   outputs_per_input=fake_outputs_per_input1))
        )
        # Error path: 10 input containers (more than the script allows).
        self.patched_process2 = patch.object(
            GenerateHamiltonInputUPL,
            'process', new_callable=PropertyMock(return_value=Mock(all_inputs=fake_all_inputs2, udf=step_udfs,
                                                                   outputs_per_input=fake_outputs_per_input1))
        )
        # Error path: outputs spread over 2 output containers.
        self.patched_process3 = patch.object(
            GenerateHamiltonInputUPL,
            'process', new_callable=PropertyMock(return_value=Mock(all_inputs=fake_all_inputs1, udf=step_udfs,
                                                                   outputs_per_input=fake_outputs_per_input3))
        )
        # Error path: one input with 2 output artifacts.
        self.patched_process4 = patch.object(
            GenerateHamiltonInputUPL,
            'process', new_callable=PropertyMock(return_value=Mock(all_inputs=fake_all_inputs1, udf=step_udfs,
                                                                   outputs_per_input=fake_outputs_per_input2))
        )
        self.epp = GenerateHamiltonInputUPL(self.default_argv + ['-i', 'a_file_location'])

    def test_happy_input(self):  # test that file is written under happy path conditions i.e. <=9 input plates, 1 output
        # per input
        with self.patched_process1:
            self.epp._run()
        expected_lines = ['Input Plate,Input Well,Output Plate,Output Well,DNA Volume,TE Volume',
                          'Name1,A:1,OutputName1,A:1,200,0',
                          'Name2,A:1,OutputName1,B:1,200,0',
                          ]
        assert self.file_content('a_file_location-hamilton_input.csv') == expected_lines
        assert self.stripped_md5('a_file_location-hamilton_input.csv') == 'da6961a426d838d4be206364a61329bb'

    def test_10_input_containers(self):  # test that sys exit occurs if >9 input containers
        with self.patched_process2, patch('sys.exit') as mexit:
            self.epp._run()
        mexit.assert_called_once_with(1)

    def test_2_input_containers(self):  # test that sys exit occurs if >1 output containers
        with self.patched_process3, patch('sys.exit') as mexit:
            self.epp._run()
        mexit.assert_called_once_with(1)

    def test_2_output_artifacts(self):  # test that sys exit occurs if >1 output artifacts for one input
        with self.patched_process4, patch('sys.exit') as mexit:
            self.epp._run()
        mexit.assert_called_once_with(1)
|
__version__ = '0.2.0.dev0'
__version_info__ = tuple([field for field in __version__.split('.')])
__api_version__ = 'v1.0'
|
# Generated by Django 3.0 on 2020-08-20 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds is_file_active / is_url_active boolean flags to the
    navchild and navmenu models. Do not hand-edit the operations; create a new migration
    for further schema changes."""

    dependencies = [
        ('main', '0014_navmenu_file_menu'),
    ]

    operations = [
        migrations.AddField(
            model_name='navchild',
            name='is_file_active',
            field=models.BooleanField(default=False, verbose_name='Активне посилання на файл'),
        ),
        migrations.AddField(
            model_name='navchild',
            name='is_url_active',
            field=models.BooleanField(default=False, verbose_name='Активне посилання'),
        ),
        migrations.AddField(
            model_name='navmenu',
            name='is_file_active',
            field=models.BooleanField(default=False, verbose_name='Активне посилання на файл'),
        ),
        migrations.AddField(
            model_name='navmenu',
            name='is_url_active',
            field=models.BooleanField(default=False, verbose_name='Активне посилання'),
        ),
    ]
|
import json
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from facet_core import primo
class FacetQuery(models.Model):
    """A cached Primo search: the query string, its selected facet filters, and hit count."""
    query = models.CharField( max_length=150)
    query_facets = models.TextField()  # JSON-encoded list of selected FacetValue primary keys
    # The same query with no facet/year filters applied; NOTE(review): ForeignKey without
    # on_delete implies a pre-2.0 Django codebase — confirm before upgrading.
    clean_query = models.ForeignKey("FacetQuery",null=True)
    year_range = models.CharField(max_length=10,null=True)  # "start,end" string, or None
    total_hits = models.IntegerField()

    def _save_facets(self,facets):
        # Persist the facet name -> {key: count} mapping Primo returned for this query.
        for name, values in facets.items():
            f = self.facets.create(name=name)
            f.values.bulk_create([
                FacetValue(facet=f,key=k,count=v) for k,v in values.items()
            ])

    @property
    def facet_ids(self):
        # Decode the stored JSON id list ([] when no facets are stored).
        return json.loads(self.query_facets) if self.query_facets else []

    @staticmethod
    def serialize_facet_ids(ids):
        """Encode a list of FacetValue ids for storage in query_facets."""
        return json.dumps(ids)

    def get_absolute_url(self):
        return reverse("render", args=(self.pk,))

    def __str__(self):
        return "Query: {}, Total hits: {}".format(self.query,self.total_hits)

    @property
    def facet_pairs(self):
        """Selected (facet name, value) pairs, including a synthetic 'creationdate' pair
        derived from year_range; None when no filters are active."""
        pairs = set()
        for facet_id in self.facet_ids:
            o = FacetValue.objects.get(pk=facet_id)
            pairs.add((o.facet.name,o.key))
        if self.year_range:
            years = self.year_range.split(",")
            if years[0] == years[1]:
                years = years[0]  # collapse e.g. "2000,2000" to a single year
            else:
                years = "{} - {}".format(*years)
            pairs.add(("creationdate",years))
        return list(pairs) or None

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """Save-or-reuse: if an identical query row exists, adopt its pk (no duplicate row);
        otherwise run the Primo search, record the hit count, link the unfiltered 'clean'
        query, persist the row, and then store the returned facets."""
        try:
            o = FacetQuery.objects.get(query=self.query,
                                       year_range=self.year_range,
                                       query_facets=self.query_facets)
            self.pk = o.pk  # reuse the existing row; super().save() is deliberately skipped
        except ObjectDoesNotExist:
            res = primo.facet_query(self.query,
                                    self.facet_pairs,
                                    query_total=True)
            self.total_hits = res['total']
            if self.query_facets or self.year_range:
                # NOTE(review): assumes the unfiltered query was saved first — this get()
                # raises DoesNotExist otherwise; confirm that invariant holds for callers.
                self.clean_query = FacetQuery.objects.get(query=self.query,
                                                          year_range=None,
                                                          query_facets="")
            super().save(force_insert, force_update, using, update_fields)
            self._save_facets(res['facets'])
class Facet(models.Model):
    """A named facet (e.g. author, subject) attached to one FacetQuery."""
    query = models.ForeignKey(FacetQuery, related_name='facets')
    name = models.CharField(max_length=100)

    def __str__(self):
        return "Query: {} - Facet: {}".format(self.query.query,self.name)
class FacetValue(models.Model):
    """One value of a Facet together with the number of hits carrying that value."""
    facet = models.ForeignKey(Facet, related_name='values')
    key = models.CharField(max_length=100)
    count = models.IntegerField()

    def __str__(self):
        return "Facet: {} - (Key: {}, Count: {})".format(self.facet.name,
                                                         self.key, self.count)
|
if __name__ == '__main__':
    # Print the second largest distinct value of a space-separated integer list.
    n = int(input())  # element count: read to consume the line, not otherwise needed
    values = map(int, input().split())
    distinct_desc = sorted(set(values), reverse=True)
    print(distinct_desc[1])
#leap year
def is_leap(year):
    """Return True if year is a leap year under the Gregorian rules.

    Divisible by 4 -> leap, unless divisible by 100 -> not leap,
    unless divisible by 400 -> leap again.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
# Problem: given an array of n integers and two disjoint sets A and B (each of m integers),
# start with happiness 0; for each array element, add 1 if it is in A, subtract 1 if it is
# in B, and leave happiness unchanged otherwise. Output the final happiness.
if __name__=='__main__':
    # Input: sizes line, the array, set A (liked) and set B (disliked).
    sizes = input().split()  # n and m; values themselves are not needed below
    elements = map(int, input().split())
    liked = set(map(int, input().split()))
    disliked = set(map(int, input().split()))
    happiness = 0
    for value in elements:
        if value in liked:
            happiness += 1
        elif value in disliked:
            happiness -= 1
    print(happiness)
|
from django.forms import ModelForm, DateTimeInput, Textarea, DateInput
from django.forms.models import inlineformset_factory
from project.models import Project, ProjectTeam, Task
class ProjectForm(ModelForm):
    """Form for creating/editing a Project's core fields with styled date inputs."""
    class Meta:
        model = Project
        fields = ('name', 'desc', 'owners', 'start_date', 'end_date')
        widgets = {
            'desc': Textarea(attrs={'cols': 40, 'rows': 4}),
            # TODO: add a validator that rejects a start_date on/after end_date.
            'start_date': DateInput(format='%d %b %Y', attrs={'class': 'date-input'}),
            'end_date': DateInput(format='%d %b %Y', attrs={'class': 'date-input'}),
        }
class ProjectTeamForm(ModelForm):
    """Form exposing every ProjectTeam field; used inside ProjectTeamFormSet below."""
    class Meta:
        model = ProjectTeam
        fields = '__all__'
class TaskForm(ModelForm):
    """Form exposing every Task field; used inside TaskFormSet below."""
    class Meta:
        model = Task
        fields = '__all__'
# Inline formsets for editing a Project's Tasks / team memberships in one page:
# no blank extra rows, rows are deletable, and at least one row is required (min_num=1).
TaskFormSet = inlineformset_factory(Project, Task, form=TaskForm, extra=0, can_delete=True, min_num=1)
ProjectTeamFormSet = inlineformset_factory(Project, ProjectTeam, form=ProjectTeamForm, extra=0, can_delete=True, min_num=1)
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, \
TextAreaField, FileField
from wtforms.validators import DataRequired
import os
class LoginForm(FlaskForm):
    """Sign-in form: username and password, both required."""
    username = StringField('Login', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Sign in')
class RegForm(FlaskForm):
    """Registration form: desired username and password, both required."""
    username = StringField('Login', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Sign up')
class EditForm(FlaskForm):
    """Profile form for changing the current user's display name."""
    username = StringField('New Name', validators=[DataRequired()])
    submit = SubmitField('Change name')
class CreateForm(FlaskForm):
    """Form for creating a new community: name and bio are required."""
    title = StringField('New Community Name', validators=[DataRequired()])
    bio = TextAreaField('Bio', validators=[DataRequired()])
    submit = SubmitField('Create')
class EditComForm(FlaskForm):
    """Form for editing a community; both fields are optional so either can be changed alone."""
    title = StringField('New Community Name')
    bio = TextAreaField('New Bio')
    submit = SubmitField('Change Community')
class AddNewsForm(FlaskForm):
    """Form for posting a news item; hashtag and image attachment are optional."""
    title = StringField('Title', validators=[DataRequired()])
    hashtag = StringField('Hashtag')
    content = TextAreaField('Content', validators=[DataRequired()])
    image = FileField('Image')
    submit = SubmitField('Post')
class EditNewsForm(FlaskForm):
    """Form for editing an existing news item; unlike AddNewsForm, the hashtag is required
    and the image cannot be changed here."""
    title = StringField('Title', validators=[DataRequired()])
    hashtag = StringField('Hashtag', validators=[DataRequired()])
    content = TextAreaField('Content', validators=[DataRequired()])
    submit = SubmitField('Change')
|
import re
import requests
from find.models import FastaSource, MicroRNAAlias
headers = None # TODO: Implement proper headers
class SequenceNotFoundError(Exception):
    """Raised when a remote sequence database returns no record for an accession."""
    pass
class Fasta:
    """Value object bundling a FASTA description line, its sequence, and the source record."""

    def __init__(self, desc, seq, fasta_source):
        self.description = desc
        self.sequence = seq
        self.fasta_source = fasta_source

    def to_dict(self):
        """Return the instance attributes as a dict (the live attribute dict, not a copy)."""
        return vars(self)
class Uniprot:
    """Fetch protein FASTA records from UniProt by accession."""
    # UniProt accession number pattern.
    REGEX = r"[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}"
    URL = "https://www.uniprot.org/uniprot/{}.fasta"
    SOURCE = 'UNIPROT'

    @classmethod
    def is_valid(cls, query):
        """Return True if query starts with a UniProt-style accession (re.match anchors at the start only)."""
        return bool(re.match(cls.REGEX, query))

    @classmethod
    def get(cls, accession=None, fasta_source=None):
        """Download the FASTA record for accession, or re-fetch a stored fasta_source.url.

        Raises SequenceNotFoundError on a 404 response.
        NOTE(review): non-404 error statuses (e.g. 500) fall through and are parsed as
        FASTA — confirm whether they should raise too. verify=False disables TLS
        certificate verification — confirm this is intentional.
        """
        url = fasta_source.url if fasta_source else cls.URL.format(accession)
        response = requests.get(url, headers=headers, verify=False)
        if response.status_code == 404:
            raise SequenceNotFoundError("No sequence found in Uniprot: {}".format(accession))
        else:
            content = response.content.decode("utf-8").split("\n")
            description = content[0]  # FASTA header line
            sequence = "".join(content[1:])  # remaining lines joined into one sequence string
            if not fasta_source:
                fasta_source, _ = FastaSource.objects.get_or_create(url=url, accession=accession, source=cls.SOURCE)
            return Fasta(description, sequence, fasta_source)
class NCBI:
    """Fetch FASTA records from NCBI E-utilities, trying the protein db then nuccore."""
    # RefSeq-style accession pattern (prefix, underscore, digits, optional version).
    REGEX = r'[A-Z]{1,2}_[0-9]{4,10}\.?[0-9]{1,2}'
    URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db={}&id={}&rettype=fasta&retmode=text'
    SOURCE = 'NCBI'

    @classmethod
    def is_valid(cls, query):
        """Return True if query starts with an NCBI RefSeq-style accession."""
        return bool(re.match(cls.REGEX, query))

    @classmethod
    def get(cls, accession=None, fasta_source=None):
        """Download the FASTA record for accession (protein db first, nucleotide fallback).

        Raises SequenceNotFoundError when both databases answer 404/400.
        NOTE(review): verify=False disables TLS certificate verification — confirm intentional.
        """
        if fasta_source:
            url = fasta_source.url
        else:
            url = cls.URL.format('protein', accession)
        response = requests.get(url, headers=headers, verify=False)
        if response.status_code in [404, 400]:
            # Protein lookup failed; retry against the nucleotide database.
            url = cls.URL.format('nuccore', accession)
            response = requests.get(url, headers=headers, verify=False)
            if response.status_code in [404, 400]:
                # Bug fix: the message previously said 'Uniprot' although this is the NCBI client.
                raise SequenceNotFoundError("No sequence found in NCBI: {}".format(accession))
        content = response.content.decode("utf-8").split("\n")
        description = content[0]  # FASTA header line
        sequence = "".join(content[1:])
        if not fasta_source:
            fasta_source, _ = FastaSource.objects.get_or_create(url=url, accession=accession, source=cls.SOURCE)
        return Fasta(description, sequence, fasta_source)
class Mirbase:
    """Fetch microRNA FASTA records from miRBase by accession (MI.../MIMAT...)."""
    REGEX = r'MI(MAT)?[0-9]{7}'
    URL = 'http://www.mirbase.org/cgi-bin/get_seq.pl?acc={}'
    SOURCE = 'MIRBASE'

    @classmethod
    def is_valid(cls, query):
        """Return True if query starts with a miRBase accession."""
        return bool(re.match(cls.REGEX, query))

    @classmethod
    def get(cls, accession=None, fasta_source=None):
        """Fetch the FASTA record for accession, or re-fetch a stored fasta_source.url.

        Raises SequenceNotFoundError when miRBase returns no usable record.
        NOTE(review): verify=False disables TLS certificate verification — confirm intentional.
        """
        url = fasta_source.url if fasta_source else cls.URL.format(accession)
        response = requests.get(url, headers=headers, verify=False)
        content = response.content.decode('utf-8').split('\n')
        # Bug fix: the previous check compared the list `content` to '' and could never
        # trigger. The lines used below are content[1] (header) and content[2] (sequence),
        # so require at least three lines and a non-empty sequence line.
        if len(content) < 3 or not content[2]:
            raise SequenceNotFoundError('No sequence found in MirBase: {}'.format(accession))
        description = content[1]
        sequence = content[2]
        if not fasta_source:
            fasta_source, _ = FastaSource.objects.get_or_create(url=url, accession=accession, source=cls.SOURCE)
        return Fasta(description, sequence, fasta_source)
class MicroRNA:
    """Resolve human-readable microRNA names (e.g. 'hsa-miR-21-5p') to miRBase records."""
    REGEX = r'([a-z0-9]{3,7}-(let|mir|miR|bantam|lin|iab|mit|lsy)(-?[a-z0-9]{1,6}(-[0-9]{1,5}l?)?(-(3|5)(p|P))?)?\*?)|bantam'

    @classmethod
    def is_valid(cls, query):
        """Return True if query starts with a recognised microRNA name."""
        return bool(re.match(cls.REGEX, query))

    @classmethod
    def get(cls, accession=None, fasta_source=None):
        """Fetch the record for a microRNA name by resolving its miRBase alias first.

        Raises SequenceNotFoundError when no alias is known for the given name.
        """
        if fasta_source:
            return Mirbase.get(fasta_source=fasta_source)
        # Keep the try body minimal: only the alias lookup can raise DoesNotExist.
        try:
            mirbase_accession = MicroRNAAlias.objects.get(alias=accession.lower()).accession
        except MicroRNAAlias.DoesNotExist:
            # Bug fix: the previous bare `raise SequenceNotFoundError` carried no context.
            raise SequenceNotFoundError('No miRBase alias found for: {}'.format(accession)) from None
        return Mirbase.get(accession=mirbase_accession)
|
#!/usr/bin/python//Wi2018-Classroom/students/jean-baptisteyamindi/session05
#This adds a proper exception handler to the except-mailroom code so that it can run.
#We make sure to catch the specific error that occurs.
NameError
~/Pythonpython/Wi2018-Classroom/students/jean-baptisteyamindi/session05 except-mailroom.py in mailroom()
first_try = ["Donor list:"]
listing_except = listing.append(first_try[0])
~/PythonStuff/UWPCE/Temp/except_test.py in listing.append(donor)
def listing.append(donor):
if donor == 'name':
print(s)
elif donor == 'menu':
print()
NameError: name 's' is not defined
# A try/except block with an else clause that prints not_listing_except,
# as the exercise requested.
try:
    not_listing_except = listing_except = listing.append(first_try[2])
except SyntaxError:
    print('Run Away!')
else:
    # Runs only when the try body raised no exception.
    print(not_listing_except)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from thawra import hero
class HeroTest(unittest.TestCase):
    """Unit tests for thawra.hero.Hero attributes, derived stats and gambits."""

    def setUp(self):
        # Minimal hero: strength=8, intelligence=8, agility=3, fire element.
        self.hero = hero.Hero(name="",
                              skillmap="",
                              attributes=[8, 8, 3],
                              element="fire",
                              macros=hero.randattack)

    def test_attributes(self):
        """The attributes list maps onto strength/intelligence/agility in order."""
        self.assertEqual(self.hero.strength, 8)
        self.assertEqual(self.hero.intelligence, 8)
        self.assertEqual(self.hero.agility, 3)

    def test_level(self):
        """A freshly created hero starts at level 1."""
        self.assertEqual(self.hero.level, 1)

    # Fixed below: test methods asserted via `return self.assertEqual(...)`.
    # unittest ignores return values, so the `return` was misleading noise.
    def test_hero_maxHP(self):
        self.assertEqual(self.hero.hp, self.hero.intelligence * 100)

    def test_hero_maxMP(self):
        self.assertEqual(self.hero.mp, self.hero.intelligence * 100)

    def test_hero_stats(self):
        """Derived combat stats are fixed multiples of the base attributes."""
        self.assertEqual(self.hero.stats, {
            'ATK': self.hero.strength * 10,
            'DEF': self.hero.strength * 2,
            'MAG': self.hero.intelligence * 7,
            'MDE': self.hero.intelligence * 2,
            'SPD': self.hero.agility * 30})

    def test_hero_hp(self):
        """HP starts at max and is expected to clamp at 0 and at maxHP."""
        self.assertEqual(self.hero.hp, self.hero.maxHP)
        self.hero.hp -= self.hero.maxHP + 1
        self.assertEqual(self.hero.hp, 0)
        self.hero.hp += self.hero.maxHP * 2
        self.assertEqual(self.hero.hp, self.hero.maxHP)

    def test_invalid_attributes(self):
        """Too few attribute values must raise InvalidHero."""
        self.assertRaises(hero.InvalidHero, hero.Hero,
                          "", "", [10], "", None)

    def test_choice(self):
        """This test should be renamed test_randattack gambit.
        Or something.
        """
        choice, target = self.hero.choice([self.hero], [self.hero])
        self.assertEqual(choice, "ATK")
        self.assertEqual(target, [self.hero])

if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python
# This program is to show basic of python programming
# I'm opening a file using python
#
import os
import sys
import time
import getopt
from datetime import datetime
def print_help():
    """Print usage, argument descriptions and an example invocation."""
    usage_text = """\n******** Script Information *********\n
    Usage : This script takes three arguments. env, action and source zip location.\n
    Based on env type it will upload files in Akama NetStorage.\n
    ******** Arguments Information *********\n
    Arguments :\n
    -h or --help : Help argument which show details of the script
    -e or --env : Environment argument - test / prod \n
    -a or --action : Action argument - upload / delete / post \n
    -d or --dir : File path argument - Full file path needs to be specified here. \n
    e.g. /tmp/abc/content.zip \n
    Note: If file ext is zip than script will extract the zip file first and \n
    uploads all the files. \n
    Example: python cmd_argument.py -e test -a upload -d /tmp/abc/pkr.zip\n
    """
    print(usage_text)
def print_exception(message):
    """Print an error message, then the usage help."""
    print('Error Message : ' + message)
    print_help()
def main():
    """Parse the command line (env, action, dir) and echo the chosen values.

    Options:
        -h/--help    print usage and exit
        -e/--env     environment name (test / prod)
        -a/--action  action to perform (upload / delete / post)
        -d/--dir     full path to the source file
    """
    print('Enter main method')
    env = ''
    dir_loc = ''
    action = ''
    try:
        opts, args = getopt.getopt(sys.argv[1:], "he:a:d:", ["help", "env=", "action=", "dir="])
    except getopt.GetoptError as err:
        print_exception('Opps.. Invalid option..!! Error message : ' + str(err))
        sys.exit(2)
    # Bug fix: handle --help before the argument-count check; previously
    # `script.py -h` was rejected for "missing arguments" and help never showed.
    if any(opt in ('-h', '--help') for opt, _ in opts):
        print_help()
        sys.exit()
    if len(opts) != 3:
        print_exception('1 or more arguments were missing in command. Arguments : ' + str(opts))
        sys.exit()
    for opt, arg in opts:
        print('option -- ' + opt)
        if opt in ("-e", "--env"):
            env = arg
        elif opt in ("-d", "--dir"):
            dir_loc = arg
        elif opt in ("-a", "--action"):
            action = arg
        else:
            print('-- Invalid Option : ', opt)
            print_help()
            sys.exit()
    print('Env : ' + env)
    print('Dir : ' + dir_loc)
    print('Action : ' + action)
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
"""
Created on Tue Dec 17 16:10:32 2019
@author: Mohsen Mehrani, Taha Enayat
"""
import numpy as np
class arrays_glossary():
    # Mixin-style class: expects self.G, self.N, self.a_matrix and
    # self.memory_size to be provided by the class it is combined with.
    def array(self,what_array):
        """
        Extracts information from agent matrix and graph. It generates and
        returns desired arrays.

        Parameters
        ----------
        what_array : string
            what_array is a string indicating the desired array. It can have
            get value from the following:
            'degree', 'neighbor', 'value', 'time', 'probability', 'utility',
            'others_feeling', 'worth_ratio', 'situation', 'asset', 'money',
            'approval', 'active_neighbor'

        Returns
        -------
        The requested per-agent (N,) or per-pair (N, N) array (list for
        'degree'); implicitly returns None for unrecognised keys.

        Notes
        -----
        Assumes self.G is a graph with degree()/nodes() (networkx-like - TODO
        confirm), self.N is the agent count and self.a_matrix holds one agent
        object per index.
        """
        ref = {}
        # Node degree of each agent in the interaction graph.
        if what_array == 'degree':
            ref[what_array] = [self.G.degree(n) for n in self.G.nodes()]
            return ref[what_array]
        # Full N x N neighbor matrix, one row per agent.
        if what_array == 'neighbor':
            ref[what_array] = np.zeros((self.N,self.N))
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].neighbor
            return ref[what_array]
        # Pairwise value entries, filled only for active neighbors.
        if what_array == 'value':
            ref[what_array] = np.zeros((self.N,self.N))
            for i in np.arange(self.N):
                for j in self.a_matrix[i].active_neighbor.keys():
                    ref[what_array][i,j] = self.a_matrix[i].value[j]
            return ref[what_array]
        # Interaction-time history per pair; -1 marks pairs that never interacted.
        if what_array == 'time':
            ref[what_array] = np.zeros((self.N,self.N,self.memory_size))
            for i in np.arange(self.N):
                for j in np.arange(self.N):
                    if self.a_matrix[i].neighbor[j] != 0:
                        ref[what_array][i,j] = self.a_matrix[i].time[j]
                        #ref[what_array][i,j] = self.a_matrix[i].value[j]
                    else:
                        ref[what_array][i,j] = -1
            return ref[what_array]
        # Interaction probability assigned to each active neighbor.
        if what_array == 'probability':
            ref[what_array] = np.zeros((self.N,self.N))
            for i in np.arange(self.N):
                for j in self.a_matrix[i].active_neighbor.keys():
                    ref[what_array][i,j] = self.a_matrix[i].active_neighbor[j]
            return ref[what_array]
        # Symmetric pair utility: product of both directions' probabilities.
        if what_array == 'utility':
            ref[what_array] = np.zeros((self.N,self.N))
            for i in np.arange(self.N):
                for j in self.a_matrix[i].active_neighbor.keys():
                    ref[what_array][i,j] = self.a_matrix[i].active_neighbor[j] * self.a_matrix[j].active_neighbor[i]
            return ref[what_array]
        # Sum of all agents' feeling vectors, normalised to sum to 1.
        if what_array == 'others_feeling':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array] += self.a_matrix[i].feeling[:]
            return ref[what_array]/np.sum(ref[what_array])
        # The remaining keys are simple per-agent scalar attributes.
        if what_array == 'worth_ratio':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].worth_ratio
            return ref[what_array]
        if what_array == 'situation':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].situation
            return ref[what_array]
        if what_array == 'asset':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].asset
            return ref[what_array]
        if what_array == 'money':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].money
            return ref[what_array]
        if what_array == 'approval':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = self.a_matrix[i].approval
            return ref[what_array]
        # Count of active neighbors per agent.
        if what_array == 'active_neighbor':
            ref[what_array] = np.zeros(self.N)
            for i in np.arange(self.N):
                ref[what_array][i] = len(self.a_matrix[i].active_neighbor)
            return ref[what_array]
        pass
|
import inspect
class Base():
    """Base class that enforces, at construction time, that subclasses
    implement a required set of methods ('foo' and 'bar') and that Base
    itself is never instantiated directly."""

    def __init__(self, *args, **kwargs):
        # Direct instantiation of Base itself is forbidden.
        if self.__class__.__name__ == 'Base':
            raise Exception('You are required to subclass the {} class'
                            .format('Base'))
        # Bug fix: on Python 3, plain methods are *functions* on the class
        # object, so inspect.ismethod matched nothing and the required-method
        # check failed for every subclass. inspect.isroutine matches plain
        # functions, bound methods and built-ins alike.
        methods = set([x[0] for x in
                       inspect.getmembers(self.__class__, predicate=inspect.isroutine)])
        required = set(['foo', 'bar'])
        if not required.issubset(methods):
            missing = required - methods
            raise Exception("Requried method '{}' is not implemented in '{}'"
                            .format(', '.join(missing), self.__class__.__name__))
class Real(Base):
    """Concrete subclass providing every method that Base requires."""

    def foo(self):
        print('foo in Real')

    def bar(self):
        print('bar in Real')

    def other(self):
        # Extra method beyond the required interface; intentionally a no-op.
        pass
class Fake(Base):
    """Incomplete subclass: implements foo but not bar, so instantiation fails.

    Note: a subclass could hide the parent's check by overriding __init__
    with a no-op (e.g. `def __init__(self): pass`).
    """

    def foo(self):
        print('foo in Fake')
# Demonstration: only Real satisfies the Base contract.
r = Real()
#b = Base() # You are required to subclass the Base class
#f = Fake() # Requried method 'bar' is not implemented in class 'Fake'
|
import os
# Project root: one directory above this file's location.
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
# strftime/strptime patterns shared across the project.
SHORT_DATE_FORMAT = "%d.%m"  # day.month
LONG_DATE_FORMAT = "%d.%m.%Y"  # day.month.year
TIME_FORMAT = "%H:%M"  # 24-hour clock
import controls
import engine_factory
from audio_device import AudioDevice
# Build the engine model (90-degree V4) and stream its generated audio.
engine = engine_factory.v_four_90_deg()
audio_device = AudioDevice()
# play_stream pulls samples from engine.gen_audio as the device needs them.
stream = audio_device.play_stream(engine.gen_audio)
print('\nEngine is running...')
try:
    controls.capture_input(engine) # blocks until user exits
except KeyboardInterrupt:
    # Ctrl-C is a normal way to stop; fall through to cleanup.
    pass
print('Exiting...')
# Release audio resources before exiting.
stream.close()
audio_device.close()
|
import pandas as pd
import numpy as np
import mpmath
import periodictable as pt
import matplotlib.pyplot as plt
import functools
from .compositions import renormalise
from .normalisation import ReferenceCompositions, RefComp
from .util.text import titlecase
from .util.pd import to_frame
from .util.math import OP_constants, lambdas, lambda_poly
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def get_radii(el):
    """Convenience function for ionic radii.

    Accepts a single element (string or formula object) or a list thereof.
    """
    if isinstance(el, list):
        return [get_radii(item) for item in el]
    if not isinstance(el, str):
        el = str(el)
    return _RADII[el]
def ischem(s):
    """
    Checks if a string corresponds to chemical component.
    Here simply checking whether it is a common element or oxide.
    TODO: Implement checking for other compounds, e.g. carbonates.
    """
    known = [c.upper() for c in common_oxides() + common_elements()]
    if isinstance(s, list):
        return [str(item).upper() in known for item in s]
    return str(s).upper() in known
def tochem(strings:list, abbrv=['ID', 'IGSN'], split_on='[\s_]+'):
    """
    Converts a list of strings containing some chemical compounds to
    appropriate case.
    """
    # NOTE: abbrv and split_on are currently unused.
    # Accommodate a single string (or pandas Index) being passed.
    if not type(strings) in [list, pd.core.indexes.base.Index]:
        strings = [strings]
    lookup = {str(c).upper(): str(c)
              for c in common_oxides() + common_elements()}
    return [lookup[str(h).upper()] if str(h).upper() in lookup else h
            for h in strings]
def to_molecular(df: pd.DataFrame, renorm=True):
    """
    Converts mass quantities to molar quantities of the same order.
    E.g.:
    mass% --> mol%
    mass-ppm --> mol-ppm
    """
    df = to_frame(df)
    molar_weights = [pt.formula(col).mass for col in df.columns]
    scaled = df.div(molar_weights)
    return renormalise(scaled) if renorm else scaled
def to_weight(df: pd.DataFrame, renorm=True):
    """
    Converts molar quantities to mass quantities of the same order.
    E.g.:
    mol% --> mass%
    mol-ppm --> mass-ppm
    """
    df = to_frame(df)
    molar_weights = [pt.formula(col).mass for col in df.columns]
    scaled = df.multiply(molar_weights)
    return renormalise(scaled) if renorm else scaled
def get_cations(oxide:str, exclude=[]):
    """
    Returns the principal cations in an oxide component.

    Parameters
    ----------
    oxide : str
        Oxide formula to decompose (e.g. 'Fe2O3').
    exclude : list
        Element symbols to omit from the result; oxygen is always omitted.

    Todo: Consider implementing periodictable style return.
    """
    # Bug fix: the old `exclude += ['O']` mutated the caller's list (and the
    # shared default argument) in place. Build a fresh list instead.
    if 'O' not in exclude:
        exclude = exclude + ['O']
    atms = pt.formula(oxide).atoms
    cations = [el for el in atms.keys() if not el.__str__() in exclude]
    return cations
def common_elements(cutoff=92, output='string'):
    """
    Provides a list of elements up to a particular cutoff (default: including U)
    Output options are 'formula', or strings.
    """
    # Skip the neutron pseudo-element 'n' and anything past the cutoff.
    els = [el for el in pt.elements
           if str(el) != 'n' and el.number <= cutoff]
    if output == 'formula':
        return els
    return [str(el) for el in els]
def REE(output='string', include_extras=False):
    """
    Provides the list of Rare Earth Elements
    Output options are 'formula', or strings.
    Todo: add include extras such as Y.
    """
    names = ['La', 'Ce', 'Pr', 'Nd', 'Pm',
             'Sm', 'Eu', 'Gd', 'Tb', 'Dy',
             'Ho', 'Er', 'Tm', 'Yb', 'Lu']
    if output != 'formula':
        return names
    return [getattr(pt, name) for name in names]
def common_oxides(elements: list=[], output='string',
                  addition: list=['FeOT', 'Fe2O3T', 'LOI'],
                  exclude=['O', 'He', 'Ne', 'Ar', 'Kr', 'Xe']):
    """
    Creates a list of oxides based on a list of elements.
    Output options are 'formula', or strings.
    Note: currently return FeOT and LOI even for element lists
    not including iron or water - potential upgrade!
    Todo: element verification
    """
    if not elements:
        # Default to every common element except oxygen and the noble gases.
        elements = [el for el in common_elements(output='formula')
                    if str(el) not in exclude]
    # else: passed-in elements are assumed valid (verification is a TODO).
    oxides = []
    for el in elements:
        oxides.extend(simple_oxides(el, output=output))
    if output != 'formula':
        oxides = [str(ox) for ox in oxides] + addition
    return oxides
def simple_oxides(cation, output='string'):
    """
    Creates a list of oxides for a cationic element
    (oxide of ions with c=1+ and above).
    """
    try:
        if not isinstance(cation, pt.core.Element):
            # Edge case of lowercase str such as 'cs'.
            cation = getattr(pt, titlecase(cation))
    except AttributeError:
        raise Exception("You must select a cation to obtain oxides.")
    positive_charges = [c for c in cation.ions if c > 0]
    # for 3.6+, could use f'{cation}{1}O{c//2}', f'{cation}{2}O{c}'
    formulas = []
    for charge in positive_charges:
        if charge % 2:
            formulas.append(str(cation) + str(2) + 'O' + str(charge))
        else:
            formulas.append(str(cation) + str(1) + 'O' + str(charge // 2))
    oxides = [pt.formula(f) for f in formulas]
    if output == 'formula':
        return oxides
    return [str(ox) for ox in oxides]
def devolatilise(df: pd.DataFrame,
                 exclude=['H2O', 'H2O_PLUS', 'H2O_MINUS', 'CO2', 'LOI'],
                 renorm=True):
    """
    Recalculates components after exclusion of volatile phases (e.g. H2O, CO2).
    """
    retained = [col for col in df.columns if col not in exclude]
    subset = df.loc[:, retained]
    return renormalise(subset) if renorm else subset
def oxide_conversion(oxin, oxout):
    """
    Generates a function to convert oxide components between
    two elemental oxides, for use in redox recalculations.

    The returned callable converts a series expressed in *oxin* units
    into the equivalent *oxout* units (mass by default, molar optionally).
    """
    if not isinstance(oxin, (pt.formulas.Formula, pt.core.Element)):
        oxin = pt.formula(oxin)
    if not isinstance(oxout, (pt.formulas.Formula, pt.core.Element)):
        oxout = pt.formula(oxout)
    inatoms = {k: v for (k, v) in oxin.atoms.items() if str(k) != 'O'}
    outatoms = {k: v for (k, v) in oxout.atoms.items() if str(k) != 'O'}
    assert len(inatoms) == len(outatoms) == 1  # simple oxides only
    assert inatoms.keys() == outatoms.keys()  # must involve the same element
    # Moles of product vs. moles of reactant.
    cation_coefficient = list(inatoms.values())[0] / list(outatoms.values())[0]
    def convert_series(dfser: pd.Series, molecular=False):
        if molecular:
            factor = cation_coefficient
        else:
            factor = cation_coefficient * oxout.mass / oxin.mass
        return dfser * factor
    convert_series.__doc__ = "Convert series from " + str(oxin) + " to " + str(oxout)
    return convert_series
def recalculate_redox(df: pd.DataFrame,
                      to_oxidised=False,
                      renorm=True,
                      total_suffix='T'):
    """
    Recalculates abundances of redox-sensitive components (particularly Fe),
    and normalises a dataframe to contain only one oxide species for a given
    element.

    With to_oxidised=True the result carries a single 'Fe2O3T' column;
    otherwise a single 'FeOT' column. Input columns for the other species
    are dropped. The input frame itself is not mutated (a copy is used).

    Consider reimplementing total suffix as a lambda formatting function
    to deal with cases of prefixes, capitalisation etc.
    Automatic generation of multiple redox species from dataframes
    would also be a natural improvement.
    """
    # Assuming either (a single column) or (FeO + Fe2O3) are reported
    # Fe columns - FeO, Fe2O3, FeOT, Fe2O3T
    FeO = pt.formula("FeO")
    Fe2O3 = pt.formula("Fe2O3")
    dfc = df.copy()
    # Which oxidised/reduced Fe species are actually present in the data.
    ox_species = ['Fe2O3', "Fe2O3"+total_suffix]
    ox_in_df = [i for i in ox_species if i in dfc.columns]
    red_species = ['FeO', "FeO"+total_suffix]
    red_in_df = [i for i in red_species if i in dfc.columns]
    if to_oxidised:
        # Sum existing Fe2O3(T) columns plus FeO columns converted upward.
        oxFe = oxide_conversion(FeO, Fe2O3)
        Fe2O3T = dfc.loc[:, ox_in_df].fillna(0).sum(axis=1) + \
                 oxFe(dfc.loc[:, red_in_df].fillna(0)).sum(axis=1)
        dfc.loc[:, 'Fe2O3T'] = Fe2O3T
        # Drop reduced species and non-total oxidised species.
        to_drop = red_in_df + \
                  [i for i in ox_in_df if not i.endswith(total_suffix)]
    else:
        # Sum existing FeO(T) columns plus Fe2O3 columns converted downward.
        reduceFe = oxide_conversion(Fe2O3, FeO)
        FeOT = dfc.loc[:, red_in_df].fillna(0).sum(axis=1) + \
               reduceFe(dfc.loc[:, ox_in_df].fillna(0)).sum(axis=1)
        dfc.loc[:, 'FeOT'] = FeOT
        to_drop = ox_in_df + \
                  [i for i in red_in_df if not i.endswith(total_suffix)]
    dfc = dfc.drop(columns=to_drop)
    if renorm:
        return renormalise(dfc)
    else:
        return dfc
def aggregate_cation(df: pd.DataFrame,
                     cation,
                     form='oxide',
                     unit_scale=None):
    """
    Aggregates cation information from oxide and elemental components
    to a single series. Allows scaling (e.g. from ppm to wt% - a factor
    of 10,000).

    With form='oxide' the element column is folded into the oxide column
    (which remains); with form='element' the reverse. Mutates *df* in place
    and returns it.

    Needs to also implement a 'molecular' version.
    """
    elstr = cation.__str__()
    oxstr = [o for o in df.columns if o in simple_oxides(elstr)][0]
    el, ox = pt.formula(elstr), pt.formula(oxstr)
    if form == 'oxide':
        if unit_scale is None: unit_scale = 1/10000 # ppm to Wt%
        assert unit_scale > 0
        # Bug fix: converting *element* data into oxide equivalents needs the
        # element -> oxide converter. The previous oxide_conversion(ox, el)
        # scaled in the wrong direction (cf. its use in recalculate_redox,
        # where the first argument is the units of the input series).
        convert_function = oxide_conversion(el, ox)
        conv_values = convert_function(df.loc[:, elstr]).values * unit_scale
        totals = np.nansum(np.vstack((df.loc[:, oxstr].values, conv_values)),
                           axis=0)
        # All-NaN rows sum to ~0; restore NaN so missing data stays missing.
        totals[np.isclose(totals, 0)] = np.nan
        df.loc[:, oxstr] = totals
        df.drop(columns=[elstr], inplace=True)
        assert elstr not in df.columns
    elif form == 'element':
        if unit_scale is None: unit_scale = 10000 # Wt% to ppm
        assert unit_scale > 0
        # Bug fix (as above): oxide data -> element equivalents.
        convert_function = oxide_conversion(ox, el)
        conv_values = convert_function(df.loc[:, oxstr]).values * unit_scale
        totals = np.nansum(np.vstack((df.loc[:, elstr].values, conv_values)),
                           axis=0)
        totals[np.isclose(totals, 0)] = np.nan
        df.loc[:, elstr] = totals
        df.drop(columns=[oxstr], inplace=True)
        assert oxstr not in df.columns
    return df
def check_multiple_cation_inclusion(df, exclude=['LOI', 'FeOT', 'Fe2O3T']):
    """
    Returns cations which are present in both oxide and elemental form.
    Todo: Options for output (string/formula).
    """
    majors = [c for c in common_oxides() if c in df.columns]
    major_cations = [get_cations(ox)[0] for ox in majors
                     if ox not in exclude]
    trace_cations = [el for el in common_elements(output='formula')
                     if str(el) in df.columns]
    return {el for el in major_cations if el in trace_cations}
def add_ratio(df: pd.DataFrame,
              ratio:str,
              alias:str='',
              convert=lambda x: x):
    """
    Add a ratio of components A and B, given in the form of string 'A/B'.
    Returned series be assigned an alias name.
    """
    num, den = ratio.split('/')
    assert num in df.columns
    assert den in df.columns
    name = alias if alias else ratio
    conv = convert(df.loc[:, [num, den]])
    # Zero denominators become NaN so the division cannot produce inf.
    conv.loc[(conv[den]==0.), den] = np.nan
    df.loc[:, name] = conv.loc[:, num] / conv.loc[:, den]
    return df
def add_MgNo(df: pd.DataFrame,
             molecularIn=False,
             elemental=False,
             components=False):
    """
    Append a molar 'Mg#' (magnesium number) column to *df* in place.

    Parameters
    ----------
    df : pd.DataFrame
        Data containing MgO/FeO (mass), molar MgO/FeO, or molar Mg/Fe columns.
    molecularIn : bool
        True if the input columns are already molar quantities.
    elemental : bool
        With molecularIn=True, use elemental Mg/Fe columns instead of oxides.
    components : bool
        With molecularIn=False, iron is split into species (uses FeO);
        otherwise total iron (FeOT) is required.

    Returns
    -------
    pd.DataFrame
        The mutated input frame (returned for consistency with add_ratio).
    """
    if not molecularIn:
        if components:
            # Iron is split into species
            df.loc[:, 'Mg#'] = df['MgO'] / pt.formula('MgO').mass / \
                (df['MgO'] / pt.formula('MgO').mass + df['FeO'] / pt.formula('FeO').mass)
        else:
            # Total iron is used
            assert 'FeOT' in df.columns
            df.loc[:, 'Mg#'] = df['MgO'] / pt.formula('MgO').mass / \
                (df['MgO'] / pt.formula('MgO').mass + df['FeOT'] / pt.formula('FeO').mass)
    else:
        if not elemental:
            # Molecular Oxides
            df.loc[:, 'Mg#'] = df['MgO'] / (df['MgO'] + df['FeO'])
        else:
            # Molecular Elemental
            df.loc[:, 'Mg#'] = df['Mg'] / (df['Mg'] + df['Fe'])
    # Consistency fix: the other add_* helpers return the frame; previously
    # this one implicitly returned None.
    return df
def lambda_lnREE(df,
                 norm_to='Chondrite_PON',
                 exclude=['Pm', 'Eu'],
                 params=None,
                 degree=5):
    """
    Calculates lambda coefficients for a given set of REE data, normalised
    to a specific composition. Lambda factors are given for the
    radii vs. ln(REE/NORM) polynomical combination.

    Parameters
    ----------
    df : pd.DataFrame
        Data with REE columns (element names as strings or formula objects).
    norm_to : str | RefComp | iterable
        Reference composition name, RefComp object, or explicit abundances.
    exclude : list
        REE symbols to leave out of the fit.
    params : list | None
        Pre-computed orthogonal-polynomial constants; computed when None.
    degree : int
        Polynomial degree (overridden by len(params) when params is given).
    """
    ree = [i for i in REE() if (not str(i) in exclude) and
           (str(i) in df.columns or i in df.columns)]  # no promethium
    radii = np.array(get_radii(ree))
    if params is None:
        params = OP_constants(radii, degree=degree)
    else:
        degree = len(params)
    col_indexes = [i for i in df.columns if i in ree
                   or i in map(str, ree)]
    if isinstance(norm_to, str):
        norm = ReferenceCompositions()[norm_to]
        norm_abund = np.array([norm[str(el)].value for el in ree])
    elif isinstance(norm_to, RefComp):
        norm_abund = np.array([getattr(norm_to, str(e)) for e in col_indexes])
    else:  # list, iterable, pd.Index etc
        # Bug fix: previously iterated `norm_abund` before it was assigned
        # (UnboundLocalError); the abundances come from `norm_to` itself.
        norm_abund = np.array([i for i in norm_to])
    assert len(norm_abund) == len(ree)
    labels = [chr(955) + str(d) for d in range(degree)]  # lambda-0, lambda-1, ...
    # Normalise to the reference abundances, then take logs.
    norm_df = df.loc[:, col_indexes]
    norm_df.loc[:, col_indexes] = np.divide(norm_df.loc[:, col_indexes].values,
                                            norm_abund)
    norm_df = norm_df.applymap(np.log)
    lambda_partial = functools.partial(lambdas,
                                       xs=radii,
                                       params=params,
                                       degree=degree)
    lambdadf = pd.DataFrame(np.apply_along_axis(lambda_partial, 1,
                                                norm_df.values),
                            index=df.index,
                            columns=labels)
    return lambdadf
# Ionic radii keyed by REE element symbol, in the same order as REE().
# Presumably Shannon radii in angstroms - TODO confirm the source.
_RADII = {str(k): v for (k, v) in zip(REE(), [1.160, 1.143, 1.126, 1.109,
                                              1.093, 1.079, 1.066, 1.053,
                                              1.040, 1.027, 1.015, 1.004,
                                              0.994, 0.985, 0.977])}
|
from sqlalchemy import create_engine
from flask_sqlalchemy import SQLAlchemy
# Initialize Flask-SQLAlchemy
db = SQLAlchemy()
def db_connection():
    """Return the database connection URI (local SQLite file `test.db`)."""
    return 'sqlite:///test.db'
|
# Package version (semantic versioning).
__version__ = '0.0.1'
# Public API; the names are re-exported from the display submodule below.
__all__ = ["fig_to_d3", "display_d3", "show_d3"]
from .display import fig_to_d3, display_d3, show_d3
|
from random import randint
import matplotlib.pyplot as plt
import numpy as np
# Number of spins and simulations; overwritten by user input below.
cant_tiros = 0
cant_simulaciones = 0
# Input (prompts intentionally remain in Spanish - they are user-facing).
print("Elija un numero de la ruleta (0-36)")
num_elegido = input()
print("Ingrese la cantidad de tiros que desea simular")
cant_tiros = input()
print("Ingrese la cantidad de simulaciones que desea realizar")
cant_simulaciones = input()
num_elegido = int(num_elegido)
cant_tiros = int(cant_tiros)
cant_simulaciones = int(cant_simulaciones)
# Expected values (a priori the 37 outcomes are known to be equiprobable)
frecuencia_esperada = 1/37
promedio_esperado = np.average(range(37))
desvio_esperado = np.std(range(37))
varianza_esperada = np.var(range(37))
# Initialise the overall lists of tracked statistics. Each holds one sublist
# per simulation with the running values observed during that run.
frecuencias_relativas = []
promedios = []
desvios = []
varianzas = []
for k in range(cant_simulaciones):
    frec_absol = 0
    frec_rel = 0
    # Lists for the current run
    tiradas = []
    frec_relativas_k = []
    promedios_k = []
    desvios_k = []
    varianzas_k = []
    # Simulation (single run)
    for i in range(1, cant_tiros + 1):
        # randint() returns a random integer between both bounds, inclusive
        tiro = randint(0,36)
        tiradas.append(tiro)
        # Update the relative frequency of the chosen number
        if (tiro == num_elegido):
            frec_absol += 1
        frec_rel = frec_absol / i
        # Append running statistics to the current simulation's lists
        frec_relativas_k.append(frec_rel)
        promedios_k.append(np.average(tiradas))
        desvios_k.append(np.std(tiradas))
        varianzas_k.append(np.var(tiradas))
    # Append the finished run's lists to the overall matrices
    frecuencias_relativas.append(frec_relativas_k)
    promedios.append(promedios_k)
    desvios.append(desvios_k)
    varianzas.append(varianzas_k)
# Plot the data
# subplots() with constrained_layout=True automatically resizes the padding
# between subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, constrained_layout=True)
# Relative frequency (one line per simulation; dashed line = expected value)
for i in frecuencias_relativas:
    ax1.plot(i)
ax1.hlines(frecuencia_esperada,0, cant_tiros, linestyles="dashed", lw=1)
ax1.set_xlabel("Numero de tiradas")
ax1.set_ylabel("Frecuencia Relativa")
ax1.set_title("Frecuencia Relativa")
# Mean
for i in promedios:
    ax2.plot(i)
ax2.hlines(promedio_esperado,0, cant_tiros, linestyles="dashed", lw=1)
ax2.set_xlabel("Numero de tiradas")
ax2.set_ylabel("Promedio")
ax2.set_title("Promedio")
# Standard deviation
for i in desvios:
    ax3.plot(i)
ax3.hlines(desvio_esperado ,0, cant_tiros, linestyles="dashed", lw=1)
ax3.set_xlabel("Numero de tiradas")
ax3.set_ylabel("Desvio estándar")
ax3.set_title("Desvio estándar")
# Variance
for i in varianzas:
    ax4.plot(i)
ax4.hlines(varianza_esperada,0, cant_tiros, linestyles="dashed", lw=1)
ax4.set_xlabel("Numero de tiradas")
ax4.set_ylabel("Varianza")
ax4.set_title("Varianza")
# Render the figure with every subplot and save it to a file
plt.savefig("Grafica (" + str(cant_simulaciones) + " simulaciones).png")
plt.show()
|
#Author: Dylan E. Wheeler
#Email: dylan.wheeler@usm.edu
#Date: 2019 05 20
#Course: CSC411 - Intro to Databases
#Prof.: Dr. Bo Li
import sqlite3
import random
import glob
import engine
from engine import get_cmd
from datetime import date
#use formatted text colors if library is available
try:
    import colorama
except Exception as error:
    # colorama missing/broken: report and fall back to plain text output.
    print(error)
    cm = False  # cm flags whether colored output is available
else:
    colorama.init()
    from colorama import Fore, Back, Style
    cm = True
#
def reorder(conn, user):
    """Order additional stock of an inventory item from a supplier.

    Prompts for an inventory id, a quantity and a supplier id, creates a
    per-order itemization table, then records the order in the orders table.

    conn -- open sqlite3 connection
    user -- logged-in user object (provides .id and .store_id)
    """
    cursor = conn.cursor()
    # Get inventory id for the item to order. (`user_input` instead of the
    # old `input`, which shadowed the builtin.)
    user_input = get_cmd("Enter the " +
        (Fore.CYAN if cm else "") + "Inventory ID" + (Fore.RESET if cm else "") +
        " of the item you'd like to order.")
    if engine.quit(user_input, "Exiting order mode."):
        return
    # Lookups below use '?' placeholders instead of string-built SQL.
    while int(user_input) not in [i[0] for i in cursor.execute(
            "SELECT id FROM inventory WHERE id = ?;", (user_input,)).fetchall()]:
        user_input = get_cmd("Inventory ID not found, please re-enter Inventory ID, or type "+
            (Fore.GREEN if cm else "") + "cancel" + (Fore.RESET if cm else "") +
            " to cancel.")
        # Bug fix: was `if (engine.quit(input), "Exiting order mode."):` - a
        # 2-tuple, always truthy, so the function aborted on every re-entry.
        if engine.quit(user_input, "Exiting order mode."):
            return
    #end while id not found
    reorder_id = int(user_input)
    #get quantity
    while True:
        try:
            user_input = get_cmd("Enter the " +
                (Fore.CYAN if cm else "") + "quantity" + (Fore.RESET if cm else "") +
                " of the item you'd like to order.")
            if engine.quit(user_input, "Exiting order mode."):
                return
            user_input = int(user_input)
        except ValueError:
            print("Error, please enter an integer.")
            continue
        else:
            reorder_quantity = int(user_input)
            break
    #end get quantity
    #output suppliers for user reference
    print("Available Suppliers:")
    engine.print_cursor_fetch(cursor.execute("SELECT * FROM supplier;").fetchall(), cursor.description)
    print()
    #get supplier id for items to order
    user_input = get_cmd("Enter the " +
        (Fore.CYAN if cm else "") + "Supplier ID" + (Fore.RESET if cm else "") +
        " you would like to order from.")
    if engine.quit(user_input, "Exiting order mode."):
        return
    while int(user_input) not in [i[0] for i in cursor.execute(
            "SELECT id FROM supplier WHERE id = ?;", (user_input,)).fetchall()]:
        user_input = get_cmd("Supplier ID not found, please re-enter Supplier ID, or type "+
            (Fore.GREEN if cm else "") + "cancel" + (Fore.RESET if cm else "") +
            " to cancel.")
        # Bug fix: same misplaced parenthesis as above (tuple was always truthy).
        if engine.quit(user_input, "Exiting order mode."):
            return
    #end while supplier id not found
    supplier_id = int(user_input)
    #generate itemization id: find an id that is unique among itemization csvs
    untrimmed_itemization_list = glob.glob(str(".\\itemization\\*.csv"))
    itemization_list = []
    for each_item in untrimmed_itemization_list:
        itemization_list.append(str(each_item.replace(".\\itemization\\", "").replace(".csv", "")))
    while True:
        item_id = random.randint(11111111, 99999999)
        item_id = str("i" + str(item_id))
        if item_id in itemization_list:
            continue #if exists, try again
        else:
            break #if unique, move on
    #create itemization table
    try:
        query = str(engine.get_itemization_query(item_id))
        cursor.execute(query)
    except sqlite3.Error as error:
        print((Fore.RED if cm else "") +
              "Error building itemization table for " + str(item_id) + ":" +
              (Fore.RESET if cm else ""))
        print(query)
        print(error)
    else:
        try:
            this_row_id = str(random.randint(11111111, 99999999)) #get random id for item row
            this_row_category = str(engine.get_cell(conn, "category", "inventory", "id", reorder_id)) #get category from inventory table
            this_row_item_id = str(engine.get_cell(conn, "item_id", "inventory", "id", reorder_id)) #get item_id from inventory table
            this_row_price = str(engine.get_cell(conn, "price", this_row_category, "id", this_row_item_id)) #get unit price
            # NOTE(review): table name comes from item_id so it cannot be a
            # parameter; the VALUES should still move to '?' placeholders.
            query = str("INSERT INTO " + item_id + " VALUES ('401" +
                        str(this_row_id) + "', '" +
                        str(this_row_category) + "', '" +
                        str(this_row_item_id) + "', '" +
                        str(reorder_quantity) + "', '" +
                        str(this_row_price) +
                        "');")
            cursor.execute(query)
        except sqlite3.Error as error:
            print((Fore.RED if cm else "") +
                  "Error populating itemization table for " + str(item_id) + ":" +
                  (Fore.RESET if cm else ""))
            print(query)
            print(error)
        else:
            #add order to orders table
            try:
                #get unique order id
                while True:
                    this_order_id = random.randint(11111111, 99999999)
                    if this_order_id in [i[0] for i in cursor.execute("SELECT id FROM orders;")]:
                        continue #if exists, try again
                    else:
                        break #if unique, move on
                grand_total = float(float(reorder_quantity) * float(this_row_price))
                query = str("INSERT INTO orders VALUES ('" +
                            str(this_order_id) + "', '" +
                            str(date.today()) + "', '" +
                            str(user.store_id) + "', '" +
                            str(supplier_id) + "', '" +
                            str(user.id) + "', '" +
                            str(item_id) + "', '" +
                            str(grand_total) +
                            "');")
                print("Ordering " + str(reorder_quantity) + " of item " +
                      str(engine.get_cell(conn, "name", "inventory", "id", reorder_id)) +
                      "...")
                print(query)
                cursor.execute(query)
            except sqlite3.Error as error:
                print((Fore.RED if cm else "") +
                      "Error populating order table for " + str(this_order_id) + ":" +
                      (Fore.RESET if cm else ""))
                print(error)
#end reorder()
def restock(conn, user):
    """Restock items in inventory: add a user-supplied quantity to an item's
    current stock level for the user's store."""
    cursor = conn.cursor()
    # Show current inventory (lowest stock first) for reference.
    engine.print_cursor_fetch(cursor.execute("SELECT * FROM inventory WHERE store_id='" + str(user.store_id) +
                                             "' ORDER BY stock ASC").fetchall(), cursor.description)
    #get inventory id for items to add
    user_input = get_cmd("Enter the " +
        (Fore.CYAN if cm else "") + "Inventory ID" + (Fore.RESET if cm else "") +
        " of the item you'd like to restock.")
    if engine.quit(user_input, "Exiting order mode."):
        return
    while int(user_input) not in [i[0] for i in cursor.execute(
            "SELECT id FROM inventory WHERE id = ?;", (user_input,)).fetchall()]:
        user_input = get_cmd("Inventory ID not found, please re-enter Inventory ID, or type "+
            (Fore.GREEN if cm else "") + "cancel" + (Fore.RESET if cm else "") +
            " to cancel.")
        # Bug fix: was `if (engine.quit(input), "Exiting order mode."):` - a
        # 2-tuple, always truthy, so any re-entry aborted the restock.
        if engine.quit(user_input, "Exiting order mode."):
            return
    #end while id not found
    restock_id = int(user_input)
    #get quantity
    while True:
        try:
            user_input = get_cmd("Enter the " +
                (Fore.CYAN if cm else "") + "quantity" + (Fore.RESET if cm else "") +
                " of the item you'd like to restock.")
            if engine.quit(user_input, "Exiting order mode."):
                return
            user_input = int(user_input)
        except ValueError:
            print("Error, please enter an integer.")
            continue
        else:
            restock_quantity = int(user_input)
            break
    #end get quantity
    # New stock level = requested quantity + current stock.
    restock_quantity = int(restock_quantity) + int(engine.get_cell(conn, "stock", "inventory", "id", restock_id))
    try:
        # Parameterized update (previously string-concatenated SQL).
        query = "UPDATE inventory SET stock = ? WHERE id = ?;"
        cursor.execute(query, (str(restock_quantity), str(restock_id)))
    except sqlite3.Error as error:
        print((Fore.RED if cm else "") +
              "Error restocking inventory item " + str(restock_id) + ":" +
              (Fore.RESET if cm else ""))
        print(query)
        print(error)
    else:
        print("Successfully added stock.")
        engine.print_cursor_fetch(cursor.execute("SELECT * FROM inventory WHERE store_id='" + str(user.store_id) +
                                                 "' ORDER BY stock ASC").fetchall(), cursor.description)
#end restock()
def checkout(conn, user):
    """Interactive checkout flow.

    Builds a cart of inventory ids typed by the clerk, optionally looks up a
    customer membership and a card on file, writes a per-purchase itemization
    table (generated id of the form ``i<8 digits>``) and finally a row in the
    ``purchases`` table.

    :param conn: open sqlite3 connection (a cursor is created locally)
    :param user: logged-in user object; only ``user.store_id`` is read
    :return: None (returns early whenever the clerk cancels)
    """
    # NOTE(review): every query below is assembled by string concatenation
    # from user-typed values -- SQL-injection-prone; the value positions
    # should move to parameterized queries (identifiers such as the
    # generated itemization table name cannot be parameterized).
    cursor = conn.cursor()
    cart = []            # inventory ids (as typed strings) to purchase
    grand_total = 0.00   # running total in dollars
    while True:
        #get inventory id for items to order
        input = get_cmd("Enter the " +
                        (Fore.CYAN if cm else "") + "Inventory ID" + (Fore.RESET if cm else "") +
                        " of the item you'd add.\n" +
                        "Enter " +
                        (Fore.CYAN if cm else "") + "cancel" + (Fore.RESET if cm else "") +
                        " to exit.\n" +
                        "Enter " +
                        (Fore.CYAN if cm else "") + "done" + (Fore.RESET if cm else "") +
                        " when complete.")
        if (engine.quit(input, "Exiting checkout mode.")):
            input = "_BREAKLOOP_"
            break
        elif input == "done":
            break
        # TODO(review): int(input) raises ValueError on non-numeric entry
        # that is neither "cancel" nor "done"; consider validating first.
        elif int(input) not in [i[0] for i in cursor.execute("SELECT id FROM inventory WHERE " +
                                                             "id = '" + input + "';").fetchall()]:
            print((Fore.RED if cm else "") +
                  "Error: inventory item " + str(input) + " not found." +
                  (Fore.RESET if cm else ""))
            continue #got to top of input loop
        else: #not done, not exit, and inventory item is found; add to list
            cart.append(input)
            print("Item " +
                  (Fore.GREEN if cm else "") + input + (Fore.RESET if cm else "") +
                  " added to purchase!")
    if input == "_BREAKLOOP_": #if canceling purchase
        return #break out of checkout mode
    #end while True
    #get customer info
    input = get_cmd("Would the customer like to use their membership ID? Enter the " +
                    (Fore.CYAN if cm else "") + "Customer ID" + (Fore.RESET if cm else "") +
                    " or enter " +
                    (Fore.CYAN if cm else "") + "no" + (Fore.RESET if cm else "") + ".")
    if (engine.quit(input, "Exiting checkout mode.")):
        return
    elif (input.lower() == "no"):
        customer_id = "NULL"
    else:
        customer_id = input
        while int(input) not in [i[0] for i in cursor.execute("SELECT id FROM customer WHERE " +
                                                              "id = '" + input + "';").fetchall()]:
            input = get_cmd("Customer ID not found, please re-enter Customer ID, or type "+
                            (Fore.GREEN if cm else "") + "cancel" + (Fore.RESET if cm else "") +
                            " to cancel.")
            customer_id = input
            # BUG FIX: this check used to read
            #     if (engine.quit(input), "Not using Customer ID, get CC info."):
            # i.e. a two-element tuple, which is ALWAYS truthy -- so the
            # first retry always reset customer_id to NULL and left the
            # loop even when a valid id was entered.  The message belongs
            # inside the engine.quit() call.
            if (engine.quit(input, "Not using Customer ID, get CC info.")):
                customer_id = "NULL"
                break
    #end get customer id
    #get cc info
    if customer_id != "NULL":
        input = get_cmd("Would the customer like to use new CC or charge or their card on file? Enter the " +
                        (Fore.CYAN if cm else "") + "card" + (Fore.RESET if cm else "") +
                        " or enter " +
                        (Fore.CYAN if cm else "") + "account" + (Fore.RESET if cm else "") + ".")
        if (engine.quit(input, "Exiting checkout mode.")):
            return
    if ((customer_id == "NULL") or (input == "card")):
        input = get_cmd("Enter the customer's " +
                        (Fore.CYAN if cm else "") + "CC number" + (Fore.RESET if cm else "") + ".")
        if (engine.quit(input, "Exiting checkout mode.")):
            return
        customer_cc = str(input)
        input = get_cmd("Enter the customer's " +
                        (Fore.CYAN if cm else "") + "CC expiration date" + (Fore.RESET if cm else "") + ".")
        if (engine.quit(input, "Exiting checkout mode.")):
            return
        customer_cc_exp = str(input)
    elif input == "account":
        # Charge the card stored on the customer record.
        customer_cc = str(engine.get_cell(conn, "card_number", "customer", "id", customer_id))
        customer_cc_exp = str(engine.get_cell(conn, "card_exp", "customer", "id", customer_id))
    else:
        print((Fore.RED if cm else "") +
              "Error inputing CC information. CC set to NULL, contact manager." +
              (Fore.RESET if cm else "") )
        customer_cc = str("NULL")
        customer_cc_exp = str("NULL")
    #end get CC info
    #generate itemization id unique among existing .\itemization\*.csv files
    untrimmed_itemization_list = glob.glob(str(".\\itemization\\*.csv"))
    itemization_list = []
    for each_item in untrimmed_itemization_list:
        itemization_list.append(str(each_item.replace(".\\itemization\\", "").replace(".csv", "")))
    while True:
        item_id = random.randint(11111111, 99999999)
        item_id = str("i" + str(item_id))
        if item_id in itemization_list:
            continue #if exists, try again
        else:
            break #if unique, move on
    #
    #create itemization table
    try:
        query = str(engine.get_itemization_query(item_id))
        cursor.execute(query)
    except sqlite3.Error as error:
        print((Fore.RED if cm else "") +
              "Error building itemization table for " + str(item_id) + ":" +
              (Fore.RESET if cm else ""))
        print(query)
        print(error)
    else:
        #add each item in cart to itemized table
        for each_item_id in cart:
            try:
                this_row_id = str(random.randint(11111111, 99999999)) #get random id for item row
                this_row_category = str(engine.get_cell(conn, "category", "inventory", "id", each_item_id)) #get category from inventory table
                this_row_item_id = str(engine.get_cell(conn, "item_id", "inventory", "id", each_item_id)) #get item_id from inventory table
                this_row_price = str(engine.get_cell(conn, "price", this_row_category, "id", this_row_item_id)) #unit price from the category table
                query = str("INSERT INTO " + item_id + " VALUES ('401" +
                            str(this_row_id) + "', '" +
                            str(this_row_category) + "', '" +
                            str(this_row_item_id) + "', '" +
                            str("1") + "', '" +
                            str(this_row_price) +
                            "');")
                print(query) #debugging
                cursor.execute(query)
            except sqlite3.Error as error:
                print((Fore.RED if cm else "") +
                      "Error populating itemization table for " + str(item_id) + ":" +
                      (Fore.RESET if cm else ""))
                print(query)
                print(error)
            else:
                grand_total = float(float(grand_total) + float(this_row_price))
        #end adding to table
        #add purchase to purchases table
        try:
            #get unique order id
            while True:
                this_purchase_id = random.randint(11111111, 99999999)
                if this_purchase_id in [i[0] for i in cursor.execute("SELECT id FROM purchases;")]:
                    continue #if exists, try again
                else:
                    break #if unique, move on
            #
            """
            From pruchases schema:
            id;customer_id;store_id;cc_number;cc_expiration;itemization_id;grand_total;date
            int;int;int;int;date;varchar(255);float(12,2);date
            """
            query = str("INSERT INTO purchases VALUES ('" +
                        str(this_purchase_id) + "', '" +
                        str(customer_id) + "', '" +
                        str(user.store_id) + "', '" +
                        str(customer_cc) + "', '" +
                        str(customer_cc_exp) + "', '" +
                        str(item_id) + "', '" +
                        str(grand_total) + "', '" +
                        str(date.today()) +
                        "');")
            for each_item in cart:
                print("Buying item " +
                      str(engine.get_cell(conn, "name", "inventory", "id", each_item)) +
                      "...")
            print(query) #debugging
            cursor.execute(query)
        except sqlite3.Error as error:
            print((Fore.RED if cm else "") +
                  "Error populating puchases table for " + str(this_purchase_id) + ":" +
                  (Fore.RESET if cm else ""))
            print(error)
    print("\nGrand total for the purchase is:\n" + (Fore.GREEN if cm else "") +
          "$" + str(round(grand_total,2)) + (Fore.RESET if cm else "") + "\n")
    #end checkout()
from db import db, ma
class ReviewModel(db.Model):
    """SQLAlchemy model for the ``Review`` table.

    A review is keyed by the composite primary key
    (review_id, cws_id, coworker_id), linking one coworker's review of one
    coworking space.  Persistence and lookups go through raw SQL on
    ``db.engine`` rather than the ORM session.
    """
    __tablename__ = 'Review'
    # Composite primary key: surrogate id plus the two foreign keys below.
    review_id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.String(45), nullable=False)
    comment = db.Column(db.String(2000), nullable=False)
    rating = db.Column(db.Float(), nullable=False)
    # Not set by __init__ or save_to_db; presumably filled by the database
    # (default/trigger) or left NULL -- TODO confirm against the schema.
    date = db.Column(db.DateTime())
    cws_id = db.Column(db.Integer(), db.ForeignKey('CoworkingSpace.cws_id'), primary_key=True)
    coworker_id = db.Column(db.Integer(), db.ForeignKey('Coworker.coworker_id'), primary_key=True)
    def __init__(self, title,
                 comment,
                 rating,
                 cws_id,
                 coworker_id):
        # review_id and date are deliberately not assigned here.
        self.title = title
        self.comment = comment
        self.rating = rating
        self.cws_id = cws_id
        self.coworker_id = coworker_id
    def save_to_db(self):
        """Insert this review via raw SQL, bypassing the ORM session.

        NOTE(review): relies on SQLAlchemy 1.x ``Engine.execute`` and the
        ``%s`` paramstyle (suggests a MySQL driver); ``Engine.execute`` was
        removed in SQLAlchemy 2.0 -- verify before upgrading.
        """
        db.engine.execute("INSERT INTO `Review` (title, comment, rating, cws_id, coworker_id) VALUES (%s,%s,%s,%s,%s)",
                          (self.title, self.comment, self.rating, self.cws_id, self.coworker_id))
    @classmethod
    def get_all(cls):
        """Return every Review row as a list of row objects."""
        return db.engine.execute("SELECT * FROM Review").fetchall()
    @classmethod
    def find_by_id(cls, review_id):
        """Return the first review with the given review_id, or None."""
        # NOTE(review): (review_id) is NOT a tuple -- a bare scalar is passed
        # as the single bound parameter.  SQLAlchemy 1.x accepts this, but
        # (review_id,) would be the unambiguous form.
        return db.engine.execute("SELECT * FROM Review WHERE review_id=%s", (review_id)).fetchone()
    @classmethod
    def find_by_cwsId_coworkerId(cls, cws_id, coworker_id):
        """Return the review a coworker left for a coworking space, or None."""
        return db.engine.execute("SELECT * FROM Review WHERE cws_id=%s AND coworker_id=%s", (cws_id, coworker_id)).fetchone()
    @classmethod
    def find_by_cwspace(cls, cws_id):
        """Return all reviews for one coworking space."""
        return db.engine.execute("SELECT * FROM Review WHERE cws_id=%s", (cws_id)).fetchall()
class ReviewSchema(ma.Schema):
    """Marshmallow schema serializing ReviewModel rows for the API."""
    class Meta:
        # Whitelist of fields exposed in serialized output.
        fields = ('review_id', 'title', 'comment', 'rating', 'cws_id', 'coworker_id', 'date')
from django.shortcuts import render, get_object_or_404, redirect
from .models import Question, Answer, Comment
from django.utils import timezone
from .forms import QuestionForm, AnswerForm, CommentForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator
from django.db.models import Q, Count
def index(request):
    """Render the landing page."""
    template_name = 'pybo/index.html'
    return render(request, template_name)
def profile(request):
    """Render the user profile page."""
    template_name = 'pybo/profile.html'
    return render(request, template_name)
def board(request):
    """Question list with paging, keyword search and sort options.

    Query params: ``page`` (default '1'), ``kw`` (search keyword), ``so``
    (sort key: 'recent' | 'recommend' | 'popular').
    e.g. 127.0.0.1:8000/pybo/board/?page=1
    """
    page = request.GET.get('page', '1')
    kw = request.GET.get('kw', '')
    so = request.GET.get('so', 'recent')

    # Base ordering.
    if so == 'recommend':
        # num_voter: transient annotation counting recommenders.
        question_list = Question.objects.annotate(
            num_voter=Count('voter')).order_by('-num_voter', '-create_date')
    elif so == 'popular':
        # num_answer: transient annotation counting answers.
        question_list = Question.objects.annotate(
            num_answer=Count('answer')).order_by('-num_answer', '-create_date')
    else:  # 'recent'
        question_list = Question.objects.order_by('-create_date')

    # Optional keyword filter over subject, content and both author names;
    # distinct() removes duplicates introduced by the answer joins.
    if kw:
        keyword_filter = (
            Q(subject__icontains=kw)
            | Q(content__icontains=kw)
            | Q(author__username__icontains=kw)
            | Q(answer__author__username__icontains=kw)
            | Q(answer__content__icontains=kw)
        )
        question_list = question_list.filter(keyword_filter).distinct()

    # Ten questions per page.
    page_obj = Paginator(question_list, 10).get_page(page)
    context = {'question_list': page_obj, 'page': page, 'kw': kw, 'so': so}
    return render(request, 'pybo/question_list.html', context)
def detail(request, question_id):
    """Show a single question; 404 when it does not exist."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'pybo/question_detail.html', {'question': question})
@login_required(login_url='common:login')
def answer_create(request, question_id):
    """Create an answer for a question (POST); otherwise re-render detail."""
    question = get_object_or_404(Question, pk=question_id)
    form = AnswerForm(request.POST) if request.method == 'POST' else AnswerForm()
    if request.method == 'POST' and form.is_valid():
        answer = form.save(commit=False)
        answer.author = request.user          # authenticated writer
        answer.question = question
        answer.create_date = timezone.now()
        answer.save()
        return redirect('pybo:detail', question_id=question.id)
    # GET, or invalid POST: show the detail page with the (bound) form.
    return render(request, 'pybo/question_detail.html',
                  {'question': question, 'form': form})
@login_required(login_url='common:login')
def question_create(request):
    """Create a new question (POST); show an empty form on GET."""
    form = QuestionForm(request.POST) if request.method == 'POST' else QuestionForm()
    if request.method == 'POST' and form.is_valid():
        question = form.save(commit=False)
        question.author = request.user        # authenticated writer
        question.create_date = timezone.now()
        question.save()
        return redirect('pybo:board')
    return render(request, 'pybo/question_form.html', {'form': form})
@login_required(login_url="common:login")
def question_modify(request, question_id):
    """Edit an existing question; only its author may do so."""
    question = get_object_or_404(Question, pk=question_id)
    # Guard clause: reject non-authors early.
    if request.user != question.author:
        messages.error(request, "수정 권한이 없습니다.")
        return redirect('pybo:detail', question_id=question.id)
    if request.method == "POST":
        form = QuestionForm(request.POST, instance=question)
        if form.is_valid():
            edited = form.save(commit=False)
            edited.author = request.user
            edited.modify_date = timezone.now()   # stamp the edit time
            edited.save()
            return redirect('pybo:detail', question_id=edited.id)
    else:
        form = QuestionForm(instance=question)
    return render(request, 'pybo/question_form.html', {'form': form})
@login_required(login_url='common:login')
def question_delete(request, question_id):
    """Delete a question; only its author may do so."""
    question = get_object_or_404(Question, pk=question_id)
    if request.user == question.author:
        question.delete()
        return redirect('pybo:board')
    messages.error(request, "삭제 권한이 없습니다.")
    return redirect('pybo:detail', question_id=question.id)
@login_required(login_url='common:login')
def answer_modify(request, answer_id):
    """Edit an existing answer; only its author may do so."""
    answer = get_object_or_404(Answer, pk=answer_id)
    # Guard clause: reject non-authors early.
    if request.user != answer.author:
        messages.error(request, "수정 권한이 없습니다.")
        return redirect('pybo:detail', question_id=answer.question.id)
    if request.method == "POST":
        form = AnswerForm(request.POST, instance=answer)
        if form.is_valid():
            edited = form.save(commit=False)
            edited.author = request.user
            edited.modify_date = timezone.now()   # stamp the edit time
            edited.save()
            return redirect('pybo:detail', question_id=edited.question.id)
    else:
        form = AnswerForm(instance=answer)
    return render(request, 'pybo/answer_form.html', {'form': form})
@login_required(login_url='common:login')
def answer_delete(request, answer_id):
    """Delete an answer; only its author may do so.

    Always redirects back to the parent question's detail page.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    if request.user != answer.author:
        messages.error(request, '삭제 권한이 없습니다.')
        # BUG FIX: previously redirected with question_id=answer.id (the
        # answer's own pk), which points at the wrong -- possibly missing --
        # question.  Use the parent question's id, as the success path does.
        return redirect('pybo:detail', question_id=answer.question.id)
    else:
        answer.delete()
        return redirect('pybo:detail', question_id=answer.question.id)
@login_required(login_url='common:login')
def vote_question(request, question_id):
    """Recommend a question; self-voting is rejected with an error flash."""
    question = get_object_or_404(Question, pk=question_id)
    if request.user != question.author:
        question.voter.add(request.user)   # M2M add; repeat votes are no-ops
    else:
        # The logged-in user wrote this question themselves.
        messages.error(request, '본인이 작성한 글은 추천할 수 없습니다.')
    return redirect('pybo:detail', question_id=question.id)
@login_required(login_url='common:login')
def comment_create_question(request, question_id):
    """Attach a new comment to a question."""
    question = get_object_or_404(Question, pk=question_id)
    form = CommentForm(request.POST) if request.method == "POST" else CommentForm()
    if request.method == "POST" and form.is_valid():
        comment = form.save(commit=False)
        comment.author = request.user         # comment writer = logged-in member
        comment.create_date = timezone.now()  # creation timestamp
        comment.question = question           # link to the parent question
        comment.save()
        return redirect('pybo:detail', question_id=question.id)
    return render(request, 'pybo/comment_form.html', {'form': form})
@login_required(login_url='common:login')
def comment_modify_question(request, comment_id):
    """Modify a comment on a question; only its author may do so.

    FIXES:
    1. login_url was 'comment:login', a namespace that does not exist in
       this project -- every other view here uses 'common:login'.
    2. ``form.save(comment)`` passed the instance positionally as the
       ``commit`` flag (truthy), causing a premature extra save before
       author/modify_date were set; use ``commit=False`` as elsewhere.
    3. Added the ownership check, matching comment_modify_answer.
    """
    comment = get_object_or_404(Comment, pk=comment_id)
    if request.user != comment.author:
        messages.error(request, '댓글수정권한이 없습니다')
        return redirect('pybo:detail', question_id=comment.question.id)
    if request.method == "POST":
        form = CommentForm(request.POST, instance=comment)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.author = request.user
            comment.modify_date = timezone.now()  # stamp the edit time
            comment.save()
            return redirect("pybo:detail", question_id=comment.question.id)
    else:
        form = CommentForm(instance=comment)
    context = {'form': form}
    return render(request, 'pybo/comment_form.html', context)
@login_required(login_url='common:login')
def comment_delete_question(request, comment_id):
    """Delete a comment on a question; only its author may do so.

    FIXES:
    1. login_url was the nonexistent 'comment:login' namespace; the module
       authenticates against 'common:login'.
    2. Added the missing ownership check -- previously any logged-in user
       could delete any comment (comment_delete_answer already checks).
    """
    comment = get_object_or_404(Comment, pk=comment_id)
    if request.user != comment.author:
        messages.error(request, '댓글삭제권한이 없습니다')
        return redirect('pybo:detail', question_id=comment.question.id)
    question_id = comment.question.id  # capture before the row is deleted
    comment.delete()
    return redirect('pybo:detail', question_id=question_id)
@login_required(login_url='common:login')
def vote_answer(request, answer_id):
    """Recommend an answer (mirrors vote_question).

    BUG FIX: the original ``def`` had no body at all (only a comment),
    which is a SyntaxError and prevented this module from importing.
    Implemented by analogy with vote_question.  login_url also fixed from
    the nonexistent 'comment:login' namespace to 'common:login'.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    if request.user == answer.author:
        messages.error(request, '본인이 작성한 글은 추천할 수 없습니다.')
    else:
        # Assumes Answer has a 'voter' M2M field like Question -- the board
        # view only proves Question.voter exists; TODO confirm on the model.
        answer.voter.add(request.user)
    return redirect('pybo:detail', question_id=answer.question.id)
@login_required(login_url='common:login')
def comment_create_answer(request, answer_id):
    """Attach a new comment to an answer.

    FIX: login_url was 'comment:login', a namespace that does not exist;
    the rest of the module authenticates against 'common:login'.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    if request.method == "POST":
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.author = request.user         # logged-in member
            comment.create_date = timezone.now()
            comment.answer = answer               # link to the parent answer
            comment.save()
            return redirect('pybo:detail', question_id=comment.answer.question.id)
    else:
        form = CommentForm()
    context = {'form': form}
    return render(request, 'pybo/comment_form.html', context)
@login_required(login_url='common:login')
def comment_modify_answer(request, comment_id):
    """Modify a comment on an answer; only its author may do so.

    FIX: login_url was 'comment:login', a namespace that does not exist;
    the rest of the module authenticates against 'common:login'.
    """
    comment = get_object_or_404(Comment, pk=comment_id)
    if request.user != comment.author:
        messages.error(request, '댓글수정권한이 없습니다')
        return redirect('pybo:detail', question_id=comment.answer.question.id)
    if request.method == "POST":
        form = CommentForm(request.POST, instance=comment)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.author = request.user
            comment.modify_date = timezone.now()  # stamp the edit time
            comment.save()
            return redirect('pybo:detail', question_id=comment.answer.question.id)
    else:
        form = CommentForm(instance=comment)
    context = {'form': form}
    return render(request, 'pybo/comment_form.html', context)
@login_required(login_url='common:login')
def comment_delete_answer(request, comment_id):
    """Delete a comment on an answer; only its author may do so.

    FIX: login_url was 'comment:login', a namespace that does not exist;
    the rest of the module authenticates against 'common:login'.
    """
    comment = get_object_or_404(Comment, pk=comment_id)
    if request.user != comment.author:
        messages.error(request, '댓글삭제권한이 없습니다')
        return redirect('pybo:detail', question_id=comment.answer.question.id)
    else:
        comment.delete()
        return redirect('pybo:detail', question_id=comment.answer.question.id)
def jqtest(request):
    """Render the jQuery test page."""
    template_name = 'pybo/jqtest.html'
    return render(request, template_name)
def imgtest(request):
    """Render the image test page."""
    template_name = 'pybo/imgtest.html'
    return render(request, template_name)
def market(request):
    """Render the market page."""
    template_name = 'pybo/market.html'
    return render(request, template_name)
def components(request):
    """Render the Bootstrap components demo page."""
    template_name = 'pybo/boot_components.html'
    return render(request, template_name)
|
import torch
from torch import nn
def loss_function(d, d_hat):
    """Trajectory loss: mean absolute error plus a cosine distance between
    the two trajectories re-expressed relative to a shared start point.

    :param d:     ground-truth tensor of shape (batch, steps, dim)
    :param d_hat: prediction tensor of the same shape
    :return: scalar tensor, ``cosine_distance + L1``
    """
    # nn.L1Loss already reduces to the mean; the original's extra .mean()
    # on the resulting scalar was a no-op and is dropped.
    l1 = nn.L1Loss()(d, d_hat)
    # Shared start point: average of the two trajectories' first steps.
    start = (d[:, 0] + d_hat[:, 0]) / 2
    # GENERALIZED: was start.reshape(-1, 1, 8), hard-coding a feature size
    # of 8; unsqueeze(1) yields the same (batch, 1, dim) shape for any dim.
    start = start.unsqueeze(1)
    # CosineSimilarity defaults to dim=1 (the steps axis here), then the
    # per-(batch, feature) similarities are averaged.
    cosine_distance = 1 - nn.CosineSimilarity()(d - start, d_hat - start).mean()
    return cosine_distance + l1
def error_loss_function(d, d_hat):
    """Plain mean-absolute-error between ``d`` and ``d_hat``.

    :param d:     ground-truth tensor
    :param d_hat: prediction tensor, same shape as ``d``
    :return: scalar tensor
    """
    # FIX: the original also computed an unused ``start`` tensor whose
    # reshape(-1, 1, 8) hard-coded a feature size of 8 and would raise for
    # other shapes even though the value was never used; that dead code is
    # removed.  nn.L1Loss already returns the mean, so no extra .mean().
    return nn.L1Loss()(d, d_hat)
|
# Read a string from the keyboard, convert all lowercase letters to
# uppercase, then write the result to a disk file named "test".
text = input("请输入:")
if text:
    with open("test", 'w', encoding="utf-8") as out_file:
        out_file.write(text.upper())
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Practice notes on Python lists (comments translated from Chinese).
# Lists
# my_list = ['加油', '运动场', '图书馆', 777]
# len
# print(len(my_list))
# Indexing
# print(my_list[1])
# Slicing
# print(my_list[0: 2])
# for loop
# for item in my_list:
# print(item)
# Exercise 1
"""
name_list = ["詹姆斯", "韦德", "罗斯"]
for item, index in enumerate(name_list):
    print(index, "===>", item)
"""
# NOTE(review): the example above unpacks enumerate() as (item, index),
# but enumerate yields (index, item) -- the two names are swapped.
# append
"""
name_list = []
while True:
    input_str = input("请输入您的姓名:")
    name_list.append(input_str)
    print(name_list)
"""
# insert
"""
name_list = ["詹姆斯", "韦德", "罗斯"]
name_list.insert(1, "科比")
print(name_list)
"""
# remove / pop / clear (clear empties the list)
"""
name_list = ["詹姆斯", "韦德", "罗斯"]
print(name_list.remove('韦德')) # 返回None 入参是删除的内容
print(name_list.pop(0)) # 返回删除的元素 入参是要删除的索引
print(name_list)
"""
# del
"""
name_list = ["詹姆斯", "韦德", "罗斯"]
del name_list[1]
print(name_list)
"""
"""
可变类型:list tuple
不可变类型:int bool string
"""
# Prints <class 'tuple'>.
print(type((1,2,3)))
# Stray non-code text (appears to be residue from a dataset web page);
# commented out so it cannot be parsed as Python:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.