text stringlengths 38 1.54M |
|---|
import sys
import os
import numpy as np
import scipy.misc
# data_path = "../data/preprocessed/rendered/train_msra_merged.npz"
data_path = "../data/train_mano_shape_new.npz"
# Lazily-loaded npz archive; the second stored array holds the samples.
archive = np.load(data_path)
output_dir = "../data/plot_train_msra_merged/"
# Make sure the output directory exists before writing images.
os.makedirs(output_dir, exist_ok=True)
# NOTE(review): scipy.misc.imsave was removed in SciPy 1.3 -- if this fails,
# migrate to imageio.imwrite or matplotlib.pyplot.imsave.
for idx, sample in enumerate(archive[archive.files[1]]):
    # Fixed: the loop variable used to rebind `data`, shadowing the npz handle.
    array = np.array(sample[0])
    filename = "{}.jpg".format(idx)
    scipy.misc.imsave(output_dir + filename, array)
|
from flask import render_template, flash, url_for, request, redirect
from flask_login import login_user, login_required, logout_user, current_user
from sqlalchemy import or_, and_
from app import app, db
from app.models import User, Transactions
from app.forms import RegisterForm, LoginForm, TransactForm, SearchFilterForm
@app.route("/")
def home():
    """Landing page: send authenticated users to their dashboard, show the
    signup/login forms to everyone else."""
    regform = RegisterForm()
    logform = LoginForm()
    if not current_user.is_authenticated:
        return render_template('home.html', regform=regform, logform=logform)
    return redirect(url_for('user_dashboard'))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Process the login form; direct GET requests bounce back to home."""
    logform = LoginForm()
    if request.method == "GET":
        return redirect(url_for('home'))
    # Already logged in -> straight to the dashboard.
    if current_user.is_authenticated:
        return redirect(url_for('user_dashboard'))
    if logform.validate_on_submit():
        user = User.query.filter_by(phone=logform.phone.data).first()
        if user is None or not user.check_password(logform.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=logform.remember_me.data)
        return redirect(url_for("user_dashboard"))
    return redirect(url_for('home'))
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new user account from the registration form."""
    regform = RegisterForm()
    if request.method == 'GET':
        return redirect(url_for('home'))
    if not regform.validate_on_submit():
        flash("Form didn't validate")
        return redirect(url_for('register'))
    # Map the account-type choice onto the integer codes the model stores.
    new = User(name=regform.name.data, phone=regform.phone.data, email=regform.email.data)
    if regform.type.data == 'personal':
        new.type = 0
    elif regform.type.data == 'business':
        new.type = 1
    new.set_password(regform.password.data)
    db.session.add(new)
    db.session.commit()
    flash('Congratulations, you are now a registered user!')
    return redirect(url_for('login'))
# TODO change user's url
# TODO user can still go to the login page using browser back button
@app.route("/user/dashboard", methods=["GET", "POST"])
@login_required
def user_dashboard():
    """Main user page: shows the balance, transaction history and pending
    requests, and handles the add-money / pay / receive / accept / decline
    form submissions."""
    transactform = TransactForm()
    # All transactions where the current user is payer or payee, newest first.
    TransactQuery = Transactions.query.join(User, (Transactions.payer_id == User.id)).filter(or_(Transactions.payer_id == current_user.id, Transactions.payee_id == current_user.id)).order_by(Transactions.timestamp.desc())
    # Requests still awaiting this user's approval (done == 0 and user is the payer).
    Requests = Transactions.query.join(User, (Transactions.payer_id == User.id)).filter(and_(Transactions.done == 0, Transactions.payer_id == current_user.id)).order_by(Transactions.timestamp.desc())
    # Add Money feature
    if request.method == "POST" and transactform.addbutton.data:
        current_user.money += int(transactform.add.data)
        db.session.commit()
        return redirect(url_for('user_dashboard'))
    # Pay/Receive feature
    if request.method == "POST":
        if transactform.paybutton.data:
            # NOTE(review): no check that the payee exists or that the payer
            # has sufficient funds -- `payee` may be None here; confirm
            # upstream form validation.
            payee = User.query.filter_by(phone=transactform.payee.data).first()
            transact = Transactions(payer_id=current_user.id, payee_id=payee.id, amount=int(transactform.pay.data))
            current_user.money -= int(transactform.pay.data)
            payee.money += int(transactform.pay.data)
            transact.done = 1
            db.session.add(transact)
        if transactform.receivebutton.data:
            # A "receive" creates a pending request (done stays 0) that the
            # payer must later accept.
            payer = User.query.filter_by(phone=transactform.payer.data).first()
            transact = Transactions(payer_id=payer.id, payee_id=current_user.id, amount=int(transactform.receive.data))
            db.session.add(transact)
        if 'accept' in request.form.values():
            # Accepting a pending request moves the money and marks it done.
            transact = Transactions.query.filter_by(id=request.form.get('request_id')).first()
            payee = User.query.filter_by(id=transact.payee_id).first()
            current_user.money -= int(transact.amount)
            payee.money += int(transact.amount)
            transact.done = 1
        if 'decline' in request.form.values():
            Transactions.query.filter_by(id=request.form.get('request_id')).delete()
        db.session.commit()
        return redirect(url_for('user_dashboard'))
    # Request someone
    # NOTE(review): every POST returns via the redirect above, so these two
    # filter branches appear unreachable as written; also the 'string'/'date'
    # labels look swapped ('string' filters a timestamp range, 'date' does a
    # name search). Confirm intended indentation/labels against the template.
    if request.method == 'POST' and request.form['filter'] == 'string':
        print('string request')
        TransactQuery = Transactions.query.join(User, (Transactions.payer_id == User.id)).filter(or_(Transactions.payer_id == current_user.id, Transactions.payee_id == current_user.id)).filter(Transactions.timestamp.between(request.form.get('from'), request.form.get('to'))).order_by(Transactions.timestamp.desc())
    if request.method == 'POST' and request.form['filter'] == 'date':
        print('date request')
        TransactQuery = Transactions.query.join(User, (Transactions.payer_id == User.id)).filter(or_(Transactions.payer_id == current_user.id, Transactions.payee_id == current_user.id)).filter(User.name.contains(request.form.get('search'))).order_by(Transactions.timestamp.desc())
    return render_template("user.html", name=current_user.name, money=current_user.money, transactform=transactform,
                           TransactionsHistory=TransactQuery, current_user_id=current_user.id, type=current_user.type,
                           requests_pending=Requests)
@app.route('/logout')
@login_required
def logout():
    """End the session and return to the public landing page."""
    logout_user()
    return redirect(url_for('home'))
|
from flask import session, g, current_app, request
from flask_wtf import csrf
from .models import User
def init_app(app):
    """Register functions are executed before each request to app."""
    # you can disable csrf default by `WTF_CSRF_CHECK_DEFAULT=False`
    # and then selectively call protect() only when you need.
    # @app.before_request
    # def check_csrf():
    #     if not is_oauth(request):
    #         csrf.protect()

    # load user into g.user
    @app.before_request
    def load_logged_in_user():
        """If a user id is stored in the session, load the user object from
        the database into `g.user`."""
        uid = session.get('user_id', None)
        g.user = None if uid is None else User.query.filter_by(id=uid).first()
import os
import time
import logging
import json
import subprocess
from .utils.bottle import Bottle, route, request, HTTPError
log = logging.getLogger(__name__)
class DummyAppController(Bottle):
    """
    A dummy application controller which can just receive and save status messages from applications.
    """

    def __init__(self, statusldir="."):
        super(DummyAppController, self).__init__()
        log.debug("Initializing DummyAppController.")
        # Route registrations: per-application status updates and registration.
        self.put("/application/<id>/status", callback=self.put_application_status)
        self.post("/applications", callback=self.post_applications)
        # Status log directory
        self._statusldir = statusldir
        os.makedirs(self._statusldir, exist_ok=True)
        # File handlers for status logs of applications
        self._apps_statusf = {}

    def put_application_status(self, id):
        """Append the JSON status body for application `id` to its log file."""
        log.debug("put_application_status(%s)" % id)
        if not request.json:
            error_no_json_content(request.path)
        status = request.json
        status['timestamp'] = time.time()
        # Open (and cache) one append-mode log file per application id.
        handle = self._apps_statusf.get(id)
        if handle is None:
            handle = open(os.path.join(self._statusldir, "%s.json.csv" % id), "a")
            self._apps_statusf[id] = handle
        handle.write(json.dumps(status) + "\n")
        handle.flush()
        return {'success': "Application status updated!"}

    def post_applications(self):
        """Accept an application registration (intentionally a no-op)."""
        log.debug("post_applications()")
        # We are not doing anything here
        return {'success': "Application registered."}
def error_no_json_content(route):
    """Log the missing-body condition and abort the request with HTTP 400.

    Raises:
        HTTPError: always (status 400).
    """
    # Fixed: the old message hard-coded "POST" even though this helper is
    # also called from the PUT status handler; report just the route.
    log.error("{}: No JSON content received!".format(route))
    raise HTTPError(status=400, body="No JSON content received!")
if __name__ == "__main__":
    logconf = {'format': '[%(asctime)s.%(msecs)-3d: %(name)-16s - %(levelname)-5s] %(message)s', 'datefmt': "%H:%M:%S"}
    logging.basicConfig(level=logging.DEBUG, **logconf)
    import argparse
    parser = argparse.ArgumentParser(description="SDN Controller Interface.")
    parser.add_argument('-l', '--log', help="Folder where to store the application status logs.", default='.')
    parser.add_argument('-b', '--bind', help="REST API bind IP.", default="0.0.0.0")
    # Fixed: coerce the port to int so "-p 9000" does not hand a string to run().
    parser.add_argument('-p', '--port', help="REST API Port.", type=int, default=8080)
    args = parser.parse_args()
    restapi = DummyAppController(statusldir=args.log)
    restapi.run(host=args.bind, port=args.port, debug=True)
|
#Exercise 2
#Write a program that categorizes each mail message by which day of the week the message was sent.
#To do this look for lines that start with "From", then look for the third word and keep a running count of each of the days of the week.
#At the end of the program print out the contents of your dictionary (order does not matter)
fname = input('Enter a file name: ')
counts = dict()
# Count how often each weekday appears as the third word of "From" lines.
# Fixed: removed the unused `count` variable, close the file via `with`,
# and use dict.get instead of the membership-test branch.
with open(fname) as fhand:
    for line in fhand:
        if line.startswith('From'):
            words = line.split()  # split into items
            if len(words) > 3:  # guard against short "From:" header lines
                day = words[2]
                counts[day] = counts.get(day, 0) + 1
print(counts)
|
import pytest
import offsetbasedgraph as obg
from graph_peak_caller.linear_filter import LinearFilter
def graph():
    """Build the small test graph: six 10-length blocks wired 9->10->(11|12)->13->14."""
    block_ids = [9, 10, 11, 12, 13, 14]
    blocks = {node: obg.Block(10) for node in block_ids}
    adjacency = {
        9: [10],
        10: [11, 12],
        11: [13],
        12: [13],
        13: [14],
    }
    return obg.Graph(blocks, adjacency)
@pytest.fixture
def indexed_interval():
    """Interval through blocks 10 -> 11 -> 13, entering block 10 at offset 5
    and ending in block 13 at offset 5, indexed against the test graph."""
    start = obg.Position(10, 5)
    end = obg.Position(13, 5)
    return obg.IndexedInterval(
        start, end, [10, 11, 13],
        graph=graph())
def test_get_start_positions(indexed_interval):
    """find_start_positions should project graph positions onto linear
    offsets relative to the interval start, split by strand (a negative
    node id marks the reverse strand)."""
    positions = [obg.Position(10, 7),
                 obg.Position(11, 5),
                 obg.Position(-11, 5),
                 obg.Position(13, 2)]
    linear_filter = LinearFilter(positions, indexed_interval)
    start_positions = linear_filter.find_start_positions()
    # Interval starts at offset 5 of block 10, so: 7-5=2, 5+5=10,
    # 5+10+2=17 on "+", and the reverse-strand hit maps to 10 on "-".
    assert start_positions == {"+": [2, 10, 17],
                               "-": [10]}
|
#!/usr/bin/python3
import copy
# Sentinel for "no card present".
noCard = -1
# Pip values run 0..12: ace low (0) through king (12).
king = 12
queen = king - 1
jack = queen - 1
ten = jack - 1
ace = 0
# Display characters for suits and pips. pipsChars[0] ('-') pads the table:
# pips are looked up at index pips + 1, so the noCard sentinel maps to '-'.
suitChars = ['C', 'D', 'H', 'S', ]
pipsChars = ['-', 'A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', ]
def makeCard(suit, pips):
    """Encode a (suit, pips) pair as a single int: suit * 13 + pips."""
    return suit * 13 + pips

def suit(card):
    """Suit index (0..3) of an encoded card."""
    return card // 13

def pips(card):
    """Pip value (0..12) of an encoded card."""
    return card % 13

def formatPips(suit, pips):
    """Two-character display string for a (suit, pips) pair."""
    return pipsChars[pips + 1] + suitChars[suit]

def formatCard(card):
    """Two-character display string for an encoded card ('--' for noCard)."""
    return formatPips( suit(card), pips(card) ) if card != noCard else '--'
def parseCard(cardStr):
    """Parse a two-character card string (pip char then suit char, e.g.
    'AS', 'TD') into its encoded int form; '--' parses to noCard.

    Raises:
        ValueError / IndexError: when a character is not a known pip or
        suit symbol (the offending characters are printed first).
    """
    if cardStr == '--': return noCard
    try:
        pips = pipsChars.index(cardStr[0]) - 1
        suit = suitChars.index(cardStr[1])
        return makeCard(suit, pips)
    except (ValueError, IndexError):
        # Fixed: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer intercepted by the diagnostic print.
        print( cardStr[0], cardStr[1] )
        raise
def parseDeck(deckStr):
    """Parse a whitespace-separated string of card codes into a list of
    encoded cards, asserting that no card appears twice."""
    deck = [parseCard(cardStr) for cardStr in deckStr.split()]
    cards = set()
    for d, card in enumerate(deck):
        assert card not in cards, f"Duplicate card {formatCard(card)} at position {d+1}"
        cards.add( card )
    return deck
def isStacked( cascade ):
    """True when the cascade's bottom two cards have consecutive encoded
    values, i.e. the top card continues a descending sequence."""
    return len(cascade) > 1 and cascade[-1] == cascade[-2] - 1
def isKingStack( cascade ):
    """True when the cascade is one unbroken descending run anchored on a king."""
    if not cascade:
        return False
    if pips( cascade[0] ) != king:
        return False
    # Every adjacent pair must descend by exactly one encoded value.
    return all(upper == lower + 1 for upper, lower in zip(cascade, cascade[1:]))
class Board:
    """FreeCell-style board state: foundations ("aces"), free cells and
    cascades, with move generation, undo and a depth-first solver."""

    def __init__(self, deck):
        """Deal `deck` (a sequence of encoded cards) round-robin into
        2 * nsuits cascades."""
        # How many suits were we given?
        self._nsuits = suit(len(deck))
        # Aces contains the highest pip number for that ace
        self._foundations = self._nsuits * [ noCard ]
        # Cells contains cards
        self._cells = self._nsuits * [ noCard ]
        self._firstFree = 0
        # Columns contains the main board layout
        self._tableau = [ [] for c in range(self._nsuits * 2) ]
        for d in range(0, len(deck)):
            self._tableau[d % len(self._tableau)].append(deck[d])
        # We use a hash of the sorted cascades as a memento
        # to avoid looping. Resorting is expensive, so we
        # try to avoid it by setting a flag when the sort
        # order becomes invalid. Since cards are unique,
        # the sort order only changes when the first card
        # in a cascade changes. We need to rehash whenever
        # cards move, so that is a separate flag.
        self._memento = None
        self._resort = True
        self._rehash = True
        self._sorted = [cascade for cascade in self._tableau]

    def __str__(self):
        """Render foundations, cascades and cells as printable text."""
        result = []
        # Aces stacks across the top
        result.append('Aces:')
        row = []
        for suit, pips in enumerate(self._foundations):
            row.append( formatPips( suit, pips ) )
        row.reverse()
        result.append(' '.join(row))
        result.append('')
        # Columns in the middle
        result.append('Table:')
        rows = max([len(cascade) for cascade in self._tableau])
        for r in range(rows):
            row = []
            for cascade in self._tableau:
                row.append(formatCard(cascade[r] if r < len(cascade) else noCard))
            result.append(' '.join(row))
        result.append('')
        # Cells at the bottom
        result.append('Cells:')
        row = [formatCard(card) for card in self._cells]
        result.append(' '.join(row))
        result.append('')
        return '\n'.join(result)

    # Location index encoding: negative indexes are foundations, indexes past
    # the tableau are cells, everything in between is a cascade index.
    def isFoundationIndex(self, idx):
        return idx < 0

    def indexOfFoundation(self, cardSuit):
        return -cardSuit - 1

    def foundationOfIndex(self, idx):
        return -idx - 1

    def isCellIndex(self, idx):
        return idx >= len(self._tableau)

    def indexOfCell(self, cell):
        return cell + len(self._tableau)

    def cellOfIndex(self, idx):
        return idx - len(self._tableau)

    def cardOfIndex(self, idx):
        """Return the card currently visible at a location index."""
        if self.isCellIndex( idx ) :
            return self._cells[self.cellOfIndex( idx )]
        elif self.isFoundationIndex( idx ):
            # Fixed: foundationOfIndex is a method; the original called it
            # as a bare function, raising NameError for foundation indexes.
            cardSuit = self.foundationOfIndex( idx )
            return makeCard( cardSuit, self._foundations[ cardSuit ] )
        else:
            return self._tableau[idx][-1]

    def checkCards(self):
        """Sanity check: every card appears exactly once across the
        foundations, cells and cascades."""
        cards = set()
        for cardSuit, topPips in enumerate(self._foundations):
            for cardPips in range(topPips + 1):
                cards.add(makeCard(cardSuit, cardPips))
        for cell, card in enumerate(self._cells):
            assert card not in cards, f"Duplicate card {formatCard(card)} in cell {cell}"
            if card != noCard: cards.add(card)
        for column, cascade in enumerate(self._tableau):
            for row, card in enumerate(cascade):
                assert card not in cards, f"Duplicate card {formatCard(card)} in cascade {column}, row {row}"
                cards.add(card)
        if len(cards) != self._nsuits * 13:
            for card in range(0, self._nsuits * 13):
                assert card in cards, f"Missing card {formatCard(card)}"

    def moveCard(self, move, validate = False):
        """Move a card at the start location to the finish
        location. Negative locations are the aces;
        locations past the number of cascades are the
        holding cells.
        moveCard works in both directions so it can
        be used to backtrack."""
        start, finish = move
        card = noCard
        # From a foundation
        if self.isFoundationIndex(start):
            foundation = self.foundationOfIndex(start)
            if validate:
                assert foundation < len(self._foundations), f"Move from invalid foundation {foundation}"
                assert self._foundations[foundation] != noCard, f"Move from empty foundation {foundation}"
            cardPips = self._foundations[foundation]
            self._foundations[foundation] = cardPips - 1
            card = makeCard(foundation, cardPips)
        # From a cell
        elif self.isCellIndex(start):
            cell = self.cellOfIndex(start)
            if validate:
                assert cell < len(self._cells), f"Move from invalid cell {cell}"
                assert self._cells[cell] != noCard, f"Move from empty cell {cell}"
            card = self._cells[cell]
            self._cells[cell] = noCard
            # Check whether this is now the first free cell
            if self._firstFree > cell:
                self._firstFree = cell
        # From a cascade
        else:
            cascade = self._tableau[start]
            if validate:
                assert cascade, f"Move from empty cascade {start}"
            card = cascade.pop()
            if not cascade: self._resort = True
        # To a foundation
        if self.isFoundationIndex(finish):
            foundation = self.foundationOfIndex(finish)
            cardPips = pips(card)
            if validate:
                assert foundation < len(self._foundations), f"Move to invalid foundation {foundation}"
                assert foundation == suit(card), f"Move of {formatCard(card)} to the wrong foundation {foundation}"
                assert self._foundations[foundation] == cardPips - 1, f"Move of {formatCard(card)} to foundation {foundation} not onto previous card {formatCard(makeCard(foundation, self._foundations[foundation]))}"
            self._foundations[foundation] = cardPips
        # To a cell
        elif self.isCellIndex(finish):
            # Insert into cell
            cell = self.cellOfIndex(finish)
            if validate:
                assert cell < len(self._cells), f"Move to invalid cell {cell}"
                assert self._cells[cell] == noCard, f"Move to occupied cell {formatCard(self._cells[cell])}"
            self._cells[cell] = card
            # Update the first free cell
            if cell == self._firstFree:
                self._firstFree += 1
                while self._firstFree < len(self._cells):
                    if self._cells[self._firstFree] == noCard:
                        break
                    self._firstFree += 1
        # To a cascade
        else:
            cascade = self._tableau[finish]
            if validate and cascade:
                assert cascade[-1] == card + 1, f"Move of {formatCard(card)} to cascade {finish} not onto subsequent card {formatCard(cascade[-1])}"
            if not cascade: self._resort = True
            cascade.append(card)
        # Need to rehash after moving
        self._rehash = True
        if validate:
            self.checkCards()
        return move

    def backtrack(self, moves, validate = False):
        """Undoes a sequence of moves by executing them in reverse order."""
        while moves:
            finish, start = moves.pop()
            self.moveCard((start, finish,), validate)

    def moveToFoundations( self, validate = False ):
        """Move all cards that can cover aces.
        Return a list of the moves.
        This list should be treated as a single unit."""
        moves = []
        # Repeat until a full pass over cells and cascades finds nothing new.
        moved = len(moves) - 1
        while moved != len(moves):
            moved = len(moves)
            for cell, card in enumerate(self._cells):
                if card == noCard: continue
                cardSuit = suit(card)
                cardPips = pips(card)
                # Can we remove it?
                if self._foundations[cardSuit] == cardPips - 1:
                    start = self.indexOfCell(cell)
                    finish = self.indexOfFoundation(cardSuit)
                    moves.append( self.moveCard( (start, finish, ), validate ) )
            for start, cascade in enumerate(self._tableau):
                while cascade:
                    card = cascade[-1]
                    cardSuit = suit(card)
                    cardPips = pips(card)
                    # Can we remove it?
                    if self._foundations[cardSuit] != cardPips - 1: break
                    finish = self.indexOfFoundation(cardSuit)
                    moves.append( self.moveCard( (start, finish, ), validate ) )
        return moves

    def enumerateFinishCascades(self, start, card):
        """Enumerate all the finish cascades for a card."""
        moves = []
        fromCell = self.isCellIndex( start )
        for finish, cascade in enumerate(self._tableau):
            if start == finish: continue
            if cascade:
                under = cascade[-1]
                # We can't stack a king on an ace because
                # exposed aces are always removed first
                if under == card + 1:
                    moves.append((start, finish,))
            # Don't move between empty cascades - NOP
            elif fromCell or len(self._tableau[start]) > 1:
                moves.append((start, finish,))
        return moves

    def enumerateMoves(self):
        """Enumerate all the legal moves that can be made."""
        # 3. Move from cascades to the first open cell
        stacked_to_cell = [] # Stacked card to free cell
        isolate_to_cell = [] # Isolate card to free cell
        openCells = 0
        if self._firstFree < len(self._cells):
            for card in self._cells: openCells += ( card == noCard )
            finish = self.indexOfCell(self._firstFree)
            for start, cascade in enumerate(self._tableau):
                if not cascade:
                    continue
                elif isStacked( cascade ):
                    # If the stack is anchored on a king, don't move anything
                    if openCells >= len( cascade ) or not isKingStack( cascade ):
                        stacked_to_cell.append( (start, finish,) )
                else:
                    isolate_to_cell.append( (start, finish,) )
        # 2. Move from cells to cascades
        cell_to_cascade = [] # Cell card to any cascade
        for start, card in enumerate(self._cells):
            if card != noCard:
                cell_to_cascade.extend(self.enumerateFinishCascades(self.indexOfCell(start), card))
        # 1. Move from cascades to cascades
        stacked_to_open = [] # Stacked card to open cascade
        isolate_to_cascade = [] # Isolate card to any cascade
        for start, cascade in enumerate(self._tableau):
            if cascade:
                finishes = self.enumerateFinishCascades(start, cascade[-1])
                if isStacked( cascade ):
                    if openCells >= len( cascade ) or not isKingStack( cascade ):
                        stacked_to_open.extend( finishes )
                else:
                    isolate_to_cascade.extend( finishes )
        # Build the list in reverse order
        # because we will pop choices from the back.
        moves = []
        moves.extend( stacked_to_cell )
        moves.extend( isolate_to_cell )
        moves.extend( cell_to_cascade )
        moves.extend( stacked_to_open )
        moves.extend( isolate_to_cascade )
        return moves

    def memento(self):
        """Lazily compute the cached memento."""
        # If the cascades are out of order, then re-sort them
        if self._resort:
            self._sorted.sort()
            self._resort = False
            self._rehash = True
        # If cards moved, re-hash the sorted cascades
        if self._rehash:
            self._memento = hash( tuple(tuple(cascade) for cascade in self._sorted) )
            self._rehash = False
        return self._memento

    def solved(self):
        """True when every foundation has been built up to its king."""
        return sum(self._foundations) == self._nsuits * 12

    def solve(self, callback = None, validate = False ):
        """Finds the first solution of the board using a depth first search.
        If a callback is provided, it will be given the board, solution and visited hash set
        and should return True to keep searching for shorter solutions, False to terminate."""
        solution = []
        # Search state
        visited = set()
        stack = []
        history = []
        # Move the aces
        moves = self.moveToFoundations()
        history.append(moves)
        if self.solved(): solution = history
        # Remember the starting position
        visited.add(self.memento())
        # Add the first level, if any
        level = self.enumerateMoves()
        if level: stack.append(level)
        while stack:
            # We always remove from the backs of lists
            # to avoid copying
            if stack[-1]:
                moves = stack[-1]
                move = moves.pop()
                try:
                    self.moveCard( move, validate )
                except:
                    print( move )
                    print( self )
                    raise
                moves = [move,]
                moves.extend(self.moveToFoundations())
                history.append(moves)
                tooLong = ( solution and len(solution) <= len(history) )
                # Are we done?
                terminated = callback and not callback(board=self, history=history, solution=solution, visited=visited)
                if terminated or self.solved():
                    # Keep the shortest solution
                    if self.solved() and not tooLong:
                        solution = copy.deepcopy( history )
                        if not callback: terminated = True
                    if terminated: break
                    # Nowhere else to go
                    self.backtrack(history.pop())
                    continue
                # Check whether we have been here before
                memento = self.memento()
                if memento in visited or tooLong:
                    # Abort this level if we have been here before
                    self.backtrack( history.pop(), validate )
                else:
                    # Remember this position
                    visited.add(memento)
                    # Go down one level, if we can
                    level = self.enumerateMoves()
                    if level:
                        stack.append(level)
                    else:
                        self.backtrack( history.pop(), validate )
            else:
                # Go up one level
                stack.pop()
                # Back out the move
                self.backtrack(history.pop())
        # Final callback with empty history
        if callback: callback(board=self, history=history, solution=solution, visited=visited)
        # Empty stack => empty history
        return solution
if __name__ == '__main__':
    # Deal a deterministic deck (cards 0..51 in order) and solve it.
    b = Board(range(0,52))
    print(b)
    print(b.solve())
|
# Track per-user [likes, comments] counts from a stream of commands,
# terminated by the literal line "Log out".
followers = {}
command = input()
while command != 'Log out':
    parts = command.split(": ")
    action = parts[0]
    if action == 'New follower':
        # Register the user with zeroed stats if not already known.
        followers.setdefault(parts[1], [0, 0])
    elif action == 'Like':
        stats = followers.setdefault(parts[1], [0, 0])
        stats[0] += int(parts[2])
    elif action == 'Comment':
        stats = followers.setdefault(parts[1], [0, 0])
        stats[1] += 1
    elif action == 'Blocked':
        username = parts[1]
        if username in followers:
            del followers[username]
        else:
            print(f"{username} doesn't exist.")
    command = input()

print(f"{len(followers)} followers")
# Order: most likes first, ties broken alphabetically by name.
for username, (likes, comments) in sorted(followers.items(), key=lambda kv: (-kv[1][0], kv[0])):
    print(f"{username}: {likes + comments}")
|
__author__ = 'Danyang'
class Solution:
    def minDistance(self, word1, word2):
        """Return the Levenshtein edit distance between word1 and word2
        (minimum number of insert / delete / replace operations) via
        classic dynamic programming over a (m+1) x (n+1) table.

        Fixed: replaced Python-2-only xrange with range so the code runs
        under both Python 2 and 3.
        """
        m = len(word1)
        n = len(word2)
        d = [[-1 for _ in range(n+1)] for _ in range(m+1)]
        # Base cases: transforming to/from the empty prefix costs its length.
        for i in range(m+1):
            d[i][0] = i
        for j in range(n+1):
            d[0][j] = j
        for i in range(1, m+1):
            for j in range(1, n+1):
                if word1[i-1] == word2[j-1]:
                    d[i][j] = d[i-1][j-1]
                else:
                    # delete, insert, or replace -- whichever is cheapest.
                    d[i][j] = min(
                        d[i-1][j]+1,
                        d[i][j-1]+1,
                        d[i-1][j-1]+1
                    )
        return d[-1][-1]
|
##Rachael stage 0 team woese
def hamming_distance(string1, string2) :
    """Count positions (over the length of string1) where the strings differ.

    Note: indexes string2 with string1's indices, so string2 must be at
    least as long as string1.
    """
    return sum(
        1
        for idx in range(len(string1))
        if string1[idx] != string2[idx]
    )
name = "Rachael"
email = "rachaelemman@gmail.com"
slack_name = "@Rachael"
biostack = "genomics"
twitter_handle = "@onionssssssss"
# Fixed: store the result under its own name instead of rebinding
# `hamming_distance`, which shadowed the function after the first call.
# NOTE(review): the two inputs differ in length; only the first
# len("Rachael") characters are compared -- confirm that is intended.
distance = hamming_distance("Rachael", "onionssssssss")
print(f"{name},{email},{slack_name},{biostack},{twitter_handle},{distance}")
#!/usr/bin/python
def main():
size = input()
array = [ int(x) for x in raw_input().split() ]
targetsum = input()
for i in range(size):
for j in range(size):
arraysum = array[i] + array[j]
if arraysum == targetsum:
print True
exit()
else:
print False
main()
|
from sklearn.datasets import fetch_california_housing

california = fetch_california_housing()  # bunch object
# print(california.DESCR)
# print(california.data.shape)
# print(california.target.shape)
# print(california.feature_names)

import pandas as pd

# Fixed: use fully-qualified option names -- the bare "precision" and
# "max_columns" aliases were removed in modern pandas and raise OptionError.
pd.set_option("display.precision", 4)
pd.set_option("display.max_columns", 9)
pd.set_option("display.width", None)
# creates the initial dataframe using the data in california.data and with the column names
# specified based on the features of the sample
california_df = pd.DataFrame(california.data, columns=california.feature_names)
# add a column to the dataframe for the median house value stored in california.target
california_df["MedHouseValue"] = pd.Series(california.target)
# print(california_df.head()) # peek at first 5 rows
# using the describe method of dataframes we can get some statistical information
# print(california_df.describe())
# The keyword argument frac specifies the fraction of the data to select (0.1 for 10%),
# and the keyword argument random_state enables you to seed the random number generator,
# this allows you to reproduce the same 'randomly' selected rows
sample_df = california_df.sample(frac=0.1, random_state=17)

import matplotlib.pyplot as plt
import seaborn as sns

sns.set(font_scale=2)
sns.set_style("whitegrid")
for feature in california.feature_names:
    plt.figure(figsize=(8, 4.5))
    sns.scatterplot(
        data=sample_df,
        x=feature,
        y="MedHouseValue",
        hue="MedHouseValue",
        palette="cool",
        legend=False,
    )
# plt.show()

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    california.data, california.target, random_state=11
)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)

from sklearn.linear_model import LinearRegression

linear_regression = LinearRegression()
linear_regression.fit(X=X_train, y=y_train)
# A for loop is usually written over an iterable object. Rather than creating
# and incrementing a counter variable yourself, use enumerate() to get the
# counter and the value from the iterable at the same time.
for i, name in enumerate(california.feature_names):
    print(f"{name}: {linear_regression.coef_[i]}")
predicted = linear_regression.predict(X_test)
print(predicted[:5])
expected = y_test
print(expected[:5])
# pair the expected (target) and predicted values in one dataframe
df = pd.DataFrame()
df["Expected"] = pd.Series(expected)
df["Predicted"] = pd.Series(predicted)
print(df[:10])
# plot the data as a scatterplot of expected vs. predicted prices
import matplotlib.pyplot as plt2

figure = plt2.figure(figsize=(9, 9))
axes = sns.scatterplot(
    data=df, x="Expected", y="Predicted", hue="Predicted", palette="cool", legend=False
)
# set the x- and y-axes' limits to use the same scale along both axes
start = min(expected.min(), predicted.min())
print(start)
end = max(expected.max(), predicted.max())
print(end)
axes.set_xlim(start, end)
axes.set_ylim(start, end)
line = plt2.plot([start, end], [start, end], "k--")
plt2.show()
# -*- coding: utf-8 -*-
"""
Created on Thu May 07 18:32:47 2015
@author: Sarunya
"""
import pickle
from sctree import tree
# Load the pickled tree root saved by an earlier run; the context manager
# guarantees the file handle is closed.
with open('20150507_164012.pic', 'rb') as pickle_file:
    root = pickle.load(pickle_file)
# init the test tree
t = tree()
t.settree(root)
#==============================================================================
# Lesson #36: Controlling loop flow with break continue and else
#==============================================================================
def main():
    """Demonstrate continue, break and for/else on a sample string."""
    text = 'This is a string'
    # "continue" skips the rest of the current iteration
    for ch in text:
        if ch == 's':
            continue  # skipping all "s" letters
        print(ch, end='')  # printing one letter at a time
    print("---------------------------------------------------------")
    # "break" leaves the loop entirely at the first "s"
    for ch in text:
        if ch == 's':
            break
        print(ch, end='')
    print("---------------------------------------------------------")
    # for/else: the else clause runs when the loop finishes without break
    for ch in text:
        if ch == 's':
            continue  # skipping all "s" letters
        print(ch, end='')  # printing one letter at a time
    else:
        print("EXTRA")

if __name__ == '__main__': main()
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the app, plus media-file serving.
urlpatterns = [
    # /polls/
    path('', views.index, name='index'),
    path('about/', views.about, name='about'),
    path('add_category/', views.add_category, name='add_category'),
    path('category/<slug:category_name_slug>/', views.category, name='category'),
    # NOTE(review): static() is the development media-serving helper --
    # confirm production serving is handled by the web server instead.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
import sys
import matplotlib.pyplot as plt
from PIL import Image
def patch_savefig_for_argv_metadata():
    """Monkey-patch plt.savefig so every saved figure records the command
    line (sys.argv) in its image metadata under the 'argv' key."""
    savefig_old = plt.savefig

    def savefig(*args, **kwargs):
        # Fixed: merge with any caller-supplied metadata instead of always
        # passing the keyword, which raised TypeError ("got multiple values
        # for keyword argument 'metadata'") when the caller set it too.
        metadata = dict(kwargs.pop("metadata", None) or {})
        metadata.setdefault("argv", " ".join(sys.argv))
        savefig_old(*args, metadata=metadata, **kwargs)

    plt.savefig = savefig
def print_fig_metadata(filename):
    """Open an image file and print the 'argv' metadata entry it was saved with."""
    info = Image.open(filename).info
    print("file generated with:")
    print()
    print(info["argv"])
if __name__ == "__main__":
    # CLI entry point: print the argv metadata recorded in a figure file.
    import argparse
    argparser = argparse.ArgumentParser()
    argparser.add_argument("filename")
    args = argparser.parse_args()
    print_fig_metadata(args.filename)
|
# coding=utf-8
from __future__ import division
from matplotlib import pyplot as plt
from matplotlib import dates
import datetime,random,time
import numpy as np
print "---第二題---"
# lambda
pi = 3.14
r = 5
getRoundArea = lambda x: x*pi**2
print("pi * r ^ 2 = {} * {} ^ 2 = {}".format(pi, r, getRoundArea(r)))
print "---第三題---"
# Three independent in-place shuffles of the values 10..19 (each row is a
# permutation, so no value repeats within a row).
q2 = []
for i in range(3):
    q2.append(np.arange(10, 20))
    np.random.shuffle(q2[i])
for i,v in enumerate(q2):
    print("[{}] = {}".format(i,v))
print "---第四題---"
# Plot a temperature series against dates with matplotlib.
def string_to_date(date):
    """Convert a list of 'YYYY-M-D' strings into datetime.date objects."""
    converted = []
    for entry in date:
        year, month, day = entry.split("-")
        converted.append(datetime.date(int(year), int(month), int(day)))
    return converted
# Taipei daily temperatures for 2015-01-10 .. 2015-01-20.
date = ['2015-1-10', '2015-1-11', '2015-1-12', '2015-1-13', '2015-1-14','2015-1-15',
        '2015-1-16', '2015-1-17', '2015-1-18', '2015-1-19', '2015-1-20']
temp = np.array([16.7,17.4,17.1,20.3,16.2,16.1,17.5,15.3,16.8,16,18.4])
plt.plot_date(string_to_date(date),temp,'r-',marker = '.')
plt.xlabel("Date")
plt.ylabel("Temperature")
plt.title("Taipei January Temperature\n40341120")
plt.show()
print("date = {}\ntemp = {}".format(string_to_date(date),temp))
print "---第五題---"
# Element-wise arithmetic on randomly generated matrices.
#random.seed(40341120)
def shape(A):
    """Return (rows, cols) of a list-of-lists matrix; an empty matrix is (0, 0)."""
    if A:
        return len(A), len(A[0])
    return 0, 0
def make_matrix(num_rows, num_cols, entry_fn):
    """Build a num_rows x num_cols matrix whose (i, j) entry is entry_fn(i, j)."""
    matrix = []
    for i in range(num_rows):
        matrix.append([entry_fn(i, j) for j in range(num_cols)])
    return matrix
def random_number(i, j):
    """Entry function for make_matrix: uniform random int in [0, 20]."""
    return random.randint(0,20)
def matrix_cal(A, B,L):
    """Combine matrices A and B element-wise using the binary function L.

    Raises:
        ArithmeticError: when A and B do not have the same shape.
    """
    if shape(A) != shape(B):
        raise ArithmeticError("cannot add matrices with different shapes")
    num_rows, num_cols = shape(A)
    return make_matrix(num_rows, num_cols, lambda i, j: L(A[i][j], B[i][j]))
# Element-wise addition / subtraction operators for matrix_cal.
mAdd = lambda x, y: x+y
mMinus = lambda x, y : x-y
matrixA = make_matrix(2, 3, random_number)
matrixB = make_matrix(2, 3, random_number)
matrixC = make_matrix(2, 3, random_number)
print("matrixA = {}".format(matrixA))
print("matrixB = {}".format(matrixB))
print("matrixC = {}".format(matrixC))
print("matrixA - matrixB = {}".format(matrix_cal(matrixA,matrixB,mMinus)))
print("matrixA + matrixB + matrixC = {}".format(matrix_cal(matrix_cal(matrixA,matrixB,mAdd),matrixC,mAdd)))
print "---第六題---"
# Fill-in-the-blank exercise: estimate joint and conditional probabilities
# from a two-draw simulation of blue ("B") / yellow ("Y") balls.
a1=0
a2=0
aboth=0
n=100000
def random_ball():
    """Draw one ball uniformly at random: 'B' (blue) or 'Y' (yellow)."""
    return random.choice(["B", "Y"])
random.seed(2)
for _ in range(n):
    get1 = random_ball()
    get2 = random_ball()
    if get1 == "B":
        a1 += 1
    if get1 == "B" and get2 == "B":
        aboth += 1
    if get2 == "B":
        a2 += 1
# Empirical probabilities; the last lines illustrate that independence
# makes P(get1|get2) equal P(get1).
print "P(aboth):", aboth / n
print "P(get1): ", a1 / n
print "P(get2): ", a2 / n
print "P(get1,get2): ", a1 / n * a2 / n
print "P(get1|get2) = p(aboth)/p(get2): ", (aboth / n) / (a2 / n)
print "p(get1|get2)/p(get2) = p(get1)p(get2)/p((get2) = p(get1) : ", a1 / n
|
'''
Created on Jan 15, 2018
@author: VSENTH17
'''
import time
if __name__ == '__main__':
pass
def banner(message, border="-"):
    """Print message framed above and below by a border line of equal length."""
    edge = border * len(message)
    for row in (edge, message, edge):
        print(row)
banner("Norwegian Blue")
banner("Norwegian Blue","*")
banner(border=".", message="Hello from Earth")
#print(time.ctime())
def showDefault(arg=time.ctime()):
    # NOTE: the default is evaluated ONCE at definition time, so calls
    # without an argument always print the module-load timestamp.
    print(arg)
showDefault()
showDefault(arg=time.ctime())
print(time.ctime())
def add_spam(menu=[]):
    # NOTE: deliberate demonstration of the mutable-default pitfall — the
    # same list object is shared by every defaulted call (fixed variant below).
    menu.append("spam")
    return menu
breakfast = ['bacon','eggs']
print(add_spam(breakfast))
lunch = ['baked beans']
print(add_spam(lunch))
print(add_spam())
print(add_spam())
def add_spamModified(menu=None):
    """Append "spam" to menu and return it; a fresh list is used when menu is None."""
    items = [] if menu is None else menu
    items.append("spam")
    return items
print(add_spamModified())  # fresh list each call: always ['spam']
print(add_spamModified()) |
# Session 7, Exercise 1
# (original author's note: "I don't understand")
def my_abs(x):
    """Return the absolute value of x."""
    return -x if x < 0 else x
print(my_abs(5))  # 5
print(my_abs(-3)) |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Turku University (2018) Department of Future Technologies
# Foresail-1 / PATE Monitor / Middleware component
# API Command interface
#
# Command.py - Jani Tammi <jasata@utu.fi>
#
# 0.1.0 2018.10.12 Initial version.
#
import json
import logging
import sqlite3
from flask import g
from application import app
from . import InvalidArgument, NotFound
from . import DataObject
class Command(DataObject):
    """REST data object for the 'command' table.

    Supports fetch-by-id (GET) and inserting new PSU commands (POST).
    """

    # Must be a tuple: the original ('id') was a plain string, so iterating
    # it yielded the characters 'i' and 'd' instead of the argument name.
    accepted_request_arguments = ('id',)

    class DotDict(dict):
        """dot.notation access to dictionary attributes"""
        __getattr__ = dict.get
        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__

        def __missing__(self, key):
            """Return None if non-existing key is accessed"""
            return None

    def __init__(self, request):
        """Parses request arguments."""
        self.cursor = g.db.cursor()
        super().__init__(self.cursor, 'command')
        try:
            # build empty arg dictionary
            self.args = self.DotDict()
            for var in self.accepted_request_arguments:
                setattr(self.args, var, None)
            if request.args:
                # Raise exception for request unsupported arguments
                for key, _ in request.args.items():
                    if key not in self.accepted_request_arguments:
                        raise InvalidArgument(
                            "Unsupported argument '{}'".format(key)
                        )
                try:
                    cmd_id = request.args.get('id', None)
                except Exception as e:
                    # Replace with api.ApiException
                    raise InvalidArgument(
                        "Argument extraction failed!",
                        {'arguments' : request.args, 'exception' : str(e)}
                    ) from None
                # Bug fix: the guard referenced an undefined name 'begin',
                # raising NameError on every request that had query args.
                self.args.id = int(cmd_id) if cmd_id else None
        except InvalidArgument:
            raise
        except Exception as e:
            # Replace with api.ApiException
            raise InvalidArgument(
                "Parameter parsing failed!",
                str(e)
            ) from None
        #
        # JSON, if any
        #
        app.logger.debug(request.json)
        self.payload_json = request.json

    def query(self):
        """Fetch request ONLY!"""
        #
        # Complain 'id' is missing, because we support fetch requests only
        #
        if not self.args.id:
            raise InvalidArgument(
                "Missing mandatory 'id' query parameter!",
                "This interface supports only fetch-type requests, which require the command 'id' to be specified."
            )
        # SQL
        self.sql = "SELECT * FROM command WHERE id = :id"
        #
        # Execute query
        #
        try:
            self.cursor.execute(self.sql, self.args)
        except Exception:
            # Narrowed from a bare except; log with traceback and re-raise.
            app.logger.exception(
                "Query failure! SQL='{}', args='{}'"
                .format(self.sql, self.args)
            )
            raise
        return self.cursor

    def get(self):
        """Handle Fetch and Search requests."""
        cursor = self.query()
        result = cursor.fetchall()
        if len(result) < 1:
            raise NotFound(
                "Specified command not found!",
                "Provided command id '{}' does not match any in the database"
                .format(self.args.id)
            )
        # Zip column names with the first (and only) matching row.
        data = dict(zip([c[0] for c in cursor.description], result[0]))
        if app.config.get("DEBUG", False):
            return (
                200,
                {
                    "data" : data,
                    "query" : {
                        "sql" : self.sql,
                        "variables" : self.args,
                        "fields" : None
                    }
                }
            )
        else:
            return (200, {"data": data})

    def post(self, interface, command):
        """Enter new command into the database."""
        # Maps a supported command to the JSON payload key carrying its value.
        cmd2val = {
            "SET VOLTAGE" : "voltage",
            "SET CURRENT LIMIT" : "current_limit",
            "SET POWER" : "power"
        }
        if command not in cmd2val:
            # Programmer's error
            # Bug fix: '{]' was a malformed replacement field, which raised
            # ValueError instead of producing the intended message.
            raise ValueError(
                "Unsupported command '{}'"
                .format(command)
            )
        #
        # Extract JSON parameters
        #
        if not self.payload_json:
            raise InvalidArgument(
                "This method requires a JSON payload!"
            )
        try:
            value = self.payload_json.get(cmd2val[command])
        except Exception as e:
            raise InvalidArgument(
                "Extracting JSON Parameter '{}' failed!"
                .format(cmd2val[command]),
                str(e)
            ) from None
        # Active session_id provided by /api/__init__.py:DataObject()
        self.sql = """
        INSERT INTO command (
            session_id,
            interface,
            command,
            value
        )
        VALUES (
            :session_id,
            :interface,
            :command,
            :value
        )
        """
        bvars = {
            "session_id" : self.session_id,
            "interface" : interface,
            "command" : command,
            "value" : value
        }
        try:
            cursor = g.db.cursor()
            cursor.execute(self.sql, bvars)
            g.db.commit()
        except Exception:
            # Narrowed from a bare except; log with traceback and re-raise.
            app.logger.exception(
                "Insert failure! SQL='{}', bvars='{}'"
                .format(self.sql, bvars)
            )
            raise
        #
        # Return
        #
        if app.config.get("DEBUG", False):
            return (
                202,
                {
                    "command_id" : cursor.lastrowid,
                    "query" : {
                        "sql" : self.sql,
                        "variables" : bvars,
                        "fields" : None
                    }
                }
            )
        else:
            return (202, {"command_id": cursor.lastrowid})
# EOF |
"""
Given an unordered list of flights taken by someone, each represented as (origin, destination) pairs,
and a starting airport, compute the person's itinerary. If no such itinerary exists, return null.
If there are multiple possible itineraries, return the lexicographically smallest one. All flights must be used in the itinerary.
For example, given the list of flights [('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')] and
starting airport 'YUL', you should return the list ['YUL', 'YYZ', 'SFO', 'HKO', 'ORD'].
Given the list of flights [('SFO', 'COM'), ('COM', 'YYZ')] and starting airport 'COM', you should return null.
Given the list of flights [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'A')] and starting airport 'A',
you should return the list ['A', 'B', 'C', 'A', 'C'] even though ['A', 'C', 'A', 'B', 'C'] is also a valid itinerary.
However, the first one is lexicographically smaller.
"""
def getItinerary(flights, currentItinerary):
    """Extend currentItinerary using every flight in `flights` exactly once.

    Returns the completed itinerary (list of airports), or None when no
    ordering of the remaining flights continues from the last stop.

    Fixes over the original:
    - flights are tried in sorted order, so the first complete itinerary
      found is the lexicographically smallest (as the problem requires);
    - when a recursive attempt fails (returns None) we now backtrack and
      try the next candidate instead of giving up.
    """
    if not flights:
        return currentItinerary
    lastStop = currentItinerary[-1]
    ordered = sorted(flights)
    for i, (source, dest) in enumerate(ordered):
        if source != lastStop:
            continue
        remaining = ordered[:i] + ordered[i + 1:]
        currentItinerary.append(dest)
        result = getItinerary(remaining, currentItinerary)
        if result is not None:
            return result
        currentItinerary.pop()  # backtrack and try the next candidate flight
    return None
if __name__ == "__main__":
    flights = [("HNL", "AKL"), ("YUL", "ORD"), ("ORD", "SFO"), ("SFO", "HNL")]
    source = "YUL"
    # Expected: YUL -> ORD -> SFO -> HNL -> AKL
    print(" -> ".join(getItinerary(flights, [source])))
"""Adds config flow for templatebinarysensor."""
from collections import OrderedDict
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.helpers import template as templater
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class TemplateBinarySensorFlowHandler(config_entries.ConfigFlow):
    """Config flow for templatebinarysensor."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialize."""
        self._errors = {}

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user.

        Bug fix: the default used to be the mutable `{}`, which is both the
        shared-mutable-default pitfall and broke the argument-less first
        step — `{}` is not None, so it entered the validation branch and
        raised KeyError on "template". `None` correctly shows the empty form.
        """
        self._errors = {}
        if user_input is not None:
            valid = await self._validate_template(user_input["template"])
            if valid:
                return self.async_create_entry(
                    title=user_input["name"], data=user_input
                )
            # Template failed to render: flag the field and re-show the form.
            self._errors["base"] = "template"
        return await self._show_config_form(user_input)

    async def _show_config_form(self, user_input):
        """Show the configuration form to edit location data."""
        # Defaults (used on first display or for keys the user left out).
        name = ""
        template = ""
        device_class = ""
        if user_input is not None:
            name = user_input.get("name", "")
            template = user_input.get("template", "")
            device_class = user_input.get("device_class", "")
        data_schema = OrderedDict()
        data_schema[vol.Required("name", default=name)] = str
        data_schema[vol.Required("template", default=template)] = str
        data_schema[vol.Optional("device_class", default=device_class)] = str
        return self.async_show_form(
            step_id="user", data_schema=vol.Schema(data_schema), errors=self._errors
        )

    async def _validate_template(self, template):
        """Return true if template is valid."""
        try:
            templater.Template(template, self.hass).async_render()
            return True
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.error(exception)
            return False
|
#
# Copyright (c) 2019, TU/e Robotics, Netherlands
# All rights reserved.
#
# \author Janno Lunenburg
# System
import math
# ROS
import geometry_msgs.msg
import rospy
import tf2_ros
from tf2_pykdl_ros import VectorStamped
# noinspection PyUnresolvedReferences
import tf2_pykdl_ros
# noinspection PyUnresolvedReferences
import tf2_geometry_msgs
import smach
# TU/e Robotics
from ed.entity import Entity, Volume
import robot_smach_states.util.designators as ds
from robot_smach_states.human_interaction import Say
from robot_smach_states.world_model.world_model import Inspect
from robot_smach_states.utility import WriteDesignator
from robot_skills import get_robot_from_argv
from robot_skills.robot import Robot
from robot_skills.classification_result import ClassificationResult
from robocup_knowledge import load_knowledge
# Items with x- or y-dimension larger than this value will be filtered out
SIZE_LIMIT = 0.2
# Items with a ratio between x- and y-dimensions outside this value are considered 'faulty' segmentations
RATIO_LIMIT = 4.0
class InspectFurniture(smach.StateMachine):
    def __init__(self, robot, furniture_designator, entity_designator, max_number_items=2):
        # type: (Robot, object) -> None
        """
        Drives to the designated furniture object, inspects this and selects the entity that will be pointed to
        :param robot: (Robot) robot API object
        :param furniture_designator: (EdEntityDesignator) designates the furniture object that was pointed to.
        :param entity_designator: (EdEntityDesignator) writeable EdEntityDesignator
        :param max_number_items: Max number of items in the entity designator
        """
        # ToDo: we need to add userdata
        smach.StateMachine.__init__(self,
                                    outcomes=["succeeded", "failed"],
                                    input_keys=["laser_dot"])
        assert ds.is_writeable(entity_designator), "Entity designator must be writeable for this purpose"
        object_ids_des = ds.VariableDesignator([], resolve_type=[ClassificationResult])
        inspect_area_des = ds.VariableDesignator(resolve_type=str)
        nav_area_des = ds.VariableDesignator(resolve_type=str)
        common_knowledge = load_knowledge('common')

        @smach.cb_interface(outcomes=["done", "failed"], input_keys=["laser_dot"])
        def _select_inspect_area(userdata, robot, furniture_des, inspect_area_des):
            # Score how far (vertically) the pointing ray is from a volume;
            # 0 means the ray's z lies inside the volume's z-extent.
            def _ray_volume_check(ray_vs: VectorStamped, vol: Volume):
                if vol.min_corner.z() <= ray_vs.vector.z() <= vol.max_corner.z():
                    return 0
                else:
                    return min(abs(ray_vs.vector.z()-vol.min_corner.z()), abs(ray_vs.vector.z()-vol.max_corner.z()))
            entity_to_inspect = furniture_des.resolve()
            if entity_to_inspect is None:
                # Fall back to a sensible default area so later states can proceed.
                inspect_area_des.write("on_top_of")
                return "failed"
            laser_dot = tf2_ros.convert(userdata["laser_dot"], VectorStamped)
            laser_dot.header.stamp = rospy.Time()
            laser_dot_entity_frame = robot.tf_buffer.transform(laser_dot, entity_to_inspect.uuid)
            best_volumes = []
            for vol_name, vol in entity_to_inspect.volumes.items():
                # Only consider volumes the knowledge base marks as inspectable.
                if vol_name not in common_knowledge.get_inspect_areas(entity_to_inspect.uuid):
                    continue
                best_volumes.append((vol_name, _ray_volume_check(laser_dot_entity_frame, vol)))
            if not best_volumes:
                rospy.loginfo("No volumes found from knowledge, using 'on_top_of'")
                inspect_area_des.write("on_top_of")
                return "failed"
            # Lowest distance score first; pick the closest volume.
            best_volumes.sort(key=lambda tup: tup[1], reverse=False)
            rospy.loginfo(f"Volumes: {best_volumes}")
            best_vol = best_volumes[0]
            rospy.loginfo(f"Using volume: {best_vol[0]}")
            inspect_area_des.write(best_vol[0])
            return "done"

        @smach.cb_interface(outcomes=["done", "failed"])
        def _select_nav_area(userdata, furniture_des, inspect_area_des, nav_area_des):
            entity_to_inspect = furniture_des.resolve()
            inspect_area = inspect_area_des.resolve()
            if entity_to_inspect is None or not inspect_area:
                rospy.loginfo("Using backup nav_area: 'in_front_of'")
                nav_area_des.write("in_front_of")
                return "failed"
            nav_area = common_knowledge.get_inspect_position(entity_to_inspect.uuid, inspect_area)
            rospy.loginfo(f"For {entity_to_inspect.uuid}:{inspect_area} selected '{nav_area}' to navigate")
            nav_area_des.write(nav_area)
            return "done"

        with self:
            smach.StateMachine.add("SAY_GO",
                                   Say(robot, "Let's go to the {furniture_object}",
                                       furniture_object=ds.AttrDesignator(furniture_designator, "uuid",
                                                                          resolve_type=str)),
                                   transitions={"spoken": "CLEAR_FOUND_ENTITY_DESIGNATOR"})
            smach.StateMachine.add('CLEAR_FOUND_ENTITY_DESIGNATOR',
                                   WriteDesignator(object_ids_des.writeable, []),
                                   transitions={'written': 'SELECT_INSPECT_AREA'})
            smach.StateMachine.add("SELECT_INSPECT_AREA", smach.CBState(_select_inspect_area,
                                                                        cb_args=[robot,
                                                                                 furniture_designator,
                                                                                 inspect_area_des.writeable]),
                                   transitions={"done": "SELECT_NAV_AREA",
                                                "failed": "SELECT_NAV_AREA"})
            smach.StateMachine.add("SELECT_NAV_AREA", smach.CBState(_select_nav_area,
                                                                    cb_args=[furniture_designator,
                                                                             inspect_area_des.writeable,
                                                                             nav_area_des.writeable]),
                                   transitions={"done": "INSPECT_FURNITURE",
                                                "failed": "INSPECT_FURNITURE"})
            smach.StateMachine.add("INSPECT_FURNITURE",
                                   Inspect(robot=robot, entityDes=furniture_designator, searchArea=inspect_area_des,
                                           objectIDsDes=object_ids_des, navigation_area=nav_area_des),
                                   transitions={"done": "SELECT_ENTITY",
                                                "failed": "SAY_INSPECTION_FAILED"})  # ToDo: fallback?
            smach.StateMachine.add("SAY_INSPECTION_FAILED",
                                   Say(robot, "I am sorry but I was not able to reach the {furniture_object}",
                                       furniture_object=ds.AttrDesignator(furniture_designator, "uuid",
                                                                          resolve_type=str)),
                                   transitions={"spoken": "failed"})

            @smach.cb_interface(outcomes=["succeeded", "no_entities"],
                                input_keys=["laser_dot"])
            def select_entity(userdata):
                """
                Selects the entity that the robot believes the operator has pointed to and that the robot will
                identify later on.
                Userdata contains key 'laser_dot' with value geometry_msgs.msg.PointStamped where the operator pointed
                at.
                :param userdata: (dict)
                :return: (str) outcome
                """
                assert userdata.laser_dot.header.frame_id.endswith("map"), "Provide your laser dot in map frame"
                # Extract classification results
                entity_ids = [cr.uuid for cr in object_ids_des.resolve()]
                rospy.loginfo("Segmented entities: {}".format(entity_ids))
                # Obtain all corresponding entities
                all_entities = robot.ed.get_entities()
                segmented_entities = [e for e in all_entities if e.uuid in entity_ids]
                # Filter out 'unprobable' entities
                candidates = []
                for entity in segmented_entities:  # type: Entity
                    # The following filtering has been 'copied' from the cleanup challenge
                    # It can be considered a first step but does not take the orientation of the convex hull into
                    # account
                    shape = entity.shape
                    size_x = max(shape.x_max - shape.x_min, 0.001)
                    size_y = max(shape.y_max - shape.y_min, 0.001)
                    if size_x > SIZE_LIMIT or size_y > SIZE_LIMIT:
                        continue
                    if not 1 / min(RATIO_LIMIT, 1000) <= size_x / size_y <= min(RATIO_LIMIT, 1000):
                        continue
                    candidates.append(entity)
                # If no entities left: don't bother continuing
                if not candidates:
                    rospy.logwarn("No 'probable' entities left")
                    return "no_entities"
                candidates_distance = []
                # Select entity closest to the point where the operator pointed at (i.e., closest in 2D)
                closest_tuple = (None, None)
                x_ref = userdata.laser_dot.point.x
                y_ref = userdata.laser_dot.point.y
                # ToDo: use sorting for this...
                for e in candidates:  # type: Entity
                    x_e = e.pose.frame.p.x()
                    y_e = e.pose.frame.p.y()
                    d_2d = math.hypot(x_ref - x_e, y_ref - y_e)
                    rospy.loginfo("Entity {} at {}, {}: distance = {}".format(e.uuid, x_e, y_e, d_2d))
                    candidates_distance.append((e, d_2d))
                    # if closest_tuple[0] is None or distance_2d < closest_tuple[1]:
                    #     closest_tuple = (e, distance_2d)
                # Closest first; write out at most max_number_items entities.
                candidates_distance.sort(key=lambda tup: tup[1], reverse=False)
                best_candidate = candidates_distance[0]
                rospy.loginfo("Best entity: {} at {}".format(best_candidate[0].uuid, best_candidate[1]))
                entity_designator.write(list(list(zip(*candidates_distance))[0][:max_number_items]))
                return "succeeded"

            smach.StateMachine.add("SELECT_ENTITY",
                                   smach.CBState(select_entity),
                                   transitions={"succeeded": "succeeded",
                                                "no_entities": "failed"})
if __name__ == "__main__":
    # Manual integration test: run the state machine against a live or
    # simulated robot passed on the command line.
    rospy.init_node("test_furniture_inspection")
    # Robot
    _robot = get_robot_from_argv(index=1)
    # Test data
    furniture = ds.EdEntityDesignator(robot=_robot, uuid="kitchen_shelf")
    entity_designator = ds.VariableDesignator(resolve_type=Entity)
    ps = geometry_msgs.msg.PointStamped()
    ps.header.frame_id = "map"
    ps.point.x = 4.8
    ps.point.y = 1.15
    ps.point.z = 0.7
    user_data = smach.UserData()
    user_data["laser_dot"] = ps
    sm = InspectFurniture(robot=_robot, furniture_designator=furniture, entity_designator=entity_designator.writeable)
    sm.execute(user_data)
|
# -*-coding:utf-8 -*-
class Solution(object):
    def twoSum(self, nums, target):
        """Return indices [i, j] (i < j) of the two numbers summing to target.

        :type nums: List[int]
        :type target: int
        :rtype: List[int] or None when no pair exists

        Improvements over the original: idiomatic enumerate() instead of a
        manual iterator/StopIteration loop, direct dict membership instead
        of `in dicts.keys()`, and the debug prints are removed.
        """
        # Maps each needed complement -> index of the element waiting for it.
        complements = {}
        for i, num in enumerate(nums):
            if num in complements:
                return [complements[num], i]
            complements[target - num] = i
        return None
if __name__ == '__main__':
    # Smoke test: 2 + 7 == 9 -> indices [0, 1]
    nums = [2, 7, 11, 15]
    target = 9
    obj = Solution()
    rst = obj.twoSum(nums, target)
    print(rst)
from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): `patterns` is imported but unused (and was removed in
# Django 1.10); the list-based urlpatterns below is already the modern form.
urlpatterns = [
    url(r'^media/', include('media.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
import unittest
import os
import configparser
from pyramid import testing
from pyramid import request
from pyramid.registry import Registry
from sqlalchemy import (
create_engine
)
from sqlalchemy.orm import (
scoped_session,
sessionmaker
)
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from sqlalchemy.orm.session import Session
from argux_server.rest.views.auth import RestAuthenticationViews
from argux_server.scripts import initializedb
class RestAuthViewsTests(unittest.TestCase):
    """Integration tests for the REST authentication views."""

    def setUp(self):
        # Build the full WSGI app from the config file named by ARGUX_CONFIG
        # and wrap it in a WebTest client.
        config_file = os.environ['ARGUX_CONFIG']
        config = configparser.ConfigParser()
        config.read(config_file)
        settings = config['app:main']
        from argux_server import main
        app = main({}, **settings)
        from webtest import TestApp
        self.testapp = TestApp(app)

    def tearDown(self):
        del self.testapp

    def test_login(self):
        """POSTing valid credentials to /rest/1.0/login returns HTTP 200."""
        resp = self.testapp.post_json(
            '/rest/1.0/login',
            dict(username='admin', password='admin'))
        # Bug fix: assertEquals is a deprecated alias (removed in Python
        # 3.12); use assertEqual.
        self.assertEqual(resp.status_int, 200)
|
import numpy as np
import matplotlib.pyplot as plt
##%matplotlib inline
def draw(x1,x2):
    # Plot the current decision-boundary line segment.
    ln=plt.plot(x1,x2)
def sigmoid(score):
    # Logistic function: squashes score into the open interval (0, 1).
    return 1/(1+np.exp(-score))
##produces probability based off linear combination of every point
# Generate two labelled 2-D clusters (with a bias column) and score every
# point against a hand-picked linear boundary via the sigmoid.
n_pts=100
np.random.seed(0)
bias= np.ones(n_pts)
top_region=np.array([np.random.normal(10,2,n_pts), np.random.normal(12,2,n_pts), bias]).T
bottom_region= np.array([np.random.normal(5,2, n_pts), np.random.normal(6,2, n_pts), bias]).T
all_points=np.vstack((top_region, bottom_region))
## joins bottom array to top array
# arbitrary initial weights / bias for the boundary w1*x + w2*y + b = 0
w1=-0.2
w2=-0.35
b=3.5
line_paramters = np.matrix([w1,w2,b]).T
x1 = np.array([bottom_region[:,0].min(), top_region[:,0].max()])
x2 = -b/w2 + (x1*(-w1/w2))
##x1 and x2 are arrays of (x,y) that hold max and min co-ord of x and y values
##the line should have
linear_combination= all_points*line_paramters
#matrix multiplication
probabilities= sigmoid(linear_combination)
print(probabilities)
_, ax= plt.subplots(figsize=(4,4))
ax.scatter(top_region[:,0], top_region[:,1], color='r')
ax.scatter(bottom_region[:,0], bottom_region[:,1], color='b')
draw(x1,x2)
plt.show()
|
# NOTE(review): this is a pasted IPython session transcript, not a runnable
# script — `lis - [1]` raises TypeError, `i` is undefined at `lis.pop(i)`,
# `print data` is Python 2 statement syntax, and `%hist` is an IPython
# magic. Kept verbatim.
lis = [1, 2, 3, 4]
lis - [1]
lis.pop(i)
data = (
    "this is a string", [1, 2, 3, 4], ("more tuples",
    1.0, 2.3, 4.5), "this is yet another string"
)
print data
import pprint
pprint.pprint(data)
i = "a"
j = "b"
k = "c"
set(i + j + k)
%hist -f "美观打印和集合的操作.py"
|
#!/usr/bin/env python3
import json
import sys
# mdbook preprocessor protocol: a "supports <renderer>" invocation must
# exit 0 to signal that every renderer is supported.
if len(sys.argv) > 1:
    if sys.argv[1] == 'supports':
        # sys.argv[2] is the renderer name
        sys.exit(0)
# KaTeX header snippet appended to each chapter; the (context, book) pair
# arrives as JSON on stdin.
katex = '\n' + open('../.cargo/katex-header.html', 'r').read()
context, book = json.load(sys.stdin)
def fix(items):
    """Recursively append the KaTeX header to every chapter's content."""
    for entry in items:
        if 'Chapter' not in entry:
            continue
        chapter = entry['Chapter']
        chapter['content'] += katex
        fix(chapter['sub_items'])
fix(book['sections'])
json.dump(book, sys.stdout)  # emit the modified book back to mdbook
|
# GYP build configuration: compiles the vendored ed25519 sources and the
# Node addon bindings into the 'enigma' target, after a 'dependencies'
# preparation step, linking a static OpenSSL libcrypto per platform.
{
  'variables': {'module_root%': '<!(node -p \"process.cwd()\")'},
  'targets': [{
    'target_name': 'dependencies',
    'type': 'none',
    'hard_dependency': 1,
    'actions': [{
      'action_name': 'prepare_dependencies',
      'inputs': ['<(module_root)/scripts/node/dependencies.js'],
      'outputs': ['<(module_root)/dependencies'],
      'action': [
        'node', '<(module_root)/scripts/node/dependencies.js'
      ],
      'message': 'Prepare dependencies'
    }]
  }, {
    'target_name': 'enigma',
    'dependencies': [
      'dependencies'
    ],
    'sources': [
      'dependencies/ed25519/src/add_scalar.c',
      'dependencies/ed25519/src/fe.c',
      'dependencies/ed25519/src/ge.c',
      'dependencies/ed25519/src/key_exchange.c',
      'dependencies/ed25519/src/keypair.c',
      'dependencies/ed25519/src/sc.c',
      'dependencies/ed25519/src/seed.c',
      'dependencies/ed25519/src/sha512.c',
      'dependencies/ed25519/src/sign.c',
      'dependencies/ed25519/src/verify.c',
      'bindings/node/ed25519.cc',
      'bindings/node/rsa.cc',
      'bindings/node/init.cc'
    ],
    'include_dirs': [
      '<!@(node -p \"require(\'node-addon-api\').include\")',
      'dependencies/ed25519/src'
    ],
    'defines': [
      'NAPI_DISABLE_CPP_EXCEPTIONS'
    ],
    'conditions': [
      ['OS=="mac"', {
        'include_dirs': ['<(module_root)/dependencies/openssl/include'],
        'libraries': ['-Wl,<(module_root)/dependencies/openssl/libcrypto.a'],
        'defines': [
          'CFLAGS=<!(node -p \"require(\'process\').env.MACOS_DEPLOYMENT_TARGET\") ',
          'CPPFLAGS=<!(node -p \"require(\'process\').env.MACOS_DEPLOYMENT_TARGET\") ',
          'LDFLAGS=<!(node -p \"require(\'process\').env.MACOS_DEPLOYMENT_TARGET\") '
        ]
      }],
      ['OS=="linux"', {
        'include_dirs': ['<(module_root)/dependencies/openssl/include'],
        'libraries': ['-L<(module_root)/dependencies/openssl', '-l:libcrypto.a', '-static-libstdc++']
      }],
      ['OS=="win"', {
        'include_dirs': ['<(module_root)/dependencies/openssl/include'],
        'libraries': ['-l<(module_root)/dependencies/openssl/libcrypto.lib']
      }]
    ]
  }]
}
|
'''
@author: youxinyu
@date: 2018-8-30
'''
class Node:
    """Binary-search-tree node; duplicates are stored in the left subtree."""

    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def insert(self, data):
        """Insert data into the subtree rooted at this node.

        Bug fix: when this node was empty (data is None) the original set
        the value but then fell through and ALSO inserted it as a child,
        creating a duplicate. Now we stop after filling the node.
        """
        if self.data is None:
            self.data = data
            return
        if data <= self.data:
            if self.left is not None:
                self.left.insert(data)
            else:
                self.left = Node(data)
        else:
            if self.right is not None:
                self.right.insert(data)
            else:
                self.right = Node(data)

    def contains(self, data):
        """Return True if data occurs in this subtree, else False.

        Bug fix: the original implicitly returned None (not False) when the
        relevant child was missing, and printed debug output on hits.
        """
        if self.data is None:
            return False
        if self.data == data:
            return True
        child = self.left if data <= self.data else self.right
        return child.contains(data) if child is not None else False

    def inOrderPrint(self):
        """Print the subtree's values in ascending (in-order) order."""
        if self.data is None:
            print('None')
            return
        if self.left is not None:
            self.left.inOrderPrint()
        print(self.data)
        if self.right is not None:
            self.right.inOrderPrint()

    def preOrderPrint(self):
        """Print the subtree's values in pre-order.

        Bug fix: the original recursed via inOrderPrint (and tested the
        always-false `self == None`), so children were printed in-order.
        """
        if self.data is None:
            print('None')
            return
        print(self.data)
        if self.left is not None:
            self.left.preOrderPrint()
        if self.right is not None:
            self.right.preOrderPrint()

    def postOrderPrint(self):
        """Print the subtree's values in post-order.

        Bug fix: same inOrderPrint recursion error as preOrderPrint.
        """
        if self.data is None:
            print('None')
            return
        if self.left is not None:
            self.left.postOrderPrint()
        if self.right is not None:
            self.right.postOrderPrint()
        print(self.data)
def main():
    # Build a small tree and print it in-order (ascending).
    bst = Node(1)
    bst.insert(10)
    bst.insert(5)
    bst.insert(7)
    # print(bst.contains(5))
    bst.inOrderPrint()
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FileName : identity_check.py
# Author : wuqingfeng@
import zmq
import zhelpers
if __name__ == '__main__':
    context = zmq.Context()
    sink = context.socket(zmq.ROUTER)
    sink.bind("inproc://example")
    # First DEALER: no explicit identity, so the ROUTER generates one.
    anonymous = context.socket(zmq.DEALER)
    anonymous.connect("inproc://example")
    anonymous.send_multipart([b"", b"ROUTER use a generated 5 byte identity"])
    zhelpers.dump(sink)
    # Second DEALER: an explicit IDENTITY, which the ROUTER uses as-is.
    identified = context.socket(zmq.DEALER)
    identified.setsockopt(zmq.IDENTITY, b'PEER2')
    identified.connect("inproc://example")
    identified.send_multipart([b"", b"ROUTE socket use REQ's socket identity"])
    zhelpers.dump(sink)
|
"""
报数指的是,按照其中的整数的顺序进行报数,然后得到下一个数。如下所示:
1, 11, 21, 1211, 111221, ...
1 读作 "one 1" -> 11.
11 读作 "two 1s" -> 21.
21 读作 "one 2, then one 1" -> 1211.
给定一个整数 n, 返回 第 n 个顺序。
注意事项
整数的顺序将表示为一个字符串。
"""
class Solution:
    # @param {int} n the nth
    # @return {string} the nth sequence
    def countAndSay(self, n):
        """Return the n-th count-and-say term.

        Note: matching the original API, the term is returned as an int
        (count-and-say digits never start with 0, so this is safe).
        """
        return self.getNumber(n)

    def getNumber(self, n):
        """Recursively build term n by run-length "reading" term n-1."""
        if n == 1:
            return 1
        value = str(self.getNumber(n - 1))
        if value == "1":
            return 11
        index = 0
        count = 1
        result = []
        # Walk the previous term, emitting "<run length><digit>" per run.
        # (The per-branch index increments were merged; debug print removed.)
        while index < len(value) - 1:
            if value[index] == value[index + 1]:
                count += 1
            else:
                result.append(str(count) + str(value[index]))
                count = 1
            index += 1
        result.append(str(count) + str(value[index]))
        return int("".join(result))
# Print terms 1..9 of the count-and-say sequence, one per line.
s=Solution()
for x in range(1,10):
    print(s.countAndSay(x))
    print()
|
# http://www.geeksforgeeks.org/count-palindromic-subsequence-given-string/
def count_palindromes(input_str, i, j):
    """Count palindromic subsequences of input_str[i..j] (inclusive)."""
    if i < 0 or j < 0 or i >= len(input_str):
        return 0
    if i == j:
        return 1
    if input_str[i] != input_str[j]:
        # Inclusion-exclusion: subtract the doubly-counted inner range.
        return (count_palindromes(input_str, i, j - 1)
                + count_palindromes(input_str, i + 1, j)
                - count_palindromes(input_str, i + 1, j - 1))
    # Matching end characters: both sub-ranges plus the palindromes the
    # matching pair brackets (counted once extra by the recurrence).
    return (count_palindromes(input_str, i + 1, j)
            + count_palindromes(input_str, i, j - 1)
            + 1)
print(count_palindromes('abc', 0, 2))  # 'a', 'b', 'c' -> 3
import json
class Account:
    """Wraps a raw account-details dict and exposes its fields as attributes."""

    def __init__(self, account_details: dict):
        # Keep the raw payload for __str__; expose the pieces as attributes.
        self.__account_details = account_details
        number = account_details["account_number"]
        provider = account_details["provider"]
        self.account_id = account_details["account_id"]
        self.update_timestamp = account_details["update_timestamp"]
        self.account_type = account_details["account_type"]
        self.display_name = account_details["display_name"]
        self.currency = account_details["currency"]
        self.account_number = number["number"]
        self.swift_bic = number["swift_bic"]
        self.sort_code = number["sort_code"]
        self.provider_name = provider["display_name"]
        self.logo_uri = provider["logo_uri"]
        self.provider_id = provider["provider_id"]
        self.transactions = None

    def __str__(self):
        """Pretty-printed JSON of the raw account details."""
        return json.dumps(self.__account_details, indent=2)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-31 13:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; further schema changes
    # should be made in a new migration rather than by editing this one.
    dependencies = [
        ('petition', '0024_pytitionuser_petition_templates'),
    ]
    operations = [
        migrations.AlterField(
            model_name='organization',
            name='default_template',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='petition.PetitionTemplate', verbose_name='Default petition template'),
        ),
        migrations.AlterField(
            model_name='petitiontemplate',
            name='id',
            field=models.IntegerField(db_index=True, primary_key=True, serialize=False, unique=True),
        ),
        migrations.AlterField(
            model_name='petitiontemplate',
            name='name',
            field=models.CharField(db_index=True, max_length=50, unique=True, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='pytitionuser',
            name='default_template',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='petition.PetitionTemplate', verbose_name='Default petition template'),
        ),
        migrations.AlterField(
            model_name='templateownership',
            name='template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='petition.PetitionTemplate'),
        ),
    ]
|
# Count how many of the entered integers are even; input ends at 0 (the
# sentinel is not counted). Renamed the accumulator from `sum`, which
# shadowed the builtin of the same name.
even_count = 0
element = int(input())
while element != 0:
    if element % 2 == 0:
        even_count += 1
    element = int(input())
print(even_count)
|
# Print the harmonic sum H(n) = 1 + 1/2 + ... + 1/n for a user-supplied n.
n = int(input("Nhap so:"))
result = sum(1 / i for i in range(1, n + 1))
print(result)
|
import itertools
import sys
from sys import stdin, stdout
import functools
def f(fa, fb, fc, fd):
    # NOTE(review): Python 2 contest code. fa appears to be a next-pointer
    # list (functional graph), fb the current node, fc the path walked so
    # far and fd a scratch copy marked with -1 for visited nodes. Walks from
    # fb until the successor re-enters the path, then scores the chain or
    # cycle found — the exact scoring semantics are contest-specific;
    # confirm against the problem statement before changing.
    fd[fb] = -1
    fc = fc + [fb]
    #print fa, fd
    #print fc, fd
    if fa[fb] not in fc:
        return f(fa, fa[fb], fc, fd)
    elif fa[fb] == fc[0]:
        # Closed a full cycle through the start: score is the cycle length.
        return len(fc)
    elif fa[fb] == fc[-2]:
        return len(fc) + 1 * (fc[0] in fd) + 1 * (fc[-1] in fd)
    else:
        return len(fc) + 1 * (fc[0] in fd) + 1 * (fc[-1] in fd) - 1
    #else:
    #    print fc, fd
    #    return len(fc) + 1 * (fc[0] in fd) + 1 * (fc[-1] in fd)
def g(ga, gb, gc):
    # NOTE(review): the parameters ga/gb/gc are unused — the function reads
    # every test case from stdin instead. Python 2 (statement-form print).
    a = stdin.readlines()
    a = [[ int(x) for x in u.strip().split() if x != '' ] for u in a ]
    # a[0][0] is the number of test cases; each case is two lines:
    # a count line followed by a 1-based pointer list.
    for i in range(1, 1 + int(a[0][0])):
        ca = a[2 * i - 1][0]
        cb = [ x - 1 for x in a[2 * i][:]]
        #print cb
        cmax = 0
        # Try every start node and keep the best score from f().
        for j in range(ca):
            cc = []
            cd = cb[:]
            temp = f(cb, j, cc, cd)
            if temp > cmax:
                cmax = temp
        if cmax > ca:
            cmax = ca
        print "Case #{}:".format(i), cmax
|
def addition(x, y):
    """Print and return the sum of x and y.

    Bug fix: the original computed `a + b` from module globals, silently
    ignoring its own parameters. It also returned `print(z)` (always None);
    returning the sum is strictly more useful and backward-compatible.
    """
    z = x + y
    print(z)
    return z
# Read two integers from the user and print their sum.
a=input("enter input1: ")
b=input("enter input2: ")
a=int(a)
b=int(b)
addition(a,b)
def Celcius_to_fah(p):
    """Print and return p degrees Celsius converted to Fahrenheit.

    Bug fix: the original converted the module global `c` instead of the
    parameter `p`, and returned `print(f)` (always None).
    """
    f = (9 / 5) * p + 32
    print(f)
    return f
# Read a Celsius temperature from the user and print it in Fahrenheit.
c=input("enter the temp in celcius: ")
c=int(c)
Celcius_to_fah(c)
|
import numpy as np
from sigmoid import sigmoid
def sigmoidGradient(z):
    """Return the gradient of the sigmoid function evaluated at z.

    Works elementwise, so z may be a scalar, vector or matrix:
        g'(z) = sigmoid(z) * (1 - sigmoid(z))

    Bug fix: the dead `np.zeros([np.size(z, 0), np.size(z, 1)])`
    pre-allocation was removed — it was immediately overwritten, and it
    crashed on 1-D inputs (np.size(z, 1) needs a second axis), contradicting
    the documented vector support. sigmoid is also computed only once now.
    """
    s = sigmoid(z)
    return s * (1.0 - s)
from django.core.management.base import BaseCommand
from page_muncher.quick_test import test_quick
class Command(BaseCommand):
    """Management command that runs the page_muncher quick test."""

    help = 'Django quick test'

    def handle(self, *args, **options):
        try:
            self.stdout.write('Running quick test ...')
            test_quick()
            self.stdout.write(self.style.SUCCESS('Successfully done'))
        except Exception:
            # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit still propagate instead of being swallowed.
            self.stdout.write('*** Quick test died ***')
|
# coding: utf-8
# __author__ = qiuxuan.lin
import cv2
import numpy as np
from sklearn import mixture
from sklearn import decomposition
# from IPython import embed
# import matplotlib.pyplot as plt
def proba_gmm(Z, K = '' , type = ''):
    # Fit a K-component Gaussian mixture to Z and return:
    #   (uint8 component means, per-sample membership probabilities, model).
    # NOTE(review): `type` shadows the builtin and `K=''` is not a usable
    # default, so callers must always pass both. sklearn.mixture.GMM is the
    # legacy pre-0.20 API (GaussianMixture is its replacement) — confirm the
    # installed sklearn version before reuse.
    g = mixture.GMM(n_components=K, covariance_type= type) # tried 'full' or 'spherical', no improvements
    g.fit(Z)
    center = g.means_ # centroid RGB values
    center = np.uint8(center)
    proba = g.predict_proba(Z)
    return center, proba, g
|
import sys,pygame
size = width, height = 350, 175
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Joystick Tester")
background_image = pygame.image.load('image.png').convert_alpha()
# Main loop: service the event queue (required to keep the window alive)
# and redraw the background every frame.
while True:
    for events in pygame.event.get():
        if events.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    screen.fill([255, 255, 255])
    screen.blit(background_image, (0, 0))
    pygame.display.flip()
|
'''
Python 2.7
The clustergrammer python module can be installed using pip:
pip install clustergrammer
or by getting the code from the repo:
https://github.com/MaayanLab/clustergrammer-py
'''
from clustergrammer import Network
net = Network()
# load matrix tsv file
net.load_file('../data_mats/df_predict_merge.txt')
# Pin the colors for the two viruses before the first clustering pass.
net.set_cat_color('row', 1, 'virus: chik', 'blue')
net.set_cat_color('row', 1, 'virus: zika', 'red')
net.cluster(enrichrgram=False)
# transfer colors from original to predicted categories
########################################################
# make category colors the same for Chik groups
for inst_cat in net.viz['cat_colors']['row']['cat-1']:
    new_cat = inst_cat.replace('original','predict')
    inst_color = net.viz['cat_colors']['row']['cat-1'][inst_cat]
    net.set_cat_color('row', 3, new_cat, inst_color)
net.cluster(enrichrgram=False)
# write jsons for front-end visualizations
net.write_json_to_file('viz', 'json/mult_view.json', 'indent')
# net.write_json_to_file('sim_row', 'json/mult_view_sim_row.json', 'no-indent')
# net.write_json_to_file('sim_col', 'json/mult_view_sim_col.json', 'no-indent')
|
import itertools
from typing import Dict
from typing import List
from typing import Optional
from allennlp.data.token_indexers.token_indexer import IndexedTokenList
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.vocabulary import Vocabulary
@TokenIndexer.register("tiny_single_id")
class SingleIdTokenIndexer(TokenIndexer):
    """Tiny implementation of SingleIdTokenIndexer.

    This class is based on allennlp SingleIdTokenIndexer.
    https://github.com/allenai/allennlp/blob/master/
    allennlp/data/token_indexers/single_id_token_indexer.py
    """

    def __init__(
        self,
        lowercase_tokens: bool = False,
        start_tokens: Optional[List[str]] = None,
        end_tokens: Optional[List[str]] = None,
        token_min_padding_length: int = 0,
    ) -> None:
        super().__init__(token_min_padding_length)
        self.lowercase_tokens = lowercase_tokens
        self._start_tokens = [Token(text) for text in (start_tokens or [])]
        self._end_tokens = [Token(text) for text in (end_tokens or [])]

    def _normalized_text(self, token: Token) -> str:
        """Return the token's text, lowercased when configured."""
        text = token.text
        assert isinstance(text, str)
        return text.lower() if self.lowercase_tokens else text

    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]) -> None:
        counter["tokens"][self._normalized_text(token)] += 1

    def tokens_to_indices(
        self, tokens: List[Token], vocabulary: Vocabulary
    ) -> Dict[str, List[int]]:
        padded = itertools.chain(self._start_tokens, tokens, self._end_tokens)
        return {
            "tokens": [
                vocabulary.get_token_index(self._normalized_text(token), "tokens")
                for token in padded
            ]
        }

    def get_empty_token_list(self) -> IndexedTokenList:
        return {"tokens": []}
|
'''
Solution: BFS over positions 0..100000 where from x one can move to
x-1, x+1 or 2*x per unit of time; print the minimum time from N to K
and the number of distinct shortest paths.
'''
from collections import deque
N,K = map(int,input().split())
MAX = 100000+1
def bfs(N,K):
    """Return (min_time, count_of_shortest_paths) from N to K."""
    visited = [0]*(MAX)
    visited[N] = 1
    time = 0
    min_time = MAX
    cnt= 0
    q = deque([[N,time]])
    while q:
        x,t = q.popleft()
        # print("x,t,cnt : ",x,t,cnt)
        # Cells are marked visited on *pop* (not on push), so several queue
        # entries for the same cell can coexist within one BFS level — this
        # is what allows counting multiple shortest paths to K.
        visited[x] = 1
        if t > min_time:
            continue
        if x==K:
            if min_time == t:
                cnt+=1
            elif min_time == MAX:
                min_time = t
                cnt+=1
        else:
            for nx in [x-1,x+1,2*x]:
                # Skip moves outside the board.
                if 0> nx or nx>=MAX:
                    continue
                if visited[nx]:
                    continue
                q.append([nx,t+1])
    return min_time,cnt
# When N >= K the only option is stepping back one cell per unit of time,
# so the answer is N-K with exactly one way.
if N >= K:
    print(N-K)
    print(1)
else:
    min_time, ways =bfs(N,K)
    print(min_time)
    print(ways)
|
import os
import random
import aiofiles
import yaml
# import pyz3r
from alttprbot import models
from alttprbot.database import config
from alttprbot.exceptions import SahasrahBotException
from alttprbot_discord.util.alttpr_discord import ALTTPRDiscord
from alttprbot_discord.util.alttprdoors_discord import AlttprDoorDiscord
from alttprbot_discord.util.sm_discord import SMDiscord, SMZ3Discord
# SMZ3_ENVIRONMENTS = {
# 'live': {
# 'smz3': 'https://samus.link',
# 'sm': 'https://sm.samus.link',
# },
# 'beta': {
# 'smz3': 'https://beta.samus.link',
# 'sm': 'https://sm.beta.samus.link',
# },
# 'alpha': {
# 'smz3': 'https://alpha.samus.link',
# 'sm': 'https://sm.beta.samus.link',
# }
# }
class PresetNotFoundException(SahasrahBotException):
    """Raised when a requested preset YAML cannot be found or is unavailable."""
    pass
# this is until I port the existing code over to the new fetch_preset and generate_preset coroutines, or do something else that isn't as terrible
async def get_preset(preset, hints=False, nohints=False, spoilers="off", tournament=True, randomizer='alttpr', allow_quickswap=False):
    """Load the named preset and generate a seed from it.

    Returns a (seed, preset_dict) tuple.
    """
    loaded_preset = await fetch_preset(preset, randomizer)
    generated_seed = await generate_preset(loaded_preset, preset=preset, hints=hints, nohints=nohints, spoilers=spoilers, tournament=tournament, allow_quickswap=allow_quickswap)
    return generated_seed, loaded_preset
async def fetch_preset(preset, randomizer='alttpr'):
    """Load presets/<randomizer>/<preset>.yaml and return it as a dict.

    Raises PresetNotFoundException when the file is missing, or when the
    preset is festive-only while festive mode is disabled. For customizer
    presets, any 'forced_locations' entries are resolved into concrete
    item placements under settings['l'].
    """
    preset = preset.lower()
    # make sure someone isn't trying some path traversal shennaniganons
    basename = os.path.basename(f'{preset}.yaml')
    try:
        async with aiofiles.open(os.path.join(f"presets/{randomizer}", basename)) as f:
            preset_dict = yaml.safe_load(await f.read())
        # Festive presets are only usable while the global FestiveMode flag is "true".
        if preset_dict.get('festive') and not await config.get(0, 'FestiveMode') == "true":
            raise PresetNotFoundException(
                f'Could not find preset {preset}. See a list of available presets at https://sahasrahbot.synack.live/presets.html')
    except FileNotFoundError as err:
        raise PresetNotFoundException(
            f'Could not find preset {preset}. See a list of available presets at https://sahasrahbot.synack.live/presets.html') from err
    # NOTE(review): preset_dict.get('randomizer', 'alttpr') is truthy for any
    # non-empty string, so this middle condition never restricts anything —
    # was `== 'alttpr'` intended? Confirm before changing.
    if preset_dict.get('customizer', False) and preset_dict.get('randomizer', 'alttpr') and not preset_dict.get('doors', False):
        if 'l' not in preset_dict['settings']:
            preset_dict['settings']['l'] = {}
        for i in preset_dict.get('forced_locations', {}):
            # Pick a random not-yet-forced location for each forced item.
            location = random.choice([l for l in i['locations'] if l not in preset_dict['settings']['l']])
            preset_dict['settings']['l'][location] = i['item']
    return preset_dict
async def generate_preset(preset_dict, preset=None, hints=False, nohints=False, spoilers="off", tournament=True, allow_quickswap=False):
    """Generate a seed from an already-loaded preset dict.

    Dispatches on preset_dict['randomizer'] to the matching generator
    (ALTTPR, ALTTPR doors, SM or SMZ3), records the generated game in the
    audit table, and returns the seed object.

    Raises SahasrahBotException for unsupported randomizers.
    """
    randomizer = preset_dict.get('randomizer', 'alttpr')
    settings = preset_dict['settings']
    if randomizer == 'alttpr':
        if preset_dict.get('doors', False):
            if hints:
                settings['hints'] = 'on'
            elif nohints:
                settings['hints'] = 'off'
            if allow_quickswap:
                settings['quickswap'] = True
            seed = await AlttprDoorDiscord.create(
                settings=settings,
                spoilers=spoilers == "on"
            )
            hash_id = seed.hash
        else:
            if hints:
                settings['hints'] = 'on'
            elif nohints:
                settings['hints'] = 'off'
            settings['tournament'] = tournament
            settings['spoilers'] = spoilers
            # Only apply the caller's quickswap choice when the preset itself
            # did not pin a value.
            if not 'allow_quickswap' in settings:
                settings['allow_quickswap'] = allow_quickswap
            if preset_dict.get('customizer', False):
                endpoint = "/api/customizer"
            elif preset_dict.get('festive', False):
                endpoint = "/api/festive"
            else:
                endpoint = "/api/randomizer"
            seed = await ALTTPRDiscord.generate(
                settings=settings,
                endpoint=endpoint
            )
            hash_id = seed.hash
    # elif randomizer == 'alttprdoors':
    elif randomizer in ['sm']:
        settings['race'] = "true" if tournament else "false"
        seed = await SMDiscord.create(
            settings=settings,
        )
        hash_id = seed.slug_id
    elif randomizer in ['smz3']:
        settings['race'] = "true" if tournament else "false"
        seed = await SMZ3Discord.create(
            settings=settings,
        )
        hash_id = seed.slug_id
    else:
        raise SahasrahBotException(f'Randomizer {randomizer} is not supported.')
    # Every generated game is recorded for auditing purposes.
    await models.AuditGeneratedGames.create(
        randomizer=randomizer,
        hash_id=hash_id,
        permalink=seed.url,
        settings=settings,
        gentype='preset',
        genoption=preset,
        customizer=1 if preset_dict.get('customizer', False) else 0
    )
    return seed
|
# Read a count (unused beyond consuming the line) and a space-separated list
# of integers, then print the smallest value.
count = int(input(""))
values = [int(token) for token in input("").split(" ")]
# Bug fix: the original reset its running minimum to values[0] on EVERY loop
# iteration, so it effectively only compared the last element against the
# first. Seed the minimum once, before the loop.
smallest = values[0]
for value in values:
    if value < smallest:
        smallest = value
print(smallest)
|
import json
from collections import Counter
# Location of the Karpathy split annotation file.
dataset_path = "E:/data/caption_datasets/"
dataset_name = "dataset_coco.json"
# Image paths per split; the 'restval' split is merged into train below.
train_path = []#train + restval image paths
test_path = []
val_path = []
# Per-image lists of tokenized captions, parallel to the *_path lists.
train_tokes = []
test_tokes = []
val_tokes = []
# Global word-frequency counter filled by read_dataset_json().
words_count = Counter()
def read_dataset_json():
    """Parse the split file and fill the module-level path/token lists.

    Images with split 'train' or 'restval' go to the train lists, 'test' to
    the test lists, everything else to validation. Every caption's tokens
    are also folded into the global words_count Counter, which is returned.
    """
    # Use a context manager so the file handle is always closed (the
    # original leaked it), and parse the JSON directly from the stream.
    with open(dataset_path + dataset_name, 'r') as f:
        json_dic = json.loads(f.read())
    for image in json_dic["images"]:
        # All tokenized captions of this image.
        alist = [sentence['tokens'] for sentence in image['sentences']]
        relative_path = image['filepath'] + '/' + image['filename']
        if image['split'] == 'train' or image['split'] == 'restval':
            train_path.append(relative_path)
            train_tokes.append(alist)
        elif image['split'] == 'test':
            test_path.append(relative_path)
            test_tokes.append(alist)
        else:
            val_path.append(relative_path)
            val_tokes.append(alist)
        for tokens in alist:
            words_count.update(tokens)
    return words_count
def set_wordmap():
    """Build a word -> index map from words_count and write it as JSON.

    Index 0 is reserved for <pad>; regular words start at 1; the special
    <unk>/<start>/<end> tokens take the next consecutive indices.
    """
    words = [w for w in words_count.keys()]
    word_map = {k: v + 1 for v, k in enumerate(words)}
    word_map['<unk>'] = len(word_map)+1
    # Bug fix: the start token was written as '<start' (missing '>'), which
    # would never match the '<start>' token expected at decode time (compare
    # with '<end>' below).
    word_map['<start>'] = len(word_map)+1
    word_map['<end>'] = len(word_map)+1
    word_map['<pad>'] = 0
    with open("E:\data\mycaption_data\show_attention_tell\wordmap.json", "w") as f:
        json.dump(word_map, f)
    print("写文件完成...")
read_dataset_json()
set_wordmap() |
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: producer that fits pixel tracks by helix
# projections. BPix1 error scaling is disabled; scaleFactor is passed to the
# producer as-is (its exact meaning is defined by the C++ plugin).
hltPhase2PixelFitterByHelixProjections = cms.EDProducer("PixelFitterByHelixProjectionsProducer",
    scaleErrorsForBPix1 = cms.bool(False),
    scaleFactor = cms.double(0.65)
)
|
def count_doubles(seq):
    """Return the number of index pairs (i, j) with i < j and seq[i] == seq[j]."""
    n = len(seq)
    # Compare every element against all later elements; O(n^2) overall.
    return sum(
        1
        for left in range(n)
        for right in range(left + 1, n)
        if seq[left] == seq[right]
    )
# Sanity checks: doubles are counted over all index pairs i < j with equal values.
assert count_doubles([]) == 0
assert count_doubles([1, 1]) == 1
assert count_doubles([1, 1, 1]) == 3
assert count_doubles([2, 2, 1, 2, 2]) == 6
assert count_doubles([4, 6, 3, 2, 4, 1, 8, 3, 8]) == 3
### Time complexity:
# The outer loop executes O(n) times
# The inner loop executes O(n) times
# Overall, the algorithm runs in O(n^2) time.
### When seq is sorted, the doubles can be found in linear time. Note that this is similar to the longest plateau exercises.
def count_doubles_sorted(seq):
    """Count doubles (pairs of equal elements) in a *sorted* sequence in O(n).

    Equal values in a sorted sequence form contiguous runs; a run of length
    n contributes n*(n-1)/2 doubles. Returns an int (the original used float
    division `/`, yielding e.g. 6.0 for a count — `//` keeps it integral).
    """
    start = 0   # index where the current run of equal values began
    result = 0
    for i in range(len(seq)):
        # INVARIANT: result contains the total number of doubles in seq[:start].
        if seq[i] != seq[start]:
            run_length = i - start
            result += run_length * (run_length - 1) // 2
            start = i
    # Account for the final run (also handles the empty-sequence case:
    # tail == 0 gives a zero contribution).
    tail = len(seq) - start
    return result + tail * (tail - 1) // 2
# Sanity checks: same expected counts as the quadratic version, on sorted input.
assert count_doubles_sorted([]) == 0
assert count_doubles_sorted([1, 1]) == 1
assert count_doubles_sorted([1, 1, 1]) == 3
assert count_doubles_sorted([1, 2, 2, 2, 2]) == 6
assert count_doubles_sorted([1, 2, 3, 3, 4, 4, 6, 8, 8]) == 3
### Time complexity:
# The only loop executes O(n) times, hence the overall algorithm has time complexity O(n)
|
from typing import Optional
from utils.validation import BaseModelExtend
class IndexModel(BaseModelExtend):
    """
    Validation model for the data used by the index (home) view function.
    """
    name: str
    age: int
    # NOTE(review): annotated Optional[int], but the field name suggests free
    # text — confirm whether Optional[str] was intended before changing, since
    # the annotation drives validation.
    description: Optional[int] = None
|
#!/usr/bin/python
import time
import sys
import atexit
from threading import Timer
import RPi.GPIO as GPIO
from birdy.twitter import UserClient
# The client is instantiated using the keys provided by the Twitter API
client = None
# The name of the file storing the development keys ...
twitterKeysFileName = 'twitter.txt'
# Display detailed information when set to true
verbose = False
# lastTweet is the ID of the last processed tweet. It's initialized to 1
# for the first call since the API returns an error with since_id=0
lastTweet = 1
# The filename used to save the id of the last tweet processed by the application
# This allows the Raspberry Pi or computer to be turned off without re-processing old tweets
fileName = 'id.txt'
# The delay between each call to the Twitter API. The function mentions_timeline is limited
# to 15 calls per block of 15 minutes. The extra 1 is to play safe.
sleepDelay = 61
# BCM pin numbers driven by the hashtag actions (see initGPIO/action1/action2).
IO_ACTION1 = 18
IO_ACTION2 = 23
# How long (seconds) each pin stays high after its action is triggered.
DELAY_ACTION1 = 5
DELAY_ACTION2 = 10
# Create the client using the keys found in the twitter.txt file
def createClient():
    """Instantiate the global Twitter client from the four keys in twitter.txt.

    The file is expected to hold one key per line (consumer key/secret,
    access token/secret, in the order UserClient expects them).
    """
    global client
    # Use a context manager so the handle is closed even if a read fails
    # (the original left the file open on any exception).
    with open(twitterKeysFileName, 'r') as file:
        keys = [file.readline().replace("\n", "") for _ in range(4)]
    client = UserClient(keys[0], keys[1], keys[2], keys[3])
# Display a message to the console if the verbose mode is enabled
def printInfo(msg):
    """Log *msg* to stdout only when the global verbose flag is set (Python 2)."""
    if verbose:
        print msg
# Print an error message
def printError(error):
    """Unconditionally print an error message to stdout."""
    print(error)
# Set the lastTweet variable to the most recent mention of the twitter account
def ignoreCurrentTweets():
    """Mark the newest current mention as read so existing tweets are skipped."""
    mentions = getTweets()
    if len(mentions) > 0:
        markAsRead(mentions[0].id)
# Process the command line arguments if any
def processArgv():
    """Handle command line flags: -i ignores current mentions, -v enables verbose mode."""
    global verbose
    for argument in sys.argv[1:]:
        if argument == '-i':
            print('-i: Ignoring current mentions')
            ignoreCurrentTweets()
        if argument == '-v':
            print('-v: Activating verbose mode')
            verbose = True
# Returns a list of tweets that mentioned the user using the @username tag
# A maximum of 100 tweets will be loaded since the last tweet identified by lastTweet
def getTweets():
    """Return up to 100 mentions newer than lastTweet, or [] on API failure.

    Uses the mentions_timeline endpoint of the global client.
    """
    try:
        tweets = client.api.statuses.mentions_timeline.get(count=100,since_id=lastTweet)
        printInfo('Found ' + str(len(tweets.data)) + ' mentions')
        return tweets.data
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); report only in verbose mode since
        # transient API errors are expected while polling.
        printInfo('Failed to load mentions from the Twitter API')
        return []
# Save the ID of the last tweet processed
def markAsRead(id):
    """Remember *id* as the last processed tweet and persist it to disk."""
    global lastTweet
    try:
        lastTweet = id
        # Context manager guarantees the file is closed even if write fails
        # (the original leaked the handle on error).
        with open(fileName, 'w') as file:
            file.write(str(id))
        printInfo('Last tweet mention processed: ' + str(id))
    except Exception:
        # Narrowed from a bare `except:`; the in-memory lastTweet is already
        # updated, only the on-disk persistence failed.
        printError('Failed to save the last processed tweet mention id ' + str(id))
# Load the ID of the last tweet processed from the file system
def loadLastTweetId():
    """Restore lastTweet from disk; on any failure all mentions will be loaded."""
    global lastTweet
    try:
        # Context manager replaces the manual open/close pair; the expected
        # failures are a missing file (IOError) or unparsable content
        # (ValueError) — narrowed from a bare `except:`.
        with open(fileName, 'r') as file:
            lastTweet = long(file.read())
        printInfo('Last tweet mention id found: ' + str(lastTweet))
    except (IOError, ValueError):
        printInfo('Last tweet mention id not found. Will load all mentions.')
# Read the content of the tweet message and trigger the right action
def processTweets(tweets):
    """Handle mentions oldest-first, invoking every matching hashtag action."""
    for tweet in reversed(tweets):
        text = tweet.text
        printInfo('Processing: ' + text)
        for hashtag in actions:
            if hashtag in text:
                actions[hashtag](text)
        markAsRead(tweet.id)
def clearGPIO():
    """Release all GPIO pins; registered below to run at interpreter exit."""
    GPIO.cleanup()
    print('Bye!')
# Ensure GPIO pins are released however the program terminates.
atexit.register(clearGPIO)
def initGPIO():
    """Configure the two action pins as outputs using BCM pin numbering."""
    GPIO.setmode(GPIO.BCM)
    # Consistency fix: use the named pin constants instead of repeating the
    # raw numbers 18/23, so the pin assignment lives in exactly one place.
    GPIO.setup(IO_ACTION1, GPIO.OUT)
    GPIO.setup(IO_ACTION2, GPIO.OUT)
def action1(text):
    """Drive IO_ACTION1 high; a one-shot timer lowers it after DELAY_ACTION1 s."""
    print('action1: ' + text)
    GPIO.output(IO_ACTION1, True)
    Timer(DELAY_ACTION1, action1Stop).start()
def action1Stop():
    """Timer callback: drive IO_ACTION1 low again."""
    GPIO.output(IO_ACTION1, False)
def action2(text):
    """Drive IO_ACTION2 high; a one-shot timer lowers it after DELAY_ACTION2 s."""
    print('action2: ' + text)
    GPIO.output(IO_ACTION2, True)
    Timer(DELAY_ACTION2, action2Stop).start()
def action2Stop():
    """Timer callback: drive IO_ACTION2 low again."""
    GPIO.output(IO_ACTION2, False)
def action3(text):
    """Placeholder action: only logs the tweet text (no GPIO side effect)."""
    print('action3: ' + text)
# A dict of functions associated to the hashtag they are associated to
actions = {'#action1':action1, '#action2':action2, '#action3':action3}
createClient()
initGPIO()
# Read the command line arguments
processArgv()
printInfo('rkTwitter starting ...')
# Load the ID of the last tweet read by the application from the file system
loadLastTweetId()
# Poll forever: fetch new mentions, run any matching hashtag actions, then
# sleep to stay within the API rate limit.
while True:
    tweets = getTweets()
    processTweets(tweets)
    # Wait before to call the API again to avoid being blocked
    time.sleep(sleepDelay)
|
# Small list-indexing demo: print the whole list, then elements 1, 0 and 2.
name_list = ['TOM', 'Lily', 'ROSE']
print(name_list)
for position in (1, 0, 2):
    print(name_list[position])
|
# # A child class inherits the parent class's variables and functions.
# # Parent class
# class Member:
#     def __init__(self):
#         self.memberid='0'
#     def __repr__(self): #__repr__ : customizes what print() displays
#         return 'memberid='+self.memberid
#
# # Child class
# # Inherit by writing the parent class name in parentheses after the class name
# class Student(Member):
#     pass
#
# s = Student() # receives the inherited Member
# print(s)
# If the child class defines __init__, the parent's __init__ is not called automatically, so it must be invoked explicitly
class Member:
    """Base class holding a member id; subclasses must call __init__."""
    def __init__(self):
        self.memberid='0'
    def __repr__(self):
        # Shown by print(); exposes the stored member id.
        return 'memberid='+self.memberid
class Student(Member):
    # Defining __init__ here suppresses the automatic call to the parent's
    # __init__, so it must be invoked explicitly via super().
    def __init__(self, major='computer'):
        super().__init__(); # Without this line, memberid cannot be used.
        self.major=major
# Both the inherited attribute and the child's own attribute are available.
s = Student()
print(s.memberid , s.major)
|
import os
import tensorflow as tf
# ================================================================
# Saving variables
# ================================================================
def load_state(fname):
    """Restore all variables of the default TF (v1) session from checkpoint *fname*."""
    saver = tf.train.Saver()
    saver.restore(tf.get_default_session(), fname)
def save_state(fname):
    """Save all variables of the default TF (v1) session to checkpoint *fname*.

    The target directory is created first so the saver does not fail on a
    missing path.
    """
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    saver = tf.train.Saver()
    saver.save(tf.get_default_session(), fname)
|
"""
gitstate: package to systematically list the state of imported packages under Git version control.
To ensure traceability in scientific results, it is helpful to know, as close as possible, the exact state of code that
produced figures or data. This package is meant to help with that.
Scenario 1: you're making plots in a Jupyter notebook and want to include information about the state of imported
packages used to make those figures::
from gitstate import print_repos
... make plots ...
print_repos()
This will print, to standard output, a list of imported packages under Git version control, giving the package name,
current HEAD SHA256 hash, current branch, and numbers of untracked and modified files.
Scenario 2: you're generating intermediate data and want a record of the exact state of the repositories used to create
that data::
    from gitstate import get_repo_states
... make data ...
    repo_states = get_repo_states()
# This is a stand-in for however you'd save the data
save( data, repo_states )
In this second example, ``repo_states`` will be a dictionary with keys equal to the package name, and values each a
dictionary storing the full HEAD sha256, the current branch name, the lists of untracked and modified files, and the
actual difference of the working directory vs. the HEAD (as a string).
Both of these will, internally, search _all_ imported packages/modules/classes/functions to see if they belong to a Git
repository on your Python path. This should exclude packages installed from PyPI, and assumes that the packages you
want to print the state of are ones you've installed to your system Python or environment by cloning or creating the
desired Git repository, then in the directory with the ``setup.py`` file, doing one of the following::
python setup.py install [--user]
python setup.py develop
pip install [-e] [-user] .
Under these assumptions, any of the functions in gitstate should automatically find these packages if they are imported.
For any functions in the top level of this repo that take a ``detail`` parameter, this controls how much information is
included in the output:
0: Only the HEAD SHA256 and the branch name
1: Level 0 plus the number of untracked and modified files
2: Level 1 plus the list of untracked and modified files
3: Level 2 plus the actual diff of the working directory against the HEAD.
"""
from . import repo_utils
#TODO: how to deal with subrepos?
# I've got it working that it'll at least find the parent repo, but it prints with the subrepo's package name.
# Two possibilities:
# a) make it so that the containing project, whether it's a package or not, is what gets printed
# b) filter the results to just the subrepo files, probably with a wrapper class around the repo, but then how to
# avoid double printing them from the parent project?
def print_repos(detail=1):
    """
    Print the state of all imported packages' repos to standard output.
    :param detail: optional, default is 1. Controls amount of information printed. See module help for the specific
     levels.
    :type detail: int
    :return: None
    """
    for pkg_name, pkg_repo in repo_utils.list_imported_repos().items():
        state_line = repo_utils.repo_state_string(pkg_repo, detail=detail)
        print(pkg_name + ': ' + state_line)
def save_states_to_file(filename, detail=3):
    """
    Write the repo states to the given file.
    :param filename: the path of the file to write to. Will be overwritten if exists.
    :type filename: str
    :param detail: optional, default is 3. Controls amount of information printed. See module help for the specific
     levels.
    :type detail: int
    :return: None
    """
    with open(filename, 'w') as fobj:
        for pkg_name, pkg_repo in repo_utils.list_imported_repos().items():
            state_line = repo_utils.repo_state_string(pkg_repo, detail=detail)
            fobj.write(pkg_name + ': ' + state_line + '\n')
def get_repo_states():
    """
    Get a dictionary describing the states of all imported packages' repos.
    :return: dictionary with package names as keys and subdicts describing the state as values.
    :rtype: dict
    """
    states = {}
    for pkg_name, pkg_repo in repo_utils.list_imported_repos().items():
        states[pkg_name] = repo_utils.list_repo_state(pkg_repo)
    return states
|
#
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import os
import numpy
import logging
import matplotlib.pyplot as plot
import matplotlib.ticker as mticker
import enum
import time
# Import required src
from usienarl import Environment, Volley, Agent, Interface
class EpisodeVolleyType(enum.Enum):
    """
    Enum type of the episode volley: training, validation or test.
    """
    training = 0    # agent learns (act_train / complete_*_train are used in run())
    validation = 1  # inference mode; episode plots are still saved
    test = 2        # inference mode; no plots are saved
class EpisodeVolley(Volley):
"""
Episode based volley. Used for training, validation and test volleys. It is executed for a certain amount of
episodes. When run in training and validation modes, plots of each episodes (averaged to always be 100 episodes)
are saved.
"""
    def __init__(self,
                 environment: Environment,
                 agent: Agent,
                 interface: Interface,
                 parallel: int,
                 episode_volley_type: EpisodeVolleyType,
                 plots_path: str or None, plots_dpi: int or None,
                 episodes_required: int, episode_length: int):
        """
        Build an episode-based volley.

        :param environment: the environment episodes are executed in
        :param agent: the agent acting in the environment
        :param interface: translator between environment states/actions and agent observations/actions
        :param parallel: number of episodes executed in parallel; must evenly divide episodes_required
        :param episode_volley_type: training, validation or test (drives agent mode and plotting)
        :param plots_path: directory for episode plots; required unless this is a test volley
        :param plots_dpi: DPI for saved plots; required unless this is a test volley
        :param episodes_required: total number of episodes to execute
        :param episode_length: maximum number of steps per episode
        """
        # Generate base volley
        super(EpisodeVolley, self).__init__(environment, agent, interface, parallel)
        # Make sure additional parameters are correct
        assert(episode_volley_type is not None)
        # Note: plots path and DPI are required only if the volley is not a test one
        if episode_volley_type != EpisodeVolleyType.test:
            assert(plots_path is not None and plots_path)
            assert(plots_dpi is not None and plots_dpi > 0)
        assert(episodes_required > 0 and episode_length > 0)
        assert(parallel > 0)
        assert(episodes_required % parallel == 0)
        # Define internal attributes
        self._episode_volley_type: EpisodeVolleyType = episode_volley_type
        self._plots_path: str or None = plots_path
        self._plots_dpi: int = plots_dpi
        self._episodes_required: int = episodes_required
        self._episode_length: int = episode_length
        # Define empty attributes (populated by _initialize() and run())
        self._last_episode_done: numpy.ndarray or None = None
        self._last_reward: numpy.ndarray or None = None
        self._avg_total_reward: float or None = None
        self._avg_scaled_reward: float or None = None
        self._std_total_reward: float or None = None
        self._std_scaled_reward: float or None = None
        self._avg_episode_length: int or None = None
        self._avg_action_duration: float or None = None
        self._rewards: [] = []
        self._total_rewards: [] = []
        self._scaled_rewards: [] = []
        self._episode_lengths: [] = []
        self._actions_durations: [] = []
def _initialize(self) -> bool:
# Reset empty attributes
self._last_episode_done = None
self._last_reward = None
self._avg_total_reward = None
self._avg_scaled_reward = None
self._std_total_reward = None
self._std_scaled_reward = None
self._avg_episode_length = None
self._avg_action_duration = None
self._rewards = []
self._total_rewards = []
self._scaled_rewards = []
self._episode_lengths = []
self._actions_durations = []
# This initialization always succeed
return True
def run(self,
logger: logging.Logger,
session,
render: bool = False):
# Print info
if self._episode_volley_type == EpisodeVolleyType.training:
logger.info("Training for " + str(self._episodes_required) + " episodes...")
elif self._episode_volley_type == EpisodeVolleyType.validation:
logger.info("Validating for " + str(self._episodes_required) + " episodes...")
else:
logger.info("Testing for " + str(self._episodes_required) + " episodes...")
# Get the amount of parallel batches required
parallel_episode_batches: int = self._episodes_required // self._parallel
# Execute the parallel episode batches
for parallel_episode_batch in range(parallel_episode_batches):
# Print current progress every once in a while (if length is not too short)
if parallel_episode_batches >= 100 and (parallel_episode_batch + 1) % (parallel_episode_batches // 10) == 0 and parallel_episode_batch > 0:
if self._episode_volley_type == EpisodeVolleyType.training:
logger.info("Trained for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
elif self._episode_volley_type == EpisodeVolleyType.validation:
logger.info("Validated for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
else:
logger.info("Tested for " + str((parallel_episode_batch + 1) * self._parallel) + "/" + str(self._episodes_required) + " episodes...")
# Initialize last reward and last episode done flags
self._last_reward = numpy.nan * numpy.ones(self._environment.parallel, dtype=float)
self._last_episode_done = numpy.zeros(self._environment.parallel, dtype=bool)
# Execute actions until the all parallel step batches are completed or the maximum episode length is exceeded
episode_rewards: [] = []
episode_actions_durations: [] = []
state_current: numpy.ndarray = self._environment.reset(logger, session)
for parallel_step_batch in range(self._episode_length):
# Get the action decided by the agent
observation_current: numpy.ndarray = self._interface.environment_state_to_observation(logger, session, state_current)
time_before_action = time.clock()
if self._episode_volley_type == EpisodeVolleyType.training:
agent_action: numpy.ndarray = self._agent.act_train(logger, session, self._interface, observation_current,
self._start_steps + self._steps, self._start_episodes + self._episodes)
else:
agent_action: numpy.ndarray = self._agent.act_inference(logger, session, self._interface, observation_current,
self._start_steps + self._steps, self._start_episodes + self._episodes)
time_after_action = time.clock()
# Save the time, converted to milliseconds
episode_actions_durations.append((time_after_action - time_before_action) * 1000)
# Get the next state with relative reward and episode done flag
environment_action: numpy.ndarray = self._interface.agent_action_to_environment_action(logger, session, agent_action)
state_next, reward, episode_done = self._environment.step(logger, session, environment_action)
# Send back information to the agent
observation_next: numpy.ndarray = self._interface.environment_state_to_observation(logger, session, state_next)
# Complete the step
if self._episode_volley_type == EpisodeVolleyType.training:
self._agent.complete_step_train(logger, session, self._interface,
observation_current, agent_action, reward, episode_done, observation_next,
self._start_steps + self._steps, self._start_episodes + self._episodes)
else:
self._agent.complete_step_inference(logger, session, self._interface,
observation_current, agent_action, reward, episode_done, observation_next,
self._start_steps + self._steps, self._start_episodes + self._episodes)
# Render if required
if render:
self._environment.render(logger, session)
# Save the reward at the last step
self._last_reward = numpy.where(episode_done * (1 - self._last_episode_done), reward, self._last_reward)
if parallel_step_batch + 1 == self._episode_length:
self._last_reward = numpy.where(numpy.isnan(self._last_reward), reward, self._last_reward)
# Add the reward to the list of rewards for this episode
# Note: make sure the reward saved is NaN for all already completed episodes in the parallel batch
episode_rewards.append(numpy.where(self._last_episode_done, numpy.nan, reward))
# Update the current state with the previously next state
state_current = state_next.copy()
# Increase the number of trained steps
# Note: the counter should be increased according to the completed episodes of the current parallel batch
self._steps += numpy.count_nonzero(episode_done == 0)
# Save volley steps at termination time
# Note: saving the step of each final step of each volley is required to compute the average episode length
step_array: numpy.ndarray = numpy.ones(self._environment.parallel, dtype=int) * (parallel_step_batch + 1)
self._episode_lengths += step_array[episode_done * numpy.logical_not(self._last_episode_done)].tolist()
if parallel_step_batch + 1 == self._episode_length:
self._episode_lengths += step_array[numpy.logical_not(episode_done)].tolist()
# Increase the episode counter
self._episodes += numpy.count_nonzero((episode_done * (1 - self._last_episode_done)) == 1)
# Save the current episode done flags
# Note: episode done flag is a numpy array so it should be copied
self._last_episode_done = episode_done.copy()
# Check if the episode is already completed
if all(episode_done):
break
# Consider done also all parallel episodes truncated but not terminated
self._episodes += numpy.count_nonzero(self._last_episode_done == 0)
# Save rewards per episode and total/scaled rewards
self._rewards.append(episode_rewards.copy())
self._total_rewards += numpy.nansum(numpy.array(episode_rewards), axis=0).tolist()
self._scaled_rewards += numpy.nanmean(numpy.array(episode_rewards), axis=0).tolist()
# Save the average actions duration for the episode
self._actions_durations.append(round(numpy.average(numpy.array(episode_actions_durations)), 3))
# Complete the episode and send back information to the agent
if self._episode_volley_type == EpisodeVolleyType.training:
self._agent.complete_episode_train(logger, session, self._interface,
self._last_reward, self._total_rewards[-1],
self._start_steps + self._steps, self._start_episodes + self._episodes)
else:
self._agent.complete_episode_inference(logger, session, self._interface,
self._last_reward, self._total_rewards[-1],
self._start_steps + self._steps, self._start_episodes + self._episodes)
# Compute statistics
self._avg_total_reward = numpy.round(numpy.average(numpy.array(self._total_rewards)), 3)
self._avg_scaled_reward = numpy.round(numpy.average(numpy.array(self._scaled_rewards)), 3)
self._std_total_reward = numpy.round(numpy.std(numpy.array(self._total_rewards)), 3)
self._std_scaled_reward = numpy.round(numpy.std(numpy.array(self._scaled_rewards)), 3)
self._avg_episode_length = numpy.rint(numpy.average(numpy.array(self._episode_lengths)))
self._avg_action_duration = numpy.round(numpy.average(numpy.array(self._actions_durations)), 3)
# Print results
if self._episode_volley_type == EpisodeVolleyType.training:
logger.info("Training for " + str(self._episodes_required) + " episodes finished with following result:")
elif self._episode_volley_type == EpisodeVolleyType.validation:
logger.info("Validating for " + str(self._episodes_required) + " episodes finished with following result:")
else:
logger.info("Testing for " + str(self._episodes_required) + " episodes finished with following result:")
logger.info("Average total reward: " + str(self._avg_total_reward))
logger.info("Standard deviation of total reward: " + str(self._std_total_reward))
logger.info("Average scaled reward: " + str(self._avg_scaled_reward))
logger.info("Standard deviation of scaled reward: " + str(self._std_scaled_reward))
logger.info("Average episode length: " + str(self._avg_episode_length) + " steps")
logger.info("Average action duration: " + str(self._avg_action_duration) + " msec")
# Save the episodes plots
self._save_plots(logger)
def _save_plots(self,
                logger: logging.Logger):
    """
    Save plots for each episode of the volley. If more than 100 episodes have been executed, an average of some
    interval of them is used to normalize to 100. This does nothing if volley type is test.

    :param logger: the logger used to print info messages
    """
    # If this is a test volley return
    if self._episode_volley_type == EpisodeVolleyType.test:
        return
    # Make sure there is a plots path
    assert (self._plots_path is not None and self._plots_path)
    # Make sure all lengths are the same
    assert (len(self._total_rewards) == len(self._scaled_rewards) == len(self._episode_lengths))
    # Print info
    volley_label: str = "training" if self._episode_volley_type == EpisodeVolleyType.training else "validation"
    logger.info("Save " + volley_label + " volley " + str(self._number) + " episodes plots...")
    # Compute statistic on the volley episodes
    # Note: if more than 100 episodes normalize to 100 episodes using interval averages (in order to avoid cluttering the plot)
    amount: int = len(self._total_rewards)
    interval: int = max(1, amount // 100)
    avg_total_rewards: list = [sum(self._total_rewards[i:i + interval]) / interval for i in range(0, amount, interval)]
    avg_scaled_rewards: list = [sum(self._scaled_rewards[i:i + interval]) / interval for i in range(0, amount, interval)]
    avg_episode_lengths: list = [sum(self._episode_lengths[i:i + interval]) / interval for i in range(0, amount, interval)]
    # Generate volley dir
    plot_directory = os.path.dirname(self._plots_path + "/volley_" + str(self._number) + "/")
    if not os.path.isdir(plot_directory):
        try:
            os.makedirs(plot_directory)
        except FileExistsError:
            pass
    # Common x-axis label, mentioning the averaging interval when one is used
    if interval > 1:
        x_label: str = volley_label.capitalize() + " episode (averaged every " + str(interval) + " episodes)"
    else:
        x_label: str = volley_label.capitalize() + " episode"
    # File names are kept byte-identical to the historic ones (note the
    # inconsistent "_v" suffixes, preserved on purpose for compatibility
    # with anything consuming the saved plots)
    prefix: str = plot_directory + "/v" + str(self._number) + "_"
    if self._episode_volley_type == EpisodeVolleyType.training:
        file_names = (prefix + "training_episodes_total_rewards.png",
                      prefix + "training_episodes_scaled_rewards.png",
                      prefix + "training_episodes_lengths_v.png")
    else:
        file_names = (prefix + "validation_episodes_total_rewards_v.png",
                      prefix + "validation_episodes_scaled_rewards_v.png",
                      prefix + "validation_episodes_lengths_v.png")
    # One plot per series: total rewards, scaled rewards and episode lengths
    # (the six duplicated plotting stanzas of the original are factored into
    # a single private helper)
    self._save_episode_plot(avg_total_rewards, 'r-', x_label, "Total reward", file_names[0])
    self._save_episode_plot(avg_scaled_rewards, 'r--', x_label, "Scaled reward", file_names[1])
    self._save_episode_plot(avg_episode_lengths, 'b-.', x_label, "Episode length (steps)", file_names[2])
    # Print info
    if self._episode_volley_type == EpisodeVolleyType.training:
        logger.info("Plots of training volley " + str(self._number) + " saved successfully")
    else:
        logger.info("Plots of validation volley " + str(self._number) + " saved successfully")

def _save_episode_plot(self,
                       values: list,
                       style: str,
                       x_label: str,
                       y_label: str,
                       file_name: str):
    """
    Plot a single per-episode series with integer x-ticks, save it to the given file and clear the figure.

    :param values: the y-values of the series (one entry per (averaged) episode)
    :param style: the matplotlib line style (e.g. 'r-')
    :param x_label: label for the x-axis
    :param y_label: label for the y-axis
    :param file_name: full path of the png file to write
    """
    plot.plot(list(range(len(values))), values, style)
    plot.gca().xaxis.set_major_locator(mticker.MaxNLocator(integer=True))
    plot.xlabel(x_label)
    plot.ylabel(y_label)
    plot.savefig(file_name, dpi=self._plots_dpi, transparent=True)
    plot.clf()
@property
def last_episode_done(self) -> numpy.ndarray or None:
    """
    The episode done flag in the last step of the last episode of the volley (read-only).
    It is wrapped in a numpy array.
    It is None if volley is not setup.
    """
    # Note: the `X or None` annotation evaluates to just X at def time; it is
    # kept as-is because it only documents that the value may be None.
    return self._last_episode_done
@property
def last_reward(self) -> numpy.ndarray or None:
    """
    The reward in the last step of the last episode of the volley (read-only).
    It is wrapped in a numpy array.
    It is None if volley is not setup.
    """
    return self._last_reward
@property
def avg_total_reward(self) -> float or None:
    """
    The average total reward over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._avg_total_reward
@property
def avg_scaled_reward(self) -> float or None:
    """
    The average scaled reward over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._avg_scaled_reward
@property
def std_total_reward(self) -> float or None:
    """
    The standard deviation of the total reward over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._std_total_reward
@property
def std_scaled_reward(self) -> float or None:
    """
    The standard deviation of the scaled reward over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._std_scaled_reward
@property
def avg_episode_length(self) -> float or None:
    """
    The average episode length (in steps) over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._avg_episode_length
@property
def avg_action_duration(self) -> float or None:
    """
    The average action duration in milliseconds (msec) over all episodes of the volley (read-only).
    It is None if volley is not setup or if volley has not finished execution.
    """
    return self._avg_action_duration
@property
def rewards(self) -> []:
    """
    The list of rewards per step grouped by episode of all episodes already executed.
    Each element is the list of (non-NaN) step rewards of one episode.
    """
    # Each stored reward block holds one array per parallel step; column i of
    # a block belongs to the i-th episode that ran in parallel in that block.
    grouped = []
    for reward_block in self._rewards:
        for episode_index in range(len(reward_block[0])):
            episode_rewards = [
                parallel_step[episode_index]
                for parallel_step in reward_block
                if not numpy.isnan(parallel_step[episode_index])
            ]
            grouped.append(episode_rewards)
    return grouped
@property
def total_rewards(self) -> []:
    """
    The list of total rewards of all episodes already executed (read-only).
    """
    return self._total_rewards
@property
def scaled_rewards(self) -> []:
    """
    The list of scaled rewards of all episodes already executed (read-only).
    """
    return self._scaled_rewards
@property
def episode_lengths(self) -> []:
    """
    The list of episode lengths (in steps) of all episodes already executed (read-only).
    """
    return self._episode_lengths
@property
def action_durations(self) -> []:
    """
    The list of average action durations of all episodes already executed (read-only).
    """
    return self._actions_durations
|
# Demonstrate the three file-reading APIs: read(), readline() and readlines().
# Fix: the original left both file handles to be closed manually; `with`
# guarantees they are closed even if an exception occurs mid-way.
with open("testfile0.txt", 'w') as f:
    f.write("This is testfile0\n")
    f.write("Testing the skills\n \nHave a nice day!\n")

with open("testfile0.txt", 'r') as f:
    print("------- f.read() -------")
    print(f.read())
    f.seek(0)  # rewind so the next call starts from the top
    print("------- f.readline() -------")
    print(f.readline())
    f.seek(0)
    print("------- f.readlines() -------")
    print(f.readlines())
|
from PyPDF2 import PdfFileReader
import coletaLinkPDFSoja as pdfSoja
import pandas as pd
from tabula import read_pdf
import re
import bancoDados as bd
#função para limpar sujeira coluna Nordeste
def limpaValores(strLimpar):
if "/" in strLimpar:
arrTemp = strLimpar.split('/')
formulaRegex = r"\D"
valorPosVirgula = re.sub(formulaRegex, "", arrTemp[1])
ValorAntesVirgula = str(arrTemp[0][len(arrTemp[0])-3])+str(arrTemp[0][-1])
retorno = str(ValorAntesVirgula) + '.' + str(valorPosVirgula)
else:
retorno = strLimpar
return retorno
def ajustaData(strData):
    """
    Convert a 'DD-mon-YY' date (Portuguese month abbreviation) into '20YY-M-DD'.

    Example: '10-jan-21' -> '2021-1-10'
    """
    month_by_abbr = {
        'jan': '1',
        'fev': '2',
        'mar': '3',
        'abr': '4',
        'mai': '5',
        'jun': '6',
        'jul': '7',
        'ago': '8',
        'set': '9',
        'out': '10',
        'nov': '11',
        'dez': '12',
    }
    parts = strData.split('-')
    # Century is hard-wired to 2000; month number has no zero padding
    return '20' + parts[2] + '-' + month_by_abbr[parts[1]] + '-' + parts[0]
# Function to process the PDF files
def processaPDF():
    """
    Scrape the newest soy-production PDF, clean it into a tidy DataFrame and
    insert any (date, region) rows not yet present into soja.producaoSoja.
    """
    pdfurls = pdfSoja.scrapingPDF()
    # Take the first entry only: the latest published PDF accumulates the data
    tabelaComum = read_pdf(pdfurls[0])
    # Turn the table extracted from the PDF into a DataFrame
    dfSoja = pd.DataFrame(tabelaComum[0])
    dfSoja.pop('Unnamed: 0')
    # Strip '%' signs and switch decimal commas to dots in every column
    for coluna in dfSoja.columns:
        dfSoja[coluna] = dfSoja[coluna].apply(lambda x : str(x).replace("%", ""))
        dfSoja[coluna] = dfSoja[coluna].apply(lambda x : str(x).replace(",", "."))
    # Remove the '10/jan' artifact left by the PDF extraction
    dfSoja['Regiões do IMEA Centro-Sul'] = dfSoja['Regiões do IMEA Centro-Sul'].apply(lambda x : str(x).replace("10/jan", ""))
    # The first column mixes date and value: split into 'data' and 'Centro-Sul'
    new = dfSoja["Regiões do IMEA Centro-Sul"].str.split(" ", n = 1, expand = True)
    dfSoja['data'] = new[0]
    dfSoja['Centro-Sul'] = new[1]
    dfSoja.pop('Regiões do IMEA Centro-Sul')
    # Drop the last 6 generated rows to clean up the dataframe
    dfSoja = dfSoja.apply(lambda x: x.head(-6)).reset_index(0, drop=True)
    dfSoja['data'] = dfSoja['data'].apply(ajustaData)
    # Clean the column generated with garbage
    dfSoja['Nordeste'] = dfSoja['Nordeste'].map(limpaValores)
    df_unpivoted = dfSoja.melt(id_vars=['data', ], var_name='regioesIMEA', value_name='percentual')
    # NOTE(review): the SQL below is built by string concatenation from the
    # scraped values -- prefer parameterized queries to avoid SQL injection
    # and quoting problems.
    sql = "insert into soja.producaoSoja(\
        datacotacao, \
        regioesimea, \
        percentual\
        ) values"
    conectaInsert = ''
    executa = False
    for index, row in df_unpivoted.iterrows():
        bd.cur.execute("select datacotacao\
            from soja.producaosoja \
            where datacotacao = '" + row['data'] + "' \
            and regioesimea = '" + row['regioesIMEA'] + "'")
        recset = bd.cur.fetchall()
        # Only queue rows whose (date, region) pair is not in the table yet
        if len(recset) <= 0:
            sql = sql + conectaInsert + "(\
                '" + row['data'] +"',\
                '" + row['regioesIMEA'] +"',\
                " + row['percentual'] +"\
                )"
            executa = True
            conectaInsert = ', '
    # Execute the accumulated multi-row insert only if something is new
    if executa:
        bd.cur.execute(sql)
        bd.con.commit()
    bd.con.close()
    print(df_unpivoted)
|
# Test your code incrementally rather than all at the end: errors are easier
# to locate. Use the interpreter's error messages to fix your code.
valor_compra = float(input("Valor da compra: "))
desconto = valor_compra * (5 / 100)  # 5% discount
if valor_compra >= 200:
    # Purchases of 200 or more get the discount, rounded to cents
    print(round(valor_compra - desconto, 2))
else:
    print(valor_compra)
import typing as t
import numpy as np
import matplotlib.pyplot as plt
import sklearn.decomposition
import sklearn.datasets
class Eigenfaces(sklearn.base.TransformerMixin):
    """
    PCA-based 'eigenfaces' feature extractor.

    Fits a (whitened) PCA on flattened face images and exposes the principal
    components reshaped back to image space as `eigenfaces`.
    """

    def __init__(
        self,
        face_shape: t.Tuple[int, int],
        copy: bool = True,
        n_components: t.Optional[t.Union[float, int, str]] = 128,
        iterated_power: t.Union[int, str] = "auto",
        svd_solver: str = "randomized",
        random_state: t.Optional[int] = None,
    ):
        # PCA hyperparameters are forwarded unchanged; whitening is always on.
        self.pca = sklearn.decomposition.PCA(
            n_components=n_components,
            svd_solver=svd_solver,
            whiten=True,
            iterated_power=iterated_power,
            random_state=random_state,
        )
        self.eigenfaces = np.empty(0)  # filled in by fit()
        self.copy = copy               # when True, fit() works on a copy of X
        self.face_shape = face_shape   # (height, width) used to reshape components

    def fit(self, X, y=None):
        """Fit the PCA on X (n_samples x n_pixels) and cache the eigenfaces."""
        if self.copy:
            X = np.copy(X).astype(float, copy=False)
        else:
            # Fix: np.asfarray was removed in NumPy 2.0; asarray with an
            # explicit float dtype is the supported equivalent (still avoids
            # a copy when X is already a float64 array).
            X = np.asarray(X, dtype=float)
        self.pca.fit(X)
        # Each principal component is a flattened image: reshape for display.
        self.eigenfaces = self.pca.components_.reshape(
            (self.pca.n_components_, *self.face_shape)
        )
        return self

    def transform(self, X, y=None):
        """Project X onto the fitted principal components."""
        return self.pca.transform(X)
def plot_samples(X, rows: int = 4, cols: int = 4, random: bool = True,
                 face_shape: t.Tuple[int, int] = (62, 47)):
    """
    Show a rows x cols grid of face images taken from the flattened dataset X.

    :param X: 2D array, one flattened image per row
    :param rows: grid rows
    :param cols: grid columns
    :param random: pick random samples when True, else the first rows*cols
    :param face_shape: (height, width) to reshape each row into
        (generalized: was hard-coded to (62, 47))
    """
    # squeeze=False keeps axes 2D even when rows == 1 or cols == 1
    fig, axes = plt.subplots(rows, cols, squeeze=False)
    if random:
        samples = np.random.randint(X.shape[0], size=rows * cols)
    else:
        samples = np.arange(rows * cols)
    for i in range(rows * cols):
        im = X[samples[i]].reshape(face_shape)
        # Fix: the row index is i // cols (the original used i // rows, which
        # mis-indexes the grid whenever rows != cols)
        ax = axes[i // cols][i % cols]
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.imshow(im, cmap="hot")
    plt.axis("off")
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.show()
def load_data(print_shape: bool = True):
    """
    Fetch the LFW people dataset, keeping only people with >= 25 images.

    :param print_shape: when True, print the shapes of the returned arrays
    :return: (features, labels) as returned by fetch_lfw_people
    """
    features, labels = sklearn.datasets.fetch_lfw_people(min_faces_per_person=25, return_X_y=True)
    if print_shape:
        print("X shape:", features.shape)
        print("y shape:", labels.shape)
    return features, labels
def _test():
    """Train an SVM on eigenface projections and compare to a raw-pixel baseline."""
    import sklearn.svm
    import sklearn.model_selection
    import sklearn.metrics
    import scipy.stats
    import pandas as pd  # NOTE(review): imported but unused in this function

    # Set to True to tune C/gamma with a randomized search instead of the
    # hand-picked values below
    random_search = False

    X, y = load_data(print_shape=True)
    # plot_samples(X, random=True)
    X_train, X_eval, y_train, y_eval = sklearn.model_selection.train_test_split(
        X,
        y,
        test_size=0.2,
        shuffle=True,
        random_state=16,
    )
    model = Eigenfaces(
        face_shape=(62, 47), n_components=130, random_state=16, iterated_power=10
    )
    proj_train = model.fit_transform(X_train)
    proj_eval = model.transform(X_eval)
    # plot_samples(model.eigenfaces, random=False)
    classifier = sklearn.svm.SVC(
        C=40000,
        gamma=0.0035,
        kernel="rbf",
        cache_size=2000,
        random_state=16,
    )
    if random_search:
        param_distributions = {
            "C": scipy.stats.uniform(1e3, 1e6),
            "gamma": scipy.stats.loguniform(1e-4, 1e-2),
        }
        classifier = sklearn.model_selection.RandomizedSearchCV(
            classifier,
            param_distributions,
            cv=5,
            n_iter=10,
            n_jobs=-1,
            scoring="f1_weighted",
            random_state=16,
        )
    classifier.fit(proj_train, y_train)
    if random_search:
        print("Best parameters from random search:", classifier.best_params_)
    classifier_preds = classifier.predict(proj_eval)
    # Baseline: linear SVM on the raw pixels, no eigenface projection
    baseline = sklearn.svm.SVC(kernel="linear", cache_size=2000)
    baseline.fit(X_train, y_train)
    baseline_preds = baseline.predict(X_eval)
    # NOTE(review): predictions are passed in the first (y_true) slot of
    # f1_score/accuracy_score; accuracy is symmetric but weighted F1 is
    # not -- confirm the intended argument order.
    classifier_f1 = sklearn.metrics.f1_score(
        classifier_preds, y_eval, average="weighted"
    )
    baseline_f1 = sklearn.metrics.f1_score(baseline_preds, y_eval, average="weighted")
    classifier_accuracy = sklearn.metrics.accuracy_score(classifier_preds, y_eval)
    baseline_accuracy = sklearn.metrics.accuracy_score(baseline_preds, y_eval)
    # Majority-class frequency as a trivial reference point
    cls, freqs = np.unique(y_train, return_counts=True)
    print(f"Maj class : {np.max(freqs / float(np.sum(freqs))):.4f}")
    print(f"Baseline ACC : {baseline_accuracy:.4f}")
    print(f"Classifier ACC : {classifier_accuracy:.4f}")
    print(f"Baseline F1 : {baseline_f1:.4f}")
    print(f"Classifier F1 : {classifier_f1:.4f}")


if __name__ == "__main__":
    _test()
|
#!/usr/bin/env python3
import sys, os
import socket
import filecmp
import subprocess
import time
# File-transfer test configuration
chunk_size = 2045  # bytes sent per socket write
delay = 0.01       # seconds slept between chunks (throttles the sender)
sizelist = [500, 1048576, 104857600, 104729] # throw in a prime number, why not
srv_port = '3000'
srv_host = 'localhost'
srv_dir = 'save/'     # directory the server under test writes received files into
inf_dir = 'infiles/'  # directory the generated input files live in
infile = inf_dir + 'infile{}.txt'
# Start from a clean slate: remove old directories and kill any running server.
# NOTE(review): 'rm -rf' and 'pkill server' are destructive -- run this only
# in a disposable test environment.
subprocess.run(['rm', '-rf', srv_dir])
subprocess.run(['rm', '-rf', inf_dir])
os.mkdir(srv_dir)
os.mkdir(inf_dir)
subprocess.run(['pkill', 'server'])
# Launch the server under test on the chosen port, saving into srv_dir
server = subprocess.Popen(['./server', srv_port, srv_dir])
def create_file(name, size):
    """Write `size` bytes of random data to the file called `name`."""
    payload = os.urandom(size)
    with open(name, 'wb') as out:
        out.write(payload)
time.sleep(1)  # give the server a moment to start listening
# Only the first two sizes are exercised (note the [:2] slice); files are
# numbered from 1 to match the server's expected output names.
for i, size in enumerate(sizelist[:2], start=1):
    create_file(infile.format(i), size)
    with open(infile.format(i), 'rb') as f:
        s = socket.create_connection((srv_host, srv_port))
        b = f.read(chunk_size)
        while b:
            time.sleep(delay)
            # NOTE(review): socket.send may transmit fewer bytes than given;
            # sendall would guarantee full delivery -- confirm the intent.
            s.send(b)
            b = f.read(chunk_size)
        s.close()
    time.sleep(0.1)
    # Compare the sent file with what the server saved.
    # NOTE(review): path is hard-coded as 'save/' rather than srv_dir, and the
    # server is assumed to name files '<n>.file' -- confirm against the server.
    if filecmp.cmp(infile.format(i), 'save/{}.file'.format(i), shallow=False):
        print('{}B file transfer successful'.format(size))
    else:
        print('file transfer failed')
server.kill()
|
import numpy as np
import pandas as pd
import pytest
from locan.data.locdata_utils import (
_bump_property_key,
_check_loc_properties,
_get_linked_coordinates,
_get_loc_property_key_per_dimension,
)
@pytest.fixture()
def df_only_coordinates():
    """Frame with y/z coordinate columns only (values 0, 1, 2)."""
    return pd.DataFrame(
        {"position_y": np.arange(3), "position_z": np.arange(3)}
    )
@pytest.fixture()
def df_with_zero_uncertainty():
    """Coordinates plus uncertainty columns whose first entry is zero."""
    columns = {
        "position_y": np.arange(3),
        "uncertainty": np.arange(3),
        "position_z": np.arange(3),
        "uncertainty_z": np.arange(3),
    }
    return pd.DataFrame(columns)
@pytest.fixture()
def df_with_uncertainty():
    """Two localizations with strictly positive uncertainties (values 1, 2)."""
    columns = {
        "position_y": np.arange(1, 3),
        "uncertainty": np.arange(1, 3),
        "position_z": np.arange(1, 3),
        "uncertainty_z": np.arange(1, 3),
    }
    return pd.DataFrame(columns)
@pytest.fixture()
def df_empty():
    """Completely empty localization frame."""
    return pd.DataFrame()
@pytest.fixture()
def df_single():
    """A single localization with a z uncertainty but no y uncertainty."""
    return pd.DataFrame(
        {
            "position_y": [1],
            "position_z": [2],
            "uncertainty_z": [5],
        }
    )
def test__get_loc_property_key_per_dimension(df_with_uncertainty):
    """Check per-dimension lookup of localization property keys."""
    # Only y and z position columns exist: the x slot comes back as None
    results = _get_loc_property_key_per_dimension(
        locdata=df_with_uncertainty, property_key="position"
    )
    assert results == [None, "position_y", "position_z"]
    # The unsuffixed 'uncertainty' column fills dimensions without a
    # dimension-specific key; 'uncertainty_z' wins for z
    results = _get_loc_property_key_per_dimension(
        locdata=df_with_uncertainty, property_key="uncertainty"
    )
    assert results == ["uncertainty", "uncertainty", "uncertainty_z"]
    # Unknown property keys yield None for every dimension
    results = _get_loc_property_key_per_dimension(
        locdata=df_with_uncertainty, property_key="not_existing"
    )
    assert results == [None, None, None]
def test__get_linked_coordinates(
    df_only_coordinates,
    df_with_zero_uncertainty,
    df_with_uncertainty,
    df_empty,
    df_single,
    caplog,
):
    """Exercise _get_linked_coordinates over all the fixture frames."""
    # Without uncertainty columns the mean of 0,1,2 is 1.0 and the reported
    # uncertainty is 0.577... = 1/sqrt(3) (presumably a standard-error
    # fallback -- confirm against the implementation).
    results = _get_linked_coordinates(locdata=df_only_coordinates)
    assert results == {
        "position_y": 1.0,
        "uncertainty_y": 0.5773502691896257,
        "position_z": 1.0,
        "uncertainty_z": 0.5773502691896257,
    }
    # A zero uncertainty makes the weights undefined: every result is nan and
    # a warning (level 30) is logged.
    results = _get_linked_coordinates(locdata=df_with_zero_uncertainty)
    assert all(np.isnan(results[key]) for key in results.keys())
    assert caplog.record_tuples[-1] == (
        "locan.data.locdata_utils",
        30,
        "Zero uncertainties occurred resulting in nan for weighted_mean and weighted_variance.",
    )
    # Proper uncertainties: weighted mean/uncertainty of the two localizations
    results = _get_linked_coordinates(locdata=df_with_uncertainty)
    assert results == {
        "position_y": 1.2,
        "uncertainty_y": 0.32,
        "position_z": 1.2,
        "uncertainty_z": 0.32,
    }
    # Restricting to a single coordinate key returns only that dimension
    results = _get_linked_coordinates(
        locdata=df_only_coordinates, coordinate_keys="position_y"
    )
    assert results == {"position_y": 1.0, "uncertainty_y": 0.5773502691896257}
    results = _get_linked_coordinates(
        locdata=df_with_uncertainty, coordinate_keys="position_y"
    )
    assert results == {
        "position_y": 1.2,
        "uncertainty_y": 0.32,
    }
    results = _get_linked_coordinates(
        locdata=df_only_coordinates, coordinate_keys="position_z"
    )
    assert results == {"position_z": 1.0, "uncertainty_z": 0.5773502691896257}
    results = _get_linked_coordinates(
        locdata=df_with_uncertainty, coordinate_keys="position_z"
    )
    assert results == {
        "position_z": 1.2,
        "uncertainty_z": 0.32,
    }
    # Edge cases: an empty frame yields an empty dict; a single localization
    # is returned unchanged with its given (or zero) uncertainty.
    results = _get_linked_coordinates(locdata=df_empty)
    assert results == {}
    results = _get_linked_coordinates(locdata=df_single)
    assert results == {
        "position_y": 1,
        "uncertainty_y": 0,
        "position_z": 2,
        "uncertainty_z": 5,
    }
def test__bump_property_label():
    """Check that _bump_property_key produces collision-free property names."""
    # A free extension is appended as-is
    new_property = _bump_property_key(
        loc_properties=["test", "other_test"],
        loc_property="test",
        extension="_extended",
    )
    assert new_property == "test_extended"
    # With the default extension the candidate collides with the existing
    # "test_0" and is bumped again (presumably by re-appending "_0")
    new_property = _bump_property_key(
        loc_properties=["test", "test_0"],
        loc_property="test",
    )
    assert new_property == "test_0_0"
def test__check_loc_properties(locdata_2d):
    """Validate _check_loc_properties normalization and error handling.

    NOTE(review): `locdata_2d` is a fixture defined outside this module
    (presumably in conftest.py).
    """
    # Unknown property names raise
    with pytest.raises(ValueError):
        _check_loc_properties(locdata=locdata_2d, loc_properties=["test"])
    # A single string is normalized to a one-element list
    result = _check_loc_properties(locdata=locdata_2d, loc_properties="position_x")
    assert result == ["position_x"]
    # None falls back to the coordinate properties
    result = _check_loc_properties(locdata=locdata_2d, loc_properties=None)
    assert result == ["position_x", "position_y"]
    # Any iterable of existing column names is accepted and listified
    result = _check_loc_properties(
        locdata=locdata_2d, loc_properties=locdata_2d.data.columns
    )
    assert result == locdata_2d.data.columns.tolist()
|
import sys
from datetime import *
#from __future__ import print_function
def getTime():
    """Return the current local datetime (module-level convenience helper)."""
    return datetime.now()
class Timer:
    """Timer: A simple stopwatch for timing processes."""

    def __init__(self):
        """Create a stopwatch; start and stop are both 'now', delta is zero."""
        self.startTime = datetime.now()
        self.stopTime = self.startTime
        # Fix: start with a zero timedelta instead of the float 0.0, so that
        # delta / getTime() / printTime() always deal with a single type
        # (datetime.timedelta) whether or not stop() has run yet.
        self.delta = timedelta(0)

    def start(self):
        """Record the starting instant."""
        self.startTime = datetime.now()

    def stop(self):
        """Record the stopping instant and update the elapsed delta."""
        self.stopTime = datetime.now()
        self.__computeDelta()

    def __computeDelta(self):
        # Elapsed time between the most recent start() and stop()
        self.delta = self.stopTime - self.startTime

    def getTime(self):
        """Return the elapsed time as a datetime.timedelta."""
        return self.delta

    def printTime(self):
        """Print the elapsed time in a human-readable form."""
        print("Time between start and stop: " + str(self.delta))
class Log:
    """Log: A simple logging utility that incorporates timers."""

    def __init__(self, outFile='default.log', verbose=True, timeStamp=True):
        self.outFile = outFile      # str: name of the output file to be created
        self.verbose = verbose      # bool: whether log messages are echoed to the console
        self.timeStamp = timeStamp  # bool: whether log messages are time stamped
        self.__file = ''            # file handle (private); '' until open() succeeds
        self.masterTimer = Timer()  # Timer: tracks the entire time the file is open
        self.processTimer = Timer() # Timer: used for timing blocks of code
        self.__start = True         # bool (private): True when the next profile() call should start the timer

    def __timeStamp(self):
        """Return the current datetime so log entries can be stamped."""
        return datetime.now()

    def write(self, msg, verboseOverride=False, timeStampOverride=False):
        """
        Write a message to the log.

        :param msg: the string to write to the log
        :param verboseOverride: force echoing to the console; can only add
            console output, never hide it when self.verbose is True
        :param timeStampOverride: same, but for the time stamp
        """
        ts = self.__timeStamp()
        # When verbose, the timestamp is ALWAYS written to the console, even
        # if it is not written to the log file
        if self.verbose or verboseOverride:
            print(("[%s] %s" % (ts, msg)))
        if self.timeStamp or timeStampOverride:
            print("[%s]" % (ts), end=' ', file=self.__file)
        print("%s" % msg, file=self.__file)

    def profile(self, msg, verboseOverride=False, timeStampOverride=True):
        """
        Write a message to the log and start/stop the process timer.

        Calls are used in pairs: the first call starts the timer, the second
        stops it and additionally logs the elapsed time, and so on
        alternately.

        :param msg: the string to write to the log
        :param verboseOverride: see write()
        :param timeStampOverride: see write()
        """
        if self.__start:
            self.processTimer.start()
            self.__start = False
            self.write(msg, verboseOverride, timeStampOverride)
        else:
            self.processTimer.stop()
            self.__start = True
            # Get the time delta and log it alongside the message
            processTime = self.processTimer.getTime()
            self.write(msg, verboseOverride, timeStampOverride)
            self.write("Elapsed process time: %s" % (processTime), verboseOverride, timeStampOverride)

    def open(self):
        """Open the log file for writing and set the file handle, or exit on failure."""
        try:
            self.__file = open(self.outFile, 'w')
        except OSError:
            # Fix: the original caught the undefined name `IOerr`, which would
            # itself raise a NameError on any I/O failure; OSError also covers
            # the legacy IOError alias on Python 3.
            print(("Error opening %s for writing." % (self.outFile)))
            sys.exit(-1)
        # Start the master timer
        self.masterTimer.start()
        # Broadcast to console, timestamp the file, write the header
        self.write("%s opened for writing." % (self.outFile), True, True)
        self.write("================================================", True)

    def close(self):
        """Close the file from further writing, logging the total open time."""
        # Stop the master timer
        self.masterTimer.stop()
        self.write("================================================", True)
        # Show the total time the file was open (delta of the master timer)
        masterDelta = self.masterTimer.getTime()
        self.write("%s was open for %s" % (self.outFile, masterDelta), True, True)
        # Broadcast to console, timestamp the file, write the footer
        self.write("%s closed for writing." % (self.outFile), True, True)
        # Close the file from further writing
        self.__file.close()
|
import copy as cp
# Heuristic table: straight-line distance from each city to city 'B'
# (presumably Bucharest in the classic AIMA Romania route-finding map --
# TODO confirm). Used as h(x) by city.getF_x().
distanceToB = {
    'A' : 366,
    'B' : 0,
    'C' : 160,
    'D' : 242,
    'E' : 161,
    'F' : 178,
    'G' : 77,
    'H' : 151,
    'I' : 226,
    'L' : 244,
    'M' : 241,
    'N' : 234,
    'O' : 380,
    'P' : 98,
    'R' : 193,
    'S' : 253,
    'T' : 329,
    'U' : 80,
    'V' : 199,
    'Z' : 374
}
class city:
    """A map node with a name and a list of [neighbor_name, distance] links."""

    # Fix: the former class-level attributes (name = '', linkedCity = [])
    # were removed. A mutable class-level list is shared by every instance
    # and is a classic aliasing trap; both attributes are always set in
    # __init__, so the class-level defaults served no purpose.

    def __init__(self, name, linked):
        self.name = name
        # Deep copy so later mutation of the caller's list cannot corrupt the map
        self.linkedCity = cp.deepcopy(linked)

    def getF_x(self, length):
        """
        Return f = g + step cost + heuristic for every linked city.

        :param length: the path cost accumulated so far (g)
        """
        f_x = []
        for i in range(len(self.linkedCity)):
            f_x.append(self.linkedCity[i][1] + length + distanceToB[self.linkedCity[i][0]])
        return f_x

    def getLinkedName(self, index):
        """Name of the index-th neighbor."""
        return self.linkedCity[index][0]

    def getLinkedNDis(self, index):
        """Distance to the index-th neighbor."""
        return self.linkedCity[index][1]
def nameFind(n, m):
    """Return the city in m whose name equals n, or None if absent."""
    for candidate in m:
        if candidate.name == n:
            return candidate
def findIndex(value, list):
    """Index of the first element equal to value, or None when not present."""
    for position, element in enumerate(list):
        if element == value:
            return position
def checkPath(city, list, f_x):
    """
    Mask already-visited neighbors so they are never chosen again.

    For every neighbor of `city` (a list of [name, distance] pairs) whose
    name already appears in the visited `list`, the corresponding entry of
    `f_x` is overwritten in place with 99999.

    Bug fixes vs. the original:
    - `f_x[j] == 99999` was a no-op comparison; it is now an assignment.
    - an unconditional `return` inside the outer loop stopped processing
      after the first visited name; all visited names are now processed.
    """
    for visited_name in list:
        for j in range(len(city)):
            if city[j][0] == visited_name:
                f_x[j] = 99999
def buildMap():
    """Build the whole city graph and return it as a list of city objects."""
    # Adjacency data: neighbor name and edge distance for every city,
    # in the same order as the original hand-written constructors.
    links = {
        'A': [['Z', 75], ['T', 118], ['S', 140]],
        'B': [['U', 85], ['G', 90], ['P', 101], ['F', 211]],
        'C': [['D', 120], ['P', 138], ['R', 146]],
        'D': [['M', 70], ['C', 120]],
        'E': [['H', 86]],
        'F': [['S', 99], ['B', 211]],
        'G': [['B', 90]],
        'H': [['E', 86], ['U', 98]],
        'I': [['N', 87], ['V', 92]],
        'L': [['M', 70], ['T', 111]],
        'M': [['L', 70], ['D', 75]],
        'N': [['I', 87]],
        'O': [['Z', 71], ['S', 151]],
        'P': [['R', 97], ['B', 101], ['C', 138]],
        'R': [['S', 80], ['P', 97], ['C', 146]],
        'S': [['R', 80], ['F', 99], ['A', 140], ['O', 151]],
        'T': [['L', 111], ['A', 118]],
        'U': [['B', 85], ['H', 98], ['V', 142]],
        'V': [['I', 92], ['U', 142]],
        'Z': [['O', 71], ['A', 75]],
    }
    return [city(name, neighbors) for name, neighbors in links.items()]
def findPath():
    """
    Interactively read a start and destination city, then walk the map
    greedily: at every node pick the neighbor with the smallest
    f = (path length so far) + (step cost) + (straight-line distance to 'B'),
    skipping neighbors already on the path.

    NOTE(review): the heuristic table distanceToB targets city 'B' only, so
    the f-values only make sense for destination 'B' -- confirm intended use.
    """
    map = buildMap()    # all cities (shadows the builtin `map`)
    length = 0          # accumulated path cost g
    path = []           # visited city names, in order
    lengthCount = []    # value of g when each city was reached
    start = city('', [])
    dest = city('', [])
    s = input("start city:")
    d = input("destination city:")
    # Resolve the typed names to city objects
    for i in range(len(map)):
        if map[i].name == s:
            start = map[i]
        if map[i].name == d:
            dest = map[i]
    next = start    # current node (shadows the builtin `next`)
    path.append(start.name)
    lengthCount.append(length)
    while(next != dest):
        f_x = next.getF_x(length)
        # Mask neighbors that are already on the path
        checkPath(next.linkedCity, path, f_x)
        minF = findIndex(min(f_x), f_x)
        next_city_Name = next.getLinkedName(minF)
        temp = next.getLinkedNDis(minF)
        path.append(next_city_Name)
        length += temp
        lengthCount.append(length)
        next = nameFind(next_city_Name, map)
    # Pretty-print the path with the running cost at each hop
    print("The path is:")
    string = ''
    for i in range(len(path)):
        string += path[i]
        string += '//using:'
        string += str(lengthCount[i])
        if i != len(path) - 1:
            string += " -> "
    print(string)
def main():
    """Entry point: run the interactive path finder."""
    findPath()


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
# mirror_facedetect.py: Detect faces in the view of webcam
import cv2
import numpy
import face_recognition
import sys
import os
# Increase this number to speed up and lower the accuracy
# (frames are downscaled by 1/k before face detection)
k=4
font=cv2.FONT_HERSHEY_PLAIN
# Allow users to change k while running (first command-line argument)
if len(sys.argv)>=2:
    k=int(sys.argv[1])
# BGR rectangle colors, cycled per detected face
colors=[(0,0xFF,0),(0xFF,0xFF,0),(0xFF,0,0)]
def mark_faces(frm):
    """
    Detect faces in a BGR frame and draw a colored rectangle around each.

    :param frm: a BGR image as returned by cv2.VideoCapture.read
    :return: the same frame with one rectangle per detected face
    """
    smaller_frm=cv2.resize(frm,(0,0),fx=1/k,fy=1/k) # Downscale by k to increase speed
    # Convert BGR (OpenCV uses) to RGB to reduce time on dealing with colors
    rgb_frm=smaller_frm[:,:,::-1]
    faces=face_recognition.face_locations(rgb_frm)
    for i,face in enumerate(faces):
        top,right,bottom,left=face
        # Coordinates were computed on the downscaled image: scale back up by k
        frm=cv2.rectangle(frm,(k*left,k*top),(k*right,k*bottom),colors[i%3],3)
    os.system("clear") # To make the screen not that messy
    print("Found {} faces".format(len(faces)))
    return frm
camera=cv2.VideoCapture(0)
# Read frames until the camera closes, a grab fails, or the user presses 'q'
while camera.isOpened():
    ret, frm=camera.read()
    if ret==False:
        # Frame grab failed: stop the loop
        break
    frm=cv2.flip(frm,1) # Mirror the image horizontally
    # This is just to put rectangle to highlight the faces
    frm=mark_faces(frm)
    cv2.imshow('The Mirror',frm)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from networkapi.ambiente.models import Ambiente
from networkapi.ambiente.models import EnvironmentVip
from networkapi.exception import EnvironmentVipNotFoundError
from networkapi.requisicaovips.models import DsrL3_to_Vip
from networkapi.requisicaovips.models import RequisicaoVips
from networkapi.requisicaovips.models import VipPortToPool
class DsrL3toVipSerializer(serializers.ModelSerializer):
    """Serializer for the DSR L3 to VIP association (legacy DRF 2.x style)."""

    id = serializers.Field()
    requisicao_vip = serializers.PrimaryKeyRelatedField()
    id_dsrl3 = serializers.Field()

    class Meta:
        model = DsrL3_to_Vip
        # NOTE(review): only 'id_dsrl3' is exposed even though 'id' and
        # 'requisicao_vip' are declared above -- confirm this is intended.
        fields = (
            'id_dsrl3',
        )
class VipPortToPoolSerializer(serializers.ModelSerializer):
    """Serializer for the VIP port to server-pool binding (legacy DRF 2.x style)."""

    id = serializers.Field()
    requisicao_vip = serializers.PrimaryKeyRelatedField(
        many=False,
        read_only=True
    )

    class Meta:
        model = VipPortToPool
        fields = (
            'id',
            'requisicao_vip',
            'port_vip',
        )

    def validate_port_vip(self, attrs, attr_name):
        """
        Field-level validation (old DRF signature): ensure the VIP port is a
        number within the valid TCP/UDP range 1-65535.
        """
        try:
            # A missing value defaults to 0, which also fails the range check
            port_vip = int(attrs.get(attr_name, 0))
            if port_vip > 65535 or 1 > port_vip:
                raise serializers.ValidationError(
                    u'The port number must be between 1 and 65535.')
            return attrs
        except ValueError:
            raise serializers.ValidationError(u'The port must be a number.')
class RequestVipSerializer(serializers.ModelSerializer):
    """
    Serializer for VIP requests (legacy DRF 2.x style).

    NOTE(review): the `except X, e` syntax in validate() is Python 2 only;
    this module cannot run unmodified on Python 3.
    """

    ip = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    ipv6 = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    trafficreturn = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    healthcheck_expect = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    cliente = serializers.CharField(
        required=False
    )
    rule = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    rule_applied = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    rule_rollback = serializers.PrimaryKeyRelatedField(
        many=False,
        required=False
    )
    areanegocio = serializers.CharField(
        required=True
    )
    nome_servico = serializers.CharField(
        required=True
    )
    host = serializers.CharField(
        required=True
    )
    vip_ports_to_pools = VipPortToPoolSerializer(
        many=True,
        required=True
    )
    finalidade = serializers.CharField(
        required=True
    )
    ambiente = serializers.CharField(
        required=True
    )
    dsrl3id = DsrL3toVipSerializer()

    def validate(self, attrs):
        """
        Check the Environment Vip is valid.

        Also requires at least one of 'ip' / 'ipv6' to be present.
        """
        try:
            finalidade = attrs.get('finalidade')
            cliente = attrs.get('cliente')
            ambiente = attrs.get('ambiente')
            # Raises EnvironmentVipNotFoundError when the triple is unknown
            EnvironmentVip.get_by_values(
                finalidade,
                cliente,
                ambiente
            )
            ip_to_vip = attrs.get('ip') or attrs.get('ipv6')
            if not ip_to_vip:
                raise serializers.ValidationError(
                    'Is required to enter any Ip')
        except EnvironmentVipNotFoundError, exception:
            raise serializers.ValidationError(exception.message)
        return attrs

    class Meta:
        model = RequisicaoVips
        depth = 1
        fields = (
            'id', 'ip', 'ipv6', 'l7_filter',
            'filter_applied', 'filter_rollback',
            'filter_valid', 'applied_l7_datetime',
            'healthcheck_expect', 'rule', 'rule_applied',
            'rule_rollback', 'areanegocio', 'nome_servico',
            'host', 'vip_ports_to_pools', 'finalidade',
            'cliente', 'ambiente', 'trafficreturn'
        )
class EnvironmentOptionsSerializer(serializers.ModelSerializer):
    """Minimal serializer exposing only an environment's id and name."""

    name = serializers.Field()

    class Meta:
        model = Ambiente
        fields = (
            'id',
            'name'
        )
|
"""
Strategy. A pattern that allows separating an action (strategy) from its user. You can define many strategies that can be used interchangeably.
here:
Strategy (and subclasses) - strategies for performing an action
ActionTaker - user of strategies
"""
from abc import ABC, abstractmethod
class Strategy(ABC):
    """Abstract base class that every concrete strategy must implement."""

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def action(self):
        # Perform the strategy's work and return its result
        pass
class Strategy1(Strategy):
    """Concrete strategy number one."""

    def __init__(self):
        super().__init__()

    def action(self):
        """Print a message and return 1."""
        print('I do some kind of stuff.')
        return 1
class Strategy2(Strategy):
    """Concrete strategy number two."""

    def __init__(self):
        super().__init__()

    def action(self):
        """Print a message and return 2."""
        print('I do other kind of stuff.')
        return 2
class Strategy3(Strategy):
    """Concrete strategy that combines two kinds of work."""

    def __init__(self):
        super().__init__()

    def action(self):
        """Announce the work and return both identifiers as a list."""
        print('I do two kinds of stuff.')
        return [1, 2]
class ActionTaker:
    """User of strategies: holds one Strategy and delegates action() to it."""

    def __init__(self):
        self._strategy = None

    @property
    def strategy(self):
        """Currently selected strategy (None until one is assigned)."""
        return self._strategy

    @strategy.setter
    def strategy(self, strategy: Strategy):
        # The original had three byte-identical isinstance branches; a
        # single tuple check accepts exactly the same types.
        if isinstance(strategy, (Strategy1, Strategy2, Strategy3)):
            print(f'Strategy changed from {type(self.strategy)} to {type(strategy)}')
            self._strategy = strategy
        else:
            print('Incorrect strategy.')

    def action(self):
        """Run the current strategy's action; returns None if none is set."""
        if self.strategy is not None:
            rv = self.strategy.action()
            return rv
'''# uncomment for demonstration
at = ActionTaker()
s1 = Strategy1()
s2 = Strategy2()
s3 = Strategy3()
at.strategy = s1
print(at.action())
at.strategy = s2
print(at.action())
at.strategy = s3
print(at.action())
''' |
#!/usr/local/bin/python3
# There is a list that contains integers, list of integers or nested lists. Put all integer values in one list.
# Input data: A nested list or simple list.
# Output data: One-dimensional list.
def checkio(data):
    """Flatten an arbitrarily nested list of integers into one flat list.

    Parameters
    ----------
    data : list
        Items are ints or (possibly nested) lists of ints.

    Returns
    -------
    list of int
        All integers in depth-first, left-to-right order.
    """
    answer = []
    # Iterate directly: data is never mutated, so the original data[:]
    # copy was unnecessary.
    for item in data:
        if isinstance(item, int):
            answer.append(item)
        else:
            # item is a (possibly nested) list -- flatten recursively.
            answer.extend(checkio(item))
    return answer
#These "asserts" using only for self-checking and not necessary for auto-testing
# Self-checks (run only when executed as a script, not on import).
if __name__ == '__main__':
    assert checkio([1, 2, 3]) == [1, 2, 3], 'First example'
    assert checkio([1, [2, 2, 2], 4]) == [1, 2, 2, 2, 4], 'Second example'
    assert checkio([[[2]], [4, [5, 6, [6], 6, 6, 6], 7]]) \
        == [2, 4, 5, 6, 6, 6, 6, 6, 7], 'Third example'
|
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class Category(models.Model):
    """Product category (flat, name-only taxonomy)."""
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Product(models.Model):
    """A sellable item with price, stock count and gallery of images."""
    name = models.CharField(max_length=50)
    price = models.DecimalField(decimal_places=2, max_digits=10)
    # Deleting a category deletes its products too.
    category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='products')
    # Units currently in stock; checked by Order/OrderItem validation.
    inventory = models.PositiveSmallIntegerField()
    date_added = models.DateField(auto_now_add=True)
    description = models.TextField(null=True, blank=True)
    # Images are stored as external URLs, not uploaded files.
    main_image = models.URLField()
    def __str__(self):
        return f"{self.name} - {self.category.name} - {self.price} - {self.inventory}"
class Image(models.Model):
    """Additional gallery image (URL) attached to a product."""
    img = models.URLField()
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
    def __str__(self):
        return self.product.name
class Country(models.Model):
    """Country lookup table used by Address."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Address(models.Model):
    """Delivery address belonging to a user; 'active' soft-disables it."""
    # DO_NOTHING: countries are reference data and not expected to vanish.
    country = models.ForeignKey(Country, on_delete=models.DO_NOTHING, related_name="locations")
    city = models.CharField(max_length=50)
    street = models.CharField(max_length=150)
    phone = models.CharField(max_length=15)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses')
    active = models.BooleanField(default=True)
    def __str__(self):
        return f"{self.user.username} - {self.country.name} - {self.city}"
class Order(models.Model):
    """A purchase by a user; line items live in the related OrderItem rows."""

    buyer = models.ForeignKey(User, on_delete=models.CASCADE, related_name='orders')
    # auto_now: timestamp refreshes on every save.
    datetime = models.DateTimeField(auto_now=True)
    total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
    is_paid = models.BooleanField(default=False)
    address = models.ForeignKey(Address, on_delete=models.DO_NOTHING, related_name="deliveries", null=True, blank=True)

    def clean(self):
        """Run per-item validation on every related OrderItem."""
        if not self.items.exists():
            return
        for order_item in self.items.all():
            order_item.clean()

    def items_in_stock(self):
        """Return True when every item's quantity fits its product inventory."""
        return all(
            entry.quantity <= entry.product.inventory
            for entry in self.items.all()
        )

    def __str__(self):
        return f"{self.buyer.username} on {self.datetime} total: {self.total}"
class OrderItem(models.Model):
    """A single product line inside an Order."""
    product = models.ForeignKey(Product, on_delete=models.DO_NOTHING, related_name="items")
    quantity = models.PositiveSmallIntegerField(default=1)
    order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name="items")
    def clean(self):
        # Cap quantity at the available inventory.
        # NOTE(review): clean() persisting via save() is unusual -- Django
        # validation hooks normally don't write to the DB; confirm intent.
        if self.quantity > self.product.inventory:
            self.quantity = self.product.inventory
            self.save()
    def update_quantity(self, created, quantity, increased):
        # created: set an absolute quantity; otherwise adjust up or down.
        # NOTE(review): no lower bound here -- a large decrease would
        # underflow the PositiveSmallIntegerField at save time; verify
        # callers guard against that.
        if created:
            self.quantity = quantity
        elif increased:
            self.quantity += quantity
        else:
            self.quantity -= quantity
        self.save()
    def __str__(self):
        return f"{self.order.buyer.username} item: {self.product.name} qty: {self.quantity}"
|
# -*- coding: utf-8 -*-
def funcName(arg1, arg2):
    """Print the two given values separated by a single space."""
    print(arg1, arg2)
def add(num1, num2):
    """Return the sum of num1 and num2."""
    result = num1 + num2
    return result
# Demo calls: exercise the helpers above and the builtin len().
funcName("The number is ",125)
print(add(12,90))
print(len([12,15,56]))
def ourOwnLenFunc(l):
    """Re-implementation of len(): count the items of any iterable."""
    total = 0
    for _ in l:
        total += 1
    return total
"""
Author:
Liwei Lu 20294187
Xinyi Ai 37489204
Yiming Tang 60899836
Jiadong Zhang 51693147
"""
import re
import json
class Posting:
    """
    Context of a term occurrence in a document.

    docid is the id of the document, from 0 to n-1 (n = total documents)
    tf is the raw term frequency
    tfidf is the relevance score; all orderings below compare it only
    """
    def __init__(self, docid=-1, tf=0, tfidf=0.0):
        self.docid = docid
        self.tf = tf
        self.tfidf = tfidf  # use word frequency here for now

    @classmethod
    def read(cls, str_representation: str):
        """Parse a posting from its str() form, e.g. '[3, 2, 0.5]'."""
        data = re.findall("[0-9.]+", str_representation)
        # BUGFIX: construct via cls(...) instead of hard-coded Posting(...)
        # so subclasses parse into their own type.
        return cls(int(data[0]), int(data[1]), float(data[2]))

    @classmethod
    def read_posting_list(cls, posting_list: str):
        """Parse a ';'-separated posting list; skips empty/short fragments."""
        postings = re.split(";", posting_list.strip())
        return [cls.read(p) for p in postings if len(p) > 2]

    def __str__(self):
        return str([self.docid, self.tf, self.tfidf])

    # Rich comparisons order postings by tfidf only: two postings with
    # equal scores but different docids compare equal.  As before,
    # defining __eq__ leaves instances unhashable (__hash__ is None).
    def __lt__(self, other):
        return self.tfidf < other.tfidf

    def __le__(self, other):
        return self.tfidf <= other.tfidf

    def __eq__(self, other):
        return self.tfidf == other.tfidf

    def __ge__(self, other):
        return self.tfidf >= other.tfidf

    def __gt__(self, other):
        return self.tfidf > other.tfidf

    def __ne__(self, other):
        return self.tfidf != other.tfidf
if __name__ == '__main__':
    # Demo: parse a small posting list instead of indexing an empty one
    # (the original did postings[1] on [] and always raised IndexError).
    postings = Posting.read_posting_list("[0, 1, 0.5];[1, 2, 0.8]")
    if len(postings) > 1:
        print(postings[1])
|
#!/usr/bin/python2.7
import sys,math,os,string,random
from subprocess import Popen, PIPE
"""
takes cell evol formatted files and return fasta files of a certain time step
argument should be file and time step
"""
#### ARGUMENTS ####
# Expect exactly two arguments (in either order): an integer time step
# and a path to the cell-evol formatted data file.
if len(sys.argv)!=3:
    print "Specify some path/to/filename.txt and the time step"
    sys.exit(1)
else:
    # Whichever argument parses as int becomes the time step; the other
    # becomes the file path.  NOTE(review): if both (or neither) parse as
    # int, one of whichtime/filepath stays undefined and the prints below
    # raise NameError -- confirm inputs upstream.
    for arg in sys.argv[1:]:
        try:
            whichtime=int(arg)
        except:
            filepath=arg
print "Time we fetch:", whichtime
print "From file:", filepath
#open filename once to see what is the first time step... for now it is typically zero but in the future?
# Collect every line whose leading integer equals the requested time step.
# Lines are assumed sorted by time, so we can stop at the first later one.
ldata=[]
success=False
with open(filepath,"r") as fin:
    for line in fin:
        line=line.split()
        time=int(line[0])
        if time<whichtime: continue
        if time>whichtime:
            # Past the requested step: report when it never appeared.
            if success==False:
                print "time", whichtime,"not found in file", filepath
            break
        if time==whichtime:
            if success!=True:
                success=True;
            ldata.append(line)
random.shuffle(ldata)
#we randomise the order of the list
# Emit FASTA: one record per matching line.  The sequence maps character
# '0' -> 'A' and anything else -> 'G' over fields 3 and 4 concatenated.
outfile=".".join(filepath.split('.')[:-1]) +"_fasta_time"+str(whichtime)+".fa"
with open(outfile,"w") as fout:
    for i,line in enumerate(ldata):
        fout.write(">"+str(i)+"_time"+str(whichtime)+"_tau"+str(line[1])+"\n")
        bla="".join([ 'A' if x=='0' else 'G' for x in line[3]+line[4] ])
        fout.write(bla+"\n")
print "File generated:", outfile
|
"""gin-config compatibility"""
import os
import sys
# Optional dependency probe: gin (and absl) may be absent; record that in
# _HAS_GIN so the module still imports and `configurable` below can
# degrade gracefully.
try:
    import gin
    from absl import flags
    import divik.core._gin_bugfix
    _HAS_GIN = True
except ImportError:
    _HAS_GIN = False
MISSING_GIN_ERROR = """
gin-config package missing. You should install divik with appropriate extras:
pip install divik[gin]
"""
def parse_gin_args():
    """Parse arguments with gin-config

    If you have `gin` extras installed, you can call `parse_gin_args`
    to parse command line arguments or config files to configure
    your runs.

    Command line arguments are used like `--param='DiviK.k_max=50'`.
    Config files are passed via `--config=path.gin`.

    More about format of `.gin` files can be found here:
    https://github.com/google/gin-config

    Raises ImportError when the `gin` extras are not installed.
    """
    try:
        import gin
        from absl import flags
    except ImportError as ex:
        raise ImportError(MISSING_GIN_ERROR) from ex
    # NOTE(review): flags are registered at call time, so calling this
    # twice in one process raises absl's duplicate-flag error -- confirm
    # single-call usage.
    flags.DEFINE_multi_string(
        'config', None, 'List of paths to the config files.')
    flags.DEFINE_multi_string(
        'param', None, 'Newline separated list of Gin parameter bindings.')
    FLAGS = flags.FLAGS
    FLAGS(sys.argv)
    gin.parse_config_files_and_bindings(FLAGS.config, FLAGS.param)
def dump_gin_args(destination):
    """Dump gin-config effective configuration

    If you have `gin` extras installed, you can call `dump_gin_args`
    save effective gin configuration to a file.
    """
    try:
        import gin
    except ImportError as ex:
        raise ImportError(MISSING_GIN_ERROR) from ex
    # Write the operative config next to the run's other outputs.
    config_path = os.path.join(destination, 'config.gin')
    with open(config_path, 'w') as config_file:
        config_file.write(gin.operative_config_str())
if _HAS_GIN:
    configurable = gin.configurable
else:
    def configurable(name_or_fn=None, *args, **kwargs):
        """No-op stand-in for gin.configurable when gin is not installed.

        Supports all decorator call styles: bare ``@configurable``,
        factory ``@configurable(...)``/``@configurable('name')``, and a
        direct call on a function.
        """
        # Factory usage (no function yet, e.g. a name string was passed):
        # return a pass-through decorator.  The original returned the
        # string itself, which would replace the decorated function.
        if name_or_fn is None or not callable(name_or_fn):
            return lambda x: x
        return name_or_fn
|
from django.contrib.auth.models import User
from django.db import models
class Profile(models.Model):
    """Extra per-user data: avatar image and free-form location."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
    image = models.ImageField(upload_to='images/profile', default='images/profile/default.png')
    location = models.CharField(max_length=30, blank=True, null=True)
    def __str__(self):
        return self.user.username
class Contact(models.Model):
    """Per-user contact book: friends, blocked users and presence state."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='contact')
    people = models.ManyToManyField('self', blank=True, related_name='contacts')
    blocked = models.ManyToManyField('self', blank=True, related_name='blocked_people')
    logged_in = models.BooleanField(default=False)
    # auto_now: refreshed on every save(), not only at login -- the name
    # overstates what it tracks.  NOTE(review): confirm intent.
    last_login = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.user.username
    def get_full_name(self):
        # Capitalized "First Last" built from the linked auth User.
        first_name = self.user.first_name.capitalize()
        last_name = self.user.last_name.capitalize()
        full_name = f'{first_name} {last_name}'
        return full_name
class FriendRequest(models.Model):
    """A friend request sent from one Contact to another."""
    sender = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='sender')
    recipient = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='recipient')
    message = models.TextField()
    accepted = models.BooleanField(default=False)
    class Meta:
        # NOTE(review): including 'accepted' in the uniqueness constraint
        # still allows duplicate pending/accepted pairs for the same two
        # contacts; the two tuples also cover both field orders -- confirm
        # the intended dedup semantics.
        unique_together = [['sender', 'recipient', 'accepted'], ['recipient', 'sender', 'accepted']]
    def __str__(self):
        return '%s %s' % (self.sender, self.recipient)
from soundgenerator import downloadFile, getGoogleSpeechURL, speakSpeechFromText, getMashedText
from tweet import Geocode_Search
import time
# Poll geocoded tweets near a fixed point once a minute and synthesize
# speech files named tts.mp3, tts1.mp3, tts11.mp3 per cycle.
last_id = 0
fileName = 'tts'
while 1:
    fileName = 'tts'
    # results[0] is the tweet list, results[1] the newest tweet id.
    results = Geocode_Search(34.1844709,-118.131809,'1km',last_id)
    print len(results[0])
    for x in range(0,3):
        # NOTE(review): duplicated assignment left as-is; both names bind
        # the same mashed-up text.
        finaltext = finaltext = getMashedText(results[0])
        speakSpeechFromText(finaltext,fileName + '.mp3')
        print 'generated ' + fileName + '.mp3'
        fileName += '1'
    last_id = results[1]
    time.sleep(60)
|
from ariadne import UnionType
from src.model import User,UserFailedResponse
# GraphQL union resolver: maps a resolved Python object to the concrete
# type name inside the "UserResult" union.
error = UnionType("UserResult")
@error.type_resolver
def resolve_error_type(obj, *_):
    # print(obj)
    if isinstance(obj,User.User):
        return 'User'
    if isinstance(obj,UserFailedResponse.UserFailedResponse):
        return 'UserFailedResponse'
    # None tells ariadne the type could not be resolved.
    return None
|
# -*- coding: utf-8 -*-
""" This module works as a function generator
It includes:
Defined functions for several waveforms incorporating a switcher to make choosing easier.
A class for evaluating the multiple waveforms
A class for calculating fourier partial sums and evaluating it.
"""
import numpy as np
from scipy.signal import sawtooth, square
def create_sine(time, freq, *args):
    """ Creates sine wave

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of sine wave
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated sine wave of given frequency
    """
    angular_phase = 2 * np.pi * time * freq
    return np.sin(angular_phase)
def create_ramps(time, freq, type_of_ramp=1):
    """ Creates ascending/descending sawtooth or triangle waves.

    Thin wrapper over scipy.signal.sawtooth, shared by
    create_sawtooth_up, create_sawtooth_down and create_triangular.

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of created wave
    type_of_ramp : {0, 1, .5}
        sawtooth 'width': 1 gives a rising ramp, 0 a falling ramp,
        .5 a symmetric triangle.

    Returns
    -------
    Evaluated sawtooth or triangle wave of given frequency
    """
    phase = 2 * np.pi * time * freq
    return sawtooth(phase, type_of_ramp)
def create_sawtooth_up(time, freq, *args):
    """ Creates sawtooth waveform with positive slope

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of sawtooth wave
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated sawtooth waveform with positive slope and given frequency
    """
    return create_ramps(time, freq, 1)
def create_sawtooth_down(time, freq, *args):
    """ Creates sawtooth waveform with negative slope

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of sawtooth wave
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated sawtooth waveform with negative slope and given frequency
    """
    return create_ramps(time, freq, 0)
def create_triangular(time, freq, *args):
    """ Creates a triangular wave with symmetric ramps

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of triangular wave
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated triangular waveform with given frequency
    """
    return create_ramps(time, freq, .5)
def create_square(time, freq, dutycycle=.5, *args):
    """ Creates a square wave using scipy.signal.square.

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of square wave
    dutycycle : scalar or numpy array
        Duty cycle, default 0.5.  Currently ignored (see note below).
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated square waveform with given frequency
    """
    #dutycycle not implemented due to bug
    phase = 2 * np.pi * time * freq
    return square(phase)
def create_custom(time, freq, *args):
    """ Evaluates a user-provided waveform function.

    Gives the custom function the same (time, freq, *params) calling
    convention as the built-in creators, for compatibility with modules
    like PyAudioWave.

    Parameters
    ----------
    time : array
        time vector in which to evaluate the funcion
    freq : int or float
        expected frequency of custom wave
    args : (*params, custom_func)
        the custom function goes last; everything before it is forwarded
        to that function as extra parameters

    Returns
    -------
    The custom function evaluated at (time, freq, *params)
    """
    # Last argument is the function itself; the rest are its parameters.
    *params, custom_func = args
    return custom_func(time, freq, *params)
def create_sum(time, freq, amp, *args):
    """ Creates an arbitrary, normalized sum of sine waves.

    Parameters
    ----------
    time : array or scalar
        time vector (or instant) in which to evaluate the funcion
    freq : array-like
        frequencies of the summed sine components
    amp : empty tuple or array-like
        if empty, all components get equal weight; otherwise must be the
        same length as freq
    args : dummy
        used to give compatibility with other functions

    Returns
    -------
    Evaluated sum of sine waves, normalized by the total amplitude

    Raises
    ------
    ValueError
        when freq and amp have different lengths
    """
    if len(amp) == 0:
        # amp arrives as an empty tuple when not given: weight equally.
        amp = np.ones(len(freq))
    if len(freq) != len(amp):
        # Fixed typos in the original message ("should e the same leght").
        raise ValueError('Amplitude and frequency arrays should be the same length!')
    # Handle both time vectors and scalars.
    if hasattr(time, '__len__'):
        time = np.array(time)
        wave = np.zeros(time.shape)
    else:
        wave = 0
    for f, a in zip(freq, amp):
        wave += np.sin(2 * np.pi * time * f) * a
    # Normalize so the component amplitudes sum to 1:
    wave /= sum(amp)
    return wave
def given_waveform(input_waveform):
    """ Maps a waveform name to its creator function.

    Unknown names resolve to a function that raises ValueError listing
    the accepted inputs.

    Parameters
    ----------
    input_waveform : string
        name of desired function to generate

    Returns
    -------
    Chosen waveform function
    """
    creators = {
        'sine': create_sine,
        'sawtoothup': create_sawtooth_up,
        'sawtoothdown': create_sawtooth_down,
        'ramp': create_sawtooth_up,  # redirects to sawtoothup
        'sawtooth': create_sawtooth_up,  # redirects to sawtoothup
        'triangular': create_triangular,
        'square': create_square,
        'custom': create_custom,
        'sum': create_sum
    }
    return creators.get(input_waveform,
                        wrong_input_build(list(creators.keys())))
def wrong_input_build(input_list):
    """Build a callable that always raises ValueError listing valid inputs."""
    def wrong_input(*args):
        raise ValueError(
            'Given waveform is invalid. Choose from following list:{}'.format(input_list))
    return wrong_input
#%% Clase que genera ondas
class Wave:
    '''Generates an object with a single method: evaluate(time).

    Attributes
    ----------
    waveform : str {'sine', 'sawtoothup', 'sawtoothdown', 'ramp', 'triangular', 'square', 'custom'} optional
        waveform type. If 'custom', function should acept inputs
        (time, frequency, *args). Default = 'sine'
    frequency : float (optional)
        wave frequency
    amplitude : float (optional)
        wave amplitude

    Methods
    ----------
    evaluate(time)
        returns evaluated function type
    '''
    def __init__(self, waveform='sine', frequency=400, amplitude=1, *args):
        ''' See class attributes.
        If wave is 'custom', the custom function should be passed in *args.
        '''
        self._frequency = frequency
        self.amplitude = amplitude
        self.waveform = given_waveform(waveform)
        self.extra_args = args

    @property
    def frequency(self):
        '''Frequency getter: returns frequency of wave.

        If frequency is an iterable, as it can be for a sum or a custom
        function, returns the first value. Kept for backwards
        compatibility from when sum and custom were added.'''
        if isinstance(self._frequency, (list, tuple, np.ndarray)):
            return self._frequency[0]
        else:
            return self._frequency

    @frequency.setter
    def frequency(self, value):
        '''Frequency setter: sets value as self._frequency.'''
        self._frequency = value

    def evaluate(self, time, *args):
        """Takes in an array-like object to evaluate the funcion in.

        Parameters
        ----------
        time : array
            time vector in which to evaluate the funcion
        args : tuple (optional)
            extra arguments to be passed to evaluated function

        Returns
        -------
        Evaluated waveform
        """
        if isinstance(self.amplitude, (list, tuple, np.ndarray)):
            # Per-component amplitudes ('sum' waveforms): the creator
            # normalizes internally, so the vector is passed through.
            wave = self.waveform(time, self._frequency, self.amplitude)
        else:
            # BUGFIX: extra_args used to be passed as one tuple argument,
            # so e.g. a custom function arrived wrapped in a tuple and was
            # not callable.  Unpack it instead.
            wave = self.waveform(time, self._frequency, *args, *self.extra_args) * self.amplitude
        return wave
#%% Fourier series classfor wave generator
def fourier_switcher(input_waveform):
    """ Maps a waveform name to its fourier coefficient builder.

    Unknown names resolve to a function that raises ValueError listing
    the accepted inputs.

    Parameters
    ----------
    input_waveform : string
        name of desired function to generate

    Returns
    -------
    Chosen coefficient-builder function
    """
    builders = {
        'square': square_series,
        'triangular': triangular_series,
        'sawtooth': sawtooth_series,
        'custom': custom_series}
    return builders.get(input_waveform,
                        wrong_input_build(list(builders.keys())))
def square_series(order, freq, *args):
    """ Fourier partial-sum parameters for a square wave.

    Only odd harmonics contribute, with amplitude 1/n.

    Parameters
    ----------
    order : int
        order up to which to calculate fourier partial sum
    freq : float
        fundamental frequency of generated fourier wave

    Returns
    -------
    amps, freqs
        amplitude and frequency vectors used in calculation of partial sum
    """
    amps = [1.0 / n for n in range(1, 2 * order + 1, 2)]
    freqs = np.arange(1, 2 * order + 1, 2) * freq
    return amps, freqs
def sawtooth_series(order, freq, *args):
    """ Fourier partial-sum parameters for a sawtooth wave.

    Every harmonic contributes, with amplitude 1/n.

    Parameters
    ----------
    order : int
        order up to which to calculate fourier partial sum
    freq : float
        fundamental frequency of generated fourier wave

    Returns
    -------
    amps, freqs
        amplitude and frequency vectors used in calculation of partial sum
    """
    amps = [1.0 / n for n in range(1, order + 1)]
    freqs = np.arange(1, order + 1) * freq
    return amps, freqs
def triangular_series(order, freq, *args):
    """ Fourier partial-sum parameters for a triangular wave.

    Only odd harmonics contribute, with alternating sign and
    amplitude 1/n**2.

    Parameters
    ----------
    order : int
        order up to which to calculate fourier partial sum
    freq : float
        fundamental frequency of generated fourier wave

    Returns
    -------
    amps, freqs
        amplitude and frequency vectors used in calculation of partial sum
    """
    amps = []
    for k, n in enumerate(range(1, 2 * order + 1, 2)):
        sign = -1.0 if k % 2 else 1.0
        amps.append(sign / n ** 2)
    freqs = np.arange(1, 2 * order + 1, 2) * freq
    return amps, freqs
def custom_series(order, freq, amp, *args):
    """ Fourier partial-sum parameters for a custom series.

    Parameters
    ----------
    order : dummy
        ignored; the order is taken from len(amp[0]). Kept for
        compatibility with the other *_series builders.
    freq : float
        fundamental frequency of generated fourier wave
    amp : tuple
        amplitude vectors of the cosine and sine terms

    Returns
    -------
    amps, freqs
        the amplitude tuple (passed through unchanged) and the frequency
        vector used in calculation of the partial sum
    """
    n_terms = len(amp[0])
    harmonic_freqs = np.arange(1, n_terms + 1) * freq
    return amp, harmonic_freqs
class Fourier:
    '''Generates an object with a single method: evaluate(time).

    Attributes
    ----------
    waveform : str {'sawtooth', 'triangular', 'square', 'custom'}
        waveform type.
    wave : Wave object
        Wave instance containing a 'sum' waveform that implements the
        fourier series up to the given order.
    custom : bool
        decides whether the user has requested a custom series or not

    Methods
    ----------
    evaluate(time)
        returns evaluated fourier partial sum
    '''
    def __init__(self, waveform='square', frequency=400, order=5, *args):
        """Initializes class instance.

        Parameters
        ----------
        waveform : str {'sawtooth', 'triangular', 'square', 'custom'} (Optional)
            waveform type. Default: 'square'
        frequency : float (Optional)
            fundamental frequency of the constructed wave in Hz. Default: 400
        order : int (optional)
            order of the constructed fourier series, i.e. the series will
            be calculated up to the nth non zero term, with n=order.
        args : tuple (optional)
            if waveform is 'custom', a tuple of length 2, each element
            containing the amplitudes of the cosine and sine terms,
            respectively. Order will be ignored and will be assumed to be
            equal to len(amplitudes[0]).
        """
        self.waveform_maker = fourier_switcher(waveform)
        self._order = order #doesn't call setup_props because there's no frequency defined yet
        self.setup_props(frequency)
        self.extra_args = args
        self.custom = waveform=='custom'
    def setup_props(self, freq):
        '''Sets up frequencies, amplitudes and wave attributes for given freq.'''
        self.amplitudes, self._frequencies = self.waveform_maker(self.order, freq)
        self.wave = Wave('sum', self._frequencies, self.amplitudes)
    @property
    def frequency(self):
        '''Frequency getter: returns fundamental frequency of wave.'''
        return self._frequencies[0]
    @frequency.setter
    def frequency(self, value):
        '''Frequency setter: calculates the frequency vector for given
        fundamental frequency and order. Redefine Wave accordingly.'''
        self.setup_props(value)
    @property
    def order(self):
        '''Order getter: returns order of the last nonzero term in partial sum.'''
        return self._order
    @order.setter
    def order(self, value):
        '''Order setter: Calculates new appropiate frequency and amplitude
        vectors for given order value. Redefine Wave accordingly.'''
        self._order = value
        self.setup_props(self.frequency)
    def evaluate(self, time):
        """Takes in an array-like object to evaluate the funcion in.

        Parameters
        ----------
        time : array
            time vector in which to evaluate the funcion

        Returns
        -------
        Evaluated waveform
        """
        if self.custom:
            #missing support for custom phases
            # NOTE(review): the cosine terms are produced by shifting
            # *time* by pi/2, which equals a 90-degree phase shift only
            # for particular frequencies -- confirm this is intended.
            #cosine series:
            self.wave.amplitude = self.amplitudes[0]
            wave = self.wave.evaluate(time + np.pi *.5) * .5
            #sine series:
            self.wave.amplitude = self.amplitudes[1]
            wave += self.wave.evaluate(time) * .5
            return wave
        else:
            return self.wave.evaluate(time)
from pyforest import *
from bs4 import BeautifulSoup
import requests
from collections import OrderedDict
import json
import re
def getFGPlayerPages(urls):
    '''Takes in list of urls of FG leaderboards and returns list of urls of desired player pages'''
    player_pages = []
    # Fetch each leaderboard page and harvest the player links.
    for url in urls:
        response = requests.get(url)
        # Parser named explicitly for deterministic parsing (consistent
        # with getPageSoups below).
        allplayerssoup = BeautifulSoup(response.text, 'lxml')
        # Each leaderboard row links to one player page; collect the
        # href endings.  (Removed the original's unused `pageurls` list.)
        playertable = allplayerssoup.find('table', id="LeaderBoard1_dg1_ctl00").find('tbody').find_all('tr')
        for row in playertable:
            player_pages.append(row.find('a').get('href'))
    return player_pages
def getFGPlayerStats(playerurls):
    '''Takes in list of player urls and returns dataframe of the desired statistics.

    For each player page, scrapes five season-stat tables (standard,
    advanced, batted ball, etc.) for the 2014-2018 seasons, building one
    row per player-year.  NOTE(review): fields are matched to column
    names purely by append order -- if any table lacks a matching row,
    the zip below silently misaligns that entry; confirm the tables are
    always complete for qualified players.
    '''
    #create columns
    cols = ['Year','Player','Team','Games','PA','SB','ISO','Babip','CS','Spd','GB/FB',
            'LD','GB','FB','IFFB','HR/FB','IFH','BUH','Pull','Cent','Oppo','Soft','Med',
            'Hard','O-Swing','Z-Swing','Swing','O-Contact','Z-Contact','Contact','Zone']
    #create list to collect player-year dicts
    dictlist=[]
    for i in range(len(playerurls)):
        #create player page soup
        response=requests.get(f'https://www.fangraphs.com/{playerurls[i]}')
        playerpage=response.text
        playersoup=BeautifulSoup(playerpage)
        #iterate through years
        for year in range(2014,2019):
            #create list for each player-year
            entry = []
            #create counters to prevent there from being repeat for multiple teams cases
            counter1,counter2,counter3,counter4,counter5 = 0,0,0,0,0
            #iterate through rows in first table
            for row in playersoup.find('div',id='SeasonStats1_dgSeason11').find_all('tr'):
                #check existence, year, majors, counter
                # Skip header rows, non-matching years, minor-league rows
                # (team contains '('), and years already consumed.
                if (len(row.find_all('td')) == 0 or
                    not row.find_all('td')[0].find('a') or
                    row.find_all('td')[0].find('a').text != str(year) or
                    '(' in row.find_all('td')[1].text or
                    counter1 == year):
                    continue
                #append year to babip
                else:
                    entry.append(year)
                    entry.append(playersoup.find('div',id='content').find('h1').text)
                    entry.append(row.find_all('td')[1].text)
                    entry.append(int(row.find_all('td')[2].text))
                    entry.append(int(row.find_all('td')[3].text))
                    entry.append(int(row.find_all('td')[7].text))
                    entry.append(float(row.find_all('td')[10].text))
                    entry.append(float(row.find_all('td')[11].text))
                    counter1 = year
            #iterate through rows in second table
            for row in playersoup.find('div',id='SeasonStats1_dgSeason1').find_all('tr'):
                #check existence, year, majors, counter
                if (len(row.find_all('td')) == 0 or
                    not row.find_all('td')[0].find('a') or
                    row.find_all('td')[0].find('a').text != str(year) or
                    '(' in row.find_all('td')[1].text or
                    counter2 == year):
                    continue
                #append caught stealing
                else:
                    entry.append(int(row.find_all('td')[20].text))
                    counter2 = year
            #iterate through rows in third table
            for row in playersoup.find('div',id='SeasonStats1_dgSeason2').find_all('tr'):
                #check existence, year, majors, counter
                if (len(row.find_all('td')) == 0 or
                    not row.find_all('td')[0].find('a') or
                    row.find_all('td')[0].find('a').text != str(year) or
                    '(' in row.find_all('td')[1].text or
                    counter3 == year):
                    continue
                #append Spd
                else:
                    entry.append(float(row.find_all('td')[10].text))
                    counter3 = year
            #iterate through rows in fourth table
            for row in playersoup.find('div',id='SeasonStats1_dgSeason3').find_all('tr'):
                #check existence, year, majors, counter
                if (len(row.find_all('td')) == 0 or
                    not row.find_all('td')[0].find('a') or
                    row.find_all('td')[0].find('a').text != str(year) or
                    '(' in row.find_all('td')[1].text or
                    counter4 == year):
                    continue
                #append batted ball data
                else:
                    entry.append(float(row.find_all('td')[2].text.split()[0]))
                    entry.append(float(row.find_all('td')[3].text.split()[0]))
                    entry.append(float(row.find_all('td')[4].text.split()[0]))
                    entry.append(float(row.find_all('td')[5].text.split()[0]))
                    entry.append(float(row.find_all('td')[6].text.split()[0]))
                    entry.append(float(row.find_all('td')[7].text.split()[0]))
                    entry.append(float(row.find_all('td')[8].text.split()[0]))
                    entry.append(float(row.find_all('td')[9].text.split()[0]))
                    #check if there's not batted ball data, and if its missing input NaN
                    if not row.find_all('td')[10].text.isspace():
                        entry.append(float(row.find_all('td')[10].text.split()[0]))
                        entry.append(float(row.find_all('td')[11].text.split()[0]))
                        entry.append(float(row.find_all('td')[12].text.split()[0]))
                        entry.append(float(row.find_all('td')[13].text.split()[0]))
                        entry.append(float(row.find_all('td')[14].text.split()[0]))
                        entry.append(float(row.find_all('td')[15].text.split()[0]))
                    else:
                        # Six placeholder NaNs keep the column alignment.
                        for i in range(1,7):
                            entry.append(np.NaN)
                    counter4 = year
            #iterate through fifth table
            for row in playersoup.find('div',id='SeasonStats1_dgSeason7').find_all('tr'):
                #check existence, year, majors, counter
                if (len(row.find_all('td')) == 0 or
                    not row.find_all('td')[0].find('a') or
                    row.find_all('td')[0].find('a').text != str(year) or
                    '(' in row.find_all('td')[1].text or
                    counter5 == year):
                    continue
                #append plate discipline data
                else:
                    entry.append(float(row.find_all('td')[2].text.split()[0]))
                    entry.append(float(row.find_all('td')[3].text.split()[0]))
                    entry.append(float(row.find_all('td')[4].text.split()[0]))
                    entry.append(float(row.find_all('td')[5].text.split()[0]))
                    entry.append(float(row.find_all('td')[6].text.split()[0]))
                    entry.append(float(row.find_all('td')[7].text.split()[0]))
                    entry.append(float(row.find_all('td')[8].text.split()[0]))
                    counter5 = year
            #append entry to playerdf
            entrydict=OrderedDict(zip(cols,entry))
            dictlist.append(entrydict)
    return pd.DataFrame(dictlist)
def getPageSoups(urls):
    '''Gets Soups for Baseball Savant Pages'''
    # Fetch each url and parse its HTML with the lxml parser.
    soups = []
    for address in urls:
        markup = requests.get(address).text
        soups.append(BeautifulSoup(markup, 'lxml'))
    return soups
def getEVJsons(soups):
    '''Gets EV data.

    Extracts the `leaderboard_data` JS array embedded in script tag #9 of
    each page, strips the "href" HTML fragment from each record with a
    regex, and parses the rest as JSON.  NOTE(review): relies on the
    script-tag index and the `var leaderboard_data = [` marker staying
    stable on Baseball Savant -- brittle against site changes.
    '''
    dictlist = []
    for soup in soups:
        jsontext = str(soup.find_all('script')[9]).split('var leaderboard_data = [')[1].split(']')[0]
        individuals = jsontext.split(',{')
        for i in range(len(individuals)):
            # The split above drops the leading '{' of all but the first
            # record; restore it before json.loads.
            if i == 0:
                data = re.sub(r'(,"href"(.*)a>")*','',individuals[i])
            else:
                data = '{' + re.sub(r'(,"href"(.*)a>")*','',individuals[i])
            s=json.loads(data)
            dictlist.append(s)
    return pd.DataFrame(dictlist)
def getSSJsons(soups):
    '''Gets sprint speed data.

    Same extraction scheme as getEVJsons, but slices the embedded JS
    array by the first '[' / ']' pair of script tag #9 instead of a
    named marker.  NOTE(review): equally dependent on Baseball Savant's
    page structure; confirm the tag index before reuse.
    '''
    dictlist = []
    for soup in soups:
        jsontext = str(soup.find_all('script')[9].text.split('[')[1].split(']')[0])
        individual = jsontext.split(',{')
        for i in range(len(individual)):
            # Restore the '{' the split removed from all but the first
            # record, and drop the "href" HTML fragment.
            if i == 0:
                data = re.sub(r'(,"href"(.*)a>")*','',individual[i])
            else:
                data = '{' + re.sub(r'(,"href"(.*)a>")*','',individual[i])
            s=json.loads(data)
            dictlist.append(s)
    return pd.DataFrame(dictlist)
def getShiftData(url):
    """Scrape the Baseball Savant shift-search results into a DataFrame.

    Each result row yields the player name, the season (taken from the
    '- YYYY' suffix of the third cell) and the shift percentage from the
    fifth cell.
    """
    soup = BeautifulSoup(requests.get(url).text, 'lxml')
    columns = ['Player', 'Year', 'Shift%']
    rows = (soup.find('table', id='search_results')
                .find('tbody')
                .find_all('tr', class_='search_row'))
    records = []
    for row in rows:
        cells = row.find_all('td')
        values = [
            row.find('td', class_='player_name').text,
            cells[2].text.split('- ')[1],
            float(cells[4].text),
        ]
        records.append(OrderedDict(zip(columns, values)))
    return pd.DataFrame(records)
if __name__ == '__main__':
    # FanGraphs qualified-batter leaderboard pages, seasons 2014-2018.
    FG_leaderboard_urls = ['https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=1_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=2_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=3_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=4_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=5_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=6_50',
                           'https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2018&month=0&season1=2014&ind=0&team=&rost=&age=&filter=&players=&startdate=&enddate=&page=7_50']
    player_urls = getFGPlayerPages(FG_leaderboard_urls)
    FG_df = getFGPlayerStats(player_urls)
    FG_df.to_csv('../data/fgplayerdf.csv')
    # Baseball Savant exit-velocity leaderboards, seasons 2015-2019.
    EV_urls = ['https://baseballsavant.mlb.com/statcast_leaderboard?year=2019&abs=50&player_type=resp_batter_id',
               'https://baseballsavant.mlb.com/statcast_leaderboard?year=2018&abs=50&player_type=resp_batter_id',
               'https://baseballsavant.mlb.com/statcast_leaderboard?year=2017&abs=50&player_type=resp_batter_id',
               'https://baseballsavant.mlb.com/statcast_leaderboard?year=2016&abs=50&player_type=resp_batter_id',
               'https://baseballsavant.mlb.com/statcast_leaderboard?year=2015&abs=50&player_type=resp_batter_id']
    # BUG FIX: the original called getPageSoups(urls) with the undefined name
    # `urls` and the undefined function getJsons(); both raised NameError.
    EV_soups = getPageSoups(EV_urls)
    EV_df = getEVJsons(EV_soups)
    EV_df.to_csv('../data/EVdf.csv')
    # Baseball Savant sprint-speed leaderboards, seasons 2015-2019.
    SS_urls = ['https://baseballsavant.mlb.com/sprint_speed_leaderboard?year=2019&position=&team=&min=10',
               'https://baseballsavant.mlb.com/sprint_speed_leaderboard?year=2018&position=&team=&min=10',
               'https://baseballsavant.mlb.com/sprint_speed_leaderboard?year=2017&position=&team=&min=10',
               'https://baseballsavant.mlb.com/sprint_speed_leaderboard?year=2016&position=&team=&min=10',
               'https://baseballsavant.mlb.com/sprint_speed_leaderboard?year=2015&position=&team=&min=10']
    # BUG FIX: same undefined-name pair here (urls / getJsons); the sprint
    # speed pages must go through getSSJsons, not the EV parser.
    SS_soups = getPageSoups(SS_urls)
    SS_df = getSSJsons(SS_soups)
    SS_df.to_csv('../data/SSdf.csv')
    shift_url = 'https://baseballsavant.mlb.com/statcast_search?hfPT=&hfAB=&hfBBT=&hfPR=&hfZ=&stadium=&hfBBL=&hfNewZones=&hfGT=R%7C&hfC=&hfSea=2019%7C2018%7C2017%7C2016%7C2015%7C&hfSit=&player_type=batter&hfOuts=&opponent=&pitcher_throws=&batter_stands=&hfSA=&game_date_gt=&game_date_lt=&hfInfield=2%7C3%7C&team=&position=&hfOutfield=&hfRO=&home_road=&hfFlag=&hfPull=&metric_1=&hfInn=&min_pitches=0&min_results=0&group_by=name-year&sort_col=pitches&player_event_sort=h_launch_speed&sort_order=desc&min_pas=30'
    shift_df = getShiftData(shift_url)
    shift_df.to_csv('../data/Shiftdf.csv')
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import os
import datetime
from time import sleep
from random import randint
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
GPDBNAME = str(os.environ["PGDATABASE"])
''' Utilities for Global Persistent Table Rebuild tests '''
class PTRebuildUtil(Command):
    """Helpers for the persistent-table (PT) rebuild tests.

    Wraps gppylib's Command to run shell commands, queries the GPDB
    catalog for connection counts and primary-segment locations, and
    runs the utility-mode persistent rebuild SQL scripts.
    (Python 2 code -- note the print statements.)
    """
    def __init__(self, cmd = None):
        # Every wrapped shell command gets the same log label.
        Command.__init__(self, 'Running fault command', cmd)
    def _run_sys_cmd(self, cmd_str, validate = False):
        '''Run *cmd_str* as a shell command and return its results object.'''
        tinctest.logger.info("execute:" +cmd_str)
        cmd = PTRebuildUtil(cmd_str)
        cmd.run(validateAfter = validate)
        return cmd.get_results()
    def check_dbconnections(self):
        ''' Return True if the database has any active connections besides ours. '''
        sql_cmd = 'select count(*) FROM pg_stat_activity;'
        # psql's 4th output line holds the count; subtract 1 to exclude
        # the session running this very query.
        conCount = int(PSQL.run_sql_command(sql_cmd).split('\n')[3]) - 1
        if conCount > 0:
            print "There are %s Active connection on Database" %conCount
            return True
        else :
            return False
    def get_hostname_port_of_segment(self):
        ''' Return (hostname, port) of one primary segment. '''
        # Get primary segments (content != -1 excludes the master).
        cmd_str = "select hostname, port from gp_segment_configuration where role = 'p' and content != '-1';"
        seglist = PSQL.run_sql_command(cmd_str).split('\n')
        # Fixed row index (not random) so a 2nd re-try rebuilds the SAME segment.
        segNo = 2 + 1 #randint( 1, 2) : Commented so that it will rebuild same segment in 2nd re-try
        (hostname, port) = seglist[segNo].split('|')
        return (hostname, port)
    def persistent_Rebuild(self, hostname = None, port = None, type = 'Master'):
        ''' Rebuild persistent objects by running the per-type SQL file in utility mode. '''
        sql_file = local_path('persistent_Rebuild_%s.sql'%type)
        # Timestamp the output file name so repeated runs do not clobber results.
        now = datetime.datetime.now()
        timestamp = '%s%s%s%s%s%s%s'%(now.year,now.month,now.day,now.hour,now.minute,now.second,now.microsecond)
        out_file = sql_file.replace('.sql', timestamp + '.out')
        PSQL.run_sql_file(sql_file = sql_file, PGOPTIONS = '-c gp_session_role=utility', host = hostname, port = port, out_file = out_file)
|
import unittest
class StringCase(unittest.TestCase):
    """Tutorial-style demos of Python string literals and slicing.

    These "tests" only print; they make no assertions.
    """
    def test_pring(self):
        # Escape-sequence pitfalls in Windows-path-like strings: in the
        # first two literals '\n' is a newline (and '\s' is an invalid
        # escape left as-is); the r-prefixed raw literal keeps the
        # backslashes intact.
        print('1 C:\some\name')
        print("2 C:\some\name")
        print(r"3 C:\some\name")
        # Triple-quoted literal; the backslash after the opening quotes
        # suppresses the leading newline.
        print("""\
Usage: thingy [OPTIONS]
-h Display this usage message
-H hostname Hostname to connect to
""")
    def test_index(self):
        word = 'Python'
        # Out-of-range slice indexes are handled gracefully:
        print(word[:3])
        print(word[3:])
        # Python strings are immutable, so assigning to an indexed
        # position of a string raises an error.
if __name__ == '__main__':
    # Discover and run the TestCase classes in this module.
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._sourcemod_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of SourceMod functions.
It is able to re-generate itself.
Do not edit the FUNCTIONS list by hand.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
FUNCTIONS = (
'OnEntityCreated',
'OnEntityDestroyed',
'OnGetGameDescription',
'OnLevelInit',
'SDKHook',
'SDKHookEx',
'SDKUnhook',
'SDKHooks_TakeDamage',
'SDKHooks_DropWeapon',
'TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
'AddToTopMenu',
'GetTopMenuInfoString',
'GetTopMenuObjName',
'RemoveFromTopMenu',
'DisplayTopMenu',
'DisplayTopMenuCategory',
'FindTopMenuCategory',
'SetTopMenuTitleCaching',
'OnAdminMenuCreated',
'OnAdminMenuReady',
'GetAdminTopMenu',
'AddTargetsToMenu',
'AddTargetsToMenu2',
'RedisplayAdminMenu',
'TEHook',
'AddTempEntHook',
'RemoveTempEntHook',
'TE_Start',
'TE_IsValidProp',
'TE_WriteNum',
'TE_ReadNum',
'TE_WriteFloat',
'TE_ReadFloat',
'TE_WriteVector',
'TE_ReadVector',
'TE_WriteAngles',
'TE_WriteFloatArray',
'TE_Send',
'TE_WriteEncodedEnt',
'TE_SendToAll',
'TE_SendToClient',
'CreateKeyValues',
'KvSetString',
'KvSetNum',
'KvSetUInt64',
'KvSetFloat',
'KvSetColor',
'KvSetVector',
'KvGetString',
'KvGetNum',
'KvGetFloat',
'KvGetColor',
'KvGetUInt64',
'KvGetVector',
'KvJumpToKey',
'KvJumpToKeySymbol',
'KvGotoFirstSubKey',
'KvGotoNextKey',
'KvSavePosition',
'KvDeleteKey',
'KvDeleteThis',
'KvGoBack',
'KvRewind',
'KvGetSectionName',
'KvSetSectionName',
'KvGetDataType',
'KeyValuesToFile',
'FileToKeyValues',
'StringToKeyValues',
'KvSetEscapeSequences',
'KvNodesInStack',
'KvCopySubkeys',
'KvFindKeyById',
'KvGetNameSymbol',
'KvGetSectionSymbol',
'TE_SetupSparks',
'TE_SetupSmoke',
'TE_SetupDust',
'TE_SetupMuzzleFlash',
'TE_SetupMetalSparks',
'TE_SetupEnergySplash',
'TE_SetupArmorRicochet',
'TE_SetupGlowSprite',
'TE_SetupExplosion',
'TE_SetupBloodSprite',
'TE_SetupBeamRingPoint',
'TE_SetupBeamPoints',
'TE_SetupBeamLaser',
'TE_SetupBeamRing',
'TE_SetupBeamFollow',
'HookEvent',
'HookEventEx',
'UnhookEvent',
'CreateEvent',
'FireEvent',
'CancelCreatedEvent',
'GetEventBool',
'SetEventBool',
'GetEventInt',
'SetEventInt',
'GetEventFloat',
'SetEventFloat',
'GetEventString',
'SetEventString',
'GetEventName',
'SetEventBroadcast',
'GetUserMessageType',
'GetUserMessageId',
'GetUserMessageName',
'StartMessage',
'StartMessageEx',
'EndMessage',
'MsgHook',
'MsgPostHook',
'HookUserMessage',
'UnhookUserMessage',
'StartMessageAll',
'StartMessageOne',
'InactivateClient',
'ReconnectClient',
'GetMaxEntities',
'GetEntityCount',
'IsValidEntity',
'IsValidEdict',
'IsEntNetworkable',
'CreateEdict',
'RemoveEdict',
'GetEdictFlags',
'SetEdictFlags',
'GetEdictClassname',
'GetEntityNetClass',
'ChangeEdictState',
'GetEntData',
'SetEntData',
'GetEntDataFloat',
'SetEntDataFloat',
'GetEntDataEnt2',
'SetEntDataEnt2',
'GetEntDataVector',
'SetEntDataVector',
'GetEntDataString',
'SetEntDataString',
'FindSendPropOffs',
'FindSendPropInfo',
'FindDataMapOffs',
'FindDataMapInfo',
'GetEntSendPropOffs',
'GetEntProp',
'SetEntProp',
'GetEntPropFloat',
'SetEntPropFloat',
'GetEntPropEnt',
'SetEntPropEnt',
'GetEntPropVector',
'SetEntPropVector',
'GetEntPropString',
'SetEntPropString',
'GetEntPropArraySize',
'GetEntDataArray',
'SetEntDataArray',
'GetEntityAddress',
'GetEntityClassname',
'float',
'FloatMul',
'FloatDiv',
'FloatAdd',
'FloatSub',
'FloatFraction',
'RoundToZero',
'RoundToCeil',
'RoundToFloor',
'RoundToNearest',
'FloatCompare',
'SquareRoot',
'Pow',
'Exponential',
'Logarithm',
'Sine',
'Cosine',
'Tangent',
'FloatAbs',
'ArcTangent',
'ArcCosine',
'ArcSine',
'ArcTangent2',
'RoundFloat',
'operator%',
'DegToRad',
'RadToDeg',
'GetURandomInt',
'GetURandomFloat',
'SetURandomSeed',
'SetURandomSeedSimple',
'RemovePlayerItem',
'GivePlayerItem',
'GetPlayerWeaponSlot',
'IgniteEntity',
'ExtinguishEntity',
'TeleportEntity',
'ForcePlayerSuicide',
'SlapPlayer',
'FindEntityByClassname',
'GetClientEyeAngles',
'CreateEntityByName',
'DispatchSpawn',
'DispatchKeyValue',
'DispatchKeyValueFloat',
'DispatchKeyValueVector',
'GetClientAimTarget',
'GetTeamCount',
'GetTeamName',
'GetTeamScore',
'SetTeamScore',
'GetTeamClientCount',
'SetEntityModel',
'GetPlayerDecalFile',
'GetPlayerJingleFile',
'GetServerNetStats',
'EquipPlayerWeapon',
'ActivateEntity',
'SetClientInfo',
'GivePlayerAmmo',
'SetClientListeningFlags',
'GetClientListeningFlags',
'SetListenOverride',
'GetListenOverride',
'IsClientMuted',
'TR_GetPointContents',
'TR_GetPointContentsEnt',
'TR_TraceRay',
'TR_TraceHull',
'TR_TraceRayFilter',
'TR_TraceHullFilter',
'TR_TraceRayEx',
'TR_TraceHullEx',
'TR_TraceRayFilterEx',
'TR_TraceHullFilterEx',
'TR_GetFraction',
'TR_GetEndPosition',
'TR_GetEntityIndex',
'TR_DidHit',
'TR_GetHitGroup',
'TR_GetPlaneNormal',
'TR_PointOutsideWorld',
'SortIntegers',
'SortFloats',
'SortStrings',
'SortFunc1D',
'SortCustom1D',
'SortCustom2D',
'SortADTArray',
'SortFuncADTArray',
'SortADTArrayCustom',
'CompileRegex',
'MatchRegex',
'GetRegexSubString',
'SimpleRegexMatch',
'TF2_GetPlayerClass',
'TF2_SetPlayerClass',
'TF2_RemoveWeaponSlot',
'TF2_RemoveAllWeapons',
'TF2_IsPlayerInCondition',
'TF2_GetObjectType',
'TF2_GetObjectMode',
'NominateMap',
'RemoveNominationByMap',
'RemoveNominationByOwner',
'GetExcludeMapList',
'GetNominatedMapList',
'CanMapChooserStartVote',
'InitiateMapChooserVote',
'HasEndOfMapVoteFinished',
'EndOfMapVoteEnabled',
'OnNominationRemoved',
'OnMapVoteStarted',
'CreateTimer',
'KillTimer',
'TriggerTimer',
'GetTickedTime',
'GetMapTimeLeft',
'GetMapTimeLimit',
'ExtendMapTimeLimit',
'GetTickInterval',
'OnMapTimeLeftChanged',
'IsServerProcessing',
'CreateDataTimer',
'ByteCountToCells',
'CreateArray',
'ClearArray',
'CloneArray',
'ResizeArray',
'GetArraySize',
'PushArrayCell',
'PushArrayString',
'PushArrayArray',
'GetArrayCell',
'GetArrayString',
'GetArrayArray',
'SetArrayCell',
'SetArrayString',
'SetArrayArray',
'ShiftArrayUp',
'RemoveFromArray',
'SwapArrayItems',
'FindStringInArray',
'FindValueInArray',
'ProcessTargetString',
'ReplyToTargetError',
'MultiTargetFilter',
'AddMultiTargetFilter',
'RemoveMultiTargetFilter',
'OnBanClient',
'OnBanIdentity',
'OnRemoveBan',
'BanClient',
'BanIdentity',
'RemoveBan',
'CreateTrie',
'SetTrieValue',
'SetTrieArray',
'SetTrieString',
'GetTrieValue',
'GetTrieArray',
'GetTrieString',
'RemoveFromTrie',
'ClearTrie',
'GetTrieSize',
'GetFunctionByName',
'CreateGlobalForward',
'CreateForward',
'GetForwardFunctionCount',
'AddToForward',
'RemoveFromForward',
'RemoveAllFromForward',
'Call_StartForward',
'Call_StartFunction',
'Call_PushCell',
'Call_PushCellRef',
'Call_PushFloat',
'Call_PushFloatRef',
'Call_PushArray',
'Call_PushArrayEx',
'Call_PushString',
'Call_PushStringEx',
'Call_Finish',
'Call_Cancel',
'NativeCall',
'CreateNative',
'ThrowNativeError',
'GetNativeStringLength',
'GetNativeString',
'SetNativeString',
'GetNativeCell',
'GetNativeCellRef',
'SetNativeCellRef',
'GetNativeArray',
'SetNativeArray',
'FormatNativeString',
'RequestFrameCallback',
'RequestFrame',
'OnRebuildAdminCache',
'DumpAdminCache',
'AddCommandOverride',
'GetCommandOverride',
'UnsetCommandOverride',
'CreateAdmGroup',
'FindAdmGroup',
'SetAdmGroupAddFlag',
'GetAdmGroupAddFlag',
'GetAdmGroupAddFlags',
'SetAdmGroupImmuneFrom',
'GetAdmGroupImmuneCount',
'GetAdmGroupImmuneFrom',
'AddAdmGroupCmdOverride',
'GetAdmGroupCmdOverride',
'RegisterAuthIdentType',
'CreateAdmin',
'GetAdminUsername',
'BindAdminIdentity',
'SetAdminFlag',
'GetAdminFlag',
'GetAdminFlags',
'AdminInheritGroup',
'GetAdminGroupCount',
'GetAdminGroup',
'SetAdminPassword',
'GetAdminPassword',
'FindAdminByIdentity',
'RemoveAdmin',
'FlagBitsToBitArray',
'FlagBitArrayToBits',
'FlagArrayToBits',
'FlagBitsToArray',
'FindFlagByName',
'FindFlagByChar',
'FindFlagChar',
'ReadFlagString',
'CanAdminTarget',
'CreateAuthMethod',
'SetAdmGroupImmunityLevel',
'GetAdmGroupImmunityLevel',
'SetAdminImmunityLevel',
'GetAdminImmunityLevel',
'FlagToBit',
'BitToFlag',
'ServerCommand',
'ServerCommandEx',
'InsertServerCommand',
'ServerExecute',
'ClientCommand',
'FakeClientCommand',
'FakeClientCommandEx',
'PrintToServer',
'PrintToConsole',
'ReplyToCommand',
'GetCmdReplySource',
'SetCmdReplySource',
'IsChatTrigger',
'ShowActivity2',
'ShowActivity',
'ShowActivityEx',
'FormatActivitySource',
'SrvCmd',
'RegServerCmd',
'ConCmd',
'RegConsoleCmd',
'RegAdminCmd',
'GetCmdArgs',
'GetCmdArg',
'GetCmdArgString',
'CreateConVar',
'FindConVar',
'ConVarChanged',
'HookConVarChange',
'UnhookConVarChange',
'GetConVarBool',
'SetConVarBool',
'GetConVarInt',
'SetConVarInt',
'GetConVarFloat',
'SetConVarFloat',
'GetConVarString',
'SetConVarString',
'ResetConVar',
'GetConVarDefault',
'GetConVarFlags',
'SetConVarFlags',
'GetConVarBounds',
'SetConVarBounds',
'GetConVarName',
'QueryClientConVar',
'GetCommandIterator',
'ReadCommandIterator',
'CheckCommandAccess',
'CheckAccess',
'IsValidConVarChar',
'GetCommandFlags',
'SetCommandFlags',
'FindFirstConCommand',
'FindNextConCommand',
'SendConVarValue',
'AddServerTag',
'RemoveServerTag',
'CommandListener',
'AddCommandListener',
'RemoveCommandListener',
'CommandExists',
'OnClientSayCommand',
'OnClientSayCommand_Post',
'TF2_IgnitePlayer',
'TF2_RespawnPlayer',
'TF2_RegeneratePlayer',
'TF2_AddCondition',
'TF2_RemoveCondition',
'TF2_SetPlayerPowerPlay',
'TF2_DisguisePlayer',
'TF2_RemovePlayerDisguise',
'TF2_StunPlayer',
'TF2_MakeBleed',
'TF2_GetClass',
'TF2_CalcIsAttackCritical',
'TF2_OnIsHolidayActive',
'TF2_IsHolidayActive',
'TF2_IsPlayerInDuel',
'TF2_RemoveWearable',
'TF2_OnConditionAdded',
'TF2_OnConditionRemoved',
'TF2_OnWaitingForPlayersStart',
'TF2_OnWaitingForPlayersEnd',
'TF2_OnPlayerTeleport',
'SQL_Connect',
'SQL_DefConnect',
'SQL_ConnectCustom',
'SQLite_UseDatabase',
'SQL_CheckConfig',
'SQL_GetDriver',
'SQL_ReadDriver',
'SQL_GetDriverIdent',
'SQL_GetDriverProduct',
'SQL_SetCharset',
'SQL_GetAffectedRows',
'SQL_GetInsertId',
'SQL_GetError',
'SQL_EscapeString',
'SQL_QuoteString',
'SQL_FastQuery',
'SQL_Query',
'SQL_PrepareQuery',
'SQL_FetchMoreResults',
'SQL_HasResultSet',
'SQL_GetRowCount',
'SQL_GetFieldCount',
'SQL_FieldNumToName',
'SQL_FieldNameToNum',
'SQL_FetchRow',
'SQL_MoreRows',
'SQL_Rewind',
'SQL_FetchString',
'SQL_FetchFloat',
'SQL_FetchInt',
'SQL_IsFieldNull',
'SQL_FetchSize',
'SQL_BindParamInt',
'SQL_BindParamFloat',
'SQL_BindParamString',
'SQL_Execute',
'SQL_LockDatabase',
'SQL_UnlockDatabase',
'SQLTCallback',
'SQL_IsSameConnection',
'SQL_TConnect',
'SQL_TQuery',
'SQL_CreateTransaction',
'SQL_AddQuery',
'SQLTxnSuccess',
'SQLTxnFailure',
'SQL_ExecuteTransaction',
'CloseHandle',
'CloneHandle',
'MenuHandler',
'CreateMenu',
'DisplayMenu',
'DisplayMenuAtItem',
'AddMenuItem',
'InsertMenuItem',
'RemoveMenuItem',
'RemoveAllMenuItems',
'GetMenuItem',
'GetMenuSelectionPosition',
'GetMenuItemCount',
'SetMenuPagination',
'GetMenuPagination',
'GetMenuStyle',
'SetMenuTitle',
'GetMenuTitle',
'CreatePanelFromMenu',
'GetMenuExitButton',
'SetMenuExitButton',
'GetMenuExitBackButton',
'SetMenuExitBackButton',
'SetMenuNoVoteButton',
'CancelMenu',
'GetMenuOptionFlags',
'SetMenuOptionFlags',
'IsVoteInProgress',
'CancelVote',
'VoteMenu',
'VoteMenuToAll',
'VoteHandler',
'SetVoteResultCallback',
'CheckVoteDelay',
'IsClientInVotePool',
'RedrawClientVoteMenu',
'GetMenuStyleHandle',
'CreatePanel',
'CreateMenuEx',
'GetClientMenu',
'CancelClientMenu',
'GetMaxPageItems',
'GetPanelStyle',
'SetPanelTitle',
'DrawPanelItem',
'DrawPanelText',
'CanPanelDrawFlags',
'SetPanelKeys',
'SendPanelToClient',
'GetPanelTextRemaining',
'GetPanelCurrentKey',
'SetPanelCurrentKey',
'RedrawMenuItem',
'InternalShowMenu',
'GetMenuVoteInfo',
'IsNewVoteAllowed',
'PrefetchSound',
'EmitAmbientSound',
'FadeClientVolume',
'StopSound',
'EmitSound',
'EmitSentence',
'GetDistGainFromSoundLevel',
'AmbientSHook',
'NormalSHook',
'AddAmbientSoundHook',
'AddNormalSoundHook',
'RemoveAmbientSoundHook',
'RemoveNormalSoundHook',
'EmitSoundToClient',
'EmitSoundToAll',
'ATTN_TO_SNDLEVEL',
'GetGameSoundParams',
'EmitGameSound',
'EmitAmbientGameSound',
'EmitGameSoundToClient',
'EmitGameSoundToAll',
'PrecacheScriptSound',
'strlen',
'StrContains',
'strcmp',
'strncmp',
'StrEqual',
'strcopy',
'Format',
'FormatEx',
'VFormat',
'StringToInt',
'StringToIntEx',
'IntToString',
'StringToFloat',
'StringToFloatEx',
'FloatToString',
'BreakString',
'TrimString',
'SplitString',
'ReplaceString',
'ReplaceStringEx',
'GetCharBytes',
'IsCharAlpha',
'IsCharNumeric',
'IsCharSpace',
'IsCharMB',
'IsCharUpper',
'IsCharLower',
'StripQuotes',
'CharToUpper',
'CharToLower',
'FindCharInString',
'StrCat',
'ExplodeString',
'ImplodeStrings',
'GetVectorLength',
'GetVectorDistance',
'GetVectorDotProduct',
'GetVectorCrossProduct',
'NormalizeVector',
'GetAngleVectors',
'GetVectorAngles',
'GetVectorVectors',
'AddVectors',
'SubtractVectors',
'ScaleVector',
'NegateVector',
'MakeVectorFromPoints',
'BaseComm_IsClientGagged',
'BaseComm_IsClientMuted',
'BaseComm_SetClientGag',
'BaseComm_SetClientMute',
'FormatUserLogText',
'FindPluginByFile',
'FindTarget',
'AcceptEntityInput',
'SetVariantBool',
'SetVariantString',
'SetVariantInt',
'SetVariantFloat',
'SetVariantVector3D',
'SetVariantPosVector3D',
'SetVariantColor',
'SetVariantEntity',
'GameRules_GetProp',
'GameRules_SetProp',
'GameRules_GetPropFloat',
'GameRules_SetPropFloat',
'GameRules_GetPropEnt',
'GameRules_SetPropEnt',
'GameRules_GetPropVector',
'GameRules_SetPropVector',
'GameRules_GetPropString',
'GameRules_SetPropString',
'GameRules_GetRoundState',
'OnClientConnect',
'OnClientConnected',
'OnClientPutInServer',
'OnClientDisconnect',
'OnClientDisconnect_Post',
'OnClientCommand',
'OnClientSettingsChanged',
'OnClientAuthorized',
'OnClientPreAdminCheck',
'OnClientPostAdminFilter',
'OnClientPostAdminCheck',
'GetMaxClients',
'GetMaxHumanPlayers',
'GetClientCount',
'GetClientName',
'GetClientIP',
'GetClientAuthString',
'GetClientAuthId',
'GetSteamAccountID',
'GetClientUserId',
'IsClientConnected',
'IsClientInGame',
'IsClientInKickQueue',
'IsClientAuthorized',
'IsFakeClient',
'IsClientSourceTV',
'IsClientReplay',
'IsClientObserver',
'IsPlayerAlive',
'GetClientInfo',
'GetClientTeam',
'SetUserAdmin',
'GetUserAdmin',
'AddUserFlags',
'RemoveUserFlags',
'SetUserFlagBits',
'GetUserFlagBits',
'CanUserTarget',
'RunAdminCacheChecks',
'NotifyPostAdminCheck',
'CreateFakeClient',
'SetFakeClientConVar',
'GetClientHealth',
'GetClientModel',
'GetClientWeapon',
'GetClientMaxs',
'GetClientMins',
'GetClientAbsAngles',
'GetClientAbsOrigin',
'GetClientArmor',
'GetClientDeaths',
'GetClientFrags',
'GetClientDataRate',
'IsClientTimingOut',
'GetClientTime',
'GetClientLatency',
'GetClientAvgLatency',
'GetClientAvgLoss',
'GetClientAvgChoke',
'GetClientAvgData',
'GetClientAvgPackets',
'GetClientOfUserId',
'KickClient',
'KickClientEx',
'ChangeClientTeam',
'GetClientSerial',
'GetClientFromSerial',
'FindStringTable',
'GetNumStringTables',
'GetStringTableNumStrings',
'GetStringTableMaxStrings',
'GetStringTableName',
'FindStringIndex',
'ReadStringTable',
'GetStringTableDataLength',
'GetStringTableData',
'SetStringTableData',
'AddToStringTable',
'LockStringTables',
'AddFileToDownloadsTable',
'GetEntityFlags',
'SetEntityFlags',
'GetEntityMoveType',
'SetEntityMoveType',
'GetEntityRenderMode',
'SetEntityRenderMode',
'GetEntityRenderFx',
'SetEntityRenderFx',
'SetEntityRenderColor',
'GetEntityGravity',
'SetEntityGravity',
'SetEntityHealth',
'GetClientButtons',
'EntityOutput',
'HookEntityOutput',
'UnhookEntityOutput',
'HookSingleEntityOutput',
'UnhookSingleEntityOutput',
'SMC_CreateParser',
'SMC_ParseFile',
'SMC_GetErrorString',
'SMC_ParseStart',
'SMC_SetParseStart',
'SMC_ParseEnd',
'SMC_SetParseEnd',
'SMC_NewSection',
'SMC_KeyValue',
'SMC_EndSection',
'SMC_SetReaders',
'SMC_RawLine',
'SMC_SetRawLine',
'BfWriteBool',
'BfWriteByte',
'BfWriteChar',
'BfWriteShort',
'BfWriteWord',
'BfWriteNum',
'BfWriteFloat',
'BfWriteString',
'BfWriteEntity',
'BfWriteAngle',
'BfWriteCoord',
'BfWriteVecCoord',
'BfWriteVecNormal',
'BfWriteAngles',
'BfReadBool',
'BfReadByte',
'BfReadChar',
'BfReadShort',
'BfReadWord',
'BfReadNum',
'BfReadFloat',
'BfReadString',
'BfReadEntity',
'BfReadAngle',
'BfReadCoord',
'BfReadVecCoord',
'BfReadVecNormal',
'BfReadAngles',
'BfGetNumBytesLeft',
'CreateProfiler',
'StartProfiling',
'StopProfiling',
'GetProfilerTime',
'OnPluginStart',
'AskPluginLoad2',
'OnPluginEnd',
'OnPluginPauseChange',
'OnGameFrame',
'OnMapStart',
'OnMapEnd',
'OnConfigsExecuted',
'OnAutoConfigsBuffered',
'OnAllPluginsLoaded',
'GetMyHandle',
'GetPluginIterator',
'MorePlugins',
'ReadPlugin',
'GetPluginStatus',
'GetPluginFilename',
'IsPluginDebugging',
'GetPluginInfo',
'FindPluginByNumber',
'SetFailState',
'ThrowError',
'GetTime',
'FormatTime',
'LoadGameConfigFile',
'GameConfGetOffset',
'GameConfGetKeyValue',
'GameConfGetAddress',
'GetSysTickCount',
'AutoExecConfig',
'RegPluginLibrary',
'LibraryExists',
'GetExtensionFileStatus',
'OnLibraryAdded',
'OnLibraryRemoved',
'ReadMapList',
'SetMapListCompatBind',
'OnClientFloodCheck',
'OnClientFloodResult',
'CanTestFeatures',
'GetFeatureStatus',
'RequireFeature',
'LoadFromAddress',
'StoreToAddress',
'CreateStack',
'PushStackCell',
'PushStackString',
'PushStackArray',
'PopStackCell',
'PopStackString',
'PopStackArray',
'IsStackEmpty',
'PopStack',
'OnPlayerRunCmd',
'BuildPath',
'OpenDirectory',
'ReadDirEntry',
'OpenFile',
'DeleteFile',
'ReadFileLine',
'ReadFile',
'ReadFileString',
'WriteFile',
'WriteFileString',
'WriteFileLine',
'ReadFileCell',
'WriteFileCell',
'IsEndOfFile',
'FileSeek',
'FilePosition',
'FileExists',
'RenameFile',
'DirExists',
'FileSize',
'FlushFile',
'RemoveDir',
'CreateDirectory',
'GetFileTime',
'LogToOpenFile',
'LogToOpenFileEx',
'PbReadInt',
'PbReadFloat',
'PbReadBool',
'PbReadString',
'PbReadColor',
'PbReadAngle',
'PbReadVector',
'PbReadVector2D',
'PbGetRepeatedFieldCount',
'PbSetInt',
'PbSetFloat',
'PbSetBool',
'PbSetString',
'PbSetColor',
'PbSetAngle',
'PbSetVector',
'PbSetVector2D',
'PbAddInt',
'PbAddFloat',
'PbAddBool',
'PbAddString',
'PbAddColor',
'PbAddAngle',
'PbAddVector',
'PbAddVector2D',
'PbRemoveRepeatedFieldValue',
'PbReadMessage',
'PbReadRepeatedMessage',
'PbAddMessage',
'SetNextMap',
'GetNextMap',
'ForceChangeLevel',
'GetMapHistorySize',
'GetMapHistory',
'GeoipCode2',
'GeoipCode3',
'GeoipCountry',
'MarkNativeAsOptional',
'RegClientCookie',
'FindClientCookie',
'SetClientCookie',
'GetClientCookie',
'SetAuthIdCookie',
'AreClientCookiesCached',
'OnClientCookiesCached',
'CookieMenuHandler',
'SetCookiePrefabMenu',
'SetCookieMenuItem',
'ShowCookieMenu',
'GetCookieIterator',
'ReadCookieIterator',
'GetCookieAccess',
'GetClientCookieTime',
'LoadTranslations',
'SetGlobalTransTarget',
'GetClientLanguage',
'GetServerLanguage',
'GetLanguageCount',
'GetLanguageInfo',
'SetClientLanguage',
'GetLanguageByCode',
'GetLanguageByName',
'CS_OnBuyCommand',
'CS_OnCSWeaponDrop',
'CS_OnGetWeaponPrice',
'CS_OnTerminateRound',
'CS_RespawnPlayer',
'CS_SwitchTeam',
'CS_DropWeapon',
'CS_TerminateRound',
'CS_GetTranslatedWeaponAlias',
'CS_GetWeaponPrice',
'CS_GetClientClanTag',
'CS_SetClientClanTag',
'CS_GetTeamScore',
'CS_SetTeamScore',
'CS_GetMVPCount',
'CS_SetMVPCount',
'CS_GetClientContributionScore',
'CS_SetClientContributionScore',
'CS_GetClientAssists',
'CS_SetClientAssists',
'CS_AliasToWeaponID',
'CS_WeaponIDToAlias',
'CS_IsValidWeaponID',
'CS_UpdateClientModel',
'LogToGame',
'SetRandomSeed',
'GetRandomFloat',
'GetRandomInt',
'IsMapValid',
'IsDedicatedServer',
'GetEngineTime',
'GetGameTime',
'GetGameTickCount',
'GetGameDescription',
'GetGameFolderName',
'GetCurrentMap',
'PrecacheModel',
'PrecacheSentenceFile',
'PrecacheDecal',
'PrecacheGeneric',
'IsModelPrecached',
'IsDecalPrecached',
'IsGenericPrecached',
'PrecacheSound',
'IsSoundPrecached',
'CreateDialog',
'GetEngineVersion',
'PrintToChat',
'PrintToChatAll',
'PrintCenterText',
'PrintCenterTextAll',
'PrintHintText',
'PrintHintTextToAll',
'ShowVGUIPanel',
'CreateHudSynchronizer',
'SetHudTextParams',
'SetHudTextParamsEx',
'ShowSyncHudText',
'ClearSyncHud',
'ShowHudText',
'ShowMOTDPanel',
'DisplayAskConnectBox',
'EntIndexToEntRef',
'EntRefToEntIndex',
'MakeCompatEntRef',
'SetClientViewEntity',
'SetLightStyle',
'GetClientEyePosition',
'CreateDataPack',
'WritePackCell',
'WritePackFloat',
'WritePackString',
'ReadPackCell',
'ReadPackFloat',
'ReadPackString',
'ResetPack',
'GetPackPosition',
'SetPackPosition',
'IsPackReadable',
'LogMessage',
'LogToFile',
'LogToFileEx',
'LogAction',
'LogError',
'OnLogAction',
'GameLogHook',
'AddGameLogHook',
'RemoveGameLogHook',
'FindTeamByName',
'StartPrepSDKCall',
'PrepSDKCall_SetVirtual',
'PrepSDKCall_SetSignature',
'PrepSDKCall_SetAddress',
'PrepSDKCall_SetFromConf',
'PrepSDKCall_SetReturnInfo',
'PrepSDKCall_AddParameter',
'EndPrepSDKCall',
'SDKCall',
'GetPlayerResourceEntity',
)
if __name__ == '__main__': # pragma: no cover
    # Self-regeneration entry point: scrape the live SourceMod API docs
    # and rewrite the FUNCTIONS tuple in this very file.
    import re
    import sys
    try:
        from urllib import FancyURLopener
    except ImportError:
        from urllib.request import FancyURLopener
    from pygments.util import format_lines
    # urllib ends up wanting to import a module called 'math' -- if
    # pygments/lexers is in the path, this ends badly.
    for i in range(len(sys.path)-1, -1, -1):
        if sys.path[i].endswith('/lexers'):
            del sys.path[i]
    class Opener(FancyURLopener):
        # Custom User-Agent so the docs server can identify this scraper.
        version = 'Mozilla/5.0 (Pygments Sourcemod Builtins Update)'
    opener = Opener()
    def get_version():
        """Return the SourceMod version string parsed from the API index page."""
        f = opener.open('http://docs.sourcemod.net/api/index.php')
        r = re.compile(r'SourceMod v\.<b>([\d\.]+(?:-\w+)?)</td>')
        for line in f:
            m = r.search(line)
            if m is not None:
                return m.groups()[0]
        raise ValueError('No version in api docs')
    def get_sm_functions():
        """Return the list of function names scraped from SMfuncs.js."""
        f = opener.open('http://docs.sourcemod.net/api/SMfuncs.js')
        r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
        functions = []
        for line in f:
            m = r.match(line)
            if m is not None:
                functions.append(m.groups()[0])
        return functions
    def regenerate(filename, natives):
        """Rewrite *filename*, replacing the FUNCTIONS tuple with *natives*."""
        with open(filename) as fp:
            content = fp.read()
        # Split the file around the generated tuple, keeping everything
        # before 'FUNCTIONS = (' and everything from the __main__ guard on.
        header = content[:content.find('FUNCTIONS = (')]
        footer = content[content.find("if __name__ == '__main__':")-1:]
        with open(filename, 'w') as fp:
            fp.write(header)
            fp.write(format_lines('FUNCTIONS', natives))
            fp.write(footer)
    def run():
        """Fetch the current function index and regenerate this module."""
        version = get_version()
        print('> Downloading function index for SourceMod %s' % version)
        functions = get_sm_functions()
        print('> %d functions found:' % len(functions))
        functionlist = []
        for full_function_name in functions:
            print('>> %s' % full_function_name)
            functionlist.append(full_function_name)
        regenerate(__file__, functionlist)
    run()
|
from bot import Twitter
from time import sleep
import random
twitter = Twitter()
# Load the tweets file, skipping the header line; one tweet per line.
with open("tweets.txt") as f:
    tweets = [tweet.strip() for tweet in f.readlines()[1:]]
if __name__ == "__main__":
    # [Write your code here]
    # --- Post a single tweet ---
    # twitter.statuses_update("ツイートの内容")
    # --- Tweet the contents of tweets.txt ---
    # the variable `tweets` holds the lines of tweets.txt
    # print(tweets)
    # --- To tweet them in order ---
    # for tweet in tweets:
    #     twitter.statuses_update(tweet)
    # --- To tweet them in a random order ---
    # place this before the for loop:
    # random.shuffle(tweets)
    # --- To tweet at n-second intervals ---
    # write this inside the for loop:
    # sleep(n)
    print("finished!")
|
import json
import pandas
from .weight import Weight, DrinkWeights, ingredients_dict
# Default change strength used by BaseLine when none is supplied.
INITIAL_CHANGE_STRENGTH = 30
# Taste categories referenced by the survey data.
tastes_list = ["Sweet", "Sour", "Bitter"]
# Maps an ingredient group to its column index in the survey CSV.
# NOTE(review): "juice" and "tastes" share column 2 -- confirm intended.
columns_dict = {"alcohol": 0, "juice": 2, "tastes": 2, "liquor": 3}
class ExportedBaseline(object):
    """Baseline restored from a serialized export.

    The export is a sequence of JSON lines; only the last non-empty
    line is used, and it is expected to carry "weights" (a mapping of
    ingredient name to weight value) and "change_strength".
    """
    def __init__(self, serialized):
        lines = [line for line in serialized.splitlines() if line]
        # FIX: parse the last line once instead of calling json.loads
        # on the same text twice (original parsed it again for
        # change_strength).
        payload = json.loads(lines[-1])
        data = payload.get("weights", {})
        self._weights = {
            key: Weight(weight_type=key, weight_value=value) for key, value in data.items()
        }
        self._change_strength = payload.get("change_strength")
    def get_ingredients_weights(self):
        """Return the {name: Weight} mapping restored from the export."""
        return self._weights
    def get_change_strength(self):
        """Return the exported change strength (None if absent)."""
        return self._change_strength
class BaseLine(object):
    """Baseline ingredient weights computed from the survey CSV.

    Counts, per ingredient, how many survey responses mention it, and
    exposes the counts as Weight objects.
    """
    def __init__(self, excel_filepath, change_strength=INITIAL_CHANGE_STRENGTH):
        # Despite the parameter name, the file is read as CSV
        # (kept for interface compatibility with existing callers).
        self.baseline_dataset = pandas.read_csv(excel_filepath)
        self.baseline_dataset.drop('Timestamp', axis=1, inplace=True)
        self._change_strength = change_strength
    def count_ingredient(self, column_idx, name):
        """Return how many rows of column *column_idx* mention *name*.

        FIX: match *name* literally (regex=False) so names containing
        regex metacharacters cannot break or distort the match, and
        treat empty survey cells as non-matches (na=False) instead of
        propagating NaN into the sum.
        """
        matches = self.baseline_dataset.iloc[:, column_idx].str.contains(
            name, regex=False, na=False)
        return int(matches.sum())
    def get_ingredients_weights_per_group(self, group_name):
        """Return {ingredient: Weight} for one ingredient group."""
        weights_dict = {}
        column_num = columns_dict[group_name]
        for name in ingredients_dict[group_name]:
            weights_dict[name] = Weight(weight_type=name,
                                        weight_value=self.count_ingredient(column_idx=column_num, name=name))
        return weights_dict
    def get_ingredients_weights(self):
        """Return {ingredient: Weight} across all ingredient groups."""
        weights_dict = {}
        for group_name in ingredients_dict.keys():
            weights_dict.update(self.get_ingredients_weights_per_group(group_name))
        return weights_dict
    def get_change_strength(self):
        """Return the configured change strength."""
        return self._change_strength
if __name__ == "__main__":
    # Manual smoke test: build the baseline from the survey CSV and seed
    # the drink weights from it.
    baseline = BaseLine(excel_filepath="The NMDD Project.csv")
    # (Python 2 debugging leftovers, kept for reference:)
    #for key, weight in baseline.get_ingredients_weights().iteritems():
    #    print key
    #    print weight.value
    drinks = DrinkWeights(weights_dict=baseline.get_ingredients_weights())
    #drinks.generate_mutation()
    #drinks.accept_mutation(sweetness=5, sourness=5, strength=5, general=5)
|
from django.utils import timezone
from django.shortcuts import redirect, render
from django.http import HttpResponse
from .models import AccessAttempt, LockedUser
from .appsettings import (LOCKOUT_COUNT_LIMIT, COOLOFF_PERIOD_SEC,
LOCKOUT_TEMPLATE, LOCKOUT_MSG_URL)
lockout_msg = "Too many login attempts. Please try again in few seconds."
def process_login_attempt(request, username, success):
    """
    Record the given login attempt and lock the user out when the
    recent-failure count reaches the configured limit.
    @success parameter denotes if attempt was failure or success.
    """
    # The stored attempt object itself is not needed here (the original bound
    # it to an unused local), only the side effect of recording it.
    AccessAttempt.objects.add_attempt(request, username, safe=success)
    # if failed find the count and update lock status
    if not success:
        failed_attempts = AccessAttempt.objects.get_recent_fails_for_user(username)
        if failed_attempts >= LOCKOUT_COUNT_LIMIT:
            lock_user(username)
def lock_user(username):
    """Create (or refresh) the lock entry for *username* and stamp the lock time."""
    # get_or_create returns (object, created); the original named the flag
    # `is_exist` and stamped locked_at only for newly *created* rows, so
    # re-locking an already-locked user never extended the cool-off window.
    # Always refresh the timestamp instead.
    user_entry, created = LockedUser.objects.get_or_create(username=username)
    user_entry.locked_at = timezone.now()
    user_entry.save()
    return user_entry
def check_and_update_lock(username):
    """
    Return the remaining lockout seconds for *username*, or None when not
    locked. An expired lock entry is deleted as a side effect.
    """
    try:
        entry = LockedUser.objects.get(username=username)
    except LockedUser.DoesNotExist:
        return None
    elapsed = timezone.now() - entry.locked_at
    remaining_sec = COOLOFF_PERIOD_SEC - elapsed.total_seconds()
    if remaining_sec > 0:
        return remaining_sec
    # Cool-off period has passed: drop the lock.
    entry.delete()
    return None
def get_lockout_response(request, remaining_sec):
    """
    Build the lockout HttpResponse according to the configured settings.

    Precedence: redirect URL, then template render, then plain-text message.
    """
    if LOCKOUT_MSG_URL:
        return redirect(LOCKOUT_MSG_URL)
    if LOCKOUT_TEMPLATE:
        return render(request, LOCKOUT_TEMPLATE, {'time_remaining': remaining_sec})
    return HttpResponse(lockout_msg)
|
# -*- coding:gbk -*-
# auther : pdm
# email : ppppdm@gmail.com
import socket
import traceback
# self module
import myLog
# Address to bind: this host's primary IP, resolved once at import time.
HOST = socket.gethostbyname(socket.gethostname()) #socket.INADDR_ANY
# TCP port the remote-control server listens on.
REMOTE_CONTROL_PORT = 6320
# Currently connected client sockets; send_to_remote() broadcasts to each.
remote_control_client_list = []
def send_to_remote(b_data):
    """Broadcast *b_data* to every connected client, dropping dead connections.

    Iterates over a snapshot of the client list: the original removed entries
    from the list while iterating it directly, which skips the element after
    each removed connection.
    """
    for conn in list(remote_control_client_list):
        try:
            conn.send(b_data)
        except Exception:
            # Send failed: log, unregister and close the broken connection.
            print(traceback.format_exc())
            myLog.mylogger.debug(traceback.format_exc())
            remote_control_client_list.remove(conn)
            conn.close()
def Server():
    """Accept remote-control TCP clients forever, registering each connection.

    Connections are appended to the module-level client list; broadcasting
    (and pruning of dead sockets) happens in send_to_remote().
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind((HOST, REMOTE_CONTROL_PORT))
        sock.listen(5)
        print('remote control ready to accept...')
        while True:
            conn, addr = sock.accept()
            print('connected by ', addr)
            remote_control_client_list.append(conn)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
        print(traceback.format_exc())
        myLog.mylogger.debug(traceback.format_exc())
        return
|
import torch
import torch.optim as optim
from torchvision.utils import save_image
from datetime import datetime
import itertools
from libs.compute import *
from libs.constant import *
from libs.model import *
import gc
# we are missing weight decayed specified in the original as regularization loss
# add cipping the equivalent to tf.clip_by_value to torch.clamp(input, 0 , 1 ) !!!!!!verify that we only clamp when applying the inverse!!!!!!!
#add gradient clipping FLAGS['net_gradient_clip_value'] = 1e8 torch.nn.utils.clip_grad_value_
#check if instance batch norm alo applies to the discriminator
# Gradient clipping threshold (mirrors FLAGS['net_gradient_clip_value'] = 1e8 in the TF original).
clip_value = 1e8
if __name__ == "__main__":
    start_time = datetime.now()
    # Creating generator and discriminator.
    # generatorX maps X -> Y (pretrained weights loaded); generatorX_ wraps the
    # same network for the y'' cycle pass.
    generatorX = Generator()
    generatorX.load_state_dict(torch.load('./gan1_pretrain_100_4.pth', map_location=device))
    generatorX_ = Generator_(generatorX)
    generatorX = nn.DataParallel(generatorX)
    generatorX_ = nn.DataParallel(generatorX_)
    generatorX.train()
    generatorY = Generator()
    generatorY = nn.DataParallel(generatorY)
    #generatorY.train()
    discriminatorY = Discriminator()
    discriminatorY = nn.DataParallel(discriminatorY)
    discriminatorX = Discriminator()
    discriminatorX = nn.DataParallel(discriminatorX)
    if torch.cuda.is_available():
        generatorX.cuda(device=device)
        generatorX_.cuda(device=device)
        generatorY.cuda(device=device)
        discriminatorY.cuda(device=device)
        discriminatorX.cuda(device=device)
    # Loading Training and Test Set Data
    trainLoader1, trainLoader2, trainLoader_cross, testLoader = data_loader()
    # MSE Loss and Optimizer
    criterion = nn.MSELoss()
    optimizer_g = optim.Adam(itertools.chain(generatorX.parameters(), generatorY.parameters()), lr=LEARNING_RATE, betas=(BETA1, BETA2))
    optimizer_d = optim.Adam(itertools.chain(discriminatorY.parameters(),discriminatorX.parameters()), lr=LEARNING_RATE, betas=(BETA1, BETA2))
    # Training Network
    dataiter = iter(testLoader)
    # NOTE(review): `.next()` is the Python-2 iterator protocol / old
    # DataLoader API; current torch requires `next(dataiter)`.
    gt_test, data_test = dataiter.next()
    input_test, dummy = data_test
    testInput = Variable(input_test.type(Tensor_gpu))
    batches_done = 0
    # Loss accumulators; NOTE(review): never appended to below.
    generator_loss = []
    discriminator_loss = []
    for epoch in range(NUM_EPOCHS_TRAIN):
        for i, (data, gt1) in enumerate(trainLoader_cross, 0):
            input, dummy = data
            groundTruth, dummy = gt1
            trainInput = Variable(input.type(Tensor_gpu))  # stands for X
            realImgs = Variable(groundTruth.type(Tensor_gpu))  # stands for Y
            # TRAIN DISCRIMINATOR
            discriminatorX.zero_grad()
            discriminatorY.zero_grad()
            fake_imgs = generatorX(trainInput)  # stands for Y'
            x1 = generatorY(torch.clamp(realImgs,0,1))  # stands for x'
            #
            # y2 = generatorX_(x1) # stands for y''
            # Real Images
            realValid = discriminatorY(realImgs)  # stands for D_Y
            # Fake Images (detached so generator grads are not built here)
            fakeValid = discriminatorY(fake_imgs.detach())  # stands for D_Y'
            # Real Images
            dx = discriminatorX(trainInput)  # stands for D_X
            # Fake Images
            dx1 = discriminatorX(x1.detach())  # stands for D_X'
            set_requires_grad([discriminatorY,discriminatorX], True)
            #computing losses
            #ad, ag = computeAdversarialLosses(discriminatorY,discriminatorX, trainInput, x1, realImgs, fake_imgs)
            adY = compute_d_adv_loss(realValid,fakeValid)
            adX = compute_d_adv_loss(dx,dx1)
            ad = adX + adY
            # ad.backward(retain_graph=True)
            # WGAN-GP penalties for both discriminators.
            gradient_penaltyY = compute_gradient_penalty(discriminatorY, realImgs, fake_imgs)
            gradient_penaltyX = compute_gradient_penalty(discriminatorX, trainInput, x1)
            # gradient_penalty.backward(retain_graph=True)
            gradient_penalty = gradient_penaltyY + gradient_penaltyX
            d_loss = computeDiscriminatorLossFor2WayGan(ad, gradient_penalty)
            d_loss.backward(retain_graph=True)
            torch.nn.utils.clip_grad_value_(itertools.chain(discriminatorY.parameters(),discriminatorX.parameters()),clip_value)
            optimizer_d.step()
            # Generators are only updated every 50 batches (critic-style schedule).
            if batches_done % 50 == 0:
                set_requires_grad([discriminatorY,discriminatorX], False)
                # TRAIN GENERATOR
                generatorX.zero_grad()
                generatorY.zero_grad()
                x2 = generatorY(torch.clamp(fake_imgs,0,1))  # stands for x''
                y2 = generatorX_(x1)  # stands for y''
                ag = compute_g_adv_loss(discriminatorY,discriminatorX, trainInput, x1, realImgs, fake_imgs)
                i_loss = computeIdentityMappingLoss(trainInput, x1, realImgs, fake_imgs)
                c_loss = computeCycleConsistencyLoss(trainInput, x2, realImgs, y2)
                g_loss = computeGeneratorLossFor2WayGan(ag, i_loss, c_loss)
                #set_requires_grad([discriminatorY,discriminatorX], False)
                # ag.backward(retain_graph=True)
                # i_loss.backward(retain_graph=True)
                # c_loss.backward(retain_graph=True)
                g_loss.backward()
                torch.nn.utils.clip_grad_value_(itertools.chain(generatorX.parameters(), generatorY.parameters()),clip_value)
                optimizer_g.step()
                # Free intermediate tensors before the logging/image pass.
                del ag,i_loss,c_loss,x2,y2 #,g_loss
                if torch.cuda.is_available() :
                    torch.cuda.empty_cache()
                else:
                    gc.collect()
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (
                    epoch + 1, NUM_EPOCHS_TRAIN, i + 1, len(trainLoader_cross), d_loss.item(), g_loss.item()))
                f = open("./models/log_Train.txt", "a+")
                f.write("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\n" % (
                    epoch + 1, NUM_EPOCHS_TRAIN, i + 1, len(trainLoader_cross), d_loss.item(), g_loss.item()))
                f.close()
            # Periodic snapshots: training images, checkpoints and a test batch.
            if batches_done % 50 == 0:
                for k in range(0, fake_imgs.data.shape[0]):
                    save_image(fake_imgs.data[k], "./models/train_images/2Way/2Way_Train_%d_%d_%d.png" % (epoch+1, batches_done+1, k+1),
                               nrow=1,
                               normalize=True)
                torch.save(generatorX.state_dict(),
                           './models/train_checkpoint/2Way/gan2_train_' + str(epoch) + '_' + str(i) + '.pth')
                torch.save(discriminatorY.state_dict(),
                           './models/train_checkpoint/2Way/discriminator2_train_' + str(epoch) + '_' + str(i) + '.pth')
                fake_test_imgs = generatorX(testInput)
                for k in range(0, fake_test_imgs.data.shape[0]):
                    save_image(fake_test_imgs.data[k],
                               "./models/train_test_images/2Way/2Way_Train_Test_%d_%d_%d.png" % (epoch, batches_done, k),
                               nrow=1, normalize=True)
                del fake_test_imgs
                if torch.cuda.is_available() :
                    torch.cuda.empty_cache()
                else:
                    gc.collect()
            batches_done += 1
            print("Done training discriminator on iteration: %d" % i)
    # TEST NETWORK
    batches_done = 0
    with torch.no_grad():
        psnrAvg = 0.0
        for j, (gt, data) in enumerate(testLoader, 0):
            input, dummy = data
            groundTruth, dummy = gt
            trainInput = Variable(input.type(Tensor_gpu))
            realImgs = Variable(groundTruth.type(Tensor_gpu))
            output = generatorX(trainInput)
            loss = criterion(output, realImgs)
            # PSNR assuming images normalised to [0, 1] (peak value 1).
            psnr = 10 * torch.log10(1 / loss)
            psnrAvg += psnr
            for k in range(0, output.data.shape[0]):
                save_image(output.data[k],
                           "./models/test_images/2Way/test_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1),
                           nrow=1,
                           normalize=True)
            for k in range(0, realImgs.data.shape[0]):
                save_image(realImgs.data[k],
                           "./models/gt_images/2Way/gt_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1),
                           nrow=1,
                           normalize=True)
            for k in range(0, trainInput.data.shape[0]):
                save_image(trainInput.data[k],
                           "./models/input_images/2Way/input_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1), nrow=1,
                           normalize=True)
            batches_done += 5
            print("Loss loss: %f" % loss)
            print("PSNR Avg: %f" % (psnrAvg / (j + 1)))
            # NOTE(review): this handle is re-bound below without being closed
            # first — the per-batch psnr_Score.txt handles leak; a `with` block
            # (or a single handle opened once) would fix this.
            f = open("./models/psnr_Score.txt", "a+")
            f.write("PSNR Avg: %f" % (psnrAvg / (j + 1)))
        f = open("./models/psnr_Score.txt", "a+")
        f.write("Final PSNR Avg: %f" % (psnrAvg / len(testLoader)))
        print("Final PSNR Avg: %f" % (psnrAvg / len(testLoader)))
    end_time = datetime.now()
    print(end_time - start_time)
# G_AB = Generator()
# G_BA = Generator()
# D_A = Discriminator()
# D_B = Discriminator()
# batches_done = 0
# prev_time = time.time()
# for epoch in range(opt.n_epochs):
# for i, batch in enumerate(dataloader):
# # Configure input
# imgs_A = Variable(batch["A"].type(FloatTensor))
# imgs_B = Variable(batch["B"].type(FloatTensor))
# # ----------------------
# # Train Discriminators
# # ----------------------
# optimizer_D_A.zero_grad()
# optimizer_D_B.zero_grad()
# # Generate a batch of images
# fake_A = G_BA(imgs_B).detach()
# fake_B = G_AB(imgs_A).detach()
# # ----------
# # Domain A
# # ----------
# # Compute gradient penalty for improved wasserstein training
# gp_A = compute_gradient_penalty(D_A, imgs_A.data, fake_A.data)
# # Adversarial loss
# D_A_loss = -torch.mean(D_A(imgs_A)) + torch.mean(D_A(fake_A)) + lambda_gp * gp_A
# # ----------
# # Domain B
# # ----------
# # Compute gradient penalty for improved wasserstein training
# gp_B = compute_gradient_penalty(D_B, imgs_B.data, fake_B.data)
# # Adversarial loss
# D_B_loss = -torch.mean(D_B(imgs_B)) + torch.mean(D_B(fake_B)) + lambda_gp * gp_B
# # Total loss
# D_loss = D_A_loss + D_B_loss
# D_loss.backward()
# optimizer_D_A.step()
# optimizer_D_B.step()
# if i % opt.n_critic == 0:
# # ------------------
# # Train Generators
# # ------------------
# optimizer_G.zero_grad()
# # Translate images to opposite domain
# fake_A = G_BA(imgs_B)
# fake_B = G_AB(imgs_A)
# # Reconstruct images
# recov_A = G_BA(fake_B)
# recov_B = G_AB(fake_A)
# # Adversarial loss
# G_adv = -torch.mean(D_A(fake_A)) - torch.mean(D_B(fake_B))
# # Cycle loss
# G_cycle = cycle_loss(recov_A, imgs_A) + cycle_loss(recov_B, imgs_B)
# # Total loss
# G_loss = lambda_adv * G_adv + lambda_cycle * G_cycle
# G_loss.backward()
# optimizer_G.step()
# # --------------
# # Log Progress
# # --------------
# # Determine approximate time left
# batches_left = opt.n_epochs * len(dataloader) - batches_done
# time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time) / opt.n_critic)
# prev_time = time.time()
# sys.stdout.write(
# "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, cycle: %f] ETA: %s"
# % (
# epoch,
# opt.n_epochs,
# i,
# len(dataloader),
# D_loss.item(),
# G_adv.data.item(),
# G_cycle.item(),
# time_left,
# )
# )
# # Check sample interval => save sample if there
# if batches_done % opt.sample_interval == 0:
# sample_images(batches_done)
# batches_done += 1
# def backward_G(self):
# """Calculate the loss for generators G_A and G_B"""
# lambda_idt = self.opt.lambda_identity
# lambda_A = self.opt.lambda_A
# lambda_B = self.opt.lambda_B
# # Identity loss
# if lambda_idt > 0:
# # G_A should be identity if real_B is fed: ||G_A(B) - B||
# self.idt_A = self.netG_A(self.real_B)
# self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# # G_B should be identity if real_A is fed: ||G_B(A) - A||
# self.idt_B = self.netG_B(self.real_A)
# self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
# else:
# self.loss_idt_A = 0
# self.loss_idt_B = 0
# # GAN loss D_A(G_A(A))
# self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# # GAN loss D_B(G_B(B))
# self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# # Forward cycle loss || G_B(G_A(A)) - A||
# self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# # Backward cycle loss || G_A(G_B(B)) - B||
# self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# # combined loss and calculate gradients
# self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
# self.loss_G.backward() |
import json
import logging
import os
import shutil
import torch
def set_logger(log_path):
    """Set the root logger to log INFO+ records to *log_path* and the console.

    In general, it is useful to have a logger so that every output to the
    terminal is saved in a permanent file. Here we save it to
    `model_dir/train.log`.

    Example
    -------
    $ logging.info("Starting training...")

    Parameters
    ----------
    log_path : str
        Where to log.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # Guard against duplicate handlers when called more than once.
    if root.handlers:
        return
    # Logging to a file
    to_file = logging.FileHandler(log_path)
    to_file.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    root.addHandler(to_file)
    # Logging to console
    to_console = logging.StreamHandler()
    to_console.setFormatter(logging.Formatter('%(message)s'))
    root.addHandler(to_console)
def save_dict_to_json(d, json_path):
    """Saves dict of floats in json file

    Parameters
    ----------
    d : dict
        Dictionary of float-castable values (np.float, int, float, etc.).
    json_path : str
        Path to JSON file.
    """
    with open(json_path, 'w') as f:
        # We need to convert the values to float for json (it doesn't accept
        # np.array, np.float, ...). Fixed: the original called `v.dump()`,
        # which plain ints/floats don't have and which contradicts the
        # documented "convert to float" intent.
        d = {k: float(v) for k, v in d.items()}
        json.dump(d, f, indent=4)
def save_checkpoint(model, state, is_best, checkpoint):
    """Saves model and training parameters at checkpoint + 'last.pth.tar'.
    If is_best==True, also saves checkpoint + 'best.pth.tar'.

    Parameters
    ----------
    model : torch.nn.Module
        Full model object, saved alongside the state dict as 'last_model.pth'.
    state : dict
        Contains model's state_dict, may contain other keys such as epoch, optimizer state_dict.
    is_best : bool
        True if it is the best model seen till now.
    checkpoint : str
        Folder where parameters are to be saved.
    """
    state_filepath = os.path.join(checkpoint, 'last.pth.tar')
    model_filepath = os.path.join(checkpoint, 'last_model.pth')
    if not os.path.exists(checkpoint):
        print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint))
        # makedirs also creates missing parents; os.mkdir failed on nested paths.
        os.makedirs(checkpoint)
    else:
        print("Checkpoint Directory exists! ")
    torch.save(state, state_filepath)
    torch.save(model, model_filepath)
    if is_best:
        shutil.copyfile(state_filepath, os.path.join(checkpoint, 'best.pth.tar'))
        shutil.copyfile(model_filepath, os.path.join(checkpoint, 'best_model.pth'))
def load_checkpoint(checkpoint, model, optimizer=None):
    """Loads model parameters (state_dict) from file_path. If optimizer is
    provided, loads state_dict of optimizer assuming it is present in checkpoint.

    Parameters
    ----------
    checkpoint : str
        Filename which needs to be loaded.
    model : torch.nn.Module
        Model for which the parameters are loaded.
    optimizer: torch.optim, optional
        Resume optimizer from checkpoint.

    Returns
    -------
    checkpoint : dict
        Loaded checkpoint contents (state dicts and any extra keys).

    Raises
    ------
    FileNotFoundError
        If *checkpoint* does not exist. (The original did `raise ("...")`,
        which tries to raise a plain string and is itself a TypeError.)
    """
    if not os.path.exists(checkpoint):
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    checkpoint = torch.load(checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_dict'])
    return checkpoint
|
class Foo(object):
    """Callable class whose shared counter mimics a C-style static variable."""
    # Class variable, shared by all instances of this class
    counter = 0
    def __call__(self):
        Foo.counter += 1
        # print() works in both Python 2 and 3 for a single argument;
        # the original `print Foo.counter` statement is Python-2-only syntax.
        print(Foo.counter)
# Create an object instance of class "Foo," called "foo".
foo = Foo()
# Each call goes through Foo.__call__ and bumps the shared class counter.
foo() #prints 1
foo() #prints 2
foo() #prints 3
"""
python doesn't have static variables but you can fake it by defining a callable class object and then using it as a function.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-09 15:07
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add per-weekday step counters and a tracking start date to UserStats.

    Auto-generated by Django; do not edit the operations by hand — create a
    follow-up migration instead.
    """

    dependencies = [
        ('steptracker', '0002_auto_20171215_1508'),
    ]

    operations = [
        migrations.AddField(
            model_name='userstats',
            name='friday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='monday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='saturday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='start_date',
            field=models.DateField(default=datetime.date.today),
        ),
        migrations.AddField(
            model_name='userstats',
            name='sunday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='thursday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='tuesday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userstats',
            name='wednesday_steps',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='userstats',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='steptracker.User', unique_for_date='day'),
        ),
    ]
|
from binario import*
from file import*
#Nao mexam
#Comprimento da chave (palavras Nk)
# Numero de rodadas (Nr)
#Exp. Tamanho da chave (Nb (Nr + 1) palavras)
#Nb=4
#Como a chave e de 128 bits, Nr=(Nk+6)=10, Nk=tamanhoDaPalavra/32, Expansao da Palavra= (4)*(10+1)=44
# AES key-schedule round constants (Rcon[i] = x^(i-1) in GF(2^8); only the
# first 11 entries are used for a 128-bit key).
Rcon = [
    0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
    0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,
    0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,
    0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,
]
# AES forward substitution box, 16x16: row = high nibble, column = low nibble.
Sbox = [
    [0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76],
    [0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0],
    [0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15],
    [0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75],
    [0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84],
    [0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF],
    [0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8],
    [0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2],
    [0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73],
    [0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB],
    [0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79],
    [0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08],
    [0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A],
    [0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E],
    [0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF],
    [0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16]
]
def separaBytes2(lista):
    """Split a 32-bit word (flat bit list) into its four 8-bit byte slices."""
    return [lista[pos * 8:(pos + 1) * 8] for pos in range(4)]
def substituicaoBytes (lista):
    """SubWord step: replace each byte of the word via the AES S-box, in place.

    NOTE(review): Python 2 code (`xrange`). Also, every iteration indexes
    `lista[0:4]` / `lista[4:8]` instead of the nibbles of `lista[x]`, so for
    x > 0 the lookup appears to use the wrong slice — verify against a
    reference AES key schedule before relying on this.
    """
    for x in xrange(0,4):
        lista[x]= string_to_bit_array(chr(Sbox[bit_to_int(lista[0:4])][bit_to_int(lista[4:8])]))
def deslocamentoLinhas(lista):
    """AES ShiftRows over a flat 16-element state (4 rows of 4), in place.

    Row r (elements 4r..4r+3) is rotated left by r positions; row 0 is
    untouched.
    """
    lista[4:8] = lista[5:8] + lista[4:5]      # row 1: rotate left 1
    lista[8:12] = lista[10:12] + lista[8:10]  # row 2: rotate left 2
    lista[12:16] = lista[15:16] + lista[12:15]  # row 3: rotate left 3
def rotacionaEsquerda(chave):
    """RotWord: rotate the 32-bit word (bit list) left by one byte (8 bits)."""
    # Equivalent to concatenating the tail from position 8 with the first byte.
    return chave[8:] + chave[:8]
def condensaChave(chave1, chave2, chave3, chave4):
    """Pack four round-key words into a single list."""
    return [chave1, chave2, chave3, chave4]
def keyExpansion(chaveK):
    """Expand a 128-bit key into the 11 AES round keys (Nb=4, Nk=4, Nr=10).

    NOTE(review): Python 2 code — `Rcon[y/4]` relies on integer `/` division.
    `separabytes` (all lowercase) is presumably provided by the star-imports
    above; confirm it is not a typo for separaBytes2.
    """
    chave=separabytes(chaveK)
    nr=10
    nk=4
    nb=4
    w=[]
    chaves=[]
    # Pre-size the round-key list (11 keys) and the word schedule (44 words).
    for k in range(0,11):
        chaves=chaves+[0]
    for k in range(0,44):
        w=w+[0]
    # The first Nk words are the raw key bytes, four bytes per word.
    for x in range(0,4):
        w[x]=chave[4*x]+chave[(4*x)+1]+chave[(4*x)+2]+chave[(4*x)+3]
    for y in range(4,44):
        aux=w[y-1]
        if(y%nk==0):
            # Every Nk-th word: RotWord, SubWord, then XOR with Rcon.
            aux2=rotacionaEsquerda(aux)
            aux2=separaBytes2(aux2)
            substituicaoBytes(aux2)
            aux=aux2[0]+aux2[1]+aux2[2]
            x=int_to_bit(Rcon[y/4])
            x=xor(aux2[3],x)
            aux=aux+x
        w[y]=xor(w[y-4],aux)
    # Regroup the 44 words into 11 round keys of four words each.
    for x in range(0,11):
        chaves[x]=w[4*x]+w[4*x+1]+w[4*x+2]+w[4*x+3]
    return chaves
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
STEP1
Напишите функцию, которая принимает на вход число, а в ответ выводит:
Fizz, если число делится на 3;
Buzz, если число делится на 5;
FizzBuzz, если число делится и на 3, и на 5.
Покажите вывод функции для чисел от 1 до 100.
STEP2
Рассмотрим все (целые, натуральные, скучные) числа от 1 до 10. Среди них на 3 и/или 5 делятся 3, 5, 6 и 9. Их сумма будет равна 23.
STEP3
Вопросы
Чему равна сумма всех чисел, делящихся на 3 и/или 5, если мы переберем все натуральные числа от 1 до 1000 (не включительно)? В ответ введите целое число
STEP4
Чему равна сумма всех чисел, делящихся на 3 и/или 5 и/или 7, если мы переберем все натуральные числа от 1 до 10000 (не включительно)? В ответ введите целое число'''
#STEP1
def fizz_buzz(num):
'''It returns str Fizz if num is divided by 3 to get quotient whithout remainder.
It returns str Buzz if num is divided by 5 to get quotient whithout remainder.
It returns str FizzBuzz if num is divided by 3 and 5 to get quotient whithout remainder.'''
if num % 3 == 0 and num % 5 == 0:
return 'FizzBuzz'
elif num % 3 == 0:
return 'Fizz'
elif num % 5 == 0:
return 'Buzz'
else: return False
def main(end):
    """Print '<i><label>' for every i in [1, end) that fizz-buzzes."""
    for number in range(1, end):
        label = fizz_buzz(number)
        if label:
            print(str(number) + label)
#STEP2
def fizz_buzz_st2(num):
'''It returns True if num is divided by 3 and/or 5 to get quotient whithout remainder.
'''
if num % 3 == 0 or num % 5 == 0:
return True
else: return False
def main_st2(end):
    """Sum all numbers in [1, end) divisible by 3 and/or 5."""
    # Divisibility test inlined from fizz_buzz_st2.
    return sum(i for i in range(1, end) if i % 3 == 0 or i % 5 == 0)
#STEP4
def fizz_buzz_st4(num, divisioner_list):
'''It returns True if num is divided by one of divisioner_list to get quotient whithout remainder.
:num - int
:divisioner_list - list of int
'''
for i in divisioner_list:
if num % i == 0: return True
return False
def main_st4(end):
    """Sum all numbers in [1, end) divisible by 3, 5 and/or 7."""
    # Divisibility test inlined from fizz_buzz_st4 with divisors (3, 5, 7).
    return sum(i for i in range(1, end)
               if any(i % d == 0 for d in (3, 5, 7)))
if __name__ == '__main__':
    #STEP1: print FizzBuzz labels for 1..99 (disabled).
    # main(100)
    #STEP2: sum of multiples of 3 and/or 5 below 10 (expected 23).
    print('Сумма всех (целые, натуральные, скучные) чисел, делящихся на 3 и/или 5, от 1 до 10 равна ', main_st2(10))
    #STEP3: same sum below 1000.
    print('Cумма всех чисел, делящихся на 3 и/или 5, если мы переберем все натуральные числа'
          ' от 1 до 1000 (не включительно) равна ', main_st2(1000))
    #STEP4: sum of multiples of 3, 5 and/or 7 below 10000.
    print('Cумма всех чисел, делящихся на 3 и/или 5 и/или 7, если мы переберем все натуральные числа от 1 до 10000 (не включительно)', main_st4(10000))
|
from unittest import TestCase
from torchagents import Policy
from torchagents import Agent
import numpy as np
import torch
class TestEpsGreedy(TestCase):
    """Checks that an epsilon-greedy agent mostly follows its policy."""
    def test_get_eps_greedy_action(self):
        class DumbPolicy(Policy):
            # Deterministic stub policy: the greedy action is always 0.
            def get_action(self, state: torch.Tensor) -> torch.Tensor:
                return torch.tensor([0])  # always return first action
        # Fixed seed so the exploration rate observed below is reproducible.
        torch.manual_seed(1337)
        policy = DumbPolicy(state_shape=(1,), num_actions=20)
        dummy = Agent(policy=policy, state_shape=(1,), num_actions=20,
                      off_policy=True, epsilon=0.05)
        num_test_actions = 100
        actions = np.zeros(num_test_actions)
        for i in range(num_test_actions):
            actions[i] = dummy.get_action(torch.tensor([0]))
        # With epsilon=0.05 about 95% of actions should be the greedy one
        # (plus occasional random hits on action 0); the band is seed-dependent.
        num_first = len(actions[actions == 0])
        p_first = num_first / num_test_actions
        self.assertTrue(0.93 < p_first < 0.97)
|
#coding=utf-8
import requests
import re
import xlwt
#解析http://ris.szpl.gov.cn/bol/数据
class ParseRISURL:
    """Scraper for the pre-sale project listing at http://ris.szpl.gov.cn/bol/.

    The target is an ASP.NET WebForm, so every POST must echo back the hidden
    __VIEWSTATE / __EVENTVALIDATION / __VIEWSTATEGENERATOR fields harvested
    from the previous response.
    """
    url = 'http://ris.szpl.gov.cn/bol/index.aspx'
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Host': 'ris.szpl.gov.cn',
        'Origin': 'http://ris.szpl.gov.cn',
        'Referer': 'http://ris.szpl.gov.cn/bol/index.aspx',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    data = {
        '__EVENTTARGET':'',
        '__EVENTARGUMENT':'',
        '__VIEWSTATE':'',
        '__VIEWSTATEGENERATOR':'',
        '__VIEWSTATEENCRYPTED':'',
        '__EVENTVALIDATION': '',
        'tep_name':'',
        'organ_name':'',
        'site_address':'',
        'AspNetPager1_input': '1',
        'AspNetPager1': 'go'
    }
    # Parsed rows accumulated across pages (class-level, shared by instances).
    pageDatas = []
    def init_url(self):
        """Fetch the first page and seed the hidden-form state and page counts."""
        result = requests.get(self.url, headers=self.header)
        self.update_VIEWSTATE(result.text)
        self.update_EVENTVALIDATION(result.text)
        self.update_VIEWSTATEGENERATOR(result.text)
        self.get_totalNum(result.text)
        self.get_totalPage(result.text)
    def update_VIEWSTATE(self, urlText):
        """Refresh the hidden __VIEWSTATE field from a response body."""
        self.data["__VIEWSTATE"] = re.findall('id="__VIEWSTATE".*?>', urlText)[0][24:-4]
    def update_VIEWSTATEGENERATOR(self, urlText):
        """Refresh the hidden __VIEWSTATEGENERATOR field from a response body."""
        self.data["__VIEWSTATEGENERATOR"] = re.findall('id="__VIEWSTATEGENERATOR".*?>', urlText)[0][33:-4]
    def update_EVENTVALIDATION(self, urlText):
        """Refresh the hidden __EVENTVALIDATION field from a response body."""
        self.data['__EVENTVALIDATION'] = re.findall('id="__EVENTVALIDATION".*?>', urlText)[0][30:-4]
    def update_pageNum(self, num):
        """Select the result page to request next."""
        self.data['AspNetPager1_input'] = num
    def get_totalPage(self, urlText):
        """Extract the total page count from the pager markup."""
        self.totalPage = int(re.findall('总共<b>.*?</b>', urlText)[0][5:-4])
    def get_currentPage(self, urlText):
        """Extract the current page number from the pager markup."""
        self.currentPage = int(re.findall('当前为第<b>.*?</b>', urlText)[0][7:-4])
    def get_totalNum(self, urlText):
        """Extract the total record count from the pager markup."""
        self.totalNum = int(re.findall('共<b>.*?</b>条', urlText)[0][4:-5])
    #http://ris.szpl.gov.cn/bol/certdetail.aspx? 预售项目
    #http://ris.szpl.gov.cn/bol/hezuo.aspx? 项目合作方资料
    #http://ris.szpl.gov.cn/bol/projectdetail.aspx? 项目详细资料
    def parseData(self, urlText):
        """Parse one result page into row dicts and append them to pageDatas.

        Fixed: the original scanned a leftover module-global `result` response
        instead of the `urlText` argument (and issued a stray network POST at
        class-definition time, whose response was never used here).
        """
        tds = re.findall('<td.*?</td>', urlText)
        try:
            # Rows are laid out as 6 <td> cells per record.
            for idx in range(0, len(tds) - 1, 6):
                temp = {
                    'id': re.findall("id=[0-9]*?'", tds[idx + 1])[0][3:-1],
                    'preLicense': re.findall("'>.*?</a>", tds[idx + 1])[0][2:-4],
                    'houseName': re.findall("'>.*?</a>", tds[idx + 2])[0][2:-4],
                    'developer': re.findall(">.*?<", tds[idx + 3])[0][1:-1],
                    'area': re.findall(">.*?<", tds[idx + 4])[0][1:-1],
                    'date': re.findall(">.*?<", tds[idx + 5])[0][1:-1],
                }
                self.pageDatas.append(temp)
        except Exception:
            # The original printed str(id) of the *builtin* id function here.
            print('--------ERROR parsing page data--------')
        return self.pageDatas
# Crawl every result page and dump the accumulated rows to a TSV-ish file.
ris = ParseRISURL()
ris.init_url()
#workbook = xlwt.Workbook(encoding='ascii')
#worksheet = workbook.add_sheet('House')
for i in range(0,ris.totalPage):
    try:
        # NOTE(review): mode "w+" truncates the file on every page; the final
        # content is still complete only because ris.pageDatas accumulates and
        # is rewritten in full each iteration. Opening once before the loop
        # (or using "a") would avoid the repeated rewrites.
        with open("D:\\house.txt", "w+", encoding='utf-8') as f:
            print(i)
            ris.update_pageNum(i)
            result = requests.post(ris.url,headers = ris.header, data= ris.data)
            # Echo the fresh ASP.NET hidden-form state before the next POST.
            ris.update_EVENTVALIDATION(result.text)
            ris.update_VIEWSTATEGENERATOR(result.text)
            ris.update_VIEWSTATE(result.text)
            out = ris.parseData(result.text)
            for idx in range(0,len(out)):
                f.write(out[idx]['id']+'\t'+out[idx]['preLicense']+'\t'+out[idx]['houseName']+'\t'+out[idx]['developer']+'\t'+out[idx]['area']+'\t'+out[idx]['date']+'\n')
            '''
            worksheet.write(idx, 0, out[idx]['id'])
            worksheet.write(idx, 1, out[idx]['preLicense'])
            worksheet.write(idx, 2, out[idx]['houseName'])
            worksheet.write(idx, 3, out[idx]['developer'])
            worksheet.write(idx, 4, out[idx]['area'])
            worksheet.write(idx, 5, out[idx]['date'])
            '''
    finally:
        #workbook.save("D:\\house.xls")
        # NOTE(review): redundant with the `with` block above, and `f` is
        # unbound here if open() itself raised — this close belongs removed.
        f.close()
|
# coding: utf-8
# 14. 先頭からN行を出力
# 自然数Nをコマンドライン引数などの手段で受け取り,入力のうち先頭のN行だけを表示せよ.確認にはheadコマンドを用いよ.
# Tab-separated temperature-record file whose head is previewed.
fname = 'hightemp.txt'
with open(fname) as f:
    lines = f.readlines()
# Prompt until a numeric N is entered, then echo the first N lines.
while True:
    input_str = input('Please Enter Number(max: ' + str(len(lines)) + '): ')
    if input_str.isdigit():
        # Lines already end with '\n', so suppress print's own newline.
        for line in lines[:int(input_str)]:
            print(line, end='')
        break
    # Non-numeric input: complain and prompt again.
    print(input_str + ' is invalid data.')
# Please Enter Number(max: 24): 5
# 高知県 江川崎 41 2013-08-12
# 埼玉県 熊谷 40.9 2007-08-16
# 岐阜県 多治見 40.9 2007-08-16
# 山形県 山形 40.8 1933-07-25
# 山梨県 甲府 40.7 2013-08-10
# By UNIX commands
# $ head -n 5 hightemp.txt
# 高知県 江川崎 41 2013-08-12
# 埼玉県 熊谷 40.9 2007-08-16
# 岐阜県 多治見 40.9 2007-08-16
# 山形県 山形 40.8 1933-07-25
# 山梨県 甲府 40.7 2013-08-10
|
import numpy as np
import matplotlib.pyplot as plt
# Environment steps per recorded reward sample; scales the plot's x-axis.
batch_size =200
def get_len(data):
    """Count leading entries before the first run of three consecutive zeros.

    Returns len(data) - 1 when no such run exists (matching the original's
    behaviour for zero-free input). Fixed: the original indexed data[i + 2]
    with i running up to len(data) - 2, raising IndexError whenever the
    sequence ends in two zeros without an earlier triple-zero run.
    """
    cnt = 0
    for i in range(len(data) - 1):
        # Guard the third index so the window never reads past the end.
        if i + 2 < len(data) and data[i] == 0 and data[i + 1] == 0 and data[i + 2] == 0:
            break
        cnt += 1
    return cnt
# Load per-checkpoint reward statistics and strip the zero padding left by
# the fixed-size recording buffers.
avg = np.load('reward_data_avg.npy')
avg = avg[avg!=0]
min_ = np.load('reward_data_min.npy')
min_ = min_[min_!=0]
max_ = np.load('reward_data_max.npy')
max_ = max_[max_!=0]
# NOTE(review): min_ and max_ are sliced with get_len(avg); if their padded
# lengths ever differ from avg the curves will misalign — confirm.
plt.plot(np.arange(get_len(avg))*batch_size,avg[:get_len(avg)],label='avg')
plt.plot(np.arange(get_len(avg))*batch_size,min_[:get_len(avg)],label='min')
plt.plot(np.arange(get_len(avg))*batch_size,max_[:get_len(avg)],label='max')
plt.legend(loc='lower right')
plt.show()
|
import re
import csv
# Frequency-count tokens from ex3.txt and print them sorted by count.
with open('ex3.txt','rt') as fin:
    s=fin.read()
# NOTE(review): the pattern '\w' matches SINGLE word characters, so this
# counts characters, not words; if word counts were intended, use r'\w+'.
s=re.findall('\w',s)
wc=[]
print(s)
for w in set(s):
    # s.count(w) rescans the whole list per unique token (O(n^2));
    # collections.Counter would do this in one pass.
    print (w, s.count(w))
    wc.append([w,s.count(w)])
#print(wc)
swc=sorted(wc , key=lambda x:x[1], reverse=True)
print(swc)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 09:58:01 2016
@author: d_floriello
Self-PUN-computizer
"""
import pandas as pd
import numpy as np
from collections import OrderedDict
import datetime
import statsmodels.api
###############################################################################
def divide_in_days(CMP):
    """Pivot a date-indexed single-column frame into one column per weekday.

    Parameters
    ----------
    CMP : DataFrame with a date-like index and one value column.

    Returns
    -------
    DataFrame with columns 'lun'..'dom' (Monday..Sunday); shorter weekday
    columns are padded with NaN to the longest one.

    Fixes vs. the original: deprecated/removed `.ix` positional indexing is
    replaced by `.iloc`, and padding now appends as many NaNs as needed (the
    original appended at most ONE, which crashed DataFrame construction when
    weekday counts differed by more than one).
    """
    dow = ['lun', 'mar', 'mer', 'gio', 'ven', 'sab', 'dom']
    buckets = [[] for _ in range(7)]
    for i in range(CMP.shape[0]):
        # Index entries stringify as 'YYYY-MM-DD ...', so slice out the date.
        dt = datetime.date(int(str(CMP.index[i])[:4]),
                           int(str(CMP.index[i])[5:7]),
                           int(str(CMP.index[i])[8:10]))
        buckets[dt.weekday()].append(CMP.iloc[i].values[0])
    longest = max(len(b) for b in buckets)
    for b in buckets:
        while len(b) < longest:
            b.append(np.nan)
    day = OrderedDict(zip(dow, buckets))
    return pd.DataFrame.from_dict(day)
###############################################################################
def de_trend(df, trend):
    """Subtract a fitted trend value from each row of *df*.

    df    : DataFrame with a DatetimeIndex (reads df.index.month).
    trend : callable mapping an array of positions to trend values
            (e.g. a numpy poly1d fit).

    Returns a DataFrame of the de-trended rows.

    NOTE(review): the trend is sampled at df.shape[0] points but indexed
    by (month - 1), which assumes data starting in January and spanning
    at most 12 rows — confirm against callers.
    """
    detrended = []
    yh = trend(np.linspace(0, df.shape[0], df.shape[0]))
    for i in range(df.shape[0]):
        correction = yh[(df.index.month[i] - 1)]
        print('month {} and correction {}'.format(df.index.month[i], correction))
        # .iloc replaces the removed pandas .ix accessor (positional here).
        detrended.append(df.iloc[i] - correction)
    return pd.DataFrame(detrended)
###############################################################################
def remainderizer(df):
    """Split *df* into a monthly trend, weekday seasonality and remainder.

    df : single-column DataFrame with a DatetimeIndex.

    Returns (mp, seas, rem):
      mp   -- array of monthly means (the trend component),
      seas -- per-month weekday mean/std table, indexed by weekday name,
      rem  -- standardized remainder after removing trend and weekday effect.
    """
    ### compute trend in months:
    diz = OrderedDict()
    dow = ['lun', 'mar', 'mer', 'gio', 'ven', 'sab', 'dom']
    mp = np.array(df.resample('M').mean())
    dt = []
    for i in range(df.shape[0]):
        # NOTE(review): indexing by (month - 1) assumes the data starts in
        # January and spans at most 12 months — confirm against callers.
        mon = mp[(df.index.month[i] - 1)]
        print('month {} and correction {}'.format(df.index.month[i], mon))
        # .iloc replaces the removed pandas .ix accessor (positional here).
        dt.append(df.iloc[i] - mon)
    dt = pd.DataFrame(dt).set_index(df.index)
    ### remove monthly seasonality:
    MONTHS = np.unique(dt.index.month)
    des = []
    for m in MONTHS:
        lm = []
        # .loc replaces .ix for boolean-mask selection.
        loc_dt = dt.loc[dt.index.month == m]
        dd = divide_in_days(loc_dt)
        diz[str(m)+'_mean'] = dd.mean()
        diz[str(m)+'_std'] = dd.std()
        for j in range(loc_dt.shape[0]):
            die = datetime.date(int(str(loc_dt.index[j])[:4]),
                                int(str(loc_dt.index[j])[5:7]),
                                int(str(loc_dt.index[j])[8:10]))
            giorno = die.weekday()
            # standardize by this weekday's mean/std within the month
            x = (loc_dt.iloc[j] - dd[dow[giorno]].mean())/dd[dow[giorno]].std()
            lm.append(x)
        des.append(lm)
    flattened_des = [item for sublist in des for item in sublist]
    rem = pd.DataFrame(flattened_des)
    seas = pd.DataFrame.from_dict(diz).set_index([dow])
    return mp, seas, rem
###############################################################################
def Forecast_(pun, year, month, day):
    """One-step-ahead PUN forecast for the date (year, month, day).

    pun : single-column DataFrame of historical prices (DatetimeIndex).

    Returns a 5-tuple of bands around the point forecast:
    (-2*sigma, -1*sigma, forecast, +1*sigma, +2*sigma).
    """
    ts = np.array(pun)
    dow = ['lun', 'mar', 'mer', 'gio', 'ven', 'sab', 'dom']
    dt = datetime.datetime(year, month, day)
    mm, sea, remn = remainderizer(pun)
    # AR(1) model on the standardized remainder.
    # NOTE(review): statsmodels.tsa.ARMA was removed in statsmodels 0.13;
    # current API is statsmodels.tsa.arima.model.ARIMA(..., order=(1, 0, 0)).
    arma = statsmodels.api.tsa.ARMA(remn.values.ravel(), (1,0)).fit()
    resid = remn.values.ravel() - arma.predict()
    pred = arma.predict(start = ts.size, end = ts.size)
    # forecast = last monthly mean + weekday seasonal effect + AR prediction
    # (.loc replaces the removed pandas .ix for label-based access)
    forecasted = mm[-1] + sea[str(month)+'_mean'].loc[dow[dt.weekday()]] + pred
    #### sampled sigma is a bit overestimated
    # NOTE(review): month 10 (October) is hard-coded below; it likely should
    # be the `month` argument — confirm before changing.
    sigma_hat = (np.std(pun.loc[pun.index.month == 10] - mm[-1])
                 + sea[str(month)+'_std'].loc[dow[dt.weekday()]]
                 + np.std(resid))
    return (forecasted - 2*sigma_hat, forecasted - sigma_hat, forecasted,
            forecasted + sigma_hat, forecasted + 2*sigma_hat)
###############################################################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.