text stringlengths 38 1.54M |
|---|
__author__ = "Quy Doan"
import sys

# Usage: script.py <input_file> <output_file>
input_file = sys.argv[1]
output_file = sys.argv[2]

with open(input_file, "r") as reader, open(output_file, "w") as writer:
    # First line of the input holds the number of test cases.
    num_of_test = int(reader.readline())
    for test in range(num_of_test):
        # Each case supplies three integers; only `s` is used here
        # (the answer is simply the sequence 1..s).
        k, c, s = map(int, reader.readline().split())
        res = [str(i + 1) for i in range(s)]
        writer.write("Case #" + str(test + 1) + ": " + " ".join(res) + "\n")
|
# Generated by Django 2.0.6 on 2018-06-28 12:00
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add Member.sex and re-declare Member.total_household (auto-generated)."""

    dependencies = [
        ('ntakibariapp', '0004_auto_20180627_2118'),
    ]

    operations = [
        migrations.AddField(
            model_name='member',
            name='sex',
            field=models.CharField(choices=[('F', 'Female'), ('M', 'Male')], db_index=True, default=None, max_length=9),
        ),
        migrations.AlterField(
            model_name='member',
            name='total_household',
            field=models.PositiveIntegerField(db_column='Total Person in your house', db_index=True, default=0),
        ),
    ]
|
for循环语句
1、for语句的结构:
Python语言中的for语句与其他高级程序设计语言有很大的不同,其他高级语言for语句要用循环控制变量来控制循环。Python中for语句是通过循环遍历某一序列对象(字符串、列表、元组等)来构建循环,循环结束的条件就是对象被遍历完成。
for语句的形式如下:
for <循环变量> in <循环对象>:
<语句1>
else:
<语句2>
else语句中的语句2只有循环正常退出(遍历完所有遍历对象中的值)时执行。
# 迭代式循环:for,语法如下
# for i in range(10):
# 缩进的代码块
# break与continue(同上)
# 循环嵌套
实例
# Example: classify each number in [10, 20) as prime or composite.
for num in range(10, 20):
    for i in range(2, num):
        if num % i == 0:
            # BUG FIX: use integer division -- "/" yields a float in
            # Python 3 even though the factor is always exact here.
            j = num // i
            print("%d等于%d*%d" % (num, i, j))
            break
    else:
        # The for-else branch runs only when the loop completed without
        # hitting break, i.e. no divisor was found.
        print("%d是一个质数" % num)
# Print a triangular multiplication table: row `row` lists products up to row*row.
for row in range(1, 10):
    for col in range(1, row + 1):
        print('%s*%s=%s' % (row, col, row * col), end=' ')
    print()  # terminate the current row
2、range()函数
for语句的循环对象可以是列表、元组以及字符串,可以通过range()函数产生一个迭代值,以完成计数循环。
range( [start,] stop [, step])
实例:
for i in range(5):
print(i)
'''
0
1
2
3
4
'''
for i in range(0,10,3):
print(i)
'''
0
3
6
9
'''
for语句使用range()函数可以构建基于已知循环次数的循环程序,也可以以range()生成的数字作为索引来访问列表、元组、字符串中的值。
需要注意的是,range() 函数返回的对象表现得像一个列表,但事实上它并不是:range()并不会在调用时一次性生成整个序列,而是在遍历时按需计算出每个值,以减少内存的占用。其本质是一个惰性的、不可变的序列对象(可迭代对象),而不是迭代器。
如:
>>>range(10)
range(0, 10)
>>> type(range(10))
<class 'range'>
>>> list(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
循环嵌套
循环嵌套是指:在一个循环体里面嵌入另一循环。
实例1:通过while循环打印99乘法表
# Example 1: 9x9 multiplication table built with nested while loops.
outer = 1
while outer <= 9:
    inner = 1
    while inner <= outer:
        print('%d*%d=%d' % (inner, outer, inner * outer), end='\t')
        inner += 1
    print()  # end the current row
    outer += 1
实例2:通过for循环打印99乘法表
# Example 2: 9x9 multiplication table built with nested for loops.
# NOTE: a for statement rebinds its loop variable on every iteration, so the
# original "i += 1" / "j += 1" lines had no effect and were removed -- they
# would only mislead readers of this tutorial.
for j in range(1, 10):
    for i in range(1, j + 1):
        print('%d*%d=%d' % (i, j, i * j), end='\t')
    print()
'''
1*1=1
1*2=2 2*2=4
1*3=3 2*3=6 3*3=9
1*4=4 2*4=8 3*4=12 4*4=16
1*5=5 2*5=10 3*5=15 4*5=20 5*5=25
1*6=6 2*6=12 3*6=18 4*6=24 5*6=30 6*6=36
1*7=7 2*7=14 3*7=21 4*7=28 5*7=35 6*7=42 7*7=49
1*8=8 2*8=16 3*8=24 4*8=32 5*8=40 6*8=48 7*8=56 8*8=64
1*9=9 2*9=18 3*9=27 4*9=36 5*9=45 6*9=54 7*9=63 8*9=72 9*9=81
'''
实例3:打印金字塔
#分析
'''
#max_level=5
* #current_level=1,空格数=4,*号数=1
*** #current_level=2,空格数=3,*号数=3
***** #current_level=3,空格数=2,*号数=5
******* #current_level=4,空格数=1,*号数=7
********* #current_level=5,空格数=0,*号数=9
#数学表达式
空格数=max_level-current_level
*号数=2*current_level-1
'''
# Implementation: print a centered pyramid of max_level rows.
max_level = 5
for current_level in range(1, max_level + 1):
    # Leading spaces: max_level - current_level
    for i in range(max_level - current_level):
        print(' ', end='')
    # Stars on this row: 2*current_level - 1
    # (BUG FIX: the original comment here wrongly said "print spaces" --
    # it was copy-pasted from the loop above.)
    for j in range(2 * current_level - 1):
        print('*', end='')
    print()
|
from django.conf.urls import url, include
from .views import all_features, create_feature, feature_detail, feature_upvote

# URL routes for the features app.
urlpatterns = [
    # Feature list.
    url(r'^$', all_features, name='features'),
    # Create a new feature request.
    url(r'^new/$', create_feature, name='new_feature'),
    # Detail view for one feature, addressed by primary key.
    url(r'^(?P<pk>\d+)/$', feature_detail, name='feature_detail'),
    # Upvote a feature.
    # NOTE(review): this pattern is not anchored with '^' like the others --
    # presumably intentional, but worth confirming.
    url(r'upvote/(?P<feature_id>[0-9]+)/$', feature_upvote, name='feature_upvote'),
]
# Generated by Django 3.0.7 on 2021-04-05 14:12
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial schema for the posts app: Category, SubCateory, PostInfo, PostImage.

    NOTE(review): the model name 'SubCateory' is misspelled, but migrations
    are frozen history and the FK below references it -- do not rename here.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=200, verbose_name='Nom Catégorie')),
                ('slug', models.SlugField(max_length=200, unique=True, verbose_name='Slug')),
            ],
            options={
                'verbose_name': 'Catégorie',
                'verbose_name_plural': 'Catégories',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='SubCateory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=200, verbose_name='Sous Catégorie')),
                ('slug', models.SlugField(max_length=200, unique=True, verbose_name='Slug')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_categories', to='posts.Category', verbose_name='Catégorie')),
            ],
            options={
                'verbose_name': 'Sous Catégorie',
                'verbose_name_plural': 'Sous Catégories',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='PostInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('show', models.BooleanField(default=True, verbose_name='Afficher')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Date de Création')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Date de dernière mise à jour')),
                ('image_thumbnail', models.ImageField(blank=True, upload_to='images/blog/%Y/%m/%d', verbose_name='Photo Principale')),
                ('content', models.TextField()),
                ('subCategory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.SubCateory', verbose_name='Sous Catégorie')),
            ],
            options={
                'verbose_name': 'Post Info',
                'verbose_name_plural': 'Posts Info',
            },
        ),
        migrations.CreateModel(
            name='PostImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('images', models.FileField(upload_to='images/blog/%Y/%m/%d', verbose_name='Images associées')),
                ('post', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='posts.PostInfo', verbose_name='Article')),
            ],
        ),
    ]
|
'''
Created on February 9th, 2018
author: Michael Rodriguez
sources: http://docs.fetchrobotics.com/
description: Module to monitor keyboard activity for ROS
'''
# External Imports
import rospy
# Local Imports
from std_msgs.msg import Int32
from geometry_msgs.msg import PointStamped
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Quaternion
import tf
class PointPublisher:
def __init__(self, ar_tag_frame, point_topic):
'''
Description: establishes the variabes included in the class
Input: self <Class>, ar_tag_frame <String>, point topic <Object>
Return: None
'''
rospy.Subscriber(point_topic, PointStamped, self.point_callback)
self.tf = tf.TransformListener()
self.ar_tag_frame = ar_tag_frame
self.last_seq = -1
self.num_points = 0
self.points = []
self.pubs = []
self.marker_pubs = []
def point_callback(self, data):
'''
Description: This function takes point data and
calls ROS functions to publish the point
Input: self <Class>, point data
Return: None
'''
point = data
self.points.append(point)
point_pub = rospy.Publisher('/object_point' + str(data.header.seq), PointStamped, queue_size=10)
marker_pub = rospy.Publisher('/object_point_marker' + str(data.header.seq), Marker, queue_size = 10)
self.pubs.append(point_pub)
self.marker_pubs.append(marker_pub)
self.num_points += 1
print "Point " + str(data.header.seq) +" clicked"
def main():
    '''
    Description: establishes a ROS connection and opens a point publisher.
    It loops continuously, publishing updated ROS points, creating
    associated markers, and keeping track of odometry and orientation.
    Input: None
    Return: None
    '''
    rospy.init_node('point_publisher', anonymous=True)
    point_publisher = PointPublisher('/tag_0', '/clicked_point')
    num_pub = rospy.Publisher('/object_point_num', Int32, queue_size=10)
    rate = rospy.Rate(10)  # loop at 10 Hz
    while not rospy.is_shutdown():
        # Broadcast how many points have been clicked so far.
        num_pub.publish(point_publisher.num_points)
        for idx in range(point_publisher.num_points):
            point = point_publisher.points[idx]
            point_publisher.pubs[idx].publish(point)
            # Build a small blue sphere marker at the clicked position.
            marker = Marker()
            marker.header.seq = idx
            marker.header.stamp = rospy.Time.now()
            marker.header.frame_id = point.header.frame_id
            marker.ns = "/clicked_points/"
            marker.id = idx
            marker.type = 2    # sphere
            marker.action = 0  # add/modify
            marker.pose.position = point.point
            # Identity orientation (roll = pitch = yaw = 0).
            quat = tf.transformations.quaternion_from_euler(0, 0, 0)
            orientation = Quaternion()
            orientation.x = quat[0]
            orientation.y = quat[1]
            orientation.z = quat[2]
            orientation.w = quat[3]
            marker.pose.orientation = orientation
            marker.scale.x = marker.scale.y = marker.scale.z = .01
            marker.color.r = 0.0
            marker.color.g = 0.0
            marker.color.b = 1.0
            marker.color.a = 1.0
            marker.lifetime = rospy.Duration.from_sec(1)
            marker.frame_locked = True
            point_publisher.marker_pubs[idx].publish(marker)
        rate.sleep()


if __name__ == "__main__":
    main()
|
from typing import Callable, Dict, Any, Iterable, Tuple, List
import numpy
import pandas
from pandas import DataFrame, SparseDataFrame, Categorical
from modeling import categorical_util
class GridSearchCVResults:
    """Accumulates per-fold scores (plus arbitrary auxiliary metrics) for one
    hyper-parameter combination of a grid search.

    Attributes:
        params: the hyper-parameter dict these results belong to.
        scores: one score per completed fold.
        attrs: one dict of auxiliary metrics per completed fold.
    """

    def __init__(self,
                 params: Dict):
        self.params = params
        self.attrs: List[Dict[str, Any]] = []
        self.scores: List[float] = []

    def add(self, score, **attrs) -> None:
        """Record the score and auxiliary metrics of one fold."""
        self.scores.append(score)
        self.attrs.append(attrs)

    def score_avg(self) -> float:
        """Mean score over all folds, or NaN when nothing was recorded."""
        return numpy.average(self.scores) if self.scores else numpy.nan

    def attr_avg(self, attr_name: str) -> float:
        """Mean of one auxiliary metric over all folds, or NaN when empty."""
        # BUG FIX: the original used "len(self.scores) is 0" -- identity
        # comparison against an int literal is not guaranteed by the language
        # (and raises SyntaxWarning on modern CPython). Use truthiness.
        if not self.scores:
            return numpy.nan
        vals = [attrs[attr_name] for attrs in self.attrs]
        return numpy.average(vals)

    def all_attr_avg(self) -> Dict[str, float]:
        """Per-key means of the auxiliary metrics.

        Assumes every fold recorded the same keys (uses the first fold's).
        NOTE(review): returns NaN (a float, not a dict) when empty -- kept
        for backward compatibility with existing callers.
        """
        if not self.scores:
            return numpy.nan
        return {k: self.attr_avg(k) for k in self.attrs[0]}

    def avg(self) -> Tuple[float, Dict[str, float]]:
        """Return (mean score, per-attribute means); (NaN, {}) when empty."""
        if not self.scores:
            return numpy.nan, {}
        return self.score_avg(), self.all_attr_avg()

    def __str__(self):
        return f"{self.params}, score: {self.score_avg():.3f}"

    def __repr__(self):
        # BUG FIX: the original f-string was missing the closing parenthesis.
        return f"({self.params}, {self.score_avg():.3f})"
|
#!/usr/bin/env python
import sys
sys.path.append("/home2/data/Projects/CWAS/share/lib/surfwrap")
import os
from os import path
from os import path as op
from surfwrap import SurfWrap, io
import numpy as np
import nibabel as nib
###
# Setup
strategy = "compcor"
scans = ["short", "medium"]
hemis = ["lh", "rh"]
study = "iq"
print "strategy: %s; scans: %s" % (strategy, ",".join(scans))
basedir = "/home2/data/Projects/CWAS/nki/cwas"
# Output prefixes
obase = "/home2/data/Projects/CWAS/figures"
odir = path.join(obase, "fig_04")
if not path.exists(odir): os.mkdir(odir)
# Distance Directory
kstr = "kvoxs_fwhm08_to_kvoxs_fwhm08"
dirname = "%s_%s" % (strategy, kstr)
distdirs = [ path.join(basedir, scan, dirname) for scan in scans ]
## WITHOUT GLOBAL
# MDMR Directories
mname = "iq_age+sex+meanFD.mdmr"
cname = "cluster_correct_v05_c05"
factor = "FSIQ"
# Input pfile
mdmrdirs = [ path.join(distdir, mname) for distdir in distdirs ]
pfiles1 = [ path.join(mdmrdir, cname, "easythresh", "thresh_zstat_%s.nii.gz" % factor) for mdmrdir in mdmrdirs ]
# Intermediate surface files
easydirs = [ path.join(mdmrdir, cname, "easythresh") for mdmrdir in mdmrdirs ]
for easydir in easydirs:
surfdir = path.join(easydir, "surfs")
if not path.exists(surfdir):
os.mkdir(surfdir)
cmd = "./x_vol2surf.py %s/zstat_%s.nii.gz %s/thresh_zstat_%s.nii.gz %s/surf_thresh_zstat_%s" % (easydir, factor, easydir, factor, surfdir, factor)
print cmd
#os.system(cmd)
sfiles = [ path.join(easydir, "surfs/surf_thresh_zstat_%s" % factor) for easydir in easydirs ]
sfiles1 = {
"short": { hemi : "%s_%s.nii.gz" % (sfiles[0], hemi) for hemi in hemis },
"medium": { hemi : "%s_%s.nii.gz" % (sfiles[1], hemi) for hemi in hemis },
}
## WITH GLOBAL
# MDMR Directories
mname = "iq_age+sex+meanFD+meanGcor.mdmr"
cname = "cluster_correct_v05_c05"
factor = "FSIQ"
# Input pfile
mdmrdirs = [ path.join(distdir, mname) for distdir in distdirs ]
pfiles2 = [ path.join(mdmrdir, cname, "easythresh", "thresh_zstat_%s.nii.gz" % factor) for mdmrdir in mdmrdirs ]
# Intermediate surface files
easydirs = [ path.join(mdmrdir, cname, "easythresh") for mdmrdir in mdmrdirs ]
for easydir in easydirs:
surfdir = path.join(easydir, "surfs")
if not path.exists(surfdir):
os.mkdir(surfdir)
cmd = "./x_vol2surf.py %s/zstat_%s.nii.gz %s/thresh_zstat_%s.nii.gz %s/surf_thresh_zstat_%s" % (easydir, factor, easydir, factor, surfdir, factor)
print cmd
#os.system(cmd)
sfiles = [ path.join(easydir, "surfs/surf_thresh_zstat_%s" % factor) for easydir in easydirs ]
sfiles2 = {
"short": { hemi : "%s_%s.nii.gz" % (sfiles[0], hemi) for hemi in hemis },
"medium": { hemi : "%s_%s.nii.gz" % (sfiles[1], hemi) for hemi in hemis },
}
###
###
# Overlap
print "...overlap"
def get_range(data):
    """Return [min, max] over the nonzero values of *data*.

    When *data* is all zeros (max == 0) there are no nonzero values, so
    [0, 0] is returned to keep the result a valid two-element range.
    """
    data_max = data.max()
    if data_max == 0:
        return [data_max, data_max]
    return [data[data.nonzero()].min(), data_max]
print "...loop through scans"
for i,scan in enumerate(scans):
###
# Get individual percentile maps
print "individual data maps"
# MDMR w/o
mdmr1_lh = io.read_scalar_data(sfiles1[scan]['lh'])
mdmr1_rh = io.read_scalar_data(sfiles1[scan]['rh'])
# MDMR w/
mdmr2_lh = io.read_scalar_data(sfiles2[scan]['lh'])
mdmr2_rh = io.read_scalar_data(sfiles2[scan]['rh'])
###
###
# Create overlap
print "creating and saving overlap"
# Threshold MDMR w/o global
mdmr1_lh[mdmr1_lh.nonzero()] = 1
mdmr1_rh[mdmr1_rh.nonzero()] = 1
# Threshold MDMR w/ global
mdmr2_lh[mdmr2_lh.nonzero()] = 2
mdmr2_rh[mdmr2_rh.nonzero()] = 2
# Overlap
overlap_lh = mdmr1_lh[:] + mdmr2_lh[:]
overlap_rh = mdmr1_rh[:] + mdmr2_rh[:]
# Save
outfile = op.join(odir, "overlaps_scan_%s.npz" % scan)
np.savez(outfile, lh=overlap_lh, rh=overlap_rh)
###
###
|
"""
This module contains a function to download every one-minute time window
where there is an LFE recorded, stack the signal over all the LFEs, cross
correlate each window with the stack, sort the LFEs and keep only the best
We also save the value of the maximum cross correlation for each LFE
"""
import obspy
from obspy import UTCDateTime
from obspy.core.stream import Stream
from obspy.signal.cross_correlation import correlate
import matplotlib.cm as cm
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from math import cos, pi, sin, sqrt
from get_data import get_from_IRIS, get_from_NCEDC
from stacking import linstack
def compute_templates(filename, TDUR, filt, ratios, dt, ncor, window, \
winlength, nattempts, waittime, method='RMS'):
"""
This function computes the waveform for each template, cross correlate
them with the stack, and keep only the best to get the final template
that will be used to find LFEs
Input:
type filename = string
filename = Name of the template
type TDUR = float
TDUR = Time to add before and after the time window for tapering
type filt = tuple of floats
filt = Lower and upper frequencies of the filter
type ratios = list of floats
ratios = Percentage of LFEs to be kept for the final template
type dt = float
dt = Time step for resampling
type ncor = integer
ncor = Number of points for the cross correlation
type window = boolean
window = Do we do the cross correlation on the whole seismogram
or a selected time window?
type winlength = float
winlength = Length of the window to do the cross correlation
type nattempts = integer
nattempts = Number of times we try to download data
type waittime = positive float
waittime = Time to wait between two attempts at downloading
type method = string
method = Normalization method for linear stack (RMS or MAD)
Output:
None
"""
# To transform latitude and longitude into kilometers
a = 6378.136
e = 0.006694470
lat0 = 41.0
lon0 = -123.0
# dx / dy: kilometers per degree of longitude / latitude at (lat0, lon0)
dx = (pi / 180.0) * a * cos(lat0 * pi / 180.0) / sqrt(1.0 - e * e * \
sin(lat0 * pi / 180.0) * sin(lat0 * pi / 180.0))
dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(lat0 * \
pi / 180.0) * sin(lat0 * pi / 180.0)) ** 1.5)
# Get the names of the stations which have a waveform for this LFE family
file = open('../data/Plourde_2015/detections/' + filename + \
'_detect5_cull.txt')
first_line = file.readline().strip()
staNames = first_line.split()
file.close()
# Get the time of LFE detections
# NOTE(review): np.float / np.int were removed in NumPy >= 1.24; the
# builtin float / int should be used here -- confirm the pinned version.
LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \
'_detect5_cull.txt', \
dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \
'formats': (np.float, '|S6', np.int, np.float, np.float)}, \
skiprows=2)
# Get the network, channels, and location of the stations
staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \
sep=r'\s{1,}', header=None)
staloc.columns = ['station', 'network', 'channels', 'location', \
'server', 'latitude', 'longitude']
# Get the location of the source of the LFE
LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt', \
dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH', \
'eZ', 'nb'), \
'formats': ('S13', 'S3', np.float, np.float, np.float, \
np.float, np.float, np.int)}, \
skiprows=1)
# Find this family in the template list to get its source coordinates (km)
for ie in range(0, len(LFEloc)):
if (filename == LFEloc[ie][0].decode('utf-8')):
lats = LFEloc[ie][2]
lons = LFEloc[ie][3]
xs = dx * (lons - lon0)
ys = dy * (lats - lat0)
# Create directory to store the waveforms
namedir = 'templates/' + filename
if not os.path.exists(namedir):
os.makedirs(namedir)
# Read origin time and station slowness files
origintime = pickle.load(open('timearrival/origintime.pkl', 'rb'))
slowness = pickle.load(open('timearrival/slowness.pkl', 'rb'))
# File to write error messages
errorfile = 'error/' + filename + '.txt'
# Loop over stations
for station in staNames:
# Create streams
EW = Stream()
NS = Stream()
UD = Stream()
# Get station metadata for downloading
for ir in range(0, len(staloc)):
if (station == staloc['station'][ir]):
network = staloc['network'][ir]
channels = staloc['channels'][ir]
location = staloc['location'][ir]
server = staloc['server'][ir]
# Compute source-receiver distance
latitude = staloc['latitude'][ir]
longitude = staloc['longitude'][ir]
xr = dx * (longitude - lon0)
yr = dy * (latitude - lat0)
distance = sqrt((xr - xs) ** 2.0 + (yr - ys) ** 2.0)
# Loop on LFEs
for i in range(0, np.shape(LFEtime)[0]):
YMD = LFEtime[i][1]
myYear = 2000 + int(YMD[0 : 2])
myMonth = int(YMD[2 : 4])
myDay = int(YMD[4 : 6])
# NOTE(review): the hour field appears to be 1-based -- confirm format
myHour = LFEtime[i][2] - 1
myMinute = int(LFEtime[i][3] / 60.0)
mySecond = int(LFEtime[i][3] - 60.0 * myMinute)
myMicrosecond = int(1000000.0 * \
(LFEtime[i][3] - 60.0 * myMinute - mySecond))
Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \
hour=myHour, minute=myMinute, second=mySecond, \
microsecond=myMicrosecond)
Tstart = Tori - TDUR
Tend = Tori + 60.0 + TDUR
# First case: we can get the data from IRIS
if (server == 'IRIS'):
(D, orientation) = get_from_IRIS(station, network, channels, \
location, Tstart, Tend, filt, dt, nattempts, waittime, \
errorfile)
# Second case: we get the data from NCEDC
elif (server == 'NCEDC'):
(D, orientation) = get_from_NCEDC(station, network, channels, \
location, Tstart, Tend, filt, dt, nattempts, waittime, \
errorfile)
else:
raise ValueError( \
'You can only download data from IRIS and NCEDC')
# Keep the record only if the download returned an actual Stream
if (type(D) == obspy.core.stream.Stream):
# Add to stream
if (channels == 'EH1,EH2,EHZ'):
EW.append(D.select(channel='EH1').slice(Tori, \
Tori + 60.0)[0])
NS.append(D.select(channel='EH2').slice(Tori, \
Tori + 60.0)[0])
UD.append(D.select(channel='EHZ').slice(Tori, \
Tori + 60.0)[0])
else:
EW.append(D.select(component='E').slice(Tori, \
Tori + 60.0)[0])
NS.append(D.select(component='N').slice(Tori, \
Tori + 60.0)[0])
UD.append(D.select(component='Z').slice(Tori, \
Tori + 60.0)[0])
else:
print('Failed at downloading data')
# Stack
if (len(EW) > 0 and len(NS) > 0 and len(UD) > 0):
# Stack waveforms
EWstack = linstack([EW], normalize=True, method=method)
NSstack = linstack([NS], normalize=True, method=method)
UDstack = linstack([UD], normalize=True, method=method)
# Initializations
maxCC = np.zeros(len(EW))
cc0EW = np.zeros(len(EW))
cc0NS = np.zeros(len(EW))
cc0UD = np.zeros(len(EW))
# Optionally restrict the cross correlation to a window centred on
# the expected arrival time at this station
if (window == True):
# Get time arrival
arrivaltime = origintime[filename] + \
slowness[station] * distance
Tmin = arrivaltime - winlength / 2.0
Tmax = arrivaltime + winlength / 2.0
if Tmin < 0.0:
Tmin = 0.0
if Tmax > EWstack[0].stats.delta * (EWstack[0].stats.npts - 1):
Tmax = EWstack[0].stats.delta * (EWstack[0].stats.npts - 1)
ibegin = int(Tmin / EWstack[0].stats.delta)
iend = int(Tmax / EWstack[0].stats.delta) + 1
# Cross correlation
for i in range(0, len(EW)):
ccEW = correlate(EWstack[0].data[ibegin : iend], \
EW[i].data[ibegin : iend], ncor)
ccNS = correlate(NSstack[0].data[ibegin : iend], \
NS[i].data[ibegin : iend], ncor)
ccUD = correlate(UDstack[0].data[ibegin : iend], \
UD[i].data[ibegin : iend], ncor)
maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)
cc0EW[i] = ccEW[ncor]
cc0NS[i] = ccNS[ncor]
cc0UD[i] = ccUD[ncor]
else:
# Cross correlation
for i in range(0, len(EW)):
ccEW = correlate(EWstack[0].data, EW[i].data, ncor)
ccNS = correlate(NSstack[0].data, NS[i].data, ncor)
ccUD = correlate(UDstack[0].data, UD[i].data, ncor)
maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)
cc0EW[i] = ccEW[ncor]
cc0NS[i] = ccNS[ncor]
cc0UD[i] = ccUD[ncor]
# Sort cross correlations
index = np.flip(np.argsort(maxCC), axis=0)
EWbest = Stream()
NSbest = Stream()
UDbest = Stream()
# Compute stack of best LFEs
for j in range(0, len(ratios)):
nLFE = int(ratios[j] * len(EW) / 100.0)
EWselect = Stream()
NSselect = Stream()
UDselect = Stream()
for i in range(0, nLFE):
EWselect.append(EW[index[i]])
NSselect.append(NS[index[i]])
UDselect.append(UD[index[i]])
# Stack best LFEs
EWbest.append(linstack([EWselect], normalize=True, \
method=method)[0])
NSbest.append(linstack([NSselect], normalize=True, \
method=method)[0])
UDbest.append(linstack([UDselect], normalize=True, \
method=method)[0])
# Plot figure
plt.figure(1, figsize=(20, 15))
params = {'xtick.labelsize':16,
'ytick.labelsize':16}
pylab.rcParams.update(params)
colors = cm.rainbow(np.linspace(0, 1, len(ratios)))
# East - West component
ax1 = plt.subplot(311)
dt = EWstack[0].stats.delta
nt = EWstack[0].stats.npts
t = dt * np.arange(0, nt)
for j in range(0, len(ratios)):
if (method == 'RMS'):
norm = EWbest[j].data / np.sqrt(np.mean(np.square( \
EWbest[j].data)))
elif (method == 'MAD'):
norm = EWbest[j].data / np.median(np.abs(EWbest[j].data - \
np.median(EWbest[j].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, color = colors[j], \
label = str(int(ratios[j])) + '%')
if (method == 'RMS'):
norm = EWstack[0].data / np.sqrt(np.mean(np.square( \
EWstack[0].data)))
elif (method == 'MAD'):
norm = EWstack[0].data / np.median(np.abs(EWstack[0].data - \
np.median(EWstack[0].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, 'k', label='All')
if (window == True):
plt.axvline(Tmin, linewidth=2, color='grey')
plt.axvline(Tmax, linewidth=2, color='grey')
plt.xlim([np.min(t), np.max(t)])
plt.title('East - West component', fontsize=24)
plt.xlabel('Time (s)', fontsize=24)
plt.legend(loc=1)
# North - South component
ax2 = plt.subplot(312)
dt = NSstack[0].stats.delta
nt = NSstack[0].stats.npts
t = dt * np.arange(0, nt)
for j in range(0, len(ratios)):
if (method == 'RMS'):
norm = NSbest[j].data / np.sqrt(np.mean(np.square( \
NSbest[j].data)))
elif (method == 'MAD'):
norm = NSbest[j].data / np.median(np.abs(NSbest[j].data - \
np.median(NSbest[j].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, color = colors[j], \
label = str(int(ratios[j])) + '%')
if (method == 'RMS'):
norm = NSstack[0].data / np.sqrt(np.mean(np.square( \
NSstack[0].data)))
elif (method == 'MAD'):
norm = NSstack[0].data / np.median(np.abs(NSstack[0].data - \
np.median(NSstack[0].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, 'k', label='All')
if (window == True):
plt.axvline(Tmin, linewidth=2, color='grey')
plt.axvline(Tmax, linewidth=2, color='grey')
plt.xlim([np.min(t), np.max(t)])
plt.title('North - South component', fontsize=24)
plt.xlabel('Time (s)', fontsize=24)
plt.legend(loc=1)
# Vertical component
ax3 = plt.subplot(313)
dt = UDstack[0].stats.delta
nt = UDstack[0].stats.npts
t = dt * np.arange(0, nt)
for j in range(0, len(ratios)):
if (method == 'RMS'):
norm = UDbest[j].data / np.sqrt(np.mean(np.square( \
UDbest[j].data)))
elif (method == 'MAD'):
norm = UDbest[j].data / np.median(np.abs(UDbest[j].data - \
np.median(UDbest[j].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, color = colors[j], \
label = str(int(ratios[j])) + '%')
if (method == 'RMS'):
norm = UDstack[0].data / np.sqrt(np.mean(np.square( \
UDstack[0].data)))
elif (method == 'MAD'):
norm = UDstack[0].data / np.median(np.abs(UDstack[0].data - \
np.median(UDstack[0].data)))
else:
raise ValueError('Method must be RMS or MAD')
norm = np.nan_to_num(norm)
plt.plot(t, norm, 'k', label='All')
if (window == True):
plt.axvline(Tmin, linewidth=2, color='grey')
plt.axvline(Tmax, linewidth=2, color='grey')
plt.xlim([np.min(t), np.max(t)])
plt.title('Vertical component', fontsize=24)
plt.xlabel('Time (s)', fontsize=24)
plt.legend(loc=1)
# End figure
plt.suptitle(station, fontsize=24)
plt.savefig(namedir + '/' + station + '.eps', format='eps')
ax1.clear()
ax2.clear()
ax3.clear()
plt.close(1)
# Save stacks into files
savename = namedir + '/' + station +'.pkl'
pickle.dump([EWstack[0], NSstack[0], UDstack[0]], \
open(savename, 'wb'))
for j in range(0, len(ratios)):
savename = namedir + '/' + station + '_' + \
str(int(ratios[j])) + '.pkl'
pickle.dump([EWbest[j], NSbest[j], UDbest[j]], \
open(savename, 'wb'))
# Save cross correlations into files
savename = namedir + '/' + station + '_cc.pkl'
pickle.dump([cc0EW, cc0NS, cc0UD], \
open(savename, 'wb'))
if __name__ == '__main__':
    # Template-construction parameters.
    TDUR = 10.0        # seconds of padding before/after each 1-minute window
    filt = (1.5, 9.0)  # band-pass corner frequencies (Hz)
    ratios = [50.0, 60.0, 70.0, 80.0, 90.0]  # percentages of best LFEs kept
    dt = 0.05          # resampling time step (s)
    ncor = 400         # number of points for the cross correlation
    window = False     # correlate over the full seismogram, not a window
    winlength = 10.0   # window length (s); used only when window is True
    nattempts = 10     # download retry count
    waittime = 10.0    # seconds to wait between download attempts
    method = 'RMS'     # stack normalization method

    # BUG FIX: np.float and np.int were removed in NumPy 1.24; they were
    # mere aliases of the builtins, which loadtxt accepts directly.
    LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt',
        dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH',
                         'eZ', 'nb'),
               'formats': ('S13', 'S3', float, float, float,
                           float, float, int)},
        skiprows=1)

    # Build the templates for every LFE family in the list.
    for ie in range(0, len(LFEloc)):
        filename = LFEloc[ie][0].decode('utf-8')
        compute_templates(filename, TDUR, filt, ratios, dt, ncor, window,
                          winlength, nattempts, waittime, method)
|
from flask import Blueprint, current_app, jsonify
from flask_restful import Api
from marshmallow import ValidationError
from myapi.extensions import apispec
from myapi.api.resources import TaskResource, TaskList, UserResource, UserList
from myapi.api.schemas import TaskSchema, UserSchema
# API blueprint: all resources below are served under the /api/v1 prefix.
blueprint = Blueprint("api", __name__, url_prefix="/api/v1")
api = Api(blueprint)
# Item endpoints take an integer id; list endpoints operate on the collection.
api.add_resource(TaskResource, "/tasks/<int:task_id>", endpoint="task_by_id")
api.add_resource(TaskList, "/tasks", endpoint="tasks")
api.add_resource(UserResource, "/users/<int:user_id>", endpoint="user_by_id")
api.add_resource(UserList, "/users", endpoint="users")
@blueprint.before_app_first_request
def register_views():
    """Register marshmallow schemas and resource paths with apispec.

    NOTE(review): Blueprint.before_app_first_request was deprecated in
    Flask 2.2 and removed in 2.3 -- confirm the pinned Flask version.
    """
    apispec.spec.components.schema("TaskSchema", schema=TaskSchema)
    for view in (TaskResource, TaskList):
        apispec.spec.path(view=view, app=current_app)
    apispec.spec.components.schema("UserSchema", schema=UserSchema)
    for view in (UserResource, UserList):
        apispec.spec.path(view=view, app=current_app)
@blueprint.errorhandler(ValidationError)
def handle_marshmallow_error(e):
    """Return a JSON body for marshmallow validation errors.

    Avoids try/catching ValidationError in every endpoint; responds with the
    error messages and HTTP 400
    (https://tools.ietf.org/html/rfc7231#section-6.5.1).
    """
    payload = jsonify(e.messages)
    return payload, 400
|
def func(a_list):
    """Return every subset of *a_list* (its power set) as a list of lists.

    Subset number ``mask`` contains ``a_list[j]`` exactly when bit ``j`` of
    ``mask`` is set, so results are ordered by counting the index mask from
    0 to 2**len(a_list) - 1 (the empty set comes first).
    """
    return [
        [item for j, item in enumerate(a_list) if (mask >> j) & 1]
        for mask in range(2 ** len(a_list))
    ]
def main(range_len, length):
    """Sample *length* distinct ints from range(range_len), then print the
    sample and its power set (computed by func)."""
    import random
    picked = random.sample(range(range_len), length)
    print(f'the list is {picked}')
    print(f'the subset of list are {func(picked)}')


main(30, 10)
|
from __future__ import print_function
from imutils.video.pivideostream import PiVideoStream
from picamera.array import PiRGBArray
from picamera import PiCamera
import argparse
import imutils
import time
import cv2
# initialize the camera and stream
camera = PiCamera()
camera.resolution = (640, 480)
rawCapture = PiRGBArray(camera, size=(640, 480))
stream = camera.capture_continuous(rawCapture, format="bgr",
                                   use_video_port=True)

# release the direct (non-threaded) camera resources before switching to
# the threaded stream below
cv2.destroyAllWindows()
stream.close()
rawCapture.close()
camera.close()

# created a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
vs = PiVideoStream().start()
time.sleep(2.0)

# loop over frames from the threaded video stream
while True:
    # grab the frame and resize it to a width of 640 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=640)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # BUG FIX: the original loop never terminated (the key press was read
    # but ignored), so the cleanup below was unreachable; quit on 'q'.
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
"""
There are a total of n courses you have to take, labeled from 0 to n-1.
Some courses may have prerequisites, for example to take course 0 you have
to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs,
return the ordering of courses you should take to finish all courses.
There may be multiple correct orders, you just need to return one of them.
If it is impossible to finish all courses, return an empty array.
Example 1:
Input: 2, [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So the correct course order is [0,1] .
Example 2:
Input: 4, [[1,0],[2,0],[3,1],[3,2]]
Output: [0,1,2,3] or [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
"""
import collections
class Solution:
    def findOrder(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: List[int]
        """
        # Kahn's algorithm: repeatedly schedule courses with no remaining
        # prerequisites. If every course gets scheduled the order is valid;
        # otherwise the graph contains a cycle and [] is returned.
        adjacency = [[] for _ in range(numCourses)]
        remaining = [0] * numCourses
        for course, prereq in prerequisites:
            adjacency[prereq].append(course)
            remaining[course] += 1

        # Seed the queue with all courses that have no prerequisites.
        ready = collections.deque(
            c for c in range(numCourses) if remaining[c] == 0)
        order = []
        while ready:
            current = ready.popleft()
            order.append(current)
            for nxt in adjacency[current]:
                remaining[nxt] -= 1
                if remaining[nxt] == 0:
                    ready.append(nxt)
        return order if len(order) == numCourses else []
|
from collections import Counter
# ransom note
class Pleb:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Greedy check: consume magazine letters one ransom-note character
        at a time, failing as soon as a needed letter runs out."""
        available = Counter(magazine)
        for letter in ransomNote:
            if available[letter] == 0:
                return False
            available[letter] -= 1
        return True
class Fool:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Count-comparison check: every distinct ransom-note letter must
        occur in the magazine at least as many times as in the note."""
        return all(
            ransomNote.count(letter) <= magazine.count(letter)
            for letter in set(ransomNote)
        )
# Quick smoke test of the counting implementation: prints False, False, True.
s = Fool()
for note, magazine in (("a", "b"), ("aa", "ab"), ("aa", "aab")):
    print(s.canConstruct(note, magazine))
|
import os
import time
from collections import defaultdict
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow_probability as tfp
import numpy as np
# (rows, cols) factorizations for power-of-two counts 16..2048; for every
# key, rows * cols equals the key. Presumably used to tile that many
# channels/feature maps into a single image -- use sites are outside this
# chunk, so confirm before relying on the orientation (rows vs cols).
GRIDS = {16: (4, 4), 32: (8, 4), 64: (8, 8), 128: (16, 8), 256: (16, 16),
512: (32, 16), 1024: (32, 32), 2048: (64, 32)}
class W2L:
    """Wav2Letter-style fully convolutional autoencoder over mel spectrograms.

    Handles building or restoring the Keras model, forward passes, and the
    training loop; checkpoints (.h5 files) and TF summaries are written to
    model_dir.
    """
    def __init__(self, model_dir, vocab_size, n_channels, data_format):
        # Restores the newest checkpoint when model_dir already holds one,
        # otherwise builds a fresh model starting at step 0.
        if data_format not in ["channels_first", "channels_last"]:
            raise ValueError("Invalid data type specified: {}. Use either "
                             "channels_first or "
                             "channels_last.".format(data_format))
        self.model_dir = model_dir
        self.data_format = data_format
        self.cf = self.data_format == "channels_first"  # convenience flag
        self.n_channels = n_channels
        self.vocab_size = vocab_size  # NOTE(review): unused in the visible code
        self.hidden_dim = 16  # TODO don't hardcode
        if os.path.isdir(model_dir) and os.listdir(model_dir):
            print("Model directory already exists. Loading last model...")
            last = self.get_last_model()
            # Conv1DTranspose is a custom layer; Keras must be told about it
            # explicitly to deserialize the checkpoint.
            self.model = tf.keras.models.load_model(
                os.path.join(model_dir, last),
                custom_objects={"Conv1DTranspose": Conv1DTranspose})
            # Checkpoints are named <zero-padded step>.h5, so stripping the
            # ".h5" suffix recovers the training step to resume from.
            self.step = int(last[:-3])
            print("...loaded {}.".format(last))
        else:
            print("Model directory does not exist. Creating new model...")
            self.model = self.make_w2l_model()
            if not os.path.isdir(model_dir):
                os.mkdir(model_dir)
            self.step = 0
        self.writer = tf.summary.create_file_writer(model_dir)
    def make_w2l_model(self):
        """Creates a Keras model that does the W2L forward computation.

        Just goes from mel spectrogram input to logits output. Builds an
        encoder (strided conv stack ending in a 1x1 projection to
        hidden_dim channels) plus a mirrored transposed-conv decoder; the
        model's outputs are the post-ReLU activations of each block along
        with the encoder and decoder endpoints.

        Returns:
            Keras model (functional, multi-output).

        TODO could allow model configs etc. For now, architecture is hardcoded
        """
        channel_ax = 1 if self.cf else -1
        def conv1d(n_f, w_f, stride):
            # Convolution without bias -- BatchNorm follows and absorbs it.
            return layers.Conv1D(
                n_f, w_f, stride, padding="same", data_format=self.data_format,
                use_bias=False)
        def conv1d_t(n_f, w_f, stride):
            # Transposed counterpart of conv1d, used in the decoder.
            return Conv1DTranspose(
                n_f, w_f, stride, padding="same", data_format=self.data_format,
                use_bias=False)
        def act():
            return layers.ReLU()
        # Encoder: strided width-48 conv, eight width-7 convs, two wide
        # layers, then a 1x1 projection down to hidden_dim channels.
        layer_list_enc = [
            layers.BatchNormalization(channel_ax),
            conv1d(256, 48, 2),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(2048, 32, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d(2048, 1, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            layers.Conv1D(self.hidden_dim, 1, 1, padding="same",
                          data_format=self.data_format)
        ]
        # Decoder: mirror of the encoder built from transposed convs,
        # ending in a strided transposed conv back to 128 channels.
        layer_list_dec = [
            layers.BatchNormalization(channel_ax),
            conv1d_t(2048, 1, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(2048, 1, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 32, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            conv1d_t(256, 7, 1),
            layers.BatchNormalization(channel_ax),
            act(),
            Conv1DTranspose(128, 48, 2, padding="same",
                            data_format=self.data_format)
        ]
        # w2l = tf.keras.Sequential(layer_list, name="w2l")
        # Time axis is None: variable-length inputs are supported.
        inp = tf.keras.Input((self.n_channels, None) if self.cf
                             else (None, self.n_channels))
        layer_outputs_enc = [inp]
        for layer in layer_list_enc:
            layer_outputs_enc.append(layer(layer_outputs_enc[-1]))
        layer_outputs_dec = [layer_outputs_enc[-1]]
        for layer in layer_list_dec:
            layer_outputs_dec.append(layer(layer_outputs_dec[-1]))
        # only include relu layers in outputs
        # With the repeating (conv, BN, act) structure, indices 4, 7, 10, ...
        # are exactly the ReLU outputs; the [-1] entries add the endpoints.
        relevant = layer_outputs_enc[4::3] + [layer_outputs_enc[-1]]
        relevant += layer_outputs_dec[4::3] + [layer_outputs_dec[-1]]
        w2l = tf.keras.Model(inputs=inp, outputs=relevant)
        return w2l
    def forward(self, audio, training=False, return_all=False):
        """Simple forward pass of a W2L model to compute logits.

        Parameters:
            audio: Tensor of mel spectrograms, channels_first!
            training: Bool, if true assuming training mode otherwise inference.
                      Important for batchnorm to work properly.
            return_all: Bool, if true, return list of all layer activations
                        (post-relu), with the logits at the very end.

        Returns:
            Result of applying model to audio (list or tensor depending on
            return_all).
        """
        if not self.cf:
            # Input arrives channels_first; flip to channels_last so it
            # matches the model's data format.
            audio = tf.transpose(audio, [0, 2, 1])
        out = self.model(audio, training=training)
        if return_all:
            return out
        else:
            return out[-1]
    def train_step(self, audio, audio_length, optimizer, on_gpu):
        """Implements train step of the W2L model.

        Parameters:
            audio: Tensor of mel spectrograms, channels_first!
            audio_length: "True" length of each audio clip.
            optimizer: Optimizer instance to do training with.
            on_gpu: Bool, whether running on GPU. This changes how the
                    transcriptions are handled. Currently ignored!!

        Returns:
            Loss value (scalar MSE between reconstruction and input).
        """
        with tf.GradientTape() as tape:
            # forward() returns only the final reconstruction here.
            recon = self.forward(audio, training=True, return_all=False)
            # after this we need logits in shape time x batch_size x vocab_size
            # TODO mask, i.e. do not compute for padding
            loss = tf.reduce_mean(tf.math.squared_difference(recon, audio))
        # audio_length = tf.cast(audio_length / 2, tf.int32)
        grads = tape.gradient(loss, self.model.trainable_variables)
        optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        # probably has to go into train_full...
        # self.annealer.update_history(loss)
        return loss
    def train_full(self, dataset, steps, adam_params, on_gpu):
        """Full training logic for W2L.

        Parameters:
            dataset: tf.data.Dataset as produced in input.py.
            steps: Number of training steps.
            adam_params: List/tuple of four parameters for Adam: learning rate,
                         beta1, beta2, epsilon.
            on_gpu: Bool, whether running on a GPU.
        """
        # TODO more flexible checkpointing. this will simply do 10 checkpoints overall
        # NOTE(review): steps < 10 makes check_freq 0 and the modulo below
        # would divide by zero -- confirm steps is always >= 10.
        check_freq = steps // 10
        data_step_limited = dataset.take(steps)
        # TODO use annealing
        # self.annealer = AnnealIfStuck(adam_params[0], 0.1, 20000)
        # TODO don't hardcode this
        schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
            [200000, 250000], [adam_params[0], adam_params[0] / 10,
                               adam_params[0] / (5 * 10)])
        opt = tf.optimizers.Adam(schedule, *adam_params[1:])
        # Align the optimizer's step counter with the restored model step so
        # the LR schedule resumes at the right position.
        opt.iterations.assign(self.step)
        audio_shape = [None, self.n_channels, None] if self.cf \
            else [None, None, self.n_channels]
        def train_fn(w, x):
            return self.train_step(w, x, opt, on_gpu)
        # Fixed input signature avoids re-tracing for every sequence length.
        graph_train = tf.function(
            train_fn, input_signature=[tf.TensorSpec(audio_shape, tf.float32),
                                       tf.TensorSpec([None], tf.int32)])
        # graph_train = train_fn # skip tf.function
        start = time.time()
        for features, labels in data_step_limited:
            if not self.step % check_freq:
                print("Saving checkpoint...")
                self.model.save(os.path.join(
                    self.model_dir, str(self.step).zfill(6) + ".h5"))
            loss = graph_train(features["audio"], features["length"])
            if not self.step % 500:
                stop = time.time()
                print("Step: {}. Recon: {}".format(self.step, loss.numpy()))
                print("{} seconds passed...".format(stop - start))
            if not self.step % 100:
                with self.writer.as_default():
                    tf.summary.scalar("loss/recon", loss, step=self.step)
            self.step += 1
        # Final checkpoint after the loop completes.
        self.model.save(os.path.join(
            self.model_dir, str(self.step).zfill(6) + ".h5"))
    def get_last_model(self):
        """Return the file name of the newest .h5 checkpoint in model_dir.

        "final.h5" wins when present; otherwise the lexicographically largest
        name, which is the highest step thanks to zero-padded file names.
        """
        ckpts = [file for file in os.listdir(self.model_dir) if
                 file.endswith(".h5")]
        if "final.h5" in ckpts:
            return "final.h5"
        else:
            return sorted(ckpts)[-1]
class AnnealIfStuck(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, base_lr, factor, n_steps):
        """Anneal the learning rate if loss doesn't decrease anymore.

        Refer to
        http://blog.dlib.net/2018/02/automatic-learning-rate-scheduling-that.html.

        Parameters:
            base_lr: LR to start with.
            factor: By what to multiply in case we're stuck.
            n_steps: How often to check if we're stuck.
        """
        super(AnnealIfStuck, self).__init__()
        self.n_steps = n_steps
        self.lr = base_lr
        self.factor = factor
        # Rolling window of the last n_steps loss values; callers feed it
        # via update_history().
        self.loss_history = tf.Variable(
            np.zeros(n_steps), trainable=False, dtype=tf.float32,
            name="loss_history")
    def __call__(self, step):
        """Return the current LR; every n_steps, test the loss trend first."""
        # Only run the stagnation check when step is a positive multiple of
        # n_steps; on all other steps the LR passes through unchanged.
        if tf.logical_or(tf.greater(tf.math.mod(step, self.n_steps), 0),
                         tf.equal(step, 0)):
            pass
        else:
            # Ordinary least squares fit of a line (slope, bias) to the
            # recent loss history.
            x1 = tf.range(self.n_steps, dtype=tf.float32, name="x")
            x2 = tf.ones([self.n_steps], dtype=tf.float32, name="bias")
            x = tf.stack((x1, x2), axis=1, name="input")
            slope_bias = tf.linalg.lstsq(x, self.loss_history[:, tf.newaxis],
                                         name="solution")
            slope = slope_bias[0][0]
            bias = slope_bias[1][0]
            preds = slope * x1 + bias
            # Residual variance of the fit, then the variance of the slope
            # estimator (simple linear regression formulas).
            data_var = 1 / (self.n_steps - 2) * tf.reduce_sum(
                tf.square(self.loss_history - preds))
            dist_var = 12 * data_var / (self.n_steps ** 3 - self.n_steps)
            dist = tfp.distributions.Normal(slope, tf.sqrt(dist_var),
                                            name="slope_distribution")
            # Probability that the true slope is negative (loss decreasing).
            prob_decreasing = dist.cdf(0., name="prob_below_zero")
            if tf.less_equal(prob_decreasing, 0.5):
                # Loss is probably flat or rising -> anneal the LR.
                self.lr *= self.factor
        return self.lr
    def check_lr(self):
        # Accessor for the current learning rate without triggering a check.
        return self.lr
    def update_history(self, new_val):
        # Push new_val into the rolling window, dropping the oldest entry.
        self.loss_history.assign(tf.concat((self.loss_history[1:], [new_val]),
                                           axis=0))
def dense_to_sparse(dense_tensor, sparse_val=-1):
    """Inverse of tf.sparse_to_dense.

    Parameters:
        dense_tensor: The dense tensor. Duh.
        sparse_val: The value to "ignore": Occurrences of this value in the
                    dense tensor will not be represented in the sparse tensor.
                    NOTE: When/if later restoring this to a dense tensor, you
                    will probably want to choose this as the default value.

    Returns:
        SparseTensor equivalent to the dense input.
    """
    with tf.name_scope("dense_to_sparse"):
        keep_mask = tf.not_equal(dense_tensor, sparse_val)
        indices = tf.where(keep_mask, name="sparse_inds")
        values = tf.gather_nd(dense_tensor, indices, name="sparse_vals")
        shape = tf.shape(dense_tensor, name="dense_shape",
                         out_type=tf.int64)
        return tf.SparseTensor(indices, values, shape)
class Conv1DTranspose(layers.Conv1D):
    """1D transposed convolution ("deconvolution") layer.

    Why does this still not exist in Keras... Mirrors the structure of
    Keras' Conv2DTranspose, reduced to a single spatial axis.

    Fix: ``output_padding`` is normalized to a 1-tuple in ``__init__``, but
    the original ``call``/``compute_output_shape`` passed that tuple straight
    into ``deconv_output_length``, whose integer arithmetic
    (``... + output_padding``) then raises a TypeError. The single element
    is now unpacked first, matching what Conv2DTranspose does.
    """
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 output_padding=None,
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(Conv1DTranspose, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=tf.keras.activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=tf.keras.initializers.get(kernel_initializer),
            bias_initializer=tf.keras.initializers.get(bias_initializer),
            kernel_regularizer=tf.keras.regularizers.get(kernel_regularizer),
            bias_regularizer=tf.keras.regularizers.get(bias_regularizer),
            activity_regularizer=tf.keras.regularizers.get(
                activity_regularizer),
            kernel_constraint=tf.keras.constraints.get(kernel_constraint),
            bias_constraint=tf.keras.constraints.get(bias_constraint),
            **kwargs)
        # Normalize output_padding to a 1-tuple (or keep None for "infer").
        self.output_padding = output_padding
        if self.output_padding is not None:
            self.output_padding = normalize_tuple(
                self.output_padding, 1, 'output_padding')
            for stride, out_pad in zip(self.strides, self.output_padding):
                if out_pad >= stride:
                    raise ValueError('Stride ' + str(self.strides) + ' must be '
                                     'greater than output padding ' +
                                     str(self.output_padding))
    def build(self, input_shape):
        """Create the transposed-conv kernel (and bias) for the input shape."""
        input_shape = tf.TensorShape(input_shape)
        if len(input_shape) != 3:
            raise ValueError(
                'Inputs should have rank 3. Received input shape: ' +
                str(input_shape))
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        self.input_spec = layers.InputSpec(ndim=3, axes={channel_axis: input_dim})
        # conv1d_transpose expects kernels shaped
        # [width, output_channels, input_channels].
        kernel_shape = self.kernel_size + (self.filters, input_dim)
        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        self.built = True
    def call(self, inputs):
        """Apply the transposed convolution (plus bias/activation if set)."""
        inputs_shape = tf.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            h_axis = 2
        else:
            h_axis = 1
        height = inputs_shape[h_axis]
        kernel_h, = self.kernel_size
        stride_h, = self.strides
        if self.output_padding is None:
            out_pad_h = None
        else:
            # Fixed: unpack the single element of the normalized 1-tuple;
            # deconv_output_length needs a bare integer.
            out_pad_h, = self.output_padding
        # Infer the dynamic output shape:
        out_height = deconv_output_length(height,
                                          kernel_h,
                                          padding=self.padding,
                                          output_padding=out_pad_h,
                                          stride=stride_h,
                                          dilation=self.dilation_rate[0])
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height)
        else:
            output_shape = (batch_size, out_height, self.filters)
        output_shape_tensor = tf.stack(output_shape)
        outputs = tf.nn.conv1d_transpose(
            inputs,
            self.kernel,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=convert_data_format(self.data_format, ndim=3),
            dilations=self.dilation_rate)
        if not tf.executing_eagerly():
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)
        if self.use_bias:
            outputs = tf.nn.bias_add(
                outputs,
                self.bias,
                data_format=convert_data_format(self.data_format,
                                                ndim=3))
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
    def compute_output_shape(self, input_shape):
        """Static output shape for a given static input shape."""
        input_shape = tf.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        if self.data_format == 'channels_first':
            c_axis, h_axis = 1, 2
        else:
            c_axis, h_axis = 2, 1
        kernel_h, = self.kernel_size
        stride_h, = self.strides
        if self.output_padding is None:
            out_pad_h = None
        else:
            # Fixed: same unpacking as in call().
            out_pad_h, = self.output_padding
        output_shape[c_axis] = self.filters
        output_shape[h_axis] = deconv_output_length(
            output_shape[h_axis],
            kernel_h,
            padding=self.padding,
            output_padding=out_pad_h,
            stride=stride_h,
            dilation=self.dilation_rate[0])
        return tf.TensorShape(output_shape)
    def get_config(self):
        """Serialize config; output_padding is the only extra field."""
        config = super(Conv1DTranspose, self).get_config()
        config['output_padding'] = self.output_padding
        return config
def normalize_tuple(value, n, name):
    """Transforms a single integer or iterable of integers into an integer tuple.

    Arguments:
        value: The value to validate and convert. Could an int, or any iterable
            of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. "strides" or
            "kernel_size". This is only used to format error messages.

    Returns:
        A tuple of n integers.

    Raises:
        ValueError: If something else than an int/long or iterable thereof was
            passed.
    """
    # A bare int is broadcast to all n positions.
    if isinstance(value, int):
        return (value,) * n
    def _invalid(extra=''):
        # Shared error constructor; `extra` appends element-level detail.
        return ValueError(f'The `{name}` argument must be a tuple of {n}'
                          f' integers. Received: {value}{extra}')
    try:
        result = tuple(value)
    except TypeError:
        raise _invalid()
    if len(result) != n:
        raise _invalid()
    for element in result:
        try:
            int(element)
        except (ValueError, TypeError):
            raise _invalid(f' including element {element} of type'
                           f' {type(element)}')
    return result
def convert_data_format(data_format, ndim):
    """Translate a Keras data_format plus tensor rank into a TF format string
    (e.g. 'channels_last' + 4 -> 'NHWC')."""
    by_rank = {
        'channels_last': {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'},
        'channels_first': {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'},
    }
    try:
        rank_map = by_rank[data_format]
    except KeyError:
        raise ValueError('Invalid data_format:', data_format)
    try:
        return rank_map[ndim]
    except KeyError:
        raise ValueError('Input rank not supported:', ndim)
def deconv_output_length(input_length,
                         filter_size,
                         padding,
                         output_padding=None,
                         stride=0,
                         dilation=1):
    """Determines output length of a transposed convolution given input length.

    Arguments:
        input_length: Integer (None is passed through unchanged).
        filter_size: Integer.
        padding: one of `"same"`, `"valid"`, `"full"`.
        output_padding: Integer, amount of padding along the output dimension.
            Can be set to `None` in which case the output length is inferred.
        stride: Integer.
        dilation: Integer.

    Returns:
        The output length (integer).
    """
    assert padding in {'same', 'valid', 'full'}
    if input_length is None:
        return None
    # Effective kernel extent once dilation is applied.
    dilated_size = filter_size + (filter_size - 1) * (dilation - 1)
    if output_padding is None:
        # Inferred length, one formula per padding mode.
        inferred = {
            'valid': input_length * stride + max(dilated_size - stride, 0),
            'full': input_length * stride - (stride + dilated_size - 2),
            'same': input_length * stride,
        }
        return inferred[padding]
    # Exact length when the caller supplies output_padding explicitly.
    implicit_pad = {'same': dilated_size // 2,
                    'valid': 0,
                    'full': dilated_size - 1}[padding]
    return ((input_length - 1) * stride + dilated_size - 2 * implicit_pad +
            output_padding)
|
import os
import re
import glob
import pickle
import pandas as pd
from utils.transform_utils import *
# Get all posts within the data directory
posts = glob.glob('data/posts/*.p')
# Iterate over all posts within a class
for fp in posts:
    # Load each post into a DataFrame and store its networkid.
    # Fix: open the pickle through a context manager so the file handle is
    # closed deterministically (the original left it to the garbage collector).
    with open(fp, "rb") as post_file:
        df = pd.DataFrame(pickle.load(post_file))
    network_id = re.search("posts_(.*).p", fp).group(1)
    # Compute different metrics about the class
    df['created'] = pd.to_datetime(df['created'])
    df['num_revisions'] = df['history'].apply(len)
    df['subject'] = df['history'].apply(lambda x: x[0]['subject'])
    df['is_student'] = df['tags'].apply(lambda x: 'student' in x)
    df['is_instructor'] = df['tags'].apply(lambda x: 'instructor-note' in x)
    df['is_announcement'] = df['config'].apply(lambda x: 1 if 'is_announcement' in x else 0)
    # Count of nested replies under the first child (0 when childless).
    df['num_children'] = df['children'].apply(lambda x: len(list(num_nested_dicts(x[0], 'children'))) if len(x) > 0 else 0)
    # Remove HTML from text column
    df['text'] = df['history'].apply(lambda x: re.sub('<[^<]+?>|\n', ' ', x[0]['content']))
    # Reorder the columns
    df = df[['id', 'created', 'type', 'folders', 'tags', 'is_announcement', 'history', 'children', 'tag_good', 'is_student', 'no_answer', 'num_children', 'num_favorites', 'num_revisions', 'unique_views', 'subject','text']]
    # NOTE(review): fp[11:-23] assumes a fixed-width file-name layout under
    # data/posts/ -- confirm against the actual naming scheme.
    with open(f"data/dataframes/{fp[11:-23]}_dataframe_{network_id}.p", 'wb') as f:
        pickle.dump(df, f)
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.ticker as mtick
# NOTE(review): raw string with doubled backslashes yields literal '\\' in the
# path -- confirm this is the intended on-disk location.
df = pd.read_csv(r'Medical\\DataCleaning\\DataTransformation\\KivaLoanProject\\kiva_data.csv')
print(df.head(25))
# Creates the figure
f, ax = plt.subplots(figsize=(15, 10))
# Plot the data
sns.barplot(data=df, x="country", y = "loan_amount")
# Use part of the code above to format the y-axis ticks below this line
# NOTE(review): this second barplot draws over the first on the same axes.
sns.barplot(data=df, x="country", y="loan_amount", hue="gender")
fmt = '${x:,.0f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
plt.show()
plt.clf()
# On average, do female or male recipients receive larger loans from Kiva?
# On average, male recepient receive larger loans from Kiva.
# Which country has the least disparity in loan amounts awarded by gender?
# It looks like El Salvador appears have the smallest disparity in loan amounts (by gender).
# Based on the data, what kind of recommendations can you make to Kiva about the loans they give?
# Kiva should work to decrease the gap between gender loan.
# What actions could be taken to implement the recommendations you've made?
# Some actions could be:
# Kiva could hold workshops focused on women-led projects (to see more women-led project)
# Kiva could require that an equal amount of loans be given to male and female-driven projects
# Set color palette
sns.set_palette("Accent")
# Set style
sns.set_style("darkgrid")
# Create figure and axes (no need to use the previous syntax, as the y-label ticks aren't going to be formatted)
plt.figure(figsize=(25, 15))
# Add a title
# NOTE(review): `ax` still belongs to the first figure created above, so this
# title and the formatter below land on that figure, not on the new one.
ax.set_title("Loan Amounts")
# Use Seaborn to create the bar plot
sns.barplot(data=df, x="country", y="loan_amount", hue="gender")
fmt = '${x:,.0f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
plt.show()
plt.clf()
# Box Plots With Kiva Data
plt.figure(figsize=(16, 10))
sns.boxplot(data=df,x="country",y="loan_amount")
plt.show()
plt.clf()
# Which country's box has the widest distribution?
# Kenya
# In which country would you be most likely to receive the largest loan amount?
# Cambodia
# Box Plot by Activity
plt.figure(figsize=(16, 10))
sns.boxplot(data=df,x="activity",y="loan_amount")
plt.show()
plt.clf()
# What does this visualization reveal that previous ones did not?
# The loan amount are grouped by activity intead of country with Farming activities having the most loans.
# Violin Plots
plt.figure(figsize=(16, 10))
sns.violinplot(data=df, x="activity", y="loan_amount")
plt.show()
plt.clf()
# Create a violin plot that visualizes the distribution of loan amount by country.
# Split Violin Plots by gender
# Some styling (feel free to modify)
sns.set_palette("Spectral")
plt.figure(figsize=(18, 12))
sns.violinplot(data=df, x="country", y="loan_amount", hue="gender", split=True)
plt.show()
plt.clf()
# What does this visualization reveal about the distribution of loan amounts within countries by gender?
# The average amount of loans that is givent to male gender is higher overall, accept to El Salvador.
#plt.show() # Show the plot
#plt.clf() # Clear the plot
import sys
from explain.tf2.deletion_scorer import summarize_deletion_score_batch8, show
def main():
    """Summarize deletion-score batches found under the directory given as
    argv[1] and render the summary to an HTML report."""
    dir_path = sys.argv[1]
    deletion_per_job = 20
    # Offsets 20, 40, ..., 300: one batch of `deletion_per_job` deletions each.
    deletion_offset_list = list(range(20, 301, deletion_per_job))
    summarized_result = summarize_deletion_score_batch8(dir_path, deletion_per_job, deletion_offset_list)
    out_file_name = "ck_contribution.html"
    show(out_file_name, summarized_result)
if __name__ == "__main__":
    main()
import os
import ezexif
import shutil
# Work inside the lesson folder that holds the downloaded photos.
os.chdir("/Users/chilly/Desktop/python/yequ/崩溃的阿文/lesson06")
downloadPath = "照片"
photoList = os.listdir(downloadPath)
# Sort every photo into a "<year>年<month>月" sub-folder by its EXIF date.
for photo in photoList:
    photoPath = os.path.join(downloadPath, photo)
    exifInfo = ezexif.process_file(photoPath)
    # Get the original capture timestamp.
    # NOTE(review): assumes the EXIF value behaves like a
    # "YYYY:MM:DD HH:MM:SS" string -- confirm what ezexif returns here.
    takeTime = exifInfo["EXIF DateTimeOriginal"]
    # Split on the space into capture date and capture time.
    takeTimeParts = takeTime.split(" ")
    # The first element after the split is the capture date.
    photoDate = takeTimeParts[0]
    # Split the date on ":" into year, month and day parts.
    photoDateParts = photoDate.split(":")
    targetFolderName = f"{photoDateParts[0]}年{photoDateParts[1]}月"
    photoTargetPath = os.path.join(downloadPath, targetFolderName)
    if not os.path.exists(photoTargetPath):
        os.mkdir(photoTargetPath)
    shutil.move(photoPath,photoTargetPath)
|
import re
class ValidateEmail():
    """Checks that an email address is not already registered."""

    def __init__(self, email, users):
        self._email = email
        self._users = users

    def validate(self):
        """Return True when the email is free; otherwise warn and return False."""
        if self._email not in self._users:
            return True
        print("student was already registered, please use a different email\n")
        return False
class ValidateNewStudent:
    """Checks that a new-student record carries every required field."""

    # Field names kept exactly as the rest of the system spells them
    # (including the existing "desier_skills" spelling).
    _REQUIRED = ("id", "first_name", "last_name", "email",
                 "current_skills", "desier_skills")

    def __init__(self, student):
        self._student = student

    def validate_compleate_info(self):
        """Return True when all required keys are present; warn otherwise."""
        if all(key in self._student for key in self._REQUIRED):
            return True
        print("student missing information...")
        return False
class ValidateEditStudent:
    """Checks that a student scheduled for editing actually exists."""

    def __init__(self, email, students):
        self._email = email
        self._students = students

    def validate_student_exists(self):
        """Return True when the email belongs to a known student; warn if not."""
        exists = self._email in self._students
        if not exists:
            print("Student does not exist..\n")
        return exists
class ValidatePasswordLen():
    """Validates that a password is special-character free and 8-16 chars long."""

    def __init__(self, password):
        super(ValidatePasswordLen, self).__init__()
        self._password = password

    def length_validation(self):
        """Return True for a valid password, False otherwise.

        Rejects passwords containing any listed special character or whose
        length falls outside the inclusive 8..16 range, printing an
        explanation in each failure case.
        """
        special_characters = ["*", "<", ">", "!", "@", "#", "$", "%", "^", "&", "(", ")", "{", "}", ":", "|", "/", "\\"]
        # Reject on the first special character found.
        for character in special_characters:
            if self._password.find(character) >= 0:
                print(
                    f"Please make sure that in your password you use only alphanumerical characters and not special "
                    f"characters {special_characters}\n")
                return False
        if len(self._password) < 8:
            print("Please make sure your password is at least 8 to 16 Characters long\n")
            return False
        if len(self._password) > 16:
            print("Please make sure your password is not longer than 16 Characters\n")
            # Bug fix: the original fell through here and returned None.
            return False
        return True
class ValidateEmailFormat():
    """Validates the syntactic shape of an email address (user@host.tld)."""

    def __init__(self, email):
        super(ValidateEmailFormat, self).__init__()
        self._email = email

    def email_format_validation(self):
        """Return True when the email matches the expected shape; warn otherwise.

        Fix: the pattern is now a raw string. The previous plain string relied
        on Python passing the unknown regex escapes through unchanged, which
        emits a DeprecationWarning and is slated to become a SyntaxError.
        The matched pattern itself is unchanged.
        """
        test_email = re.fullmatch(r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+$", self._email)
        if test_email:
            return True
        print("Please enter a valid email address with the correct format -user@mail.com-\n")
        return False
class NameLastnameValidator():
    """Validates that first and last names contain no special characters.

    (Method name spelling `validte_first_last_name` is kept -- callers use it.)
    """

    # Character class of rejected symbols (note: '*' is not part of it).
    _SPECIALS_PATTERN = "[!@#$%^&()_+={};':\",.<>/?|]"

    def __init__(self, first_name, last_name):
        super(NameLastnameValidator, self).__init__()
        self._first_name = first_name
        self._last_name = last_name

    def validte_first_last_name(self):
        """Return True when neither name contains a special character."""
        special_characters = ["*", "<", ">", "!", "@", "#", "$", "%", "^", "&", "(", ")", "{", "}", ":", "|", "/", "\\"]
        bad_first = re.search(self._SPECIALS_PATTERN, self._first_name.lower())
        bad_last = re.search(self._SPECIALS_PATTERN, self._last_name.lower())
        if not bad_first and not bad_last:
            return True
        if bad_first:
            print(f"Please make sure your name has no special characters {special_characters}\n")
        elif bad_last:
            print(f"Please make sure your last name has no special characters {special_characters}\n")
        return False
class ValidateStudentDate():
    # NOTE(review): stub -- the constructor discards its arguments; the
    # validation logic for (students, date) has not been implemented yet.
    def __init__(self, students, date):
        pass
|
import logging
import os
import shutil
from collections import OrderedDict, namedtuple
from pathlib import Path
from uuid import uuid4
from sqlalchemy.exc import IntegrityError
from tqdm import tqdm
import pandas as pd
from common import DAL
from common.DAL import ModelPartialScore
from common.utils import VerboseTimer
from data_access.api import SpecificDataAccess
from data_access.model_folder import ModelFolder
logger = logging.getLogger(__name__)
def _post_training_prediction(model_folder):
    """Run post-training predictions for a trained model and persist results.

    Predicts on the test and validation sets, writes submission text files
    plus an HDF store of raw predictions under <model_folder>/submissions,
    logs per-question-category evaluation metrics, and inserts partial
    scores into the DB (skipping ones that already exist).

    Parameters:
        model_folder: model folder object the trained model lives in.

    Returns:
        OrderedDict mapping question-category id -> category name.
    """
    # Imported lazily: these pull in heavy model/evaluation dependencies.
    from classes.vqa_model_predictor import DefaultVqaModelPredictor
    from evaluate.VqaMedEvaluatorBase import VqaMedEvaluatorBase
    model_dal = DAL.get_model(lambda dal: Path(dal.model_location).parent == model_folder.folder)
    model_id = model_dal.id
    mp = DefaultVqaModelPredictor(model_folder)
    data_sets = {'test': mp.df_test, 'validation': mp.df_validation}
    # Restrict both data sets to the model's question category, if it has one.
    if model_folder.question_category:
        for name, df in data_sets.items():
            data_sets[name] = df[df.question_category == model_folder.question_category]
    predictions = {}
    found_data = False
    for name, df in data_sets.items():
        with VerboseTimer(f"Predictions for VQA contender {name}"):
            if len(df) == 0:
                logger.warning(f'Found no items for category "{model_folder.question_category}" in "{name}" data set')
                continue
            found_data = True
            df_predictions = mp.predict(df)
            predictions[name] = df_predictions
    if not found_data:
        raise Exception(f'Found no data for category "{model_folder.question_category}" ({model_folder})')
    outputs = {}
    for name, df_predictions in predictions.items():
        curr_predictions = df_predictions.prediction.values
        df_predicted = data_sets[name]
        df_output = df_predicted.copy()
        # image_id = file name without directory or extension.
        df_output['image_id'] = df_output.path.apply(lambda p: p.rsplit(os.sep)[-1].rsplit('.', 1)[0])
        df_output['prediction'] = curr_predictions
        columns_to_remove = ['path', 'answer_embedding', 'question_embedding', 'group', 'diagnosis', 'processed_answer']
        for col in columns_to_remove:
            del df_output[col]
        # Put question/prediction/answer columns first for readability.
        sort_columns = sorted(df_output.columns, key=lambda c: c not in ['question', 'prediction', 'answer'])
        df_output = df_output[sort_columns]
        outputs[name] = df_output
    df_output_test = outputs.get('test')
    df_output_validation = outputs['validation']
    def get_str(df_arg):
        # Submission format: one "image_id|prediction|answer" row per line.
        output_rows = df_arg.apply(lambda row: row.image_id + '|' + row.prediction + '|' + row.answer, axis=1)
        output_rows = output_rows.str.strip('|')
        rows = output_rows.values
        res_value = '\n'.join(rows)
        return res_value
    res = get_str(df_output_test) if df_output_test is not None else 'NO DATA IN TEST'
    res_val = get_str(df_output_validation)
    # Get evaluation per category:
    evaluations = {}
    pbar = tqdm(df_output_validation.groupby('question_category'))
    for question_category, df in pbar:
        pbar.set_description(f'evaluating {len(df)} for {question_category} items')
        curr_predictions = df.prediction.values
        curr_ground_truth = df.answer.values
        curr_evaluations = VqaMedEvaluatorBase.get_all_evaluation(predictions=curr_predictions,
                                                                  ground_truth=curr_ground_truth)
        evaluations[question_category] = curr_evaluations
    total_evaluations = VqaMedEvaluatorBase.get_all_evaluation(predictions=df_output_validation.prediction.values,
                                                               ground_truth=df_output_validation.answer.values)
    evaluations['Total'] = total_evaluations
    df_evaluations = pd.DataFrame(evaluations).T  # .sort_values(by=('bleu'))
    # Keep the 'Total' row last; everything else ordered by wbss.
    df_evaluations['sort'] = df_evaluations.index == 'Total'
    df_evaluations = df_evaluations.sort_values(by=['sort', 'wbss'])
    del df_evaluations['sort']
    # Getting string
    model_repr = repr(mp)
    sub_models = {category: folder for category, (model, folder) in mp.model_by_question_category.items()}
    sub_models_str = '\n'.join(
        [str(f'{category}: {folder} ({folder.prediction_data_name})') for category, folder in sub_models.items() if
         folder is not None])
    model_description_copy = df_evaluations.copy()
    def get_prediction_vector(category):
        # Name of the sub model's prediction vector, or '--' when absent.
        sub_model = sub_models.get(category)
        if sub_model is not None:
            return sub_model.prediction_data_name
        else:
            return '--'
    model_description_copy['prediction_vector'] = model_description_copy.index.map(get_prediction_vector)
    model_description = f'''
==Model==
{model_repr}
==Sub models==
{sub_models_str}
==validation evaluation==
{model_description_copy.to_string()}
'''
    logger.debug(model_description)
    # Saving predictions
    submission_folder = model_folder.folder / 'submissions'
    if submission_folder.exists():
        # Bug fix: shutil.copy() cannot copy a directory (it raises), and the
        # mkdir() below would then fail with FileExistsError anyway. Move the
        # previous submissions aside under a unique name instead.
        shutil.move(str(submission_folder), str(submission_folder) + '_' + str(uuid4()))
    submission_folder.mkdir()
    txt_path = submission_folder / f'submission.txt'
    txt_path.write_text(res)
    txt_path_val = submission_folder / f'submission_validation.txt'
    txt_path_val.write_text(res_val)
    model_description_path = submission_folder / f'model_description.txt'
    model_description_path.write_text(model_description)
    with pd.HDFStore(str(submission_folder / 'predictions.hdf')) as store:
        for name, df_predictions in predictions.items():
            store[name] = df_predictions
    logger.debug(f'For model {model_id}, Got results of\n{evaluations}')
    # DB id mappings for evaluation metrics and question categories.
    evaluations_types = {'wbss': 1, 'bleu': 2, 'strict_accuracy': 3}
    categories = OrderedDict({5: 'Abnormality_yes_no', 2: 'Plane', 3: 'Organ', 1: 'Modality', 4: 'Abnormality'})
    partial_scores: [ModelPartialScore] = DAL.get_partial_scores()
    for category_id, question_category in categories.items():
        evaluations_dict = evaluations.get(question_category)
        if not evaluations_dict:
            continue
        existing_evaluations = [ps for ps in partial_scores
                                if ps.model_id == model_id
                                and ps.question_category_id == category_id]
        for evaluation_name, score in evaluations_dict.items():
            evaluation_id = evaluations_types[evaluation_name]
            ps = ModelPartialScore(model_id, evaluation_id, category_id, score)
            existing_partials = [ev
                                 for ev in existing_evaluations
                                 if ev.evaluation_type == evaluation_id
                                 and ev.question_category_id == category_id]
            if len(existing_partials) != 0:
                logger.debug(f'for {ps}, already had a partial score. Continuing...')
                continue
            try:
                DAL.insert_dal(ps)
            except IntegrityError:
                # Row already present (unique constraint) -- fine, skip it.
                logger.debug(f'for {ps}, value already existed')
            except Exception as ex:
                logger.exception(f'Failed to insert partial score to db (model: {model_id})')
                print(type(ex))
    return categories
# insert_partial_scores(model_predicate=lambda m: m.id == model_db_id)
def generate_multi_configuration():
    """Train a batch of 'Abnormality' models over a grid of build configurations.

    Builds one BuildConfig per dense-units layout (plus one hand-picked
    attention + class-weight config prepended) and trains each via _train_model.
    """
    BuildConfig = namedtuple('BuildConfig',
                             ['dense_units', 'lstm_units', 'use_text_inputs_attention', 'use_class_weight'])
    lstm_units = 128
    # Candidate post-concat dense layer layouts (units per layer).
    dense_units_collection = [
        # (8,),
        # (8, 7, 6),
        (7, 8, 6),
        (6, 9, 7),
        (6, 9), (8, 6),
        (7, 8, 9), (6, 8, 9), (8, 6, 7), (8, 9, 7), (8, 6, 9),
    ]
    use_text_inputs_attention = False  # True if i % 2 == 0 else True
    use_class_weight = False
    configs = [BuildConfig(dense_units=ds,
                           lstm_units=lstm_units,
                           use_text_inputs_attention=use_text_inputs_attention,
                           use_class_weight=use_class_weight)
               if not isinstance(ds, (BuildConfig,)) else ds
               for i, ds in enumerate(dense_units_collection)]
    # One extra hand-picked configuration is trained first.
    configs = [BuildConfig(dense_units=(8, 7, 6), lstm_units=lstm_units, use_text_inputs_attention=True,
                           use_class_weight=True)] + configs
    question_category = 'Abnormality'
    for i, config in enumerate(configs):
        # BUG FIX: the progress denominator must count every config actually
        # trained; `configs` has one more entry than dense_units_collection,
        # so the old message could read "10 / 9".
        logger.info(f'Training : {config} ({i + 1} / {len(configs)})')
        dense_units = config.dense_units
        use_text_inputs_attention = config.use_text_inputs_attention
        use_class_weight = config.use_class_weight
        curr_lstm_units = config.lstm_units
        epochs = 3  # 8 if len(dense_units) > 2 else 12
        folder_suffix = get_folder_suffix(question_category, dense_units, curr_lstm_units, use_class_weight,
                                          use_text_inputs_attention)
        _train_model(activation='softmax',
                     prediction_vector_name='answers',
                     epochs=epochs,
                     loss_function='categorical_crossentropy',
                     lstm_units=curr_lstm_units,
                     optimizer='RMSprop',
                     post_concat_dense_units=dense_units,
                     use_text_inputs_attention=use_text_inputs_attention,
                     question_category=question_category,
                     batch_size=32,
                     augmentations=20,
                     notes_suffix=f'For Category: {question_category}',
                     folder_suffix=folder_suffix,
                     use_class_weight=use_class_weight)
def get_folder_suffix(question_category, dense_units, lstm_units, use_class_weight, use_text_inputs_attention):
    """Build a folder-name suffix that encodes the model configuration.

    Format: '<category>_dense_<u1>_<u2>...' plus optional '_lstm_<n>',
    '_attention' and '_weighted_class' markers, in that order.
    """
    folder_suffix = f'{question_category}_dense_{"_".join(str(v) for v in dense_units)}'
    if lstm_units:
        folder_suffix += f'_lstm_{int(lstm_units)}'
    if use_text_inputs_attention:
        # Plain strings here: the old f-strings had no placeholders.
        folder_suffix += '_attention'
    if use_class_weight:
        folder_suffix += '_weighted_class'
    return folder_suffix
def _train_model(activation, prediction_vector_name, epochs, loss_function, lstm_units, optimizer,
                 post_concat_dense_units,
                 use_text_inputs_attention,
                 question_category,
                 batch_size=75,
                 augmentations=20,
                 notes_suffix='',
                 folder_suffix='',
                 use_class_weight=False):
    """Build, train, save and evaluate one VQA model end to end.

    Pipeline: VqaModelBuilder -> save untrained model -> VqaModelTrainer on
    data filtered to `question_category` -> save trained model with a notes
    string -> _post_training_prediction. Results are logged, not returned.
    """
    # Doing all of this here in order to not import tensor flow for other functions
    from classes.vqa_model_trainer import VqaModelTrainer
    from classes.vqa_model_builder import VqaModelBuilder
    from common.settings import data_access as data_access_api
    from keras import backend as keras_backend
    # from classes.vqa_model_predictor import DefaultVqaModelPredictor
    # from evaluate.VqaMedEvaluatorBase import VqaMedEvaluatorBase
    # Drop any graph state left over from a previous model in this process.
    keras_backend.clear_session()
    mb = VqaModelBuilder(loss_function, activation,
                         post_concat_dense_units=post_concat_dense_units,
                         use_text_inputs_attention=use_text_inputs_attention,
                         optimizer=optimizer,
                         lstm_units=lstm_units,
                         prediction_vector_name=prediction_vector_name,
                         question_category=question_category)
    model = mb.get_vqa_model()
    # The untrained model is saved first; training then operates on its folder.
    model_folder = VqaModelBuilder.save_model(model, prediction_vector_name, question_category, folder_suffix)
    # Train ------------------------------------------------------------------------
    keras_backend.clear_session()
    data_access = SpecificDataAccess(data_access_api.folder, question_category=question_category, group=None)
    mt = VqaModelTrainer(model_folder,
                         augmentations=augmentations,
                         batch_size=batch_size,
                         data_access=data_access,
                         epochs=epochs,
                         question_category=question_category,
                         use_class_weight=use_class_weight)
    history = mt.train()
    # Train ------------------------------------------------------------------------
    with VerboseTimer("Saving trained Model"):
        # Human-readable hyper-parameter summary stored next to the model.
        notes = f'post_concat_dense_units: {post_concat_dense_units};\n' \
                f'Optimizer: {optimizer}\n' \
                f'loss: {loss_function}\n' \
                f'activation: {activation}\n' \
                f'prediction vector: {prediction_vector_name}\n' \
                f'lstm_units: {lstm_units}\n' \
                f'batch_size: {batch_size}\n' \
                f'epochs: {epochs}\n' \
                f'class weights: {use_class_weight}\n' \
                f'Inputs Attention: {use_text_inputs_attention}\n' \
                f'{notes_suffix}'
        trained_suffix = f'{folder_suffix}_trained'
        model_folder = mt.save(mt.model, mt.model_folder, history, notes=notes, folder_suffix=trained_suffix)
    logger.debug(f'model_folder: {model_folder}')
    # Evaluate ------------------------------------------------------------------------
    keras_backend.clear_session()
    # model_id_in_db = None  # latest...
    #
    # mp = DefaultVqaModelPredictor(model=model_id_in_db)
    # validation_prediction = mp.predict(mp.df_validation)
    # predictions = validation_prediction.prediction.values
    # ground_truth = validation_prediction.answer.values
    #
    # max_length = max([len(s) for s in predictions])
    # if max_length < 100:
    #     # results = VqaMedEvaluatorBase.get_all_evaluation(predictions=predictions, ground_truth=ground_truth)
    #     from evaluate.BleuEvaluator import BleuEvaluator
    #     ins = BleuEvaluator(predictions, ground_truth)
    #     results = {}
    #     results['bleu'] = ins.evaluate()
    #     results['wbss'] = -2
    # else:
    #     results = {'bleu': -1, 'wbss': -1}
    #
    # bleu = results['bleu']
    # wbss = results['wbss']
    #
    # model_db_id = mp.model_idx_in_db
    # model_score = ModelScore(model_db_id, bleu=bleu, wbss=wbss)
    # DAL.insert_dal(model_score)
    results = _post_training_prediction(model_folder)
    logger.info('----------------------------------------------------------------------------------------')
    logger.info(f'@@@For:\tLoss: {loss_function}\tActivation: {activation}: Got results of {results}@@@')
    logger.info('----------------------------------------------------------------------------------------')
def train_model(base_model_id,
                optimizer,
                post_concat_dense_units,
                lstm_units=0,
                question_category='Abnormality',
                epochs=20,
                batch_size=75,
                notes_suffix='',
                folder_suffix='',
                use_text_inputs_attention=False,
                use_class_weight=False):
    """Train a new model reusing the recorded hyper parameters of an existing one.

    Looks up the base model in the DB and forwards its loss function,
    activation and class strategy to _train_model together with the
    explicitly supplied training options.
    """
    # Get------------------------------------------------------------------------
    base_model = DAL.get_model_by_id(model_id=base_model_id)
    _train_model(base_model.activation,
                 base_model.class_strategy,
                 epochs,
                 base_model.loss_function,
                 lstm_units,
                 optimizer,
                 post_concat_dense_units,
                 use_text_inputs_attention=use_text_inputs_attention,
                 batch_size=batch_size,
                 notes_suffix=notes_suffix,
                 folder_suffix=folder_suffix,
                 question_category=question_category,
                 use_class_weight=use_class_weight)
# noinspection PyBroadException
def insert_partial_scores(model_predicate=None):
    """Compute and store per-question-category partial scores for stored models.

    For every model (optionally filtered by `model_predicate`), predicts on the
    validation set of each question category, evaluates the predictions, and
    inserts ModelPartialScore rows, skipping combinations that already exist.
    """
    # Imported here to avoid pulling keras/tensorflow into unrelated commands.
    from common.settings import data_access as data_access_api
    from data_access.api import SpecificDataAccess
    from classes.vqa_model_predictor import DefaultVqaModelPredictor
    from evaluate.VqaMedEvaluatorBase import VqaMedEvaluatorBase
    from keras import backend as keras_backend
    all_models = DAL.get_models()
    all_models = [m for m in all_models if model_predicate is None or model_predicate(m)]
    partial_scores: [ModelPartialScore] = DAL.get_partial_scores()
    pbar = tqdm(all_models)
    for model in pbar:
        model_id = model.id
        pbar.set_description(f'Working on model {model_id}')
        model_folder_location = Path(model.model_location).parent
        # Skip models whose artifacts are no longer on disk.
        if not model_folder_location.is_dir():
            continue
        model_folder = ModelFolder(model_folder_location)
        if model_folder.prediction_data_name != 'answers':
            # logger.warning(
            #     f'Skipping model {model_id}. The prediction vector was "{model_folder.prediction_data_name}"')
            # continue
            logger.warning(
                f'for model {model_id} prediction vector was "{model_folder.prediction_data_name}". '
                f'This might take a while')
        keras_backend.clear_session()
        # category_id -> category name, and evaluation name -> evaluation id.
        categories = DAL.get_question_categories_data_frame().Category.to_dict()
        rev_evaluations = DAL.get_evaluation_types_data_frame().name.to_dict()
        evaluations = {ev: ev_id for ev_id, ev in rev_evaluations.items()}
        for category_id, question_category in categories.items():
            data_access = SpecificDataAccess(data_access_api.folder, question_category=question_category, group=None)
            # ps: ModelPartialScore
            existing_evaluations = [ps for ps in partial_scores
                                    if ps.model_id == model_id
                                    and ps.question_category_id == category_id]
            # Nothing to do when every evaluation type is already stored.
            if len(existing_evaluations) == len(evaluations):
                logger.debug(f'Model {model_id} had evaluations for "{question_category} , '
                             f'got {len(existing_evaluations)} partial results. Continuing')
                continue
            try:
                mp = DefaultVqaModelPredictor(model_folder, data_access=data_access)
                df_to_predict = mp.df_validation
                df_predictions = mp.predict(df_to_predict)
            except Exception:
                logger.exception(f'Failed to predict (model {model_id})')
                continue
            predictions = df_predictions.prediction.values
            ground_truth = df_predictions.answer.values
            # Guard: absurdly long "answers" indicate a bad prediction vector;
            # evaluating them would be meaningless (and slow), so store -1.
            max_length = max([len(s) for s in predictions])
            if max_length < 100:
                results = VqaMedEvaluatorBase.get_all_evaluation(predictions=predictions, ground_truth=ground_truth)
            else:
                results = {'bleu': -1, 'wbss': -1}
            logger.debug(f'For {question_category} (model id: {model_id}), Got results of\n{results}')
            for evaluation_name, score in results.items():
                evaluation_id = evaluations[evaluation_name]
                ps = ModelPartialScore(model_id, evaluation_id, category_id, score)
                existing_partials = [ev
                                     for ev in existing_evaluations
                                     if ev.evaluation_type == evaluation_id and ev.question_category_id == category_id]
                if len(existing_partials) != 0:
                    logger.debug(f'for {ps}, already had a partial score. Continuing...')
                    continue
                try:
                    DAL.insert_dal(ps)
                except IntegrityError:
                    # Unique constraint hit: another run inserted it first.
                    logger.debug(f'for {ps}, value already existed')
                except Exception as ex:
                    logger.exception(f'Failed to insert partial score to db (model: {model_id})')
                    print(type(ex))
        # for evaluation_id, evaluation_type in evaluations.items():d
|
from setuptools import setup
# Package metadata for the odc_apps_cloud CLI utilities.
setup(
    name='odc_apps_cloud',
    version='1',
    author='Open Data Cube',
    author_email='',
    maintainer='Open Data Cube',
    maintainer_email='',
    description='CLI utils for working with objects/files the cloud',
    long_description='',
    license='Apache License 2.0',
    tests_require=['pytest'],
    # Sibling odc libraries are installed straight from the dea-proto monorepo.
    install_requires=[
        'odc_aws @ git+https://github.com/opendatacube/dea-proto.git#egg=odc_aws&subdirectory=libs/aws',
        'odc_io @ git+https://github.com/opendatacube/dea-proto.git#egg=odc_io&subdirectory=libs/io',
        'odc_aio @ git+https://github.com/opendatacube/dea-proto.git#egg=odc_aio&subdirectory=libs/aio',
        'odc_ppt @ git+https://github.com/opendatacube/dea-proto.git#egg=odc_ppt&subdirectory=libs/ppt',
        "click",
    ],
    # Optional dependency groups referenced by the bracketed console scripts below.
    extras_require={
        'GCP': ['google-cloud-storage'],
        'THREDDS': ['thredds_crawler', 'requests']
    },
    entry_points={
        'console_scripts': [
            'thredds-to-tar = odc.apps.cloud.thredds_to_tar:cli [THREDDS]',
            'gs-to-tar = odc.apps.cloud.gs_to_tar:cli [GCP]',
            's3-find = odc.apps.cloud.s3_find:cli',
            's3-inventory-dump = odc.apps.cloud.s3_inventory:cli',
            's3-to-tar = odc.apps.cloud.s3_to_tar:cli',
        ]
    },
    packages=['odc.apps.cloud'],
    zip_safe=False,
)
|
"""
Simple calculator without using `eval`
"""
import operator
from textwrap import dedent
# Supported binary operators mapped to their implementations.
MATH_OPS = {
    '+': operator.add,
    '-': operator.sub,
    '*': operator.mul,
    '/': operator.truediv,
}
def eval_equation(equation):
    """Evaluate a '<num> <op> <num>' string and return the float result."""
    left, op_symbol, right = equation.split()
    op_func = MATH_OPS[op_symbol]
    return op_func(float(left), float(right))
def main():
    """Prompt for one equation, evaluate it and print the result."""
    prompt = """
    Please enter an equation in the form: <num> <op> <num>
    where:
    <num> is a number such 10 or 4.5
    <op> an operator, one of +, - *, or /
    Examples:
    1 + 1
    10.5 * 3
    12.5 / 2
    Note: Spaces around the operator are needed.
    Your equation: """
    equation = input(dedent(prompt))
    result = eval_equation(equation)
    print(equation, '=', result)
if __name__ == '__main__':
    main()
|
# this file and implementation of static HTML based off of Amos Omondi's tutorial on scotch.io: https://scotch.io/tutorials/working-with-django-templates-static-files
#used to render pages and pass necessary python parameters to them.
from django.shortcuts import render
from django.views.generic import TemplateView # Import TemplateView
from django.http import HttpResponse
from pathfinder.models import characterTable
from pathfinder.models import monsterTable
from django.shortcuts import get_object_or_404
#function to add a character string to our characterTable model in the database
#this implementation based off of Maddie Graham's solution on StackOverflow: https://stackoverflow.com/a/59079292/12352379
def addCharacter(sUserID, sPlayerName, sRace, sPlayerClass, sStr, sDex, sCon, sInt, sWis, sCha):
    """Create and save a characterTable row from the given field values."""
    character = characterTable()
    field_values = {
        'userID': sUserID,
        'playerName': sPlayerName,
        'race': sRace,
        'playerClass': sPlayerClass,
        'strength': sStr,
        'dexterity': sDex,
        'constitution': sCon,
        'intelligence': sInt,
        'wisdom': sWis,
        'charisma': sCha,
    }
    for field, value in field_values.items():
        setattr(character, field, value)
    character.save()
# Add the two views we have been talking about all this time :)
class HomePageView(TemplateView):
    """Render the static landing page."""
    template_name = "index.html"
class AboutPageView(TemplateView):
    """Render the static about page."""
    template_name = "about.html"
class CharacterCreatorView(TemplateView):
    """Character creation form; POST persists the submitted character."""
    template_name = "characterCreator.html"
    # this solution courtesy of Eliakin Costa on StackOverflow: https://stackoverflow.com/a/59112612/12352379
    def post(self, request, *args, **kwargs):
        userID = 'testUser'
        # Field names in the positional order addCharacter expects.
        form_fields = ('characterName', 'race', 'class', 'strength', 'dexterity',
                       'constitution', 'intelligence', 'wisdom', 'charisma')
        values = [str(request.POST.get(field)) for field in form_fields]
        addCharacter(userID, *values)
        return render(request, self.template_name, {})
class battleSimView(TemplateView):
    """Battle simulator: GET lists characters/monsters, POST renders a battle."""
    template_name = "battleSim.html"
    def get(self, request, *args, **kwargs):
        character = characterTable.objects.all() # use filter() when you have sth to filter ;)
        monster = monsterTable.objects.all()
        return render(request, self.template_name, {'characters':character, 'monsters':monster},)
    def post(self, request, *args, **kwargs):
        characterID = str(request.POST.get('character_id'))
        monsterID = str(request.POST.get('monster_id'))
        playerHP = str(request.POST.get('player_hp'))
        weapon = str(request.POST.get('weapon'))
        level = str(request.POST.get('level'))
        # PERF FIX: fetch each row once instead of re-querying per attribute
        # (the original issued one .filter().first() DB query per field —
        # 14 identical queries per request).
        # NOTE(review): .first() returns None when no row matches, which
        # raises AttributeError below — same failure mode as the original.
        character = characterTable.objects.filter(playerName=characterID).first()
        monster = monsterTable.objects.filter(monsterName=monsterID).first()
        context = {
            'playerName': character.playerName,
            'race': character.race,
            'playerClass': character.playerClass,
            'strength': character.strength,
            'dexterity': character.dexterity,
            'constitution': character.constitution,
            'intelligence': character.intelligence,
            'wisdom': character.wisdom,
            'charisma': character.charisma,
            'playerHP': playerHP,
            'monsterName': monster.monsterName,
            'monsterHP': monster.monsterHP,
            'monsterAC': monster.monsterAC,
            'monsterCR': monster.monsterCR,
            'special': monster.special,
            'weapon': weapon,
            'level': level,
        }
        return render(request, 'battle.html', context)
class beginnersGuideView(TemplateView):
    """Render the static beginner's guide page."""
    template_name = "beginnersGuide.html"
class infoView(TemplateView):
    """Render the static info page."""
    template_name = "info.html"
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('localization')
from localization import *
from localization.bag import get_dict
from assignment_3.geometry import *
from assignment_4.laser import *
from math import pi
import tf
from tf.transformations import euler_from_quaternion
import argparse
import rospy
from sensor_msgs.msg import *
from nav_msgs.msg import *
from geometry_msgs.msg import *
# Parse Args
parser = argparse.ArgumentParser(description='Pose Scorer')
parser.add_argument('mapbag')
parser.add_argument('databag')
args = parser.parse_args()
# Get Data From Bag Files
the_map = get_dict( args.mapbag )['/map']
test_files = get_dict( args.databag )
scan = test_files['/base_scan']
truth = test_files['/base_pose_ground_truth']
pose = truth.pose.pose
# Ground-truth (x, y, yaw) extracted from the recorded pose.
true_pos = pose.position.x, pose.position.y, euler_from_quaternion((pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w))[2]
print "True Position:", true_pos
# Template scan that reuses the recorded scan's geometry; its ranges are
# filled per estimate inside pose_sub with the expected readings.
scan2 = LaserScan()
scan2.header = scan.header
scan2.angle_min = scan.angle_min
scan2.angle_max = scan.angle_max
scan2.angle_increment = scan.angle_increment
scan2.range_max = scan.range_max
rospy.init_node('query')
# Latched so late-joining subscribers (e.g. rviz) still receive the map.
mpub = rospy.Publisher('/map', OccupancyGrid, latch=True, queue_size=10)
mpub.publish(the_map)
pub_true = rospy.Publisher('/base_scan', LaserScan, queue_size=10)
pub_expected = rospy.Publisher('/base_scan_expected', LaserScan, queue_size=10)
tposepub = rospy.Publisher('/truth', PoseStamped, latch=True, queue_size=10)
# NOTE: `truth` is rebound here from the bag message to the PoseStamped
# that gets published on /truth.
truth = PoseStamped()
truth.header.frame_id = '/map'
truth.pose = apply(to_pose, true_pos)
posepub = rospy.Publisher('/estimate', PoseStamped, queue_size=10)
estimate = PoseStamped()
estimate.header.frame_id = '/map'
# Give subscribers a moment to connect before the initial publishes.
rospy.sleep(1)
tposepub.publish(truth)
br = tf.TransformBroadcaster()
publish_update(pub_true, scan, br, true_pos)
def pose_sub(msg):
    """Callback for /initialpose: publish the expected scan and a similarity
    score for the pose the user selected (e.g. in rviz)."""
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    q = msg.pose.pose.orientation
    theta = euler_from_quaternion((q.x, q.y, q.z, q.w))[2]
    # Convert the world pose to grid coordinates; falsy result means off-map.
    result = to_grid(x,y, the_map.info.origin.position.x, the_map.info.origin.position.y, the_map.info.width, the_map.info.height, the_map.info.resolution)
    if not result:
        print "INVALID"
        return
    else:
        mx, my = result
    # Ray-cast the scan we would expect to see from this grid cell.
    ex_scan = expected_scan(mx, my, theta, scan.angle_min, scan.angle_increment, len(scan.ranges), scan.range_max, the_map)
    scan2.ranges = ex_scan
    # Snap the estimate back to the cell's world coordinates for display.
    (wx, wy) = to_world(mx, my, the_map.info.origin.position.x, the_map.info.origin.position.y, the_map.info.width, the_map.info.height, the_map.info.resolution)
    publish_update(pub_expected, scan2, br, (wx,wy,theta))
    estimate.pose = apply(to_pose, (wx,wy,theta))
    posepub.publish(estimate)
    # Compare the recorded scan with the expected one and report the score.
    score = scan_similarity(scan.ranges, ex_scan, scan.range_max)
    print "Score: " + str(score)
sub = rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, pose_sub)
rospy.spin()
|
import sys
# Number of wrong guesses allowed before losing.
levens = 6
woord = "pythonp"
# Letters still to be guessed; found positions are blanked out with "_".
te_raden = list(woord)
# Revealed word shown to the player; starts fully hidden.
geraden = list("_"*len(woord))
def antwoord():
    """Prompt until the user enters exactly one character that is neither
    an underscore nor a digit, then return it."""
    while True:
        letter = input("Geef mij een letter: ")
        if len(letter) == 1 and letter != "_" and not letter.isdecimal():
            return letter
# Main game loop: runs until the player is out of lives or the word is complete.
while levens > 0:
    print("Geraden woord: ", "".join(geraden))
    gekozen_letter = antwoord()
    if gekozen_letter in te_raden:
        # Reveal only the first remaining occurrence; repeated letters must
        # be guessed once per occurrence.
        index = te_raden.index(gekozen_letter)
        geraden[index] = gekozen_letter
        te_raden[index] = "_"
        print("Goed zo")
    else:
        levens -= 1
        print("Fout, levens: ", levens)
    if geraden == list(woord):
        # Whole word revealed: win and exit immediately.
        print("Gewonnen :-)")
        sys.exit(0)
print("Verloren :-(")
|
import numpy as np
def Mutate(chromosome, mutationProbability):
    """Return a mutated copy of a binary chromosome.

    Each gene is flipped (0 <-> 1) independently with probability
    `mutationProbability`; the input array is not modified.
    Vectorized with a boolean mask instead of the original per-gene Python
    loop (the random draws differ per call but the distribution is identical).
    """
    flip_mask = np.random.rand(chromosome.size) < mutationProbability
    mutatedChromosome = chromosome.copy()
    mutatedChromosome[flip_mask] = 1 - chromosome[flip_mask]
    return mutatedChromosome
|
# -*- coding: utf-8 -*-
import os
import logging
import time
import thread
from woof.transactions import TransactionLogger
# Log kafka client activity to a file with thread/process details.
logging.basicConfig(
    format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
    filename='/tmp/kafkalog',
    level=logging.INFO
)
logger = logging.getLogger('kafka')
logger.setLevel(logging.INFO)
# Broker address, overridable via the GOMSG_SRV environment variable.
srv = os.getenv("GOMSG_SRV", "localhost:9092")
stime = time.time()
# Instantiate
# Should be a long lived object
# async would be True for performance, if needed
# but in fringe cases if there is a restart, msg might not be deliverd
tr = TransactionLogger(srv, "dummy_vertical1", is_async=False)
print "Time taken for connection: ", time.time() - stime
def thread_test():
    """Exercise the full transaction lifecycle (New/Modify/Cancel/Fulfil)
    against the shared TransactionLogger, printing per-step progress.
    Payloads intentionally include non-ASCII text to test encoding."""
    stime = time.time()
    tr.New(txn_id="gofld3434",
           amount=3500,
           skus=["vcid_1", "vhid_1"],
           detail="{'foo':'bar'}",
           userid= u'मेरा नाम',
           email="r1@gmail.com",
           phone="8984758345345")
    print "Time taken to send one message: ", time.time() - stime
    # Modify
    tr.Modify(txn_id="gofld3434",
              amount=4000,
              detail="{'foo':'bar', 'foo1':'bar1'}",
              phone="8984758345345")
    print "sent modify"
    # Cancel
    tr.Cancel(txn_id="gofld3434",
              phone="8984758345345")
    print "sent cancel"
    # Fulfil
    tr.Fulfil(txn_id="gofld3434",
              skus=[u'aaaàçççñññ'],
              userid='मेरा नाम',
              phone="8984758345345")
    print "fulfil"
# Run the lifecycle test from two threads concurrently (Python 2 `thread` module).
for i in range(2):
    thread.start_new_thread(thread_test,())
# sleep to allow msg to go
time.sleep(60)
|
from random import randint
import random
class Tree:
    """Node of a probabilistic tree.

    Each node carries a binary `value` and three probabilities: a prior
    (used when the node is the root) and one conditional probability for
    each possible parent value. All three are initialised to random
    2-decimal values and can be overridden through the setters.
    """
    def __init__(self, parent=None, name=''):
        self.name = name
        self.parent = parent
        self.value = 0
        self.children = []
        # Prior probability, used only when this node has no parent.
        self.probability = round(random.random(), 2)
        self.probabilityGivenOne = round(random.random(), 2)  # based on the parent value
        self.probabilityGivenZero = round(random.random(), 2)  # based on the parent value
    def setProbabilityGivenOne(self, probability):
        self.probabilityGivenOne = probability
    def setProbabilityGivenZero(self, probability):
        self.probabilityGivenZero = probability
    def getJSONFormat(self):
        """Return the subtree as nested {'name', 'children'} dicts."""
        result = {"name": self.name, "children": []}
        for child in self.children:
            result["children"].append(child.getJSONFormat())
        return result
    def getChildren(self):
        return self.children
    def setParent(self, parent):
        self.parent = parent
    def isRoot(self):
        # BUG FIX: compare against None with 'is', not '=='.
        return self.parent is None
    def getName(self):
        return self.name
    def addChild(self, child):
        self.children.append(child)
    def getProbability(self):
        """Return the prior at the root, otherwise the conditional
        probability matching the parent's current value."""
        if self.parent is None:
            return self.probability
        if self.parent.value == 0:
            return self.probabilityGivenZero
        else:
            return self.probabilityGivenOne
    def setValue(self, value):
        self.value = value
    def getValue(self):
        return self.value
import csvReader
def testIsJustNumbersOnNumbers():
    """isJustNumbers accepts a list of purely numeric strings."""
    listOfStrings = ['22.4', '23.9']
    assert csvReader.isJustNumbers(listOfStrings)
def testIsJustNumbersOnBadNumbers():
    """isJustNumbers rejects a list containing a non-numeric string."""
    listOfStrings = ['abc', '23.9']
    assert csvReader.isJustNumbers(listOfStrings) == False
def testGetNumbers():
    """getNumbers converts numeric strings to floats."""
    listOfStrings = ['22.4', '23.9']
    assert csvReader.getNumbers(listOfStrings) == [22.4, 23.9]
def testAverage():
    """average of [1, 2.3] is expected to be 2.0.

    NOTE(review): the arithmetic mean of [1, 2.3] is 1.65, not 2.0 —
    confirm csvReader.average's intended semantics (rounding up?).
    """
    listOfNumbers = [1,2.3]
    assert csvReader.average(listOfNumbers) == 2.0
# coding: utf-8
import sys
import os
import re
import random
import time
import urllib
from sklearn.cluster import AffinityPropagation, MeanShift, KMeans, Birch
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import numpy as np
from collections import Counter
class Sekitei:
    """Fitted URL-clustering state plus compiled per-feature check functions.

    Every `_xxx` check method receives (segments, param), where `segments`
    is url.split('/')[3:] (path and query, percent-decoded by check_url)
    and `param` holds the parameters parsed from the feature string by
    init_one.
    """
    def __init__(self):
        self.proba = {}                # cluster id -> fraction of qlink urls
        self.quota = {}                # cluster id -> fetch quota
        self.is_taken = {}             # cluster id -> urls taken so far
        self.keys = []                 # selected (frequent) feature strings
        self.cluster_expressions = {}  # cluster id -> boolean feature mask
        self.delta = {}                # cluster id -> allowed L1 slack
        self.model = None              # clustering pipeline (set externally)
        self.check_functions = []      # check method per feature key
        self.parameters = []           # parsed parameters per feature key
    def _strip_query(self, segments, i):
        # Drop the query part ('?...') of segment i, mutating `segments` in
        # place exactly as the original inline code did, and return it.
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        return segments[i]
    def _segments(self, segments, param):
        # "segments:n" — the url has exactly n segments.
        return len(segments) == param['n']
    def _param(self, segments, param):
        # "param:p" — p is the url's final query token.
        # BUG FIX: the original searched a global `url` that does not exist
        # in this scope (NameError when called); rebuild the path+query
        # string from the segments the caller passed instead. Note this sees
        # the percent-decoded segments, unlike the raw-url `check()` twin.
        target = '/' + '/'.join(segments)
        return re.search('[\?&]' + param['p'] + '([\&\/].*)?$', target) is not None
    def _param_name(self, segments, param):
        # "param_name:p" — the query contains "p=".
        # BUG FIX: same undefined-`url` problem as _param.
        target = '/' + '/'.join(segments)
        return re.search('[\?&]' + param['p'] + '=', target) is not None
    def _segment_name(self, segments, param):
        # "segment_name_i:s" — segment i (query stripped) equals s.
        if len(segments) <= param['i']:
            return False
        return self._strip_query(segments, param['i']) == param['s']
    def _segment_09(self, segments, param):
        # "segment_[0-9]_i:1" — segment i consists only of digits.
        if len(segments) <= param['i']:
            return False
        return self._strip_query(segments, param['i']).isdigit()
    def _segment_substr_09(self, segments, param):
        # "segment_substr[0-9]_i:1" — segment i ends like "<text><digits><text>".
        if len(segments) <= param['i']:
            return False
        return re.search('[^\d]+\d+[^\d]+$', self._strip_query(segments, param['i'])) is not None
    def _segment_ext(self, segments, param):
        # "segment_ext_i:ext" — segment i has the given extension.
        if len(segments) <= param['i']:
            return False
        return re.search('\.' + param['ext'] + '$', self._strip_query(segments, param['i'])) is not None
    def _segment_ext_substr_09(self, segments, param):
        # "segment_ext_substr[0-9]_i:ext" — extension AND substr checks.
        if len(segments) <= param['i']:
            return False
        seg = self._strip_query(segments, param['i'])
        return (re.search('\.' + param['ext'] + '$', seg) is not None
                and re.search('[^\d]+\d+[^\d]+$', seg) is not None)
    def _segment_len(self, segments, param):
        # "segment_len_i:L" — segment i (query stripped) has length L.
        if len(segments) <= param['i']:
            return False
        return len(self._strip_query(segments, param['i'])) == param['L']
    def init_one(self, feature):
        """Parse a feature string into its (check_method, parameters) pair.

        Returns (False, False) for an unrecognized feature string.
        """
        m = re.match('segments:([0-9]+)$', feature)
        if m is not None:
            return self._segments, {'n': int(m.groups()[0])}
        m = re.match('param:(.*)$', feature)
        if m is not None:
            return self._param, {'p': m.groups()[0]}
        m = re.match('param_name:(.*)$', feature)
        if m is not None:
            return self._param_name, {'p': m.groups()[0]}
        m = re.match('segment_name_([0-9]+):(.*)$', feature)
        if m is not None:
            return self._segment_name, {'i': int(m.groups()[0]), 's': m.groups()[1]}
        m = re.match('segment_\[0\-9\]_([0-9]+):1$', feature)
        if m is not None:
            return self._segment_09, {'i': int(m.groups()[0])}
        # BUG FIX: '[0-9]' is literal text in the generated key, so it must
        # be escaped here (like the patterns above/below); the unescaped
        # version was a character class that never matched, so every substr
        # feature fell through to the error path.
        m = re.match('segment_substr\[0\-9\]_([0-9]+):1$', feature)
        if m is not None:
            return self._segment_substr_09, {'i': int(m.groups()[0])}
        m = re.match('segment_ext_([0-9]+):(.*)$', feature)
        if m is not None:
            return self._segment_ext, {'i': int(m.groups()[0]), 'ext': m.groups()[1]}
        m = re.match('segment_ext_substr\[0\-9\]_([0-9]+):(.*)$', feature)
        if m is not None:
            return self._segment_ext_substr_09, {'i': int(m.groups()[0]), 'ext': m.groups()[1]}
        m = re.match('segment_len_([0-9]+):([0-9]+)$', feature)
        if m is not None:
            return self._segment_len, {'i': int(m.groups()[0]), 'L': int(m.groups()[1])}
        # BUG FIX: the original also printed the undefined global `url` here.
        print('ooops', feature)
        return False, False
    def init_functions(self, keys):
        """Compile and store the check function/parameters for every key."""
        for key in keys:
            f, p = self.init_one(key)
            self.check_functions.append(f)
            self.parameters.append(p)
    def check_url(self, url):
        """Return the (1, len(self.keys)) binary feature vector of `url`."""
        N = len(self.keys)
        X = np.zeros((1, N))
        # Path + query segments; the raw input line keeps a trailing newline.
        segments = url.split('/')[3:]
        if segments[-1] == '\n':
            del segments[-1]
        else:
            segments[-1] = segments[-1][:-1]
        # Percent-decode, trying cp1251 then utf8.
        # NOTE(review): urllib.unquote / str.decode are Python 2 APIs; this
        # module predates Python 3 (urllib.parse.unquote there).
        for i in range(len(segments)):
            try:
                segments[i] = urllib.unquote(segments[i]).decode('cp1251')
            except UnicodeDecodeError:
                try:
                    segments[i] = urllib.unquote(segments[i]).decode('utf8')
                except UnicodeDecodeError:
                    pass
        for i in range(N):
            X[0, i] = self.check_functions[i](segments, self.parameters[i])
        return X
# Module-level singleton holding the fitted clustering state.
sekitei = Sekitei()
def check(feature, url):
    """Return True when `url` exhibits the named `feature`.

    `feature` is one of the strings generated by extract_features, e.g.
    "segments:3", "param:id=2", "param_name:id", "segment_name_0:news",
    "segment_[0-9]_1:1", "segment_substr[0-9]_1:1", "segment_ext_1:jpg",
    "segment_ext_substr[0-9]_1:jpg" or "segment_len_1:10".
    Standalone twin of the Sekitei._xxx check methods.
    """
    # Path + query segments; the raw input line keeps its trailing newline.
    segments = url.split('/')[3:]
    if segments[-1] == '\n':
        del segments[-1]
    else:
        segments[-1] = segments[-1][:-1]
    # Percent-decode each segment, trying cp1251 then utf8 (Python 2 API).
    for i in range(len(segments)):
        try:
            segments[i] = urllib.unquote(segments[i]).decode('cp1251')
        except UnicodeDecodeError:
            try:
                segments[i] = urllib.unquote(segments[i]).decode('utf8')
            except UnicodeDecodeError:
                pass
    m = re.match('segments:([0-9]+)$', feature)
    if m is not None:
        n = int(m.groups()[0])
        if len(segments) == n:
            return True
        else:
            return False
    m = re.match('param:(.*)$', feature)
    if m is not None:
        if re.search('[\?&]' + m.groups()[0] + '([\&\/].*)?$', url) is not None:
            return True
        else:
            return False
    m = re.match('param_name:(.*)$', feature)
    if m is not None:
        if re.search('[\?&]' + m.groups()[0] + '=', url) is not None:
            return True
        else:
            return False
    m = re.match('segment_name_([0-9]+):(.*)$', feature)
    if m is not None:
        i = int(m.groups()[0])
        s = m.groups()[1]
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if segments[i] == s:
            return True
        else:
            return False
    m = re.match('segment_\[0\-9\]_([0-9]+):1$', feature)
    if m is not None:
        i = int(m.groups()[0])
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if segments[i].isdigit():
            return True
        else:
            return False
    # BUG FIX: '[0-9]' in the generated key is literal text and must be
    # escaped (as in the 'segment_[0-9]' pattern above and the
    # 'segment_ext_substr' pattern below); the unescaped version was a
    # character class that never matched, so substr features always fell
    # through to the final `return False`.
    m = re.match('segment_substr\[0\-9\]_([0-9]+):1$', feature)
    if m is not None:
        i = int(m.groups()[0])
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if re.search('[^\d]+\d+[^\d]+$', segments[i]) is not None:
            return True
        else:
            return False
    m = re.match('segment_ext_([0-9]+):(.*)$', feature)
    if m is not None:
        i = int(m.groups()[0])
        ext = m.groups()[1]
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if re.search('\.' + ext + '$', segments[i]) is not None:
            return True
        else:
            return False
    m = re.match('segment_ext_substr\[0\-9\]_([0-9]+):(.*)$', feature)
    if m is not None:
        i = int(m.groups()[0])
        ext = m.groups()[1]
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if re.search('\.' + ext + '$', segments[i]) is not None and re.search('[^\d]+\d+[^\d]+$', segments[i]) is not None:
            return True
        else:
            return False
    m = re.match('segment_len_([0-9]+):([0-9]+)$', feature)
    if m is not None:
        i = int(m.groups()[0])
        L = int(m.groups()[1])
        if len(segments) <= i:
            return False
        pos = segments[i].find('?')
        if pos != -1:
            segments[i] = segments[i][:pos]
        if len(segments[i]) == L:
            return True
        else:
            return False
    #print('ooops', feature, url)
    return False
def extract_features(URLS):
    """Select frequent URL features and build the binary design matrix.

    Counts every candidate feature over URLS, keeps those seen more than 100
    times (side effect: appends them to sekitei.keys and compiles their check
    functions), and returns a (len(URLS), n_keys) 0/1 matrix.
    """
    result = Counter()
    X_ = {}
    for line in URLS:
        X_[line] = []
        # Path + query segments; the raw line keeps its trailing newline.
        segments = line.split('/')[3:]
        if segments[-1] == '\n':
            del segments[-1]
        else:
            segments[-1] = segments[-1][:-1]
        result['segments:' + str(len(segments))] += 1
        X_[line].append('segments:' + str(len(segments)))
        if (len(segments) == 0):
            continue
        for i in range(len(segments)):
            segment = segments[i]
            # Percent-decode, trying cp1251 then utf8 (Python 2 API).
            try:
                segment = urllib.unquote(segment).decode('cp1251')
            except UnicodeDecodeError:
                try:
                    segment = urllib.unquote(segment).decode('utf8')
                except UnicodeDecodeError:
                    pass
            if '?' in segment:
                # Query part: register each parameter and its bare name.
                mb_par = segment.split('?')
                params = mb_par[1].split('&')
                for p in params:
                    result['param:' + p] += 1
                    X_[line].append('param:' + p)
                    result['param_name:' + p.split('=')[0]] += 1
                    X_[line].append('param_name:' + p.split('=')[0])
                segment = mb_par[0]
            result['segment_name_' + str(i) + ':' + segment] += 1
            X_[line].append('segment_name_' + str(i) + ':' + segment)
            if segment.isdigit():
                result['segment_[0-9]_' + str(i) + ':1'] += 1
                X_[line].append('segment_[0-9]_' + str(i) + ':1')
            if re.search('[^\d]+\d+[^\d]+$', segment) is not None:
                result['segment_substr[0-9]_' + str(i) + ':1'] += 1
                X_[line].append('segment_substr[0-9]_' + str(i) + ':1')
            ext = segment.split('.')
            if len(ext) > 1:
                result['segment_ext_' + str(i) + ':' + ext[-1]] += 1
                X_[line].append('segment_ext_' + str(i) + ':' + ext[-1])
            if len(ext) > 1 and re.search('[^\d]+\d+[^\d]+$', segment) is not None:
                result['segment_ext_substr[0-9]_' + str(i) + ':' + ext[-1]] += 1
                X_[line].append('segment_ext_substr[0-9]_' + str(i) + ':' + ext[-1])
            result['segment_len_' + str(i) + ':' + str(len(segment))] += 1
            X_[line].append('segment_len_' + str(i) + ':' + str(len(segment)))
    # Keep only features frequent enough to be informative.
    for key in result.keys():
        if result[key] > 100:
            sekitei.keys.append(key)
    sekitei.init_functions(sekitei.keys)
    #print keys
    X = np.zeros((len(URLS), len(sekitei.keys)))
    for j, url in enumerate(URLS):
        # Cross-check: class-based extraction vs the standalone check().
        x = sekitei.check_url(url)
        for i, key in enumerate(sekitei.keys):
            if check(key, url):
                X[j, i] = 1
        if (x != X[j, :]).any():
            print(x, X[j, :])
        '''
        if (key in X_[url]) != X[j, i]:
            print('fuck', key, url, X[j, i], key in X_[url])
        '''
    return X
def give_vector(url):
    """Build the 1 x len(sekitei.keys) feature row for `url`.

    Column i is 1.0 when feature key i matches the URL according to
    check(), 0.0 otherwise.
    """
    indicators = [1.0 if check(key, url) else 0.0 for key in sekitei.keys]
    return np.array([indicators], dtype=np.float64)
def define_segments(QLINK_URLS, UNKNOWN_URLS, QUOTA):
    """(Re)initialize sekitei's global state and fit the URL clustering model.

    Extracts feature vectors for all qlink + unknown URLs, clusters them
    with Birch (20 clusters, standardized features), and derives per-cluster
    statistics: proba (fraction of qlinks), quota (fetch budget),
    cluster_expressions (majority-vote boolean feature signature) and
    delta (allowed distance from that signature, used by reg_predict).
    """
    sekitei.proba = {}
    sekitei.quota = {}
    sekitei.is_taken = {}
    sekitei.keys = []
    sekitei.cluster_expressions = {}
    sekitei.delta = {}
    # Standardize features before Birch clustering.
    sekitei.model = Pipeline([('scaler', StandardScaler()),
                      ('clustering', Birch(n_clusters=20, threshold=0.1))])
    sekitei.check_functions = []
    sekitei.parameters = []
    URLS = QLINK_URLS + UNKNOWN_URLS
    X = extract_features(URLS)
    '''
    for i in range(len(URLS)):
        for j in range(len(sekitei.keys)):
            if X[i, j] != check(sekitei.keys[j], URLS[i]):
                print(sekitei.keys[j], URLS[i])
    '''
    # Labels: 1 for qlink URLs (listed first), 0 for unknown URLs.
    y = np.zeros((len(QLINK_URLS) + len(UNKNOWN_URLS)))
    y[:len(QLINK_URLS)] = 1
    clusters = sekitei.model.fit_predict(X)
    un_clusters, counts = np.unique(clusters, return_counts=True)
    for cluster, count in np.dstack((un_clusters, counts))[0]:
        # Share of qlinks inside this cluster.
        sekitei.proba[cluster] = np.sum(y[clusters == cluster]) / count
        sekitei.is_taken[cluster] = 0
        #sekitei.quota[cluster] = np.ceil(QUOTA * np.sum(y[clusters == cluster]) / len(QLINK_URLS))
        # Per-qlink fetch budget, inflated by factor k.
        k = 1.5
        min_quota = QUOTA / len(QLINK_URLS) * k
        #sekitei.quota[cluster] = np.ceil(k * np.sum(y[clusters == cluster]) + (QUOTA - k * np.sum(y)) * np.sum(1 - y[clusters == cluster]) / np.sum(1 - y))
        sekitei.quota[cluster] = min_quota * np.sum(y[clusters == cluster])
        # Majority-vote boolean signature of the cluster's feature vectors.
        sekitei.cluster_expressions[cluster] = np.mean(X[clusters == cluster], axis=0) > 0.5
        # Tolerated deviation from the signature (see reg_predict).
        sekitei.delta[cluster] = np.ceil(np.sum(np.abs(np.mean(X[clusters == cluster], axis=0) - sekitei.cluster_expressions[cluster])))
    #print(sekitei.delta)
def reg_predict(X):
    """Assign feature row X to the closest cluster signature.

    Returns the cluster id on an exact signature match; otherwise the
    nearest cluster if its distance is both below the initial bound and
    within that cluster's delta tolerance; -1 when nothing qualifies.
    """
    best_cluster = -1
    best_dist = len(sekitei.cluster_expressions.keys())
    for cluster, signature in sekitei.cluster_expressions.items():
        dist = np.sum(np.abs(X - signature))
        if dist == 0:
            # Exact match wins immediately.
            return cluster
        if dist < best_dist:
            best_dist = dist
            best_cluster = cluster
    if best_cluster != -1 and best_dist <= sekitei.delta[best_cluster]:
        return best_cluster
    return -1
#
# returns True if need to fetch url
#
def fetch_url(url):
    """Return True when `url` maps to a known cluster that still has quota."""
    features = give_vector(url)
    cluster = reg_predict(features)
    # URLs matching no cluster are never fetched.
    if cluster == -1:
        return False
    # Respect the per-cluster fetch budget.
    if sekitei.is_taken[cluster] >= sekitei.quota[cluster]:
        return False
    sekitei.is_taken[cluster] += 1
    return True
|
class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum of a rotated ascending sorted array.

        Replaces the original O(n) linear scan with O(log n) binary
        search: if nums[mid] > nums[hi] the minimum lies strictly to the
        right of mid, otherwise it is at mid or to its left. Assumes
        distinct elements (LeetCode 153 contract), same as the original.
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[hi]:
                lo = mid + 1
            else:
                hi = mid
        return nums[lo]
from flask import Flask, redirect, url_for, request,render_template
app = Flask(__name__)
n=''
def print(*args):
    """Shadow the builtin print: append one comma-joined line to the
    module-level output buffer `n` instead of writing to stdout."""
    global n
    n += ','.join(str(arg) for arg in args) + '\n'
#@app.route('/success/<name>')
def success(name):
    """Execute the submitted code string and collect its output in `n`.

    SECURITY NOTE(review): exec() on user-supplied input allows arbitrary
    code execution on the server -- do not expose this beyond a trusted
    local sandbox.
    """
    global n
    n = ''
    # x=exec('x=2\ny=3\nPrint(x+y)')
    # print(x)
    # print(5,Print(5+6))
    try:
        # Output is captured via the module-level print() override.
        exec(name)
    except Exception as e:
        # NOTE(review): stores the exception object itself; the %s format
        # below renders its message.
        n = e
    return '>>> %s' %(n)
@app.route('/login',methods = ['POST', 'GET'])
def login():
    """Render the login page; on POST, run the submitted code via success()."""
    if request.method == 'POST':
        user = request.form['nm']
        # Populates the global output buffer n with the execution result.
        success(user)
        return render_template('login.html', run=n,user=user)
    else:
        # GET: nothing is executed; n still holds output from the last run.
        user = request.args.get('nm')
        return render_template('login.html', run=n,user=user)
if __name__ == '__main__':
app.run(debug = True) |
from django.shortcuts import render
from blogs.models import Blog
def index(request):
    """Home page: render the eight most recent blog posts."""
    recent = Blog.objects.order_by('-post_date')[:8]
    return render(request, 'pages/index.html', {'blogs': recent})
def about(request):
    """Render the static about page."""
    template = 'pages/about.html'
    return render(request, template)
|
#!/usr/bin/env python
# dynamic_reconfigure generator: declares the runtime-tunable parameters of
# the openpose ROS node; the exit() call hands the generated config to catkin.
PACKAGE = "openpose_ros_node_cfg"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Arguments: name, type, level bitmask, description, default value.
gen.add("show_skeleton", bool_t, 0, "Boolean wether to show the openpose skeleton", True)
gen.add("show_bbox", bool_t, 0, "True to visualize bounding box around detected persons, show_skeleton must be true", True)
exit(gen.generate(PACKAGE, "openpose_ros_node", "openpose_ros"))
import requests
import json
class yandexTranslateApi:
    """Thin client for the Yandex Translate v1.5 JSON API.

    NOTE(review): the class and method names keep their original spelling
    (including the `direct_traslate` typo) for backward compatibility;
    `direct_translate` is provided as a correctly spelled alias.
    """

    def __init__(self, token):
        # API key; kept name-mangled (double underscore) as originally written.
        self.__token = token
        self.__get_directions_url = "https://translate.yandex.net/api/v1.5/tr.json/getLangs?key="
        self.__direct_translate_url = "https://translate.yandex.net/api/v1.5/tr.json/translate?key="
        self.__detect_language_url = "https://translate.yandex.net/api/v1.5/tr.json/detect?key="

    def update_token(self, new_token):
        """Replace the stored API key used by subsequent requests."""
        self.__token = new_token

    # Get all translation directions for a language code.
    # Returns a tuple because the API does not echo a status code in the body.
    def get_directions_code(self, language_code):
        """Return (status_code, payload) for the getLangs endpoint.

        language_codes are here:
        https://tech.yandex.com/translate/doc/dg/concepts/api-overview-docpage/
        """
        url = self.__get_directions_url + self.__token + "&ui={0}".format(language_code)
        result = requests.get(url)
        payload = result.json()
        if result.status_code == 200:
            return (result.status_code, payload['langs'])
        return (result.status_code, payload)

    def direct_traslate(self, from_language_code, to_language_code, text_to_translate):
        """Translate text with an explicit source->target language pair."""
        url = self.__direct_translate_url + self.__token + "&lang={0}-{1}".format(from_language_code, to_language_code)
        return self.__translate(url, text_to_translate)

    # Backward-compatible, correctly spelled alias for direct_traslate.
    direct_translate = direct_traslate

    def auto_detect_translate(self, to_language_code, text_to_translate):
        """Translate text, letting the service auto-detect the source language."""
        url = self.__direct_translate_url + self.__token + "&lang={0}".format(to_language_code)
        return self.__translate(url, text_to_translate)

    # Translate text (direct or auto-detect) via a prepared URL.
    def __translate(self, url, text_to_translate):
        request_body = {'text': text_to_translate}
        result = requests.post(url, data=request_body)
        return result.json()

    def detect_language(self, text_to_detect, *language_codes):
        """Detect the language of text; optional codes are sent as a hint."""
        url = self.__detect_language_url + self.__token
        if language_codes:
            # join() replaces the original trailing-comma-then-slice construction.
            url += "&hint=" + ",".join(str(code) for code in language_codes)
        request_body = {'text': text_to_detect}
        result = requests.post(url, data=request_body)
        return result.json()
if __name__=="__main__":
translator=yandexTranslateApi("token")
print(translator.get_directions_code('ru'))
print(translator.direct_traslate("ru","uk","Привет мир!"))
print(translator.auto_detect_translate('az','Привет мир!'))
print(translator.detect_language("Hello world"))
|
# DoDirectory.py
#
#CheckIfExists 160306
#Create 160306 */ |
import pygame, sys, time, random
from pygame.locals import *
from Class_Button import button
# key description
kdc = '''
Key description:
press key A to move the red car to racetrack 1
press key D to move the red car to racetrack 2
press key < to move the yellow car to racetrack 3
press key > to move the yellow car to racetrack 4
press key R to initialization the game (replay / start the game)
'''
print(kdc)
# read MaxScore file
with open("MaxScore.dc", "r") as maxfile:
maxscore = maxfile.readlines()
maxscore = maxscore[0]
# ready
pygame.init()
screen = pygame.display.set_mode((600, 400))
screen.fill((255, 255, 255))
black = (0, 0, 0)
white = (255, 255, 255)
pygame.display.set_caption("Double Car by AbsoCube --version 1.5")
icon = pygame.image.load("racing_flag.ico")
pygame.display.set_icon(icon)
car1 = pygame.image.load("Red.png")
car2 = pygame.image.load("Yellow.png")
AC = pygame.image.load("AbsoCube.jpg")
bg = pygame.image.load("Racetrack.png")
bg = pygame.transform.smoothscale(bg, (600, 400))
rb = pygame.image.load("RB.png")
rb = pygame.transform.smoothscale(rb, (50, 50))
bb = pygame.image.load("BB.png")
bb = pygame.transform.smoothscale(bb, (50, 50))
BGM = 'Adventure.mp3'
pop = 'Pop.mp3'
pygame.mixer.init(frequency=44100)
tfont1 = pygame.font.Font("msyh.ttc", 80)
tfont2 = pygame.font.Font("msyh.ttc", 50)
bfont = pygame.font.Font("msyh.ttc", 25)
sfont = pygame.font.Font("msyh.ttc", 60)
srect = screen.get_rect()
title1 = tfont1.render('Double Car', True, black)
t1rect = title1.get_rect()
t1rect.centerx = srect.centerx
t1rect.centery = 100
title2 = tfont2.render('by AbsoCube', True, black)
t2rect = title2.get_rect()
t2rect.centerx = srect.centerx
t2rect.centery = 200
start = button(50, 270, 500, 35, "Play", bfont, (255, 0, 0), white)
back = button(50, 270, 500, 35, "Back", bfont, (0, 255, 0), white)
RT1 = 1
RT1pos = 75
RT2 = 3
RT2pos = 225
point = time.time()
score = 0
roadblocks = []
effect = None
stop = True
over = False
old = False
epoint = time.time()
def blitcar(car, rtp):
    """Draw a car sprite centered at x-position `rtp` on the fixed car row (y=320)."""
    dest = car.get_rect()
    dest.center = (rtp, 320)
    screen.blit(car, dest)
def random_roadblock():
    """Spawn 0-2 red and 0-2 blue roadblocks on distinct racetracks (lanes 1-4).

    All spawned blocks occupy different lanes, and one color may not occupy
    both lanes of the same car: lane pairs (1,2) and (3,4) sum to 3 and 7,
    which is what the pos+oldpos check rejects.
    """
    global roadblocks
    red = random.randint(0, 2)
    blue = random.randint(0, 2)
    colors = [[rb, red], [bb, blue]]
    totalpos = []
    for rbcolor in colors:
        oldposes = []
        for i in range(1, rbcolor[1]+1):
            # Rejection-sample a lane until all placement constraints hold.
            while True:
                conflag = False
                pos = random.randint(1, 4)
                if pos not in oldposes and pos not in totalpos:
                    for oldpos in oldposes:
                        if pos+oldpos == 3 or pos+oldpos == 7:
                            conflag = True
                    if conflag:
                        continue
                    roadblocks.append({'color': rbcolor[0], 'dis': 0, 'rt': pos})
                    oldposes.append(pos)
                    totalpos.append(pos)
                    break
def initialization():
    """Reset all game state and start a new run (Play button / R key)."""
    global stop, over, RT1, RT2, point, roadblocks, score, old, effect
    pygame.mixer.music.stop()
    stop = False  # leave the title screen
    over = False
    old = True  # suppress restarting the title BGM
    effect = None  # no death animation in progress
    RT1 = 1  # red car lane (1 or 2)
    RT2 = 3  # yellow car lane (3 or 4)
    point = time.time()  # timestamp of the last roadblock spawn
    roadblocks = []
    score = 0
# show LOGO
screen.fill((255, 255, 255))
screen.blit(AC, (172, 72))
pygame.display.update()
time.sleep(3)
# main programme begin
while True:
# handle input
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif start.pressed(event) and stop:
initialization()
elif back.pressed(event) and over:
over = False
stop = True
old = False
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
if not effect:
if keys[K_a]:
RT1 = 1
elif keys[K_d]:
RT1 = 2
if keys[K_LEFT]:
RT2 = 3
elif keys[K_RIGHT]:
RT2 = 4
if keys[K_r]:
initialization()
# show background
screen.blit(bg, (0, 0))
if stop:
# game's cover
screen.blit(title1, t1rect)
screen.blit(title2, t2rect)
start.show(screen)
if not old:
# game BGM
pygame.mixer.music.stop()
pygame.mixer.music.load(BGM)
pygame.mixer.music.play()
old = True
elif not stop and not over and not effect:
# main game logic
# move car(player)
if RT1*150-75 > RT1pos:
RT1pos += 5
elif RT1*150-75 < RT1pos:
RT1pos -= 5
if RT2*150-75 > RT2pos:
RT2pos += 5
elif RT2*150-75 < RT2pos:
RT2pos -= 5
# show car(player)
blitcar(car1, RT1pos)
blitcar(car2, RT2pos)
# create roadblock
if time.time()-point >= 0.8:
point = time.time()
random_roadblock()
# show & move roadblock
for roadblock in roadblocks:
brect = roadblock['color'].get_rect()
brect.bottom = int(roadblock['dis'])
brect.centerx = roadblock['rt']*150-75
screen.blit(roadblock['color'], brect)
roadblock['dis'] += 1.5
# hit?
o = -1
for roadblock in roadblocks:
o += 1
crect = car1.get_rect()
if 320+crect.height//2+25 >= roadblock['dis'] >= 320-crect.height//2+25:
# hit red roadblock
if roadblock['color'] == rb and roadblock['rt'] in [RT1, RT2]:
effect = roadblocks[o]
epoint = time.time()
del roadblocks[o]
o -= 1
# miss & get blue roadblock
if roadblock['color'] == bb:
if roadblock['rt'] not in [RT1, RT2] and 320+crect.height//2 <= roadblock['dis']:
effect = roadblocks[o]
epoint = time.time()
del roadblocks[o]
o -= 1
elif 320+crect.height//2-25 >= roadblock['dis'] >= 320-crect.height//2+25:
if roadblock['rt'] in [RT1, RT2]:
del roadblocks[o]
o -= 1
score += 1
# sound effect
pygame.mixer.music.load(pop)
pygame.mixer.music.play()
# touch edge
if roadblock['dis'] >= 450:
del roadblocks[o]
o -= 1
# show score
scoretext = tfont2.render(str(score), True, black)
scorerect = scoretext.get_rect()
scorerect.top = srect.top
scorerect.centerx = srect.centerx
screen.blit(scoretext, scorerect)
elif effect:
# death effect
blitcar(car1, RT1pos)
blitcar(car2, RT2pos)
for roadblock in roadblocks:
brect = roadblock['color'].get_rect()
brect.bottom = int(roadblock['dis'])
brect.centerx = roadblock['rt'] * 150 - 75
screen.blit(roadblock['color'], brect)
if (time.time()-epoint)//0.3 % 2 == 1:
brect = effect['color'].get_rect()
brect.bottom = int(effect['dis'])
brect.centerx = effect['rt'] * 150 - 75
screen.blit(effect['color'], brect)
if time.time()-epoint >= 3:
over = True
effect = None
elif over:
# screen when game is over
overtitle = sfont.render('You got '+str(score)+' scores!', True, black)
otrect = overtitle.get_rect()
otrect.centery = 100
otrect.centerx = srect.centerx
screen.blit(overtitle, otrect)
# update max score
with open("MaxScore.dc", "w") as maxfile:
if score > int(maxscore):
maxfile.write(str(score))
maxscore = str(score)
else:
maxfile.write(maxscore)
maxtext = tfont2.render('max: '+maxscore, True, black)
mtrect = maxtext.get_rect()
mtrect.centerx = srect.centerx
mtrect.top = otrect.bottom
screen.blit(maxtext, mtrect)
back.show(screen)
pygame.display.update()
|
from datetime import datetime, timezone
import pytest
from website_monitor.status import Status
class TestStatus:
    """
    Test the Status dataclass.
    """
    def test_parsed_timestamp(self):
        # A valid ISO-8601 timestamp string parses to an aware UTC datetime.
        status = Status('http://www.ya.ru', '2021-02-10T18:04:28.023922+00:00', 200, 0.358636, True)
        assert status.parsed_timestamp == datetime(2021, 2, 10, 18, 4, 28, 23922, tzinfo=timezone.utc)
    def test_parsed_timestamp_fail(self):
        # A malformed timestamp surfaces as ValueError from the parser.
        status = Status('http://www.ya.ru', 'WRONG', 200, 0.358636, True)
        with pytest.raises(ValueError):
            status.parsed_timestamp
|
import json

# Build a nested record for the artist, wrap it under a title key, and
# serialize it; then extend the record with songs and serialize it alone.
rapper_dict = {'first': 'Marshall', 'last': 'Mathers', 'City': 'Detroit'}
rap_dict = {'Best Rapper Ever': rapper_dict}
rap_json = json.dumps(rap_dict)
print()
print(rap_json)
songs = ['Lose Yourself', 'Without Me', 'I Will']
rapper_dict['songs'] = songs
rapper_json = json.dumps(rapper_dict)
print()
print(rapper_json)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-01 12:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CIStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CISystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('url', models.CharField(max_length=255)),
('login', models.CharField(blank=True, max_length=255, null=True)),
('api_key', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('change_ci_status', models.BooleanField(default=False)),
('name', models.CharField(max_length=255)),
('triggered_by', models.CharField(choices=[(b'Timer', b'Timer'), (b'Gerrit trigger', b'Gerrit trigger'), (b'Manual', b'Manual'), (b'Any', b'Any')], max_length=30, null=True)),
('gerrit_branch', models.CharField(max_length=255, null=True)),
('gerrit_refspec', models.CharField(max_length=255, null=True)),
('ci_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='cidashboard.CISystem')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='JobResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('build_id', models.IntegerField(null=True)),
('result', models.CharField(default=b'SKIPPED', max_length=10)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='cidashboard.Job')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=255)),
('jobs', models.ManyToManyField(to='cidashboard.Job')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('jobs_results', models.ManyToManyField(to='cidashboard.JobResult')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='cidashboard.Product')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='View',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('change_ci_status', models.BooleanField(default=False)),
('name', models.CharField(max_length=255)),
('ci_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='views', to='cidashboard.CISystem')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='product',
name='views',
field=models.ManyToManyField(to='cidashboard.View'),
),
migrations.AddField(
model_name='cistatus',
name='ci_system',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='cidashboard.CISystem'),
),
migrations.AddField(
model_name='cistatus',
name='jobs_results',
field=models.ManyToManyField(to='cidashboard.JobResult'),
),
]
|
#!/usr/bin/env python
import rospy
import roslib
from geometry_msgs.msg import Point
import tf
from aruco_msgs.msg import MarkerArray
from std_msgs.msg import Float64
class get_pose():
    """ROS node helper: tracks ArUco markers and publishes the camera pose
    relative to the currently selected marker id.

    NOTE(review): uses a Python 2 print statement; not Python 3 compatible
    as written.
    """
    def __init__(self):
        rospy.init_node('get_pose',anonymous=False)
        # marker id -> [x, y, z] camera position, negated and scaled by 30
        self.aruco_marker = {}
        self.cam_pose = Point()
        self.posepub = rospy.Publisher('/statespecs/pose', Point, queue_size=10)
        rospy.Subscriber('/aruco_marker_publisher/markers',MarkerArray,self.aruco_data) # Subscribing to topic
        # NOTE(review): get_aruco_id() reads msg.data, which matches the
        # imported (and otherwise unused) Float64 rather than MarkerArray --
        # confirm the intended message type of 'setspecs/id'.
        rospy.Subscriber('setspecs/id',MarkerArray,self.get_aruco_id) # Subscribing to topic
        self.id = 12  # marker id currently used for localization
        self.count = 0  # tick counter used only by changeid()
    # Callback for aruco marker information
    def aruco_data(self, msg):
        # Cache the scaled camera-frame position for every visible marker.
        for i in range(0,len(msg.markers)):
            aruco_id = msg.markers[i].id
            pose_x = -round(msg.markers[i].pose.pose.position.x,3)*30
            pose_y = -round(msg.markers[i].pose.pose.position.y,3)*30
            pose_z = -round(msg.markers[i].pose.pose.position.z,3)*30
            self.aruco_marker[aruco_id] = [pose_x,pose_y,pose_z]
        try:
            self.cam_pose.x = self.aruco_marker[self.id][0]
            self.cam_pose.y = self.aruco_marker[self.id][1]
            self.cam_pose.z = self.aruco_marker[self.id][2]
            self.posepub.publish(self.cam_pose)
        # NOTE(review): bare except hides real errors; it should be narrowed
        # to KeyError (marker not currently visible).
        except:
            print "Next target is not visible"
            """ Somehow call aruco_map to get the next target """
        # print "\n"
        # print "ArUco_ID: ",self.id, "\r"
        # self.changeid()
    #Callback to get the current aruco ID to localize
    def get_aruco_id(self, msg):
        self.id = msg.data
    def changeid(self):
        # Cycle the tracked marker id on a fixed tick schedule (currently
        # unused; see the commented-out call in aruco_data).
        if (self.count >900):
            self.count = 0
        if (self.count == 300):
            self.id = 256
        elif (self.count == 600):
            self.id = 320
        elif (self.count == 900):
            self.id = 0
        self.count = self.count + 1
if __name__=="__main__":
marker = get_pose()
while not rospy.is_shutdown():
rospy.spin()
|
"""
Permutations
============
A simple implementation of permutations on `n` elements.
Authors
-------
* Chris Swierczewski (Feb 2014)
"""
class Permutation(object):
    """A permutation on `n` elements.

    Methods
    -------
    is_identity()
        Returns `True` if the Permutation is the identity.
    index(j)
        Representing the Permutation in "map" notation, a list where `i`
        is mapped to `j = lst[i]`, returns `i`. That is, the preimage of
        `j`.
    action(a)
        Returns the permutation of an iterable `a` under the action of
        the permutation.
    inverse()
        Returns the inverse of the Permutation.
    """
    def __init__(self, l):
        """Construct a Permutation from a list.

        There are two ways to construct a permutation.

        1. Permutations can be initialized by a list which is a
           permutation of `range(n)` given in "map" notation. That is,
           given a list `lst` the permutation constructed maps `i` to
           `lst[i]`.

        2. Permutations can be initialized by a list representing the
           permutation in cycle notation. Fixed cycles must be provided.

        Parameters
        ----------
        l : iterable
            Either an iterable (list) of integers from `0` to `n-1` or
            an iterable of iterables.

        Examples
        --------
        We construct the permutation `p = 0->3, 1->1, 2->0, 3->2` in two
        different ways. First, we construct the permutation from a "map".

        >>> p = Permutation([3,1,0,2])
        >>> print(p)
        [(0, 3, 2)]

        Second, the same permutation in cycle notation.

        >>> q = Permutation([[1], [0,3,2]])
        >>> print(q)
        [(0, 3, 2)]
        >>> p == q
        True
        """
        # Coerce arbitrary iterables (tuples, generators, ...) to a list.
        # (The original implementation recursed with the *unconverted*
        # argument here, causing infinite recursion for non-list input.)
        if not isinstance(l, list):
            l = list(l)
        # A non-empty list of lists is interpreted as cycle notation.
        # (The emptiness check also fixes an IndexError on Permutation([]).)
        if l and isinstance(l[0], list):
            l = self._list_from_cycles(l)
        self._list = l
        # _cycles_from_list calls self(j), which reads self._list -- so
        # _list must already be assigned at this point.
        self._cycles = self._cycles_from_list(self._list)
        self._hash = None

    def _list_from_cycles(self, cycles):
        r"""Create a permutation list `i \to l[i]` from a cycle notation list.

        Examples
        --------
        >>> p = Permutation([[0,1],[2],[3]])
        >>> p._list
        [1, 0, 2, 3]
        >>> q = Permutation([[2,4],[1,3]])
        >>> q._list
        [0, 3, 4, 1, 2]
        """
        # Degree is one more than the largest element appearing in any cycle;
        # elements not mentioned are fixed points.
        degree = max([0] + [max(cycle + [0]) for cycle in cycles]) + 1
        l = list(range(degree))
        for cycle in cycles:
            if not cycle:
                continue
            first = cycle[0]
            for i in range(len(cycle)-1):
                l[cycle[i]] = cycle[i+1]
            l[cycle[-1]] = first
        return l

    def _cycles_from_list(self, l):
        """Create a list of cycles (as tuples) from a permutation list."""
        n = len(l)
        cycles = []
        not_visited = list(range(n))[::-1]
        while len(not_visited) > 0:
            # Trace the cycle starting at the smallest unvisited element.
            i = not_visited.pop()
            cycle = [i]
            j = l[i]
            while j != i:
                cycle.append(j)
                not_visited.remove(j)
                j = self(j)
            cycles.append(tuple(cycle))
        return cycles

    def __repr__(self):
        # Fixed points are omitted from the printed cycle decomposition.
        non_identity_cycles = [c for c in self._cycles if len(c) > 1]
        return str(non_identity_cycles)

    def __hash__(self):
        # Lazily computed and cached; permutations are treated as immutable.
        if self._hash is None:
            self._hash = str(self._list).__hash__()
        return self._hash

    def __len__(self):
        return self._list.__len__()

    def __getitem__(self, key):
        return self._list.__getitem__(key)

    def __contains__(self, item):
        return self._list.__contains__(item)

    def __eq__(self, other):
        # Returning NotImplemented (rather than raising AttributeError)
        # lets `p == non_permutation` fall back to identity comparison.
        if not isinstance(other, Permutation):
            return NotImplemented
        return self._list == other._list

    def __mul__(self, other):
        return self.__rmul__(other)

    def __rmul__(self, other):
        """Compose permutations: (self * other)(i) == self(other(i)).

        The shorter operand is padded with fixed points so permutations of
        different degrees can be composed.
        """
        new_other = other[:] + [i for i in range(len(other), len(self))]
        new_p1 = self[:] + [i for i in range(len(self), len(other))]
        return Permutation([new_p1[i] for i in new_other])

    def __call__(self, i):
        """Returns the image of the integer i under this permutation."""
        if isinstance(i, int) and 0 <= i < len(self):
            return self[i]
        else:
            raise TypeError("i (= %s) must be an integer between "
                            "%s and %s" % (i, 0, len(self) - 1))

    def is_identity(self):
        """Returns `True` if permutation is the identity."""
        n = len(self._list)
        return self._list == list(range(n))

    def index(self, j):
        """If `p(i) = j`, returns `i` (the preimage of `j`)."""
        return self._list.index(j)

    def action(self, a):
        """Returns the action of the permutation on an iterable.

        Examples
        --------
        >>> p = Permutation([0,3,1,2])
        >>> p.action(['a','b','c','d'])
        ['a', 'd', 'b', 'c']
        """
        if len(a) != len(self):
            raise ValueError("len(a) must equal len(self)")
        return [a[self[i]] for i in range(len(a))]

    def inverse(self):
        """Returns the inverse permutation."""
        l = list(range(len(self)))
        for i in range(len(self)):
            l[self(i)] = i
        return Permutation(l)
def matching_permutation(a, b):
    """Returns the permutation `p` mapping the elements of `a` to the
    elements of `b`.

    The result `p` satisfies `b ~ p.action(a)`: elements of `a` and `b`
    need not be exactly equal, only close enough (within half the minimum
    separation of `a`'s elements) that the matching is unambiguous.

    Parameters
    ----------
    a,b : iterable
        Lists of approximately the same elements.

    Returns
    -------
    Permutation
        A Permutation `p` such that `norm(b - p.action(a))` is small.

    Examples
    --------
    >>> matching_permutation([6, -5, 9], [9, 6, -5])
    [(0, 2, 1)]
    """
    n = len(a)
    if n != len(b):
        raise ValueError("Lists must be of same length.")
    # Tolerance: half the smallest separation between any two elements of a.
    eps = 0.5 * min(abs(a[i] - a[j]) for j in range(n) for i in range(j))
    perm = [-1] * n
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            if abs(ai - bj) < eps:
                perm[j] = i
                break
    if -1 in perm:
        raise ValueError("Could not compute matching permutation "
                         "between %s and %s." % (a, b))
    return Permutation(perm)
|
# Generated by Django 3.1.2 on 2021-03-25 06:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Category.order an explicit integer default of 0."""
    dependencies = [
        ('product', '0041_remove_product_updated_at'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='order',
            field=models.IntegerField(default=0),
        ),
    ]
|
from gpiozero import *
from picamera import *
from time import *
from guizero import *
def take_picture():
    """Capture photos when the GPIO button is pressed.

    NOTE(review): every loop iteration writes to the same `output` filename,
    so only the last of the 3 captures survives -- confirm whether the first
    two are intended as countdown/warm-up shots.
    """
    global output
    #name of file
    output = strftime("/home/pi/mypibooth/image-%d-%m %H:%M:%S.png", gmtime())
    #take 3 pics
    for i in range(3):
        sleep(3)
        camera.capture(output)
#GPIO button asignment
take_pic_btn = Button(25)
take_pic_btn.when_pressed = take_picture
#camera settings
camera = PiCamera()
camera.resolution = (1920, 1080)
camera.hflip = True
camera.vflip = True
output = ""
#GUI
app = App("My Pi Booth")
app.attributes("-fullscreen", True)
camera.start_preview(alpha=50)
message = Text(app, "Text above button")
new_pic = PushButton(app, take_picture, text="Text on button")
app.display()
|
# Generated by Django 2.1.8 on 2019-08-09 17:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace comment_buy.cbuy with a proper `buy` FK and add comment_contents."""
    dependencies = [
        ('locally', '0004_auto_20190810_0103'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comment_buy',
            name='cbuy',
        ),
        migrations.AddField(
            model_name='comment_buy',
            name='buy',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comment_buys', to='locally.Buy'),
        ),
        migrations.AddField(
            model_name='comment_buy',
            name='comment_contents',
            field=models.CharField(default='', max_length=20, null=True),
        ),
    ]
|
import os
exec(open("_main2.py").read())
db = 1 #REPORTING DATABASE
database = ''
delete_staging = True
print_internal = True
print_details = False
run_warehousing = True
time_type = 'days'
time_unit = 30
#CAN ONLY SEND DATES TO RINGCENTRAL, SO THE TIME COMPONENT NEEDS STRIPPING OUT
start_date = now.replace(hour=0)
start_date = start_date.replace(minute=0)
start_date = start_date.replace(second=0)
start_date = start_date.replace(microsecond=0)
end_date = start_date + datetime.timedelta(days=1.0)
""""""
#start_date = datetime.datetime(2019, 1, 1)
#end_date = datetime.datetime(2019, 12, 3)
##############################################################################################################################################################
##############################################################################################################################################################
###############################################################################RINGCENTRAL
##############################################################################################################################################################
##############################################################################################################################################################
process_list = [
ws_process_class('RINGCENTRAL','agents'),
ws_process_class('RINGCENTRAL','skills'),
ws_process_class('RINGCENTRAL','campaigns'),
ws_process_class('RINGCENTRAL','completedcontacts', True,'RINGCENTRAL_telephony'),
]
#QUERY DATA AND MERGE IT INTO THE BASE TABLES AND TEMP WAREHOUSING TABLES
run_main("RINGCENTRAL", process_list, start_date, end_date, time_type, time_unit, db, database, run_warehousing,
delete_staging, print_internal, print_details)
""""""
"""
generate_creation_query('RINGCENTRAL', 'agents')
"""
##############################################################################################################################################################
##############################################################################################################################################################
###############################################################################NOW ADD TO REPORTING_TEMP TO COVER SOME EXISTING REPORTING
##############################################################################################################################################################
##############################################################################################################################################################
run_warehousing = False
db = 0 #reporting_temp
database = 'reporting_temp'
run_main("RINGCENTRAL", process_list, start_date, end_date, time_type, time_unit, db, database, run_warehousing,
delete_staging, print_internal, print_details)
|
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
from typing import MutableMapping
from typing import Sequence
import botocore
import colorlog
import simplejson as json
from cached_property import timed_cached_property
from clusterman.aws import CACHE_TTL_SECONDS
from clusterman.aws.aws_resource_group import AWSResourceGroup
from clusterman.aws.client import ec2
from clusterman.aws.client import ec2_describe_fleet_instances
from clusterman.aws.markets import get_market
from clusterman.aws.markets import InstanceMarket
from clusterman.exceptions import ResourceGroupError
logger = colorlog.getLogger(__name__)
_CANCELLED_STATES = ('deleted', 'deleted-terminating', 'failed')
class EC2FleetResourceGroup(AWSResourceGroup):
    """Resource group backed by an AWS EC2 Fleet (DescribeFleets / ModifyFleet APIs)."""

    def __init__(self, group_id: str) -> None:
        super().__init__(group_id)
        # Can't change the WeightedCapacity of EC2Fleets, so cache them here for frequent access
        self._market_weights = self._generate_market_weights()

    def market_weight(self, market: InstanceMarket) -> float:
        """Weighted capacity of one instance in `market`; unknown markets default to 1."""
        return self._market_weights.get(market, 1)

    def modify_target_capacity(
        self,
        target_capacity: float,
        *,
        dry_run: bool = False,
    ) -> None:
        """Request a new TotalTargetCapacity for this fleet.

        :param target_capacity: desired total capacity (truncated to an int)
        :param dry_run: if True, log the request but do not call AWS
        :raises ResourceGroupError: if AWS reports the modification was not applied
        """
        if self.is_stale:
            logger.info(f'Not modifying EC2 fleet since it is in state {self.status}')
            return
        kwargs = {
            'FleetId': self.group_id,
            'TargetCapacitySpecification': {
                'TotalTargetCapacity': int(target_capacity),
            },
            # Don't let AWS pick instances to terminate when capacity shrinks.
            'ExcessCapacityTerminationPolicy': 'no-termination',
        }
        # BUGFIX: log/error messages below said "spot fleet" (copy-paste from the
        # spot fleet resource group); this class manages EC2 Fleets.
        logger.info(f'Modifying EC2 fleet with arguments: {kwargs}')
        if dry_run:
            return

        response = ec2.modify_fleet(**kwargs)
        if not response['Return']:
            logger.critical('Could not change size of EC2 fleet:\n{resp}'.format(resp=json.dumps(response)))
            raise ResourceGroupError('Could not change size of EC2 fleet: check logs for details')

    @timed_cached_property(ttl=CACHE_TTL_SECONDS)
    def instance_ids(self) -> Sequence[str]:
        """ Responses from this API call are cached to prevent hitting any AWS request limits """
        return [instance['InstanceId'] for instance in ec2_describe_fleet_instances(self.group_id)]

    @property
    def fulfilled_capacity(self) -> float:
        return self._configuration['FulfilledCapacity']

    @property
    def status(self) -> str:
        return self._configuration['FleetState']

    @property
    def is_stale(self) -> bool:
        """True once the fleet is being torn down (any 'deleted*' state)."""
        try:
            return self.status.startswith('deleted')
        except botocore.exceptions.ClientError as e:
            # NOTE(review): 'TODO' is a placeholder that can never match a real AWS
            # error code, so this branch currently always re-raises; fill in the
            # fleet-not-found code for EC2 fleets.
            if e.response.get('Error', {}).get('Code', 'Unknown') == 'TODO':
                return True
            raise e

    def _generate_market_weights(self) -> Mapping[InstanceMarket, float]:
        """Build the market -> WeightedCapacity map from the fleet's launch template overrides."""
        market_weights: MutableMapping[InstanceMarket, float] = {}
        for launch_template_config in self._configuration['LaunchTemplateConfigs']:
            instance_type, subnet_id = None, None
            for override in launch_template_config['Overrides']:
                instance_type = override.get('InstanceType')
                subnet_id = override.get('SubnetId')
                if not (instance_type and subnet_id):
                    # Fall back to the launch template itself for any missing fields.
                    spec = launch_template_config['LaunchTemplateSpecification']
                    launch_template_data = ec2.describe_launch_template_versions(
                        LaunchTemplateId=spec['LaunchTemplateId'],
                        Versions=[spec['Version']],
                    )[0]['LaunchTemplateData']
                    if not instance_type:
                        instance_type = launch_template_data['InstanceType']
                    if not subnet_id:
                        # NOTE(review): DescribeLaunchTemplateVersions returns
                        # NetworkInterfaces as a *list*; this probably needs [0]
                        # unless the `ec2` wrapper reshapes the response — confirm.
                        subnet_id = launch_template_data['NetworkInterfaces']['SubnetId']
                market_weights[get_market(instance_type, subnet_id)] = override['WeightedCapacity']
        return market_weights

    @property
    def _target_capacity(self) -> float:
        return self._configuration['TargetCapacitySpecification']['TotalTargetCapacity']

    @timed_cached_property(ttl=CACHE_TTL_SECONDS)
    def _configuration(self):
        """ Responses from this API call are cached to prevent hitting any AWS request limits """
        fleet_configuration = ec2.describe_fleets(FleetIds=[self.group_id])
        return fleet_configuration['Fleets'][0]

    @classmethod
    def _get_resource_group_tags(cls) -> Mapping[str, Mapping[str, str]]:
        """Map fleet id -> tag dict for every non-cancelled EC2 fleet in the account."""
        fleet_id_to_tags = {}
        for page in ec2.get_paginator('describe_fleets').paginate():
            for fleet in page['Fleets']:
                if fleet['FleetState'] in _CANCELLED_STATES:
                    continue
                if 'Tags' in fleet:
                    tags_dict = {tag['Key']: tag['Value'] for tag in fleet['Tags']}
                    fleet_id_to_tags[fleet['FleetId']] = tags_dict
        return fleet_id_to_tags
|
## This file alters the game described in simpy_rollout_fire_smdp.py
# Here, agents receive a local observation (location,strength,status,interest) for 5 closest fires
# Also, each fire gets random number of UAV-minutes needed to extinguish it, where the mean is a
# function of fire level
# Rewards are equal to the fire level
# Fires are clustered close together, but clusters are far apart
# Also new in this version: Agents take epsilon time between get_obs and take_action so that
# two agents supposed to act at the same time dont end up acting sequentially
# Hold time is now normally distributed so that symmetry is broken
# Fires are penalized 1 for trying to extinguish the same fire
import copy
import math
deg = math.pi/180  # degrees-to-radians conversion factor (e.g. 120*deg)
from math import ceil
import sys
import itertools
import os.path as osp
import numpy as np
from scipy.stats import truncnorm
#from gym import spaces
from rllab.spaces import Box, Discrete
# from sandbox.rocky.tf.spaces import Box, Discrete
import simpy
from gym.utils import colorize, seeding
from eventdriven.madrl_environments import AbstractMAEnv, Agent
from eventdriven.rltools.util import EzPickle
from eventdriven.EDhelpers import SimPyRollout
from rllab.envs.env_spec import EnvSpec
import pdb
import random
from math import exp
## ENVIRONMENT PARAMETERS
GRID_LIM = 1.0  # play area is the square [-GRID_LIM, GRID_LIM]^2
GAMMA = 0.02 # math.log(0.9)/(-5.)  (continuous-time discount rate)
MAX_SIMTIME = math.log(0.005)/(-GAMMA)  # horizon: when the discount factor has decayed to 0.005
UAV_VELOCITY = 0.015 # m/s
HOLD_TIME = 3. # How long an agent waits when it asks to hold its position
HOLD_TIME_VAR = 0.1*HOLD_TIME  # std-dev of the (normally distributed) hold time, breaks symmetry
ACTION_WAIT_TIME = 1e-5  # tiny get_obs->act gap so "simultaneous" agents don't act sequentially
UAV_MINS_STD = 0. #1.5  (std-dev of UAV-seconds needed per fire; 0 = deterministic)
UAV_MINS_AVG = 3.  # mean UAV-seconds to extinguish (scaled by fire level when UAV_MINS_STD > 0)
PRINTING = False  # verbose per-event logging
FIRE_DEBUG = False  # extra fire-state logging
## --- SIMPY FUNCTIONS
def within_epsilon(arr1, arr2):
    """Return True when the two points are within 1e-3 (Euclidean) of each other."""
    gap = np.linalg.norm(np.asarray(arr1) - np.asarray(arr2))
    return gap < 0.001
def distance(arr1, arr2):
    """Euclidean distance between two points, returned as a plain Python float."""
    delta = np.subtract(arr1, arr2)
    return float(np.linalg.norm(delta))
## --- ED Env
class UAV(Agent):
    """One UAV agent in the fire-extinguishing SMDP.

    Runs as a simpy process (``sim``): observe -> query ``policy`` for an
    action -> execute ``take_action`` -> record the (obs, action, reward,
    info) tuple for the rollout.  Discrete actions 0-4 fly to one of the
    five closest fires (via ``action_map``); action >= 5 holds position.
    """
    def __init__(self, env, simpy_env, id_num, start_position, goal_position, gamma, policy):
        self.env = env              # owning FireExtinguishingEnv
        self.simpy_env = simpy_env  # shared simpy.Environment (simulation clock)
        self.id_num = id_num
        self.gamma = gamma          # continuous-time discount rate applied in accrue_reward
        self.policy = policy        # callable obs -> (action, agent_info); None on dummy reset
        # Per-trajectory rollout storage, appended to once per decision epoch.
        self.observations = []
        self.actions = []
        self.rewards = []
        self.agent_infos = []
        self.env_infos = []
        self.offset_t_sojourn = []
        # Fire Extinguishing specific stuff
        self.start_position = start_position
        self.goal_position = goal_position
        self.action_time = 0.       # sim time at which the current action started
        self.accrued_reward = 0.    # discounted reward collected since the last action
        # Map discrete actions 0-4 onto the indices of the five closest fires.
        fire_dists = [ distance(f.location, self.current_position) for f in self.env.fires ]
        closest_five_fires = np.argsort(fire_dists).tolist()[:5]
        self.action_map = closest_five_fires
        self.fire_attacking = -1    # index of the fire whose extinguish party we're in (-1 = none)
        self.fire_interested = -1   # index of the fire whose interest party we're in (-1 = none)
        return

    def sim(self):
        """simpy process: the agent's observe/act/record loop until env.done."""
        obs = self.get_obs()
        while(not self.env.done):
            yield self.simpy_env.timeout(ACTION_WAIT_TIME) # Forces small gap between get_obs and act
            action, agent_info = self.policy(obs)
            self.action_event = self.simpy_env.process(self.take_action(action))
            try:
                # Wake when the action finishes OR the episode ends, whichever
                # is first; a Fire being extinguished may also interrupt us.
                yield simpy.AnyOf(self.simpy_env,[self.action_event, self.env.done_event])
            except simpy.Interrupt:
                pass
            reward = self.get_reward()
            self.observations.append(self.env.observation_space.flatten(obs))
            self.actions.append(self.env.action_space.flatten(action))
            self.rewards.append(reward)
            self.agent_infos.append(agent_info)
            self.env_infos.append({})
            obs = self.get_obs()
            # The last component of the flattened observation is the sojourn time.
            self.offset_t_sojourn.append(self.env.observation_space.flatten(obs)[-1])

    def take_action(self, action):
        """simpy process executing one action: hold (action >= 5) or fly to a fire (0-4)."""
        self.start_position = copy.deepcopy(self.current_position)
        self.action_time = self.simpy_env.now
        # leave any interest party you were in
        if(self.fire_interested != -1):
            self.env.fires[self.fire_interested].leave_interest_party(self)
            self.fire_interested = -1
        # NOTE(review): hold_current / new_goal are set but hold_current is
        # never read afterwards — leftover scaffolding.
        hold_current = False
        new_goal = None
        if action >= 5:
            # want to hold
            hold_current = True
            self.goal_position = copy.deepcopy(self.start_position)
            if(PRINTING): print('UAV %d holding at (%.2f, %.2f)' % (self.id_num, self.current_position[0], self.current_position[1]))
            # If we're at a fire, join its extinguish party
            for i, f in enumerate(self.env.fires):
                if within_epsilon(self.current_position, f.location):
                    f.join_interest_party(self)
                    self.fire_interested = i
                    if(self.fire_attacking != i):
                        f.join_extinguish_party(self)
                        self.fire_attacking = i
                    break
            # Hold duration is randomized (normal around HOLD_TIME) to break symmetry.
            yield self.simpy_env.timeout( self.env.fixed_step( HOLD_TIME + HOLD_TIME_VAR*np.random.normal() ))
        else:
            # assign new goal location, fire interest
            fire_ind = self.action_map[action]
            self.env.fires[fire_ind].join_interest_party(self)
            self.fire_interested = fire_ind
            new_goal = copy.deepcopy(self.env.fires[fire_ind].location)
            # stop attacking any fire you are attacking
            if(self.fire_attacking > -1):
                self.env.fires[self.fire_attacking].leave_extinguish_party(self)
            self.goal_position = copy.deepcopy(new_goal)
            travel_time = np.linalg.norm( np.array(self.goal_position) - np.array(self.start_position) ) / UAV_VELOCITY
            if(PRINTING): print('UAV %d is heading from (%.2f, %.2f) to (%.2f, %.2f)' %
                (self.id_num, self.start_position[0], self.start_position[1], self.goal_position[0], self.goal_position[1] ))
            yield self.simpy_env.timeout(self.env.fixed_step(travel_time))

    @property
    def time_since_action(self):
        # Elapsed sim time since the current action began.
        return self.simpy_env.now - self.action_time

    @property
    def current_position(self):
        # Position interpolated along the start->goal segment at UAV_VELOCITY.
        if( within_epsilon(self.start_position, self.goal_position)):
            return copy.deepcopy(self.start_position)
        # find unit vector in heading direction
        unit_vec = np.array(self.goal_position) - np.array(self.start_position)
        dist_to_travel = np.linalg.norm(unit_vec)
        unit_vec /= dist_to_travel
        # find distance travelled (capped at the goal)
        distance_travelled = min(self.time_since_action * UAV_VELOCITY,dist_to_travel)
        return ( np.array(self.start_position) + unit_vec * distance_travelled ).tolist()

    def get_obs(self):
        """Own (x, y), then 5 features for each of the 5 closest fires, then sojourn time.

        Side effect: refreshes ``action_map`` so actions 0-4 always refer to
        the currently-closest fires.
        """
        obs = copy.deepcopy(self.current_position) # own position
        # find closest fires
        fire_dists = [ distance(f.location, self.current_position) for f in self.env.fires ]
        closest_five_fires = np.argsort(fire_dists).tolist()[:5]
        self.action_map = closest_five_fires
        for f_ind in closest_five_fires:
            f = self.env.fires[f_ind]
            f_obs = [distance(f.location, self.current_position)]
            f_obs += [f.reward, len(f.interest_party)]
            f_obs += [1.] if f.status else [0.]
            f_obs += [f.uavsecondsleft]
            obs += f_obs
        obs += [self.time_since_action]
        return obs

    def get_reward(self):
        """Return and clear the reward accrued since the last action."""
        reward = self.accrued_reward
        self.accrued_reward = 0.
        return reward

    def accrue_reward(self, reward):
        # Discount back to the moment the current action started; no credit
        # is assigned once the episode is over.
        if(not self.env.done):
            self.accrued_reward += exp(-self.time_since_action * self.gamma) * reward

    @property
    def observation_space(self):
        # Each agent observes:
        #   Its own x,y coordinates
        #   For the 5 closest fires: distance, strength (reward), interest-party
        #   size, status (burning=1), UAV-seconds left  (5 features per fire)
        #   Its sojourn time
        return Box( np.array( [-GRID_LIM] * 2 + # OWN
                    [0., 0., 0., 0., 0.]*5 + # Fires
                    [0.] # Sojourn time
                    ),
                np.array( [GRID_LIM] * 2 + # OWN
                    [np.inf, 10., np.inf, 1., np.inf]*5 + # Fires
                    [np.inf] # Sojourn time
                    ), )

    @property
    def action_space(self):
        # Actions are Fire to go to or STAY
        return Discrete( 5 + # Fires
                         1 ) # stay
class Fire(object):
    """A fire that UAVs try to extinguish.

    A fire requires ``uav_seconds_left`` cumulative UAV-seconds of work; the
    "extinguish party" drains it at a rate equal to the party size.  The
    "interest party" is every UAV currently heading to / holding at this
    fire; its members' actions are interrupted when the fire goes out.
    """
    def __str__(self):
        return '<{} instance>'.format(type(self).__name__)

    def __init__(self, env, simpy_env, id_num, level, location):
        self.env = env
        self.simpy_env = simpy_env
        self.id_num = id_num
        self.location = location
        self.status = True                  # True while still burning
        self.extinguish_party = [] # Number of agents trying to extinguish the fire
        self.prev_len_extinguish_party = 0  # party size at the last bookkeeping update
        self.last_update_time = simpy_env.now
        self.interest_party = []            # UAVs targeting/holding at this fire
        self.extinguish_event = None        # simpy process running try_to_extinguish
        self.time_until_extinguish = np.inf # remaining wall-clock time at current party size
        self.level = level
        self.reward = level                 # reward granted to every agent on extinguish
        if(UAV_MINS_STD > 0):
            # Truncated normal (>= 0) around UAV_MINS_AVG * level.
            self.uav_seconds_left = float(truncnorm( -UAV_MINS_AVG*level / UAV_MINS_STD, np.inf).rvs(1))
            self.uav_seconds_left = self.uav_seconds_left * UAV_MINS_STD + UAV_MINS_AVG*level
        else:
            # NOTE(review): the deterministic case ignores `level`
            # (UAV_MINS_AVG rather than UAV_MINS_AVG*level) — confirm intended.
            self.uav_seconds_left = UAV_MINS_AVG
        if(PRINTING or FIRE_DEBUG):
            print('Fire %d has a %.2f UAV seconds left' % (self.id_num, self.uav_seconds_left))

    def sim(self):
        """simpy process: wait out time_until_extinguish, restarting whenever
        update_extinguish_time interrupts us with a new estimate."""
        while(True):
            try:
                self.extinguish_event = self.simpy_env.process(self.try_to_extinguish())
                yield self.extinguish_event
                self.extinguish()
                break
            except simpy.Interrupt:
                continue

    def try_to_extinguish(self):
        # Sleeps for the current estimate; infinite when nobody is attacking.
        yield self.simpy_env.timeout(self.env.fixed_step(self.time_until_extinguish))

    @property
    def uavsecondsleft(self):
        """Read-only estimate of work remaining; does NOT mutate state."""
        party_size = len(self.extinguish_party)
        now = self.simpy_env.now
        # decrement uav_seconds_left according to how long its been
        # attacked for and by how many agents, since this function
        # was last called
        time_since_last_update = now - self.last_update_time
        decrement = time_since_last_update * party_size
        return self.uav_seconds_left - decrement

    def update_extinguish_time(self):
        """Re-baseline the bookkeeping after a party-size change and reschedule
        the extinguish event for the new burn-down rate."""
        party_size = len(self.extinguish_party)
        prev_party_size = self.prev_len_extinguish_party
        now = self.simpy_env.now
        # decrement uav_seconds_left according to how long its been
        # attacked for and by how many agents, since this function
        # was last called
        time_since_last_update = now - self.last_update_time
        decrement = time_since_last_update * prev_party_size
        # update state vars
        self.last_update_time = now
        self.prev_len_extinguish_party = party_size
        self.uav_seconds_left -= decrement
        # update event with new time remaining and new party size
        time_to_extinguish = self.uav_seconds_left / party_size if party_size > 0 else np.inf
        self.time_until_extinguish = time_to_extinguish
        try:
            self.extinguish_event.interrupt()
        except RuntimeError:
            # event already triggered / not yet interruptible
            pass
        if(FIRE_DEBUG):
            print('Fire %d has extinguish party size %d and %.2f UAV seconds left at time %.2f' %
                (self.id_num, party_size, self.uav_seconds_left, now))
        return

    def join_interest_party(self, uav):
        if uav not in self.interest_party:
            if(PRINTING): print('UAV %d is joining Fire %d interest party at %.2f' % (uav.id_num, self.id_num, self.simpy_env.now))
            self.interest_party.append(uav)

    def leave_interest_party(self, uav):
        if uav in self.interest_party:
            if(PRINTING): print('UAV %d is leaving Fire %d interest party at %.2f' % (uav.id_num, self.id_num, self.simpy_env.now))
            self.interest_party.remove(uav)

    # Adds an agent to the number of agents trying to extinguish the fire
    def join_extinguish_party(self, uav):
        if(not self.status):
            # Extinguished already
            return self.extinguish_event
        if uav not in self.extinguish_party:
            if(PRINTING): print('UAV %d is joining Fire %d extinguishing party at %.2f' % (uav.id_num, self.id_num, self.simpy_env.now))
            self.extinguish_party.append(uav)
            if len(self.extinguish_party) > 1:
                # penalize everyone in the party for crowding the same fire
                # NOTE(review): the loop variable shadows the `uav` parameter
                # (harmless here — the parameter is not used afterwards).
                for uav in self.extinguish_party:
                    uav.accrue_reward(-20)
        if(self.status):
            self.update_extinguish_time()

    def leave_extinguish_party(self, uav):
        if(not self.status):
            # Extinguished already
            return self.extinguish_event
        if uav in self.extinguish_party:
            if(PRINTING): print('UAV %d is leaving Fire %d extinguishing party at %.2f' % (uav.id_num, self.id_num, self.simpy_env.now))
            self.extinguish_party.remove(uav)
        if(self.status):
            self.update_extinguish_time()

    def extinguish(self):
        """Mark the fire out, reward every agent, and wake interested UAVs."""
        self.status = False
        for a in self.env.env_agents:
            # if(a in self.extinguish_party):
            #     a.accrue_reward(self.reward)
            # else:
            #     a.accrue_reward(self.reward)
            a.accrue_reward(self.reward)
        # Interrupt action for all agents in your interest party
        for a in self.interest_party:
            try:
                a.action_event.interrupt()
            except RuntimeError:
                pass
        # set event to one that never triggers
        # NOTE(review): -1 is clamped to 0 by fixed_step; the status flag above
        # is what actually prevents re-scheduling — confirm the comment's intent.
        self.time_until_extinguish = -1
        if(PRINTING or FIRE_DEBUG): print('Fire %d extinguished at %.2f' % (self.id_num, self.simpy_env.now))
        # succeed death event
        self.env.fire_extinguish_events[self.id_num].succeed()
        return
class FireExtinguishingEnv(AbstractMAEnv, EzPickle, SimPyRollout):
    """Event-driven multi-agent fire-extinguishing environment (simpy-based).

    UAV agents and Fire processes share one simpy.Environment; an episode
    (``reset_and_sim``) runs until all fires are out or MAX_SIMTIME elapses.
    """

    def __init__(self, num_agents, num_fire_clusters, num_fires_per_cluster, gamma,
                 fire_locations = None, start_positions = None, DT = -1):
        EzPickle.__init__(self, num_agents, num_fire_clusters, num_fires_per_cluster, gamma,
                          fire_locations, start_positions, DT)
        self.discount = gamma
        self.DT = DT                 # > 0 quantizes all event times onto a DT grid
        self.n_agents = num_agents
        self.n_fires = num_fire_clusters * num_fires_per_cluster
        self.num_fire_clusters = num_fire_clusters
        self.fire_locations = fire_locations
        self.start_positions = start_positions
        # Assigned on reset()
        self.env_agents = [None for _ in range(self.n_agents)] # NEEDED
        self.fires = [None for _ in range(self.n_fires)]
        self.simpy_env = None
        self.uav_events = [] # checks if a UAV needs to act
        self.fire_events = [] # checks if a fire was extinguished
        self.done = False
        self.seed()

    def fixed_step(self, time):
        """Round a delay up so the resulting event lands on the DT grid
        (pass-through when DT <= 0 or time is infinite); never negative."""
        if(np.isinf(time)):
            return time
        elif(self.DT > 0.):
            now = self.simpy_env.now
            return max(float(ceil((now + time) / self.DT )) * self.DT - now, 0.0)
        else:
            return max(time, 0.0)

    def reset(self):
        # This is a dummy reset just so agent obs/action spaces can be accessed
        self.done = False
        self.simpy_env = simpy.Environment()
        self.fire_extinguish_events = [simpy.Event(self.simpy_env) for i in range(self.n_fires)]
        fire_levels = [1]*self.n_fires
        # we want to randomize
        fire_locations = ( 2.*np.random.random_sample((self.n_fires,2)) - 1.).tolist()
        self.fires = [ Fire(self, self.simpy_env, i, fire_levels[i], fl)
                       for i, fl in enumerate(fire_locations) ]
        if self.start_positions is not None:
            self.env_agents = [ UAV(self, self.simpy_env, i, sp, sp, self.discount, None) for i,sp in enumerate(self.start_positions) ]
        else:
            # we want to randomize
            start_positions = ( 2.*np.random.random_sample((self.n_agents,2)) - 1.).tolist()
            self.env_agents = [ UAV(self, self.simpy_env, i, sp, sp, self.discount, None) for i,sp in enumerate(start_positions) ]
        return

    def step(self, actions):
        # Event-driven env: use reset_and_sim() instead of the step() API.
        raise NotImplementedError

    def reset_and_sim(self, policies):
        """Run one full episode with the given per-agent policies.

        :param policies: one ``obs -> (action, agent_info)`` callable per agent
        :returns: (observations, actions, rewards, agent_infos, env_infos,
                   offset_t_sojourn) — one per-step list per agent
        """
        self.simpy_env = simpy.Environment()
        self.done = False
        self.fire_extinguish_events = [simpy.Event(self.simpy_env) for i in range(self.n_fires)]
        if self.fire_locations is True:
            # Use presets: three clusters of three closely-spaced fires, the
            # clusters rotated +/-120 degrees from each other.
            assert self.num_fire_clusters == 3, 'Only 3 clusters / fires per cluster implemented right now :('
            assert self.n_fires/self.num_fire_clusters == 3, 'Only 3 clusters / fires per cluster implemented right now :('
            R = np.array([[np.cos(120*deg),np.sin(-120*deg)],[np.sin(120*deg), np.cos(120*deg)]])
            f1 = np.reshape(np.array([-0.01, 1]),(2,1))
            f2 = np.reshape(np.array([0.01, 1]),(2,1))
            f3 = np.reshape(np.array([0, 1 - 0.02*math.sin(60*deg)]),(2,1))
            fire_locations = [f1,f2,f3, R.dot(f1),R.dot(f2),R.dot(f3), R.T.dot(f1),R.T.dot(f2),R.T.dot(f3) ]
            fire_locations = [np.reshape(f,(2,)).tolist() for f in fire_locations]
            self.fires = [Fire(self,self.simpy_env, i, 1, fl)
                          for i, fl in enumerate(fire_locations) ]
        else:
            # Randomized fire locations are not implemented for real rollouts.
            # (Removed: unreachable dead code after this raise that referenced
            # an undefined `fire_levels` variable.)
            raise NotImplementedError
        if self.start_positions is not None:
            self.env_agents = [ UAV(self, self.simpy_env, i, sp, sp, self.discount, policies[i]) for i,sp in enumerate(self.start_positions) ]
        else:
            # we want to randomize
            start_positions = ( 2.*np.random.random_sample((self.n_agents,2)) - 1.).tolist()
            self.env_agents = [ UAV(self, self.simpy_env, i, sp, sp, self.discount, policies[i]) for i,sp in enumerate(start_positions) ]
        # Process all UAVs
        agent_events = []
        for uav in self.env_agents:
            agent_events.append(self.simpy_env.process( uav.sim() ))
        # Process all fires
        for fire in self.fires:
            self.simpy_env.process( fire.sim() )
        self.max_simtime_event = self.simpy_env.timeout(MAX_SIMTIME)
        self.done_event = simpy.Event(self.simpy_env)
        # Run until every fire is out or the time horizon is hit ...
        self.simpy_env.run(until = simpy.AllOf(self.simpy_env, self.fire_extinguish_events) | self.max_simtime_event )
        self.done_event.succeed()
        self.done = True
        # ... then let all agent processes finish their final bookkeeping.
        self.simpy_env.run(until = simpy.AllOf(self.simpy_env, agent_events))
        # Sanity check: every accrued reward must have been collected.
        rewards = [uav.get_reward() for uav in self.env_agents]
        if sum(rewards) != 0:
            print('There were unaccounted for rewards')
            [print(r) for r in rewards]
            raise RuntimeError
        # Collect observations, actions, etc.. and return them
        observations = [ u.observations for u in self.env_agents]
        actions = [ u.actions for u in self.env_agents]
        rewards = [ u.rewards for u in self.env_agents]
        agent_infos = [ u.agent_infos for u in self.env_agents]
        env_infos = [ u.env_infos for u in self.env_agents]
        offset_t_sojourn = [ u.offset_t_sojourn for u in self.env_agents ]
        return observations, actions, rewards, agent_infos, env_infos, offset_t_sojourn

    @property
    def spec(self):
        return EnvSpec(
            observation_space=self.env_agents[0].observation_space,
            action_space=self.env_agents[0].action_space,
        )

    @property
    def observation_space(self):
        if self.env_agents[0] is not None:
            return self.env_agents[0].observation_space
        else:
            self.reset()
            return self.env_agents[0].observation_space

    @property
    def action_space(self):
        if self.env_agents[0] is not None:
            return self.env_agents[0].action_space
        else:
            self.reset()
            return self.env_agents[0].action_space

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass

    # BUGFIX: `@property` was stacked twice here; property-of-property raises
    # TypeError on attribute access.  A single decorator is correct.
    @property
    def reward_mech(self):
        return self._reward_mech

    @property
    def agents(self):
        return self.env_agents

    def seed(self, seed=None):
        self.np_random, seed_ = seeding.np_random(seed)
        return [seed_]

    def terminate(self):
        return

    def get_param_values(self):
        return self.__dict__
# (name, type, default, description) tuples consumed by RunnerParser to build
# the command-line interface (description strings left empty here — confirm
# against RunnerParser's expected schema).
ENV_OPTIONS = [
    ('n_agents', int, 3, ''),
    ('n_fire_clusters', int, 3, ''),
    ('n_fires_per_cluster' , int, 3, ''),
    ('fire_locations', list, True, ''),
    ('start_positions', list, None, ''),
    ('discount', float, GAMMA, ''),
    ('GRID_LIM', float, 1.0, ''),
    ('MAX_SIMTIME', float, MAX_SIMTIME, ''),
    ('UAV_VELOCITY', float, UAV_VELOCITY, ''),
    ('HOLD_TIME', float, HOLD_TIME, ''),
    ('UAV_MINS_AVG', float, UAV_MINS_AVG, ''),
    ('UAV_MINS_STD', float, UAV_MINS_STD, ''),
    ('HOLD_TIME_VAR', float, HOLD_TIME_VAR, ''),
    ('ACTION_WAIT_TIME', float, ACTION_WAIT_TIME, ''),
    ('DT', float, -1., '')
]
from FirestormProject.runners import RunnerParser
from FirestormProject.runners.rurllab import RLLabRunner
import tensorflow as tf
from FirestormProject.test_policy import path_discounted_returns, policy_performance, \
parallel_policy_performance, parallel_path_discounted_returns, test_smart_policy
if __name__ == "__main__":
    import datetime
    import dateutil
    # Parse CLI options (ENV_OPTIONS above) and build a timestamped experiment name.
    parser = RunnerParser(ENV_OPTIONS)
    mode = parser._mode
    args = parser.args
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
    exp_name = 'experiment_%s_dt_%.3f' % (timestamp, args.DT)
    args.exp_name = exp_name
    env = FireExtinguishingEnv(num_agents = args.n_agents, num_fire_clusters = args.n_fire_clusters,
                               num_fires_per_cluster = args.n_fires_per_cluster, gamma = args.discount,
                               fire_locations = args.fire_locations, start_positions = args.start_positions, DT = args.DT)
    #run = RLLabRunner(env, args)
    #run()
    #quit()
    # Test simply_policy: evaluate the hand-coded smart policy and report
    # mean discounted return with its standard error.
    from sandbox.rocky.tf.envs.base import TfEnv
    paths = parallel_path_discounted_returns(env=TfEnv(env), num_traj=1000, policy = test_smart_policy(), progbar = True)
    print(np.mean(paths), np.std(paths) / np.sqrt(len(paths)))
    quit()
    # NOTE(review): everything below is unreachable because of the quit()
    # above — kept as a record of earlier evaluation runs.
    filenames = [
        'experiment_2017_04_22_19_15_17_101782_PDT_dt_-1.000',
        'experiment_2017_04_22_19_03_39_104449_PDT_dt_0.100',
        'experiment_2017_04_22_18_51_33_838148_PDT_dt_0.316',
        'experiment_2017_04_22_18_40_00_951295_PDT_dt_1.000',
        'experiment_2017_04_22_18_28_44_508570_PDT_dt_3.162',
        'experiment_2017_04_22_18_17_40_977501_PDT_dt_10.000'
    ]
    # experiment_2017_04_22_16_51_28_720596_PDT_dt_1.000
    # 100% (40 of 40) |########################################################################################################################| Elapsed Time: 0:14:29 Time: 0:14:29
    # Mean ADR: 2.74564120508
    # Std ADR: 0.00777526574076
    # experiment_2017_04_21_15_10_08_966990_PDT_dt_-1.000
    # 100% (40 of 40) |########################################################################################################################| Elapsed Time: 0:24:46 Time: 0:24:46
    # Mean ADR: 2.82731574999
    # Std ADR: 0.00812471811702
    # for filename in filenames:
    #     _, _, adr_list = policy_performance(env = env, gamma = args.discount, num_traj = num_trajs_sim,
    #         filename = filename, start_itr = 260, end_itr = 300)
    # num_traj_sim = 100
    # out_dict = {}
    # for filename in filenames:
    #     out_dict[filename] = parallel_policy_performance(env = env, num_traj = num_traj_sim,
    #         filename = filename, start_itr = 260, end_itr = 300)
    # import pickle
    # pickle.dump(out_dict, open('./data/policyperformance.pkl','wb'))
    num_traj_sim = 1
    import glob
    import pickle
    # Map each DT setting to a glob of its checkpoint directories.
    experiments = {-1: './data/*_-1.000', 10: './data/*_10.000', 0.316: './data/*_0.316',
                   3.162: './data/*_3.162', 1: './data/*_1.000', 0.1: './data/*_0.100' }
    results = {}
    for exp_id, exp_dirs in experiments.items():
        print('Experiment %.2f' % (exp_id))
        filenames = glob.glob(exp_dirs)
        out_dict = {}
        for i, fn in enumerate(filenames):
            out_dict[str(i)] = parallel_policy_performance(env = env, num_traj = num_traj_sim,
                filename = fn, start_itr = 260, end_itr = 300)
        results[str(exp_id)] = out_dict
        # Checkpoint each experiment's results as we go.
        pickle.dump(out_dict, open('./data/ckpt_'+str(exp_id)+'.pkl','wb'))
    pickle.dump(results, open('./data/policyperformance.pkl','wb'))
|
# A set is an unordered collection of unique (hashable) items — no duplicates.
sets = {1,2,3}
sets = {1,"two",3.00,(2,3)}  # mixed types are fine; the tuple is hashable
list1 = [1,4,5,1,4,5]
sets = set(list1)  # duplicates collapse
print(list1,sets)
# create an empty set
sets = {}#NOTE: {} is an empty dictionary, NOT a set
sets = set() # we must use the set() function to create an empty set
# Add and update a set (indexing has no meaning in a set because it is unordered)
sets = set(list1)
sets.add(2)
print(*sets)
sets.update([3,6,7]) # update() takes a list, tuple, string or set
print(*sets)
# removing elements from sets (discard is a no-op if absent; remove raises KeyError)
sets.discard(5)
print(*sets)
sets.remove(6)
print(*sets)
# pop and clear
sets.pop() # removes an arbitrary element from the set
print(*sets)
sets.clear()# deletes all elements from the set
print(*sets)
# python set operations
A = {1,2,3}
B = {2,3,4,5}
print(A|B,A.union(B),B.union(A)) #union
print(A&B,A.intersection(B),B.intersection(A)) #intersection
print(A^B,B^A,A.symmetric_difference(B),B.symmetric_difference(A)) # symmetric difference
print(A-B,B-A,A.difference(B),B.difference(A)) # difference
# set iteration
for em in A :
    print(em)
# check whether an element exists in a set
print(1 in A)
print('1' in A)  # False: the string '1' is not the integer 1
|
#!/usr/bin/env python
import sys
import Sex
import GenieDB
import Date
# Configuration Control ###############################################
if 1: # for folding (lets editors collapse this settings section; always runs)
    # These settings affect how the program operates. Some are for
    # debugging. Others are optional, but produce important results.

    # Disambiguate successors/predecessors who have the exact same names
    # by adding a roman number suffix. Oldest relative has no suffix,
    # first scion is called "II", etc.
    DISAMBIGUATE_SCIONS = True
    # report each scion relationship detected, whether resolved or not
    REPORT_SCIONS = False
    # Disambiguate non-scions who happen to have the exact same names
    # by adding an arabic number suffix. First relative entered with
    # the name has no suffix, first namesake gets "(2)", etc.
    DISAMBIGUATE_NAMESAKES = True
    # report each namesake relationship detected, whether resolved or not
    REPORT_NAMESAKES = False
    # try to infer sex of individual from first and middle names
    INFER_SEX_FROM_NAME = True
    # try to infer sex of individual, if indeterminate, from partner
    INFER_SEX_FROM_UNION = True
    # try to infer sex of individual, if indeterminate, from other hints
    INFER_SEX_FROM_TITLE = False
    # report each person who's sex cannot be determined
    REPORT_UNK_SEXES = True
    # report names that do not appear in our sex-name database
    REPORT_NEW_NAMES = False
    # report all first and middle names in the entire family
    REPORT_ALL_NAMES = False
    # include person's sex when printing
    INCLUDE_SEX_IN_PERSON_STR = False
    # include person's ID when printing
    INCLUDE_INDEX_IN_PERSON_STR = True
    # trace DB inserts
    TRACE_DB_INSERTS = True
    # trace Date extraction
    TRACE_DATE_EXTRACTION = False
    # trace data import
    TRACE_TEXT_IMPORT = False
# Constants ###########################################################
import Sex
# quick and dirty conversions to roman numerals and ordinal names;
# good enuf for gov't work. the 0th case "cannot" happen, and the
# 1st cases also are never used as a matter of policy.
# Roman numerals for scion suffixes; index 0 "cannot" happen and index 1 is
# never used as a matter of policy.  NOTE: scions beyond X would IndexError.
ROMAN = [ "[ZERO]", "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X" ]

def ORDINAL( n ):
    """Return the English ordinal string for n (callers guarantee n > 1).

    BUGFIX: the old table-plus-"%dth" fallback produced "21th", "22th",
    "31st-style" errors for every n ending in 1/2/3 above 20.  Outputs for
    previously-correct inputs (2-20, 24-30, ...) are unchanged.
    """
    assert n > 1
    if 10 <= n % 100 <= 13:
        suffix = "th"     # 11th, 12th, 13th (and 111th, 212th, ...)
    else:
        suffix = { 1: "st", 2: "nd", 3: "rd" }.get( n % 10, "th" )
    return "%d%s" % ( n, suffix )
# Globals #############################################################
# NOTE: idPerson / idUnion are 1-based (len(...) + 1 at creation), so the
# list index of an entity is its id minus 1.
People = [] # list of all people, indexed by idPerson
Unions = [] # list of all marriages, indexed by idUnion
PeopleStack = {} # recent descendants, indexed by level
UnionsStack = {} # recent unions, indexed by level
Level = 0 # most recent level
# Object to represent Unions ##########################################
class Union( object ):
    """ represent the union of two people

    ``a`` is the person already on the tree; ``b`` is the partner being
    attached.  Marriage ("m") and end-of-union ("x") date/place attributes
    are read from either partner, preferring ``b``'s.
    """
    def __init__( self, a, b ):
        self.a = a
        self.b = b
        b.level = a.level                  # the partner joins a's generation level
        a.marriage = b.marriage = self
        self.children = []
        if INFER_SEX_FROM_UNION:
            Sex.InferSexFromUnion( self )
        self.Date, self.Place = ExtractDateAndPlace(
            b.attr.get( "m",
            a.attr.get( "m", "" )))
        self.EndDate, _unused_ = ExtractDateAndPlace(
            b.attr.get( "x",
            a.attr.get( "x", "" )))
        Unions.append( self )
        self.idUnion = len( Unions )       # 1-based id (assigned after append)

    def GetDate( self ): return self.Date
    def GetPlace( self ): return self.Place
    def GetEndDate( self ): return self.EndDate

    def AddOffspring( self, child ):
        self.children.append( child )

    def Export( self, index, out=sys.stdout ):
        """Print this union and, recursively, its children.

        BUGFIX: these prints used Python-2 ``print >>out`` statement syntax,
        a SyntaxError under Python 3; the rest of the file already uses
        print() calls (cf. Person.Export's ``print(self, file=out)``).
        """
        if index > 1:
            print( "+", ORDINAL( index ), self.b.sex.Title(), "OF", self.a, file=out )
        print( self, file=out )
        for child in self.children:
            child.Export( out )

    def InsertDb( self ):
        """Insert this union and its offspring links into the database."""
        if TRACE_DB_INSERTS:
            print ("... Inserting Union:", self.idUnion)
        GenieDB.DbExecute(
            GenieDB.Unions({
                "idUnion": self.idUnion,
                "idPerson_A": self.a.idPerson,
                "idPerson_B": self.b.idPerson,
                "StartDate": str( self.GetDate()),
                "EndDate": str( self.GetEndDate()),
                "Location": self.GetPlace(),
            }).AsSQLInsert()) \
            .Commit()
        for child in self.children:
            if TRACE_DB_INSERTS:
                print ("... Inserting Child:", self.idUnion, child.idPerson)
            GenieDB.DbExecute(
                GenieDB.Offspring({
                    "Parents": self.idUnion,
                    "Offspring": child.idPerson,
                }).AsSQLInsert()) \
                .Commit()

    def __str__( self ):
        # BUGFIX: this read ``self.title``, but Union never sets a title and
        # (unlike Person) has no __getattr__ fallback, so __str__ raised
        # AttributeError.  The partner ``b`` carries the connective title
        # ("AND" by default, "<nth> Wife OF" etc. for titled spouses) —
        # TODO(review): confirm b (not a) is the intended title holder.
        return "Union #%d: %s %s %s" % ( self.idUnion, str( self.b ), self.b.title, str( self.a ))
# Object to represent individual People ###############################
class Person( object ):
def __init__( self, lexeme, parents=None, title=None ):
self.name = lexeme.name
self.attr = lexeme.attr
self.line = lexeme.line
self.fields = lexeme.fields
self.level = lexeme.level
self.parents = None
self.idPerson = len( People ) + 1
self.qualifier = 1
self.scion = 1
if title: self.title = title + " OF"
else: self.title = "AND"
try:
self.sex = Sex.LetterMap[ self.attr[ "s" ].lower()]
except KeyError:
self.sex = Sex.Unk
if INFER_SEX_FROM_TITLE:
Sex.InferSexFromTitle( self, title )
if INFER_SEX_FROM_NAME and not self.sex:
self.sex = Sex.InferSexFromNames(
self.GetFirstName(),
self.GetMiddleName())
self.BirthDate, self.BirthPlace = ExtractDateAndPlace( self.attr.get( "b", "" ))
self.DeathDate, self.DeathPlace = ExtractDateAndPlace( self.attr.get( "d", "" ))
self.AddParents( parents )
self.DisambiguateName()
People.append( self )
def AddParents( self, parents ):
if parents:
assert self.parents == None
self.parents = parents
parents.AddOffspring( self )
self.DisambiguateSuccessors( parents )
def DisambiguateSuccessors( self, parents ):
""" if a person is named after any direct predecessor,
then we qualify the successors' names with roman numerals
"""
# this takes precedence over disambiguating other name matches.
# check parents
if self.name == parents.a.name:
self.scion = parents.a.scion + 1
if REPORT_SCIONS:
print ("## Scion:", self, "TO", parents.a)
elif self.name == parents.b.name:
self.scion = parents.b.scion + 1
if REPORT_SCIONS:
print ("## Scion:", self, "TO", parents.b)
# Recursively check all other predecessors.
# Since at this point, all prior scions have been detected,
# we can stop after the first match.
if parents.a.parents:
self.DisambiguateSuccessors( parents.a.parents )
if parents.b.parents:
self.DisambiguateSuccessors( parents.b.parents )
def DisambiguateName( self ):
""" if two relataives on different branches share the same name,
we distinguish them with numeric suffixes.
This is done after any Roman numerals have been added.
"""
namesake = FindPersonOnList( self.GetName())
if namesake:
self.qualifier = namesake.qualifier + 1
if REPORT_NAMESAKES:
print ("## Namesake:", self, "AND", namesake)
def GetFirstName( self ):
fields = self.name.split()
if len( fields ) > 1: return fields[ 0 ]
else: return None
def GetMiddleName( self ):
fields = self.name.split()
if len( fields ) > 2: return fields[ 1 ]
else: return None
def GetName( self ):
""" display name with optional roman numerals, if any """
# first realtive with name gets no qualifier
if self.scion > 1: return "%s (%s)" % ( self.name, ROMAN[ self.scion ])
else: return self.name
def GetQualifiedName( self ):
""" return name with optional qualifier suffix, if any """
# first realtive with name gets no qualifier
if self.qualifier > 1: return "%s (%d)" % ( self.GetName(), self.qualifier )
else: return self.GetName()
def GetBirthDate( self ): return self.BirthDate
def GetBirthPlace( self ): return self.BirthPlace
def GetDeathDate( self ): return self.DeathDate
def GetDeathPlace( self ): return self.DeathPlace
def Export( self, out=sys.stdout ):
""" print out self, and any unions """
print (self, file=out)
for union, index in FindUnions( self ):
union.Export( index, out )
def InsertDb( self ):
row = GenieDB.People({
"Name": self.GetQualifiedName(),
"idPerson": self.idPerson,
"Scion": self.scion,
"Qualifier": self.qualifier,
"Sex": str( self.sex ),
"BirthDate": str( self.GetBirthDate()),
"BirthPlace": self.GetBirthPlace(),
"DeathDate": str( self.GetDeathDate()),
"DeathPlace": self.GetDeathPlace(),
"idParents": self.parents and self.parents.idUnion,
})
if TRACE_DB_INSERTS:
print ("... Inserting Person:", self.idPerson, self)
# print row
GenieDB.DbExecute( row.AsSQLInsert()) \
.Commit()
def __getattr__( self, attr ):
return
def __repr__( self ): return "Person( %s )" % self.name
def __str__( self ):
name = self.GetQualifiedName()
if INCLUDE_SEX_IN_PERSON_STR: name += " " + str( self.sex )
if INCLUDE_INDEX_IN_PERSON_STR: name += " #%d" % self.idPerson
return name
# Descendant Input File Decoding ######################################
class ParsedLine( object ):
    """ parse a line in the given format.

    First character selects the line kind:
      digit -> a new offspring at that numeric level
      '+'   -> a marriage
      '*'   -> nth spouse or "friend" ("<title> of <name>:")
    """
    def __init__( self, line ):
        self.token = line[0]
        self.level = Level         # default: current global import level
        self.attr = {}
        if self.token in "0123456789": # a new offspring
            level, line = line.split( None, 1 )
            self.level = int( level )
            self.token = "0"       # canonical token for all offspring lines
        elif self.token == "+": # marriage
            line = line[1:].strip()
        elif self.token == "*": # nth spouse or "friend"
            line = line[1:-1].strip()
            assert " of " in line
            self.title, self.name = line.split( " of " )
            self.name = self.name.replace( ":", "" )
            self.attr = { "name": self.name }
            # '*' lines carry no further attributes: stop here
            return
        else: # not recognizeable
            assert 0
        # now parse the remainder: a name, followed by zero or more attributes
        # ("key:" starts a new attribute; other fields extend the current one)
        self.line = line
        self.attr = {}
        self.fields = line.split()
        self.attr[ "name" ] = curVals = []
        for field in self.fields:
            if field[-1] == ":":
                self.attr[ field[ : -1 ]] = curVals = []
            else:
                curVals.append( field )
        # flatten each attribute's word list back into a single string
        nattr = {}
        for key, val in self.attr.items():
            nattr[ key ] = " ".join( val )
        self.attr = nattr
        self.name = self.attr[ "name" ]
    def __getitem__( self, key ):
        # dictionary-style access to the parsed attributes
        return self.attr[ key ]
    def __str__( self ):
        return (
            "%d \t" % ( self.level )
            + "\n\t".join([ "%s: %s" % ( key, val )
                for key, val in self.attr.items()]))
def Reader( filename ):
    """ yield a ParsedLine for each non-empty, stripped line in a file,
        after skipping a preamble terminated by the first blank line
    """
    # the file() builtin was removed in Python 3; open() works everywhere,
    # and the with-block guarantees the handle is closed
    with open( filename ) as src:
        for line in src:
            if not line.strip():
                break
        for line in src:
            line = line.strip()
            if line:
                yield ParsedLine( line )
def Import( filename ):
    """ read a file in the format provided,
        creating all people and relationships.

    Maintains the global Level plus the PeopleStack/UnionsStack so each
    offspring can find its parents' union one level up.
    """
    global Level
    title = None
    for lexeme in Reader( filename ):
        if lexeme.token == "0": # regular person
            Level = lexeme.level
            PeopleStack[ Level ] \
                = person \
                = Person( lexeme, parents=MostRecentUnionIfAny())
            if TRACE_TEXT_IMPORT:
                print (len( People ), Level, person)
        elif lexeme.token == "*": # additional marriage
            person = FindPersonOnStack( lexeme.name )
            Level = person.level
            title = lexeme.title
            if False and TRACE_TEXT_IMPORT:
                print (len( People ), Level, "xUnion:", person, title)
        elif lexeme.token == "+": # marriage
            spouse = Person( lexeme, title=title )
            UnionsStack[ Level ] = marriage = Union( PeopleStack[ Level ], spouse )
            if TRACE_TEXT_IMPORT:
                print (len( People ), Level, marriage)
            title = None
        else:
            # raising a string is a TypeError in Python 3; use a real
            # exception so this "cannot happen" case fails loudly
            raise ValueError( "impossible token" ) # "cannot" happen
# Utilities ###########################################################
def FindPersonOnStack( name ):
    """ locate by name the highest-level person on the PeopleStack.

    Raises LookupError when no level holds that name ("cannot happen").
    """
    level = Level
    while level > 0:
        if PeopleStack[ level ].name == name:
            return PeopleStack[ level ]
        level -= 1
    # raising a string is a TypeError in Python 3; use a real exception
    raise LookupError( "Person not found: " + name ) # "cannot happen"
def FindPersonOnList( name ):
    """ locate by name the most recent descendant created, or None """
    # scan newest-to-oldest and stop at the first display-name match
    return next(
        ( candidate for candidate in reversed( People )
          if candidate.GetName() == name ),
        None )
def MostRecentUnionIfAny():
    """ return the most recent union or null if there is none """
    # UnionsStack maps level -> union; the parents' union of the current
    # Level lives one level up (KeyError when that level has no union)
    try:
        return UnionsStack[ Level - 1 ]
    except KeyError:
        return None
def FindUnions( person ):
    """ yield (union, ordinal) for every union involving person,
        in encounter order, numbered from 1 """
    ordinal = 0
    for candidate in Unions:
        if person == candidate.a or person == candidate.b:
            ordinal += 1
            yield candidate, ordinal
# lowercase three-letter month abbreviation -> month number (1..12)
MONTH = {
    "jan": 1, "feb": 2, "mar": 3,
    "apr": 4, "may": 5, "jun": 6,
    "jul": 7, "aug": 8, "sep": 9,
    "oct": 10, "nov": 11, "dec": 12,
    }
def INT( s ):
    """ int(s), or 0 when s cannot be parsed as an integer """
    try:
        return int( s )
    except ValueError:
        return 0
def Month( mm ):
    """ best-effort month parser.

    Accepts a month name (any case; first three letters looked up in
    MONTH) or a numeric month/day token as int or digit string.
    Returns the number, or 0 when mm cannot be interpreted.
    """
    try:
        return MONTH[ mm.lower()[:3]]
    except ( KeyError, AttributeError ):
        if type( mm ) == str:
            if mm.isdigit():
                mm = int( mm )
            else:
                # non-numeric word (e.g. a place name): comparing a str
                # against ints below would raise TypeError on Python 3
                return 0
        if 1 <= mm <= 31:
            # NOTE(review): range goes to 31 (not 12), apparently so the
            # same helper can vet day-of-month tokens in ExtractDateAndPlace
            return mm
        # print ("#!! error converting '%s' to month:" % mm)
        return 0
def DATE( yy, mm="0", dd="0" ):
    """ build a Date.Date from year/month/day tokens (strings or ints);
        returns None unless the year is > 1000 and the triple is valid """
    Y = INT( yy )
    M = Month( mm )
    # tolerate "May 4, 1970"-style day tokens that kept their comma
    if "," in dd:
        dd = dd.replace(",","")
    D = INT( dd )
    if Y > 1000:
        try:
            # print ("yymmdd:", yy, mm, dd)
            return Date.Date( Y, M, D )
        except ( ValueError, IndexError, KeyError ) as e:
            pass
    return None
def ExtractDateAndPlace( attr ):
    """ split an attribute string into (date, place).

    Returns (GenieDB.NullDate, "") components when the respective part
    is absent; whatever fields are not consumed by the date become the
    place string.
    """
    # Sometimes we have a date, sometimes just a place, sometimes both.
    # Furthermore, dates come in several forms:
    #    mm-dd-yy
    #    mm-yy
    #    Month Day, Year
    #    Month Year
    #    Year only
    date, place = GenieDB.NullDate, ""
    fields = attr.split()
    if fields:
        # remainder = number of leading fields consumed by the date
        remainder = 0
        if "-" in fields[0].lower(): # mm-dd-yy or mm-yy
            mmddyy = fields[0].split( "-" )
            if len( mmddyy ) == 3:
                date = DATE( mmddyy[2], mmddyy[0], mmddyy[1])
            elif len( mmddyy ) == 2:
                date = DATE( mmddyy[1], mmddyy[0])
            if date:
                remainder = 1
        elif fields[0].isdigit() and int( fields[0]) > 1000: # Year only
            date = DATE( int( fields[0]))
            if date:
                remainder = 1
        else:
            mm = Month( fields[0])
            if 1 <= mm <= 31: # M D, Y; M Y; else no date
                if len( fields ) >= 3:
                    date = DATE( fields[ 2 ], mm, fields[ 1 ])
                    if date:
                        remainder = 3
                if not date:
                    if len( fields ) >= 2:
                        date = DATE( fields[ 1 ], mm )
                        if date:
                            remainder = 2
                    if not date:
                        date = DATE( fields[ 0 ])
                        if date:
                            remainder = 1
        # everything after the date fields is the place
        place = " ".join( fields[ remainder : ])
    if not date:
        date = GenieDB.NullDate
    if attr and TRACE_DATE_EXTRACTION:
        print (attr, "=>")
        print ("\t->", date)
        print ("\t->", place)
    return date, place
# Mainline and Unit Testing ###########################################
# Mainline: import the input files, push everything into the database,
# then emit the optional name/sex reports controlled by the REPORT_* flags.
if __name__=='__main__':
    if 1:
        for filename in sys.argv[1:]:
            Import( filename )
    if 0:
        print ("# Exported Results ################################################")
        People[0].Export() # everything should follow from the first ancestor
    if not TRACE_DATE_EXTRACTION:
        try:
            if TRACE_TEXT_IMPORT:
                print ("#############################################################")
                print ("### Insert Trace ############################################")
                print ("#############################################################")
            GenieDB.MasterClear()
            for person in People:
                person.InsertDb()
            for union in Unions:
                union.InsertDb()
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit
            # and KeyboardInterrupt; Exception keeps the diagnostic intent
            import TraceBackVars
            exctype, value = sys.exc_info()[:2]
            print ("exception:", exctype, value )
            TraceBackVars.TraceBackVars()
    if REPORT_ALL_NAMES:
        # the file() builtin was removed in Python 3 -- use open(); close
        # explicitly so SaveDifferences below sees a fully flushed file
        out = open( "~CommonNames.txt", "wt" )
        for person in People:
            fn = person.GetFirstName()
            if fn: print (fn, file=out)
            mn = person.GetMiddleName()
            if mn: print (mn, file=out)
        out.close()
    if REPORT_NEW_NAMES:
        InferSexFromName.SaveDifferences( "~CommonNames.txt", "~NewNames.txt" )
    if REPORT_UNK_SEXES:
        for person in People:
            if not person.sex:
                print ("# Sex Unk:", person)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Blueprint
# all cart routes are mounted under /cart
cart_bp = Blueprint("cart", __name__, url_prefix="/cart")
# imported last on purpose: presumably cart.views imports cart_bp back
# from this module, so the blueprint must exist first -- verify in views
from cart.views import *
|
import uuid
from django.db import models
# Create your models here.
class Quiz(models.Model):
    """A titled quiz identified by a lazily generated UUID."""
    title = models.CharField(blank=False, max_length=40)
    uuid = models.CharField(max_length=40, null=True, blank=True, unique=True)
    def __init__(self, *args, **kwargs):
        # generate a uuid for fresh instances; rows loaded from the DB
        # arrive with uuid already set, so they are left untouched.
        # NOTE(review): the generated value is only persisted on save();
        # a field default would be the conventional alternative.
        super(Quiz, self).__init__(*args, **kwargs)
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
|
def setup():
    # Processing sketch setup: 300x300 canvas, white background,
    # anti-aliasing on
    size(300,300)
    background(255)
    smooth()
    #noLoop()
def draw():
    # redraw every frame: thick grey line from the mouse to (200, 200)
    background(255)
    strokeWeight(30)
    stroke(100)
    line(mouseX,mouseY, 200, 200)
|
# iterative solution
# iterative solution
def sumOfNumbersIterative(number):
    """Sum the integers 0..number inclusive by explicit accumulation."""
    total = 0
    for value in range(number + 1):
        total += value
    return total
print(sumOfNumbersIterative(5))
# non-iterative solution
# non-iterative solution
def sumOfNumbersNonIterative(number):
    """Gauss closed form; true division means the result is a float."""
    product = number * (number + 1)
    return product / 2
print(sumOfNumbersNonIterative(5))
|
import sys
import math
import numpy as np
import matplotlib
#matplotlib.rcParams['mathtext.fontset'] = 'stix'
#matplotlib.use('PDF')
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import rc
#from matplotlib.collections import LineCollection
#rc('text',usetex = False)
def dot_product(a1,a2):
    """Despite the name: return the SQUARED Euclidean distance between
    vectors a1 and a2 (callers take sqrt() for k-path lengths)."""
    total = 0.0
    for i in range(len(a1)):
        diff = a1[i] - a2[i]
        total += diff * diff
    return total
class KPOINTS:
    """Parse a BANDKS file: k-path segment endpoints, their labels, and
    the cumulative distance of every interpolated k-point.

    NOTE: Python 2 code -- the `/3` below relies on integer division.
    """
    def __init__(self):
        f = open("BANDKS")
        lines = f.readlines()
        f.close()
        self.parse(lines)
    def parse(self, lines):
        # line 1: number of k-points per segment
        self.nkpt = int(lines[1])
        # each band line occupies 3 file lines (2 endpoints + separator)
        nbandlines = (len(lines)-4+1)/3
        self.bandlimits = []      # endpoint coordinates, two per segment
        self.bandsymbol = []      # high-symmetry label of each endpoint
        nline = 3
        for n in range(nbandlines):
            for i in range(2):
                nline = nline + 1
                sline = lines[nline].split()
                a = float(sline[0])
                b = float(sline[1])
                c = float(sline[2])
                self.bandlimits.append([a,b,c])
                self.bandsymbol.append(sline[-1])
            nline = nline + 1
        nn = 0
        self.spoint = {}          # cumulative distance -> [label(s)]
        self.kdist = []           # cumulative distance of every k-point
        sum = 0.0
        for n in range(nbandlines):
            n1 = n*2
            n2 = n*2 + 1
            a1 = self.bandlimits[n1]
            a2 = self.bandlimits[n2]
            # continue the path from the end of the previous segment
            base = 0.0
            if len(self.kdist) >= 1:
                base = self.kdist[-1]
            for i in range(self.nkpt):
                # linear interpolation between segment endpoints
                a3 = [0,0,0]
                for j in range(len(a1)):
                    a3[j] = a1[j] + (a2[j]-a1[j])*i/(self.nkpt-1)
                l1 = base + math.sqrt(dot_product(a3,a1))
                self.kdist.append(l1)
                # record endpoint labels; merge distinct labels that land
                # on the same distance (segment joints)
                if i == 0 or i == self.nkpt-1:
                    if self.kdist[-1] in self.spoint:
                        if self.spoint[self.kdist[-1]][0] != self.bandsymbol[nn]:
                            self.spoint[self.kdist[-1]].append(self.bandsymbol[nn])
                    else:
                        self.spoint[self.kdist[-1]] = [self.bandsymbol[nn]]
                    nn = nn + 1
class VaspBand:
    """Parse a VASP EIGENVAL-style dump and plot the band structure.

    NOTE: Python 2 code (bare print statements below).
    """
    def __init__(self, lines, fermi, ymin, ymax, title):
        self.lines = lines        # raw EIGENVAL file lines
        self.fermi = fermi        # Fermi energy; eigenvalues are shifted by it
        self.ymin = ymin
        self.ymax = ymax
        self.title = title
        self.get_info()
        self.get_energy()
        self.kp = KPOINTS()       # k-path geometry from BANDKS
    def draw_bands(self):
        """Render all bands to bands.pdf and bands.png."""
        rc('text', usetex=True)
        rc('font', family='serif')
        rc('xtick', labelsize=16)
        rc('ytick', labelsize=16)
        nbands = self.nbands
        plt.figure(figsize=(5,8))
        # one black curve per band: x = path distance, y = shifted energy
        for n in range(nbands):
            xs = []
            ys = []
            for k in range(self.nkpt):
                xs.append(self.kp.kdist[k])
                ys.append(self.evals[k][n])
            plt.plot(xs,ys,color='black', lw=2)
        plt.subplots_adjust(left=0.15,right=0.95,top=0.95,bottom=0.10)
        # vertical lines + tick labels at high-symmetry points
        xt = []
        yt = []
        for key in self.kp.spoint:
            label = self.kp.spoint[key]
            print key, label
            tics = ""
            if len(label) > 1:
                # joint between two segments: show both labels
                tics = label[0] + "(" + label[1] + ")"
            else:
                tics = label[0]
            xt.append(key)
            yt.append(tics)
            plt.axvline(key,color='black',ls='-')
        plt.xticks(xt,yt,fontsize=18)
        # plt.xticks((0.0,2),(r'\Gamma','X'),fontsize=16)
        plt.yticks( np.arange(self.ymin,self.ymax+1.0,1) )
        xmin = min(self.kp.kdist)
        xmax = max(self.kp.kdist)
        plt.xlim(xmin,xmax)
        plt.ylim(self.ymin,self.ymax)
        plt.ylabel('Energy (eV)', fontsize=18)
        # plt.show()
        plt.axhline(y=0,color='blue',ls='-')
        plt.title(self.title)
        plt.savefig("bands.pdf", format='pdf')
        plt.savefig("bands.png", format='png')
    def make_bands(self):
        """Dump (distance, energy) pairs per band for external plotting."""
        for n in range(self.nbands):
            for k in range(self.nkpt):
                # print k, self.evals[k][n]
                print self.kp.kdist[k], self.evals[k][n]
            print
        print "#", self.kp.spoint
    def get_info(self):
        # header line 5 carries: <?> <nkpt> <nbands>
        line = self.lines[5]
        sline = line.split()
        self.nbands = int(sline[2])
        self.nkpt = int(sline[1])
        # print self.nbands, self.nkpt
    def get_energy(self):
        """Read the per-k-point eigenvalue blocks, Fermi-shifted."""
        self.kpoints = []
        self.evals = []
        for n in range(self.nkpt):
            # each k-point block: 1 coordinate line + nbands value lines,
            # separated by a blank line; first block starts at line 7
            nstart = n*(2+self.nbands) + 7
            line = self.lines[nstart]
            # print line
            sline = line.split()
            kx = float(sline[0])
            ky = float(sline[1])
            kz = float(sline[2])
            self.kpoints.append([kx,ky,kz])
            nstart = nstart + 1
            self.evals.append([])
            for m in range(self.nbands):
                nline = nstart + m
                line = self.lines[nline]
                sline = line.split()
                ev = float(sline[1]) - self.fermi
                self.evals[-1].append(ev)
def main():
    """CLI entry point.

    argv: <eigenval-file> <fermi energy> <ymin> <ymax> <title>
    Writes bands.pdf / bands.png via VaspBand.draw_bands().
    """
    filename = sys.argv[1]
    fermi = float(sys.argv[2])
    ymin = float(sys.argv[3])
    ymax = float(sys.argv[4])
    title = sys.argv[5]
    # context manager instead of shadowing the builtin name `file`
    # and leaking the handle
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    vasp = VaspBand(lines, fermi, ymin, ymax, title)
    # vasp.make_bands()
    vasp.draw_bands()
def main_kpoint():
    # standalone k-point parse (reads BANDKS); result is discarded,
    # so this only exercises the parser
    kp = KPOINTS()
if __name__ == '__main__':
main()
|
from sys import stdin,stdout
# Counts contiguous runs of "marked" digits as they arrive.
# l is a presence array; c is the current number of runs.
t=int(stdin.readline())
l=[0]
c=0
x=1
# pre-size the presence array (indices 0..20001, all unmarked)
while x<20002:
    l.insert(x,0)
    x+=1
line=stdin.readline()
# NOTE(review): each non-space character is parsed as ONE digit, so this
# only works for single-digit values -- confirm against the problem spec
for a in line:
    if a== " ":
        continue
    if(t<=0):
        break
    t-=1
    a=int(a)
    l[a]=1
    # neighbors equal: both unmarked -> new isolated run (+1);
    # both marked -> two runs merge (-1); otherwise run extends (no change)
    if int(l[a-1])==int(l[a+1]):
        if int(l[a-1])>0:
            c+=(-1)
        else:
            c+=1
    print(c)
print("Justice\n")
|
# Car rental cost: R$100.00 per rental day plus R$1.50 per km driven.
dias = int(input('Quantos dias pretende alugar? '))
custo_diarias = dias * 100.00
rodados = float(input('Quantos kms rodados? '))
total = custo_diarias + rodados * 1.50
print('O total do aluguel do carro custará \033[0;31mR${:.2f}\033[m!'.format(total))
|
# Sequential indices of the tableau entries (order must match the
# tableaus list this module is paired with).
tableau_game_character_sheet = 0
tableau_game_inventory_window = 1
tableau_game_party_window = 2
tableau_troop_note_alpha_mask = 3
tableau_troop_note_color = 4
tableau_troop_character_alpha_mask = 5
tableau_troop_character_color = 6
tableau_troop_inventory_alpha_mask = 7
tableau_troop_inventory_color = 8
tableau_troop_party_alpha_mask = 9
tableau_troop_party_color = 10
tableau_troop_note_mesh = 11
tableau_center_note_mesh = 12
tableau_faction_note_mesh_for_menu = 13
tableau_faction_note_mesh = 14
tableau_faction_note_mesh_banner = 15
tableau_2_factions_mesh = 16
tableau_color_picker = 17
tableau_custom_banner_square_no_mesh = 18
tableau_custom_banner_default = 19
tableau_custom_banner_tall = 20
tableau_custom_banner_square = 21
tableau_custom_banner_short = 22
tableau_background_selection = 23
tableau_positioning_selection = 24
tableau_retirement_troop = 25
tableau_retired_troop_alpha_mask = 26
tableau_retired_troop_color = 27
tableau_retired_troop = 28
tableau_starship_icon = 29
tableau_troop_tree_pic = 30
tableau_troop_detail_dummy_pic = 31
|
# DP -- easy problem
class Solution:
    """
    @param m: positive integer (1 <= m <= 100)
    @param n: positive integer (1 <= n <= 100)
    @return: An integer
    """
    def uniquePaths(self, m, n):
        # classic grid DP on a 2-D table: paths(i, j) =
        # paths(i-1, j) + paths(i, j-1); any cell in row 0 or
        # column 0 has exactly one path
        grid = [[1] * n for _ in range(m)]
        for i in range(1, m):
            for j in range(1, n):
                grid[i][j] = grid[i - 1][j] + grid[i][j - 1]
        return grid[m - 1][n - 1]
x=Solution()
x.uniquePaths(3,3)
|
# multiply m by n via repeated addition (m is consumed by the loop;
# a non-positive m leaves the product at 0)
m = int(input("ingrese el primer numero: "))
n = int(input("ingrese el segundo numero: "))
p = 0
while m > 0:
    m = m - 1
    p = p + n
print ('El producto de m y n es', p)
|
# -*- coding: utf-8 -*-
import logging
# the Qt bindings package is spelled "PySide2"; "Pyside2" raises ImportError
from PySide2 import QtWidgets, QtCore
class EdlTable(QtWidgets.QTableView):
    """Single-row-selection table view for EDL entries; column 5 is a
    hidden per-row note store."""
    # re-emitted whenever a mouse press changes the selected row
    itemSelectionChanged = QtCore.Signal()
    def __init__(self, rows, model):
        super(EdlTable, self).__init__()
        # NOTE(review): self.model shadows QTableView.model() -- callers
        # here rely on the attribute, but confirm nothing needs the method
        self.model = model
        self.setModel(self.model)
        self.model.setRowCount(rows)
        self.setColumnHidden(5, True)
        self.setAlternatingRowColors(True)
        self.resizeColumnsToContents()
        self.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self.setSelectionMode(
            QtWidgets.QAbstractItemView.SingleSelection
        )
        # remember the selection so mousePressEvent can detect changes
        self.prev_selection = self.selectionModel().selectedRows()
        self.resizeColumnsToContents()
        self.resizeRowsToContents()
        self.resize(1280, 720)
    def mousePressEvent(self,event):
        # emit itemSelectionChanged only when the selected rows actually
        # changed as a result of the click
        super(EdlTable, self).mousePressEvent(event)
        if self.prev_selection != self.selectionModel().selectedRows():
            self.prev_selection = self.selectionModel().selectedRows()
            self.itemSelectionChanged.emit()
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: the EDL table, add/del/import controls
    and a per-row comment editor backed by hidden table column 5."""
    saveEvent = QtCore.Signal()    # emitted when the window is closed
    openedFile = QtCore.Signal(str)
    openedEdl = QtCore.Signal(str)
    def __init__(self, application, model, parent = None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.application = application
        # Menus & Actions ---
        open_act = QtWidgets.QAction("&Open...", self)
        open_act.setShortcuts(QtWidgets.QKeySequence.Open)
        open_act.setStatusTip("Open an existing file")
        open_act.triggered.connect(self.open_file)
        menubar = self.menuBar()
        self.fileMenu = menubar.addMenu('&File')
        self.fileMenu.addAction(open_act)
        # UI Elements ---
        self.table_edl = EdlTable(10, model)
        self.add_clip_field = QtWidgets.QTextEdit("")
        self.button_add = QtWidgets.QPushButton("Add")
        self.button_del = QtWidgets.QPushButton("Del")
        self.button_import = QtWidgets.QPushButton("Import EDL / AFF")
        self.comment = QtWidgets.QTextEdit("")
        self.layout_main = QtWidgets.QGridLayout()
        # UI Layout ---
        self.layout_main.addWidget(self.table_edl, 1, 0, 1, 4)
        self.layout_main.addWidget(self.add_clip_field, 2, 0, 1, 2)
        self.layout_main.addWidget(self.button_add, 2, 2, 1, 1)
        self.layout_main.addWidget(self.button_del, 2, 3, 1, 1)
        self.layout_main.addWidget(self.button_import, 2, 4, 1, 1)
        self.layout_main.addWidget(self.comment, 1, 4, 1, 1)
        self.add_clip_field.setFixedHeight(30)
        self.table_edl.setMinimumWidth(500)
        # Events and connections ---
        self.button_add.clicked.connect(self.add_item)
        self.button_del.clicked.connect(self.del_item)
        self.button_import.clicked.connect(self.open_edl)
        self.table_edl.itemSelectionChanged.connect(self.update_comment)
        self.comment.textChanged.connect(self.comment_changed)
        self.installEventFilter(self)
        self.centralWidget = QtWidgets.QWidget()
        self.centralWidget.setLayout(self.layout_main)
        self.setCentralWidget(self.centralWidget)
    def eventFilter(self,obj,event):
        """Request a save when the window closes; delegate everything
        else to the application's filter."""
        if obj is self and event.type() == QtCore.QEvent.Close:
            self.saveEvent.emit()
            return True
        return super(
            type(self.application),
            self.application
        ).eventFilter(obj,event)
    def open_act(self):
        # leftover stub; the menu action is wired to open_file instead
        print("open file")
    def comment_changed(self):
        '''
        Notes are stored in hidden column (5) so each time we edit the text
        in elem_desc we update the column.
        '''
        for index in self.table_edl.selectionModel().selectedRows():
            selectedItem = self.table_edl.model.data(CellIndex(index.row(), 5),
                                                     QtCore.Qt.UserRole)
            break
        # nothing selected: nothing to update
        if not 'selectedItem' in locals():
            return
        if self.comment.toPlainText() != selectedItem:
            self.table_edl.model.setData(CellIndex(index.row(), 5),
                                         self.comment.toPlainText(),
                                         2)
    def update_comment(self):
        """Load the selected row's hidden note into the comment box,
        muting textChanged so comment_changed is not re-triggered."""
        sel = self.table_edl.selectionModel().selectedRows()
        if len(sel) > 0:
            self.comment.blockSignals(True)
            self.comment.setText(
                self.table_edl.model.data(CellIndex(sel[0].row(), 5),
                                          QtCore.Qt.UserRole)
            )
            self.comment.blockSignals(False)
        return
    def add_item(self):
        """Parse the quick-add field (space separated); work in progress."""
        item = self.add_clip_field.toPlainText().split(" ")
        # bug fix: compare_master_TC_in is a method and must be called
        # through self (was a bare name -> NameError at runtime)
        row = self.compare_master_TC_in(item[3])
        for i in item:
            pass
    def compare_master_TC_in(self, timecode):
        # used to know on which row we should add item X
        pass
    def del_item(self):
        # not implemented yet
        pass
    def open_file(self):
        """Prompt for a JSON file and broadcast its path."""
        browser = QtWidgets.QFileDialog()
        # Qt5 getOpenFileName returns (fileName, selectedFilter); emit only
        # the name so the str signal receives a str, not a tuple
        opened_file, _ = browser.getOpenFileName(caption = "Open JSON File",
                                                 filter = "JSON Files (*.json)")
        self.openedFile.emit(opened_file)
    def open_edl(self):
        """Prompt for an EDL/AFF file and broadcast its path."""
        browser = QtWidgets.QFileDialog()
        edl, _ = browser.getOpenFileName(caption = "Open EDL / AFF File",
                                         filter = "EDL / AFF Files (*.edl *.aff)")
        self.openedEdl.emit(edl)
        # Use self.open_edl in main app to read EDL using python split.
        # Then set table according to data
    def export_table(self):
        """Expose the table widget to the owning application."""
        return self.table_edl
class CellIndex:
    """Lightweight (row, column) holder mimicking QModelIndex's
    row()/column() accessors."""
    def __init__(self,row,column):
        self._pos = (row, column)
    def row(self):
        return self._pos[0]
    def column(self):
        return self._pos[1]
|
import wx
import wx.lib.ogl as ogl
class AppFrame(wx.Frame):
    """Minimal wx OGL demo: a frame holding a canvas with one circle."""
    def __init__(self):
        wx.Frame.__init__( self,
            None, -1, "Demo",
            size=(300,200),
            style=wx.DEFAULT_FRAME_STYLE )
        sizer = wx.BoxSizer( wx.VERTICAL )
        # put stuff into sizer
        canvas = ogl.ShapeCanvas( self )
        sizer.Add( canvas, 1, wx.GROW )
        canvas.SetBackgroundColour( "LIGHT BLUE" ) #
        # canvas and diagram must reference each other for OGL to draw
        diagram = ogl.Diagram()
        canvas.SetDiagram( diagram )
        diagram.SetCanvas( canvas )
        shape = ogl.CircleShape( 20.0 ) #
        shape.SetX( 25.0 ) #
        shape.SetY( 25.0 ) #
        canvas.AddShape( shape ) #
        diagram.ShowAll( 1 ) #
        # apply sizer
        self.SetSizer(sizer)
        self.SetAutoLayout(1)
        self.Show(1)
# NOTE(review): wx.PySimpleApp is deprecated in modern wxPython --
# confirm the installed version still provides it
app = wx.PySimpleApp()
ogl.OGLInitialize()
frame = AppFrame()
app.MainLoop()
app.Destroy()
from random import randint
from orator.seeds import Seeder
from models.project import Project
class ProjectTableSeeder(Seeder):
    """Seed the projects table with 50 fake records."""
    def projects_factory(self, faker):
        """
        Defines the template of user test records
        """
        return {
            'name' : faker.company(),
            'description' : faker.paragraph(),
            'code': faker.isbn10(separator="-"),
            'active' : True,
            # assumes user ids 1..50 already exist -- TODO confirm the
            # users seeder runs first
            'user_id' : randint(1,50)
        }
    def run(self):
        """
        Run the database seeds.
        """
        self.factory.register(Project, self.projects_factory)
        # Adding 50 projects
        self.factory(Project, 50).create()
|
# Generated by Django 3.1.5 on 2021-01-21 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes per-profile contact fields
    # from Trainee/Trainer, adds Trainer.education and User.full_name,
    # and tightens several Trainee field defaults.
    dependencies = [
        ('FPT', '0012_auto_20210120_0303'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='trainee',
            name='department',
        ),
        migrations.RemoveField(
            model_name='trainee',
            name='description',
        ),
        migrations.RemoveField(
            model_name='trainee',
            name='email',
        ),
        migrations.RemoveField(
            model_name='trainee',
            name='name',
        ),
        migrations.RemoveField(
            model_name='trainer',
            name='description',
        ),
        migrations.RemoveField(
            model_name='trainer',
            name='email',
        ),
        migrations.RemoveField(
            model_name='trainer',
            name='name',
        ),
        migrations.AddField(
            model_name='trainer',
            name='education',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
        migrations.AddField(
            model_name='user',
            name='full_name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='trainee',
            name='education',
            field=models.CharField(blank=True, default='FPT Education', max_length=50),
        ),
        migrations.AlterField(
            model_name='trainee',
            name='location',
            field=models.CharField(blank=True, default='Da Nang', max_length=50),
        ),
        migrations.AlterField(
            model_name='trainee',
            name='phone',
            field=models.CharField(blank=True, default='09xx', max_length=12),
        ),
        migrations.AlterField(
            model_name='trainee',
            name='toeic_score',
            field=models.IntegerField(default=5),
        ),
        migrations.AlterField(
            model_name='trainer',
            name='working_place',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
    ]
|
__all__ = ['UtilClasses']
from UtilClasses import Location
from UtilClasses import ModemResult
from UtilClasses import SMS
from UtilClasses import RWLock
|
from main.views import main_response
from django.urls import path
# route the site root to the main view
urlpatterns = [
    path('', main_response, name='main_response'),
]
|
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity, KnownArtist
from .serializers import ArtistSerializer, SimilaritySerializer, KnownArtistSerializer
from bandcamp import tasks as bandcamp_tasks
MIN_TRACKS_SIGNIFICANT = 3
class ArtistViewSet(viewsets.ModelViewSet):
    """API endpoint that allows artists to be viewed or edited"""
    queryset = Artist.objects.all()
    serializer_class = ArtistSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    limit = 100  # hard cap on results per request
    def order_queryset(self, qs):
        # artists with enough tracks come first; pad with the rest up to
        # the limit
        significant = qs.filter(links__num_tracks__gte=MIN_TRACKS_SIGNIFICANT)
        results = list(significant[:self.limit])
        if len(results) < self.limit:
            results.extend(qs.exclude(pk__in=significant)[:self.limit - len(results)])
        return results
    def get_queryset(self):
        # a ?name= query switches to similarity search and queues an
        # async bandcamp lookup for that name
        name = self.request.GET.get('name', "")
        if name:
            qs = self.order_queryset(get_similar(name))
            bandcamp_tasks.check_for_cc.delay(name)
        else:
            qs = super().get_queryset()
        return qs[:self.limit]
class KnownArtistViewSet(viewsets.ModelViewSet):
    """Endpoint for users to manage a list of known artists."""
    lookup_field = 'artist'
    queryset = KnownArtist.objects.all()
    serializer_class = KnownArtistSerializer
    def get_queryset(self):
        # each user only ever sees their own known-artist entries
        return self.request.user.knownartist_set
    def perform_create(self, serializer):
        # stamp new entries with the requesting user
        serializer.save(user=self.request.user)
class SimilarViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for a user's own similarity votes."""
    queryset = UserSimilarity.objects.all()
    serializer_class = SimilaritySerializer
    permission_classes = (permissions.IsAuthenticated,)
    http_method_names = ['get', 'post', 'put', 'delete']  # no PATCH
    filter_fields = ['cc_artist']
    def get_queryset(self):
        # restrict to the requesting user's similarities
        return super().get_queryset().filter(user=self.request.user)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright Bernardo Heynemann <heynemann@gmail.com>
# Licensed under the Open Software License ("OSL") v. 3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.opensource.org/licenses/osl-3.0.php
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import Popen, PIPE
import commands
class ShellExecuter(object):
    """Run a shell command and capture its combined output and exit code.

    NOTE: Python 2 only (old `except E, err` syntax and the `commands`
    module, both removed in Python 3).
    """
    def execute(self, command, base_path, change_dir=True):
        """Execute `command`, optionally from `base_path`; always returns
        an ExecuteResult (exit code 1 with a message on failure)."""
        try:
            if os.name == "nt":
                # Windows: Popen honors cwd directly
                proc = Popen(command, stdout=PIPE, stderr=PIPE, cwd=base_path, shell=True)
                log = "\n".join(proc.communicate())
                exit_code = proc.returncode
            else:
                # POSIX: emulate cwd by prefixing a cd
                complement=""
                if change_dir:
                    complement = "cd %s && " % base_path
                result = commands.getstatusoutput("%s%s" % (complement, command))
                log = result[1]
                exit_code = result[0]
            return ExecuteResult(command, log, exit_code)
        except Exception, err:
            error_message = "An error occured while executing command %s: %s" % (command, err)
            return ExecuteResult(command, error_message, 1)
class ExecuteResult(object):
    """Value object bundling a command with its captured log and exit code."""
    def __init__(self, command, run_log, exit_code):
        self.command = command      # the command line that was executed
        self.run_log = run_log      # combined stdout/stderr (or error text)
        self.exit_code = exit_code  # 0 on success
|
from functools import wraps
def method_decorator_adaptor(adapt_to, *decorator_args, **decorator_kwargs):
def decorator_outer(func):
@wraps(func)
def decorator(self, *args, **kwargs):
@adapt_to(*decorator_args, **decorator_kwargs)
def adaptor(*args, **kwargs):
return func(self, *args, **kwargs)
return adaptor(*args, **kwargs)
return decorator
return decorator_outer |
def gcdIter(a, b):
    """Return the greatest common divisor of non-negative integers a and b.

    Replaces the original O(a+b) countdown search with the Euclidean
    algorithm, which runs in O(log(min(a, b))) divisions.
    """
    if a == 0 and b == 0:
        # the original countdown found no positive divisor of (0, 0)
        # and fell through to 1; preserve that fallback
        return 1
    while b:
        a, b = b, a % b
    return a
|
class Student:
    """Simple record of a student's roll number, name and total marks."""
    def set_student(self,rollno,name,total):
        # store the three fields on the instance in one go
        self.rollno, self.name, self.total = rollno, name, total
    def print_student(self):
        # dump each field on its own line, in the order it was set
        for value in (self.rollno, self.name, self.total):
            print(value)
obj=Student()
obj.set_student(10,"Sarath",100)
obj.print_student() |
#!/usr/bin/env python
#coding:utf-8
# Weather forecast
# libdoor weather API, 2016/12
import urllib2, sys
import json
import aitalk
import audioplayer
from pprint import pprint
citycode = '270000' # Livedoor city code for Osaka
resp = urllib2.urlopen('http://weather.livedoor.com/forecast/webservice/json/v1?city=%s'%citycode).read()
obj = json.loads(resp)
#print obj['title']
#print obj['description']['text']
forecasts = obj['forecasts']
tomorrow = forecasts[1]
#tomorrow['dateLabel']
tomorrow_weather = tomorrow['telop']
# build the weather-summary text to be spoken
text = tomorrow['dateLabel'] +u'、'+ tomorrow_weather + u'。'+ obj['description']['text']
# massage into a string AITalk will accept
# (blank lines break AITalk, so split into individual lines first)
a = text.splitlines()
# Livedoor Weather returns strings containing half-width spaces;
# AITalk rejects those with an error (HTTP 400), so strip them out
txt=""
for i in a:
    txt = txt + i.replace(' ','')
print txt
client = aitalk.AITalkClient()
response = client.talk(txt)
print response
# play the generated WAV
player=audioplayer.AudioPlayer()
player.setAudioFile("aitalk.wav")
player.playAudio()
|
import tensorflow as tf
# Nine basis terms evaluated from surface normals laid out as (B, C, L)
# with C = (x, y, z).  NOTE(review): ordering/signs look like the
# standard 2nd-order spherical-harmonics lighting basis -- confirm
# against the paper this implements.
def H0(normals):
    # constant term, shaped (B, 1, L)
    B, C, L = normals.shape.as_list()
    # return tf.ones([B, 1, L], dtype=tf.float32)
    return tf.ones_like(normals, dtype=tf.float32)[:, 0:1, :]
def H1(normals):
    # y component
    return normals[:, 1:2, :]
def H2(normals):
    # z component
    return normals[:, 2:3, :]
def H3(normals):
    # x component
    return normals[:, 0:1, :]
def H4(normals):
    # x * y
    return tf.multiply(normals[:, 0:1, :], normals[:, 1:2, :])
def H5(normals):
    # y * z
    return tf.multiply(normals[:, 1:2, :], normals[:, 2:3, :])
def H6(normals):
    # 2z^2 - x^2 - y^2
    return - tf.multiply(normals[:, 0:1, :], normals[:, 0:1, :]) \
           - tf.multiply(normals[:, 1:2, :], normals[:, 1:2, :]) \
           + 2 * tf.multiply(normals[:, 2:3, :], normals[:, 2:3, :])
def H7(normals):
    # z * x
    return tf.multiply(normals[:, 2:3, :], normals[:, 0:1, :])
def H8(normals):
    # x^2 - y^2
    return tf.multiply(normals[:, 0:1, :], normals[:, 0:1, :]) \
           - tf.multiply(normals[:, 1:2, :], normals[:, 1:2, :])
def get_H(normals, mask):
    """
    Stack the nine basis terms into one masked tensor.

    :param normals: (B,3,H,W)
    :param mask: (B,1,H,W)
    :return: (B,9,HW)
    """
    B, C, H, W = normals.shape.as_list()
    # flatten the spatial dims so each pixel is one column
    normals = tf.reshape(normals, [-1, C, H*W])
    mask = tf.reshape(mask, [-1, 1, H*W])
    # mask broadcasts over the 9 basis channels, zeroing invalid pixels
    return tf.multiply(tf.concat([H0(normals), H1(normals), H2(normals), H3(normals), H4(normals),
                                  H5(normals), H6(normals), H7(normals), H8(normals)], 1), mask)
def get_lighting(normals, image, abd, mask, rm_graz=False, eps=1e-4):
    """
    Least-squares estimate of 9 lighting coefficients from normals + image.

    :param normals: BCHW
    :param image: BCH1
    :param abd: BCHW albedo, or None to skip the albedo weighting
    :param mask: BCHW
    :param rm_graz: drop normals whose z component is <= 0.5 (grazing)
    :param eps: Tikhonov damping added before inverting A A^T
    :return: lighting with shape (B,9), LH with shape (B,1,H,W), and the
             (possibly tightened) mask
    """
    ## Remove normals at high grazing angle
    if rm_graz:
        mask_angle = tf.cast(tf.greater(normals[:, 2:3, :, :], 0.5), tf.float32)
        mask = tf.multiply(mask, mask_angle)
    image = (tf.multiply(image, mask) + 1.0) / 2  # transform to [0,1] for lighting estimation
    B, C, H, W = image.shape.as_list()
    image = tf.reshape(image, [-1, C, H*W])
    image = tf.transpose(image, perm=[0, 2, 1])
    A = get_H(normals, mask)
    # Use offline estimated albedo
    if abd is not None:
        abd = tf.reshape(abd, [1, -1, H*W])
        A = tf.multiply(A, abd)
    A_t = tf.transpose(A, perm=[0, 2, 1])
    # normal equations with a small ridge term for invertibility
    AA_t = tf.matmul(A, A_t) + eps*tf.eye(9, name='lighting_inverse_eps')
    # TODO: image rescale to [0,1]?
    lighting = tf.squeeze(tf.matmul(tf.matmul(tf.matrix_inverse(AA_t), A), image), axis=2)
    # reconstruction L . H, reshaped back to image layout
    LH = tf.reshape(tf.matmul(tf.expand_dims(lighting, 1), A), [-1, 1, H, W])
    return lighting, LH, mask
# Manual smoke test: estimate lighting for one frame of the dataset.
if __name__ == '__main__':
    from PIL import Image
    import numpy as np
    print("Test for lighting")
    normals = Image.open('../dataset/20170907/group1/high_quality_depth_n/frame_000001_n.png').convert('RGB')
    image = Image.open('../dataset/20170907/group1/color_map/frame_000001.png').convert('L')
    mask = Image.open('../dataset/20170907/group1/mask/frame_000001.png').convert('L')
    normals = tf.convert_to_tensor(np.asarray(normals), dtype=tf.float32) / 255.0
    image = tf.expand_dims(tf.convert_to_tensor(np.asarray(image), dtype=tf.float32), 0) / 255.0
    mask = tf.expand_dims(tf.convert_to_tensor(np.asarray(mask), dtype=tf.float32), 0) / 255.0
    normals = tf.expand_dims(tf.transpose(normals, perm=[2, 0, 1]), 0)
    image = tf.expand_dims(image, 0)
    mask = tf.expand_dims(mask, 0)
    print(normals.shape, image.shape, mask.shape)
    sess = tf.Session()
    # get_lighting returns (lighting, LH, mask): unpack all three --
    # the original 2-name unpack raised ValueError at runtime
    lighting, _, _ = sess.run(get_lighting(normals, image, None, mask))
    print(lighting)
|
#Basic Calculator
#HackerRank Pythonista Contest
#Created by Brandon Morris 11/1/2014
# prints sum, difference, product, true quotient, and floor quotient
# of two floats read from stdin; raises ZeroDivisionError when y == 0
x = float(input())
y = float(input())
print("%.2f" % (x + y))
print("%.2f" % (x - y))
print("%.2f" % (x * y))
print("%.2f" % (x / y))
print("%.2f" % (x // y))
# For each test case: length of the longest contiguous arithmetic
# progression in A (assumes N >= 2, per the problem's input format).
T = int(input())
for i in range(1, T+1):
    N = int(input())
    A = list(map(int, input().split()))
    diff = A[1] - A[0]
    count = 2
    maxcount = 2
    for j in range(2, N):
        if A[j] - A[j-1] == diff:
            count += 1
            maxcount = max(maxcount, count)
        else:
            # a fresh run always starts at length 2 <= maxcount,
            # so no max() update is needed here
            diff = A[j] - A[j-1]
            count = 2
    print("Case #{}: {}".format(i, maxcount))
HTML_TABLE = """
<table class='center' height="50%" width="100%" align=center cellpadding ="25">
<tr>
<th><h2>Question</h2></th>
<th><h2>Answer</h2></th>
</tr>
{table_rows}
</table>
"""
TABLE_CSS = """.center {
margin-left: auto;
margin-right: auto;
}
"""
BUTTON_CSS = """
.mybutton {
left: 46%;
}
"""
|
"""
This script allows you to verify if the imagenet ILSVRC images you downloaded
are correct (i.e., images are not corrupted). You can run them in parallel if
you have multiple machines.
We found that there is one image (an image that contains a monkey) that is
actually a valid JPEG image, but cannot be read in python using our code (based
on PIL). If this happens in your case, open the file using gimp or any image
editing software, re-save it, and you should be good.
"""
from iceberk import mpi
import gflags, glob, logging, os, sys
from PIL import Image
gflags.DEFINE_string("train", "", "The root for the training data")
gflags.DEFINE_string("val", "", "The root for the validation data")
gflags.DEFINE_string("test", "", "The root for the testing data")
gflags.FLAGS(sys.argv)
FLAGS = gflags.FLAGS
mpi.log_level(logging.ERROR)
mpi.root_log_level(logging.INFO)
files = []
# only the MPI root walks the directories; the file list is scattered
# to all ranks afterwards
if mpi.is_root():
    if FLAGS.train != "":
        logging.info("Adding training images..")
        files += glob.glob(os.path.join(FLAGS.train, '*', '*.JPEG'))
    if FLAGS.val != "":
        logging.info("Adding validation images..")
        files += glob.glob(os.path.join(FLAGS.val, '*.JPEG'))
    if FLAGS.test != "":
        logging.info("Adding testing images..")
        files += glob.glob(os.path.join(FLAGS.test, '*.JPEG'))
    logging.info("A total of %d images to check" % (len(files)))
files = mpi.distribute_list(files)
logging.info('Validating...')
errornum = 0
for i, filename in enumerate(files):
    try:
        # opening is enough to detect unreadable/corrupt image headers
        verify = Image.open(filename)
    except Exception, e:
        logging.error(filename)
        errornum += 1
# total error count across all MPI ranks
errornum = mpi.COMM.allreduce(errornum)
if errornum == 0:
    logging.info("Done. No corrupted images found.")
else:
    logging.info("Done. %d corrupted images found." % (errornum,))
|
#coding=utf-8
import time,sys,os,win32gui, win32ui, win32con,traceback
from sensetimebi_productstests.Sharedscript.SharedGetYamlConfigData import DataGetConfig
from PIL import Image
import pytesseract
class images_dispose(object):
    """Helpers for the batch-upload image folder: list, count and rename
    image files, and OCR digits out of an image."""

    # Recognised image filename suffixes (the original chained four
    # endswith() calls with `or`).
    IMAGE_SUFFIXES = ('jpg', 'jpeg', 'png', 'JPG')

    def __init__(self):
        """Read the batch-upload image folder location from the yaml config."""
        getConfig = DataGetConfig()
        self.images_path = getConfig.getConfig().get("images_path")

    def get_filenames(self, path):
        """Return the full paths of all image files directly inside *path*."""
        filenames = []
        for files in os.listdir(path):
            if files.endswith(self.IMAGE_SUFFIXES):
                filenames.append(os.path.join(path, files))
        return filenames

    def count_img(self, path):
        """Return the number of image files in *path*."""
        return len(self.get_filenames(path))

    def get_names(self, path):
        """Return the image file names of *path* without directory/extension.

        BUGFIX: the original sliced off the last four characters, which left
        a trailing '.' for '.jpeg' files; os.path.splitext handles any
        extension length correctly.
        """
        names = []
        for filename in self.get_filenames(path):
            base = os.path.basename(filename)
            names.append(os.path.splitext(base)[0])
        return names

    def imagesName(self, firstName):
        """Return the 100 generated names firstName1 .. firstName100."""
        return [firstName + str(i) for i in range(1, 101)]

    def rename(self, firstName):
        """Rename every file in the configured folder to firstName<N><ext>.

        NOTE(review): at most 100 files are supported (length of
        imagesName()); more files raise IndexError, as in the original.
        """
        inames = self.imagesName(firstName)
        fileList = os.listdir(self.images_path)
        for j, files in enumerate(fileList):
            Olddir = os.path.join(self.images_path, files)
            filetype = os.path.splitext(files)[1]
            Newdir = os.path.join(self.images_path, str(inames[j]) + filetype)
            os.rename(Olddir, Newdir)
        time.sleep(1)

    def images_str(self):
        """Return the folder's jpg names as one space-separated quoted string
        (e.g. '"a.jpg" "b.jpg"')."""
        lenghts = len(self.images_path)
        images = []
        for fileimages in os.listdir(self.images_path):
            if fileimages.endswith('jpg'):
                image = os.path.join(self.images_path, fileimages)
                images.append(image[lenghts + 1:])
        images = str(images)[1:-1]                     # drop list brackets
        images = images.replace("'", "\"").replace(",", "")
        time.sleep(1)
        return images

    def get_img_text(self, img_path):
        """OCR the digits contained in the image at *img_path*."""
        image = Image.open(img_path)
        text = pytesseract.image_to_string(
            image, lang="eng",
            config="--psm 6 --oem 3 -c tessedit-char-whitelist=0123456789").strip()
        return text
if __name__ == '__main__':
    # Manual smoke test: count images in a local test folder.
    # NOTE(review): "D:\\test\data1" evaluates to D:\test\data1 only because
    # \d is not an escape sequence -- a raw string would be safer.
    dispose= images_dispose()
    print(dispose.count_img("D:\\test\data1"))
|
#!/usr/bin/python
import os, glob, subprocess, sys
def clamp(v, mn, mx):
    """Return v limited to the interval [mn, mx] (upper bound applied last)."""
    lower_bounded = max(mn, v)
    return min(mx, lower_bounded)
def mix(a, b, m):
    """Linear blend between a and b: returns a at m == 0, b at m == 1."""
    weight_a = 1.0 - m
    return a * weight_a + b * m
def smoothstep(edge0in, edge1in, xin):
    """GLSL-style Hermite smooth step of x between the two edges.

    All arguments are coerced to float.  When the edges are not strictly
    increasing the function returns edge1 (mirroring the original).
    """
    edge0 = float(edge0in)
    edge1 = float(edge1in)
    x = float(xin)
    if edge1 <= edge0:
        return edge1
    # scale, bias and saturate x to the 0..1 range
    t = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    # evaluate the cubic 3t^2 - 2t^3
    return t * t * (3 - 2 * t)
def smoothlaunch(edge0, edge1, x):
    """Smoothstep over a doubled interval, run at double speed, capped at 1."""
    widened_edge1 = edge0 + (edge1 - edge0) * 2
    return min(1.0, 2.0 * smoothstep(edge0, widened_edge1, x))
# --- command line: sc=<float> output resize factor, sfx=<str> name suffix ---
newLeafName = None
sfx=""
sc = 1
if len(sys.argv) > 3:
    print "\nUSAGE: juZoom.py"
    sys.exit()
for arg in sys.argv[1:]:
    if arg[:2] == "sc":
        sc = float(arg.split("=")[1])
        print "setting sc to", sc
    elif arg[:3] == "sfx":
        sfx = arg.split("=")[1]
        print "setting sfx to", sfx
# vertical centre of the zoom as a fraction of the frame height
yCent = .15
cwd = os.getcwd()
pngPaths = glob.glob(cwd + "/*png")
pngPaths.sort()
# probe the first frame with ImageMagick `identify` to get the resolution
info = subprocess.check_output(["identify", pngPaths[0]])
#os.system(sysCmd)
print "info:"
print info
resS = info.split()[2].split("x")
res = (int(resS[0]), int(resS[1]))
print "res from", pngPaths[0] + ":", res
yofs = res[1] * yCent
i = 1
# zoom ramps from scStart to scEnd between frames scStartFr..scEndFr
scStart = 1
scEnd = 2.5
scStartFr = 1600
scEndFr = 3500
#scStartFr = 3333
#scEndFr = 3347
#fadeStartFr = 3180
#fadeEndFr = 3350
# fade-to-black ramp, in frame numbers
fadeStartFr = 3260
fadeEndFr = 3400
cropStr = "%dx%d+%d+%d" % (res[0], res[1], 0, 0)
#xsc = res[0] * scStart
#ysc = res[1] * scStart
#cropStr = "%dx%d+%d+%d" % (xsc, ysc, xsc*(1-scStart), ysc*(1-scStart))
# NOTE(review): `fr` below is the frame-number *string* from the filename;
# smoothstep() coerces it with float(), so this works but is fragile.
for pngPath in pngPaths:
    leaf = os.path.basename(pngPath)
    leafSpl = leaf.split(".")
    fr = leafSpl[-2]
    #print "leafSpl pre", leafSpl, "sfx", sfx
    leafSpl[-3] += sfx
    #print "leafSpl pos", leafSpl, "sfx", sfx
    leaf = ".".join(leafSpl)
    prog = smoothlaunch(scStartFr, scEndFr, fr)
    frSc = mix(scStart, scEnd*scStart, prog)
    i += 1
    print "cropStr", cropStr
    fadeFl = smoothlaunch(fadeStartFr, fadeEndFr, fr)
    fade = str(int(100*fadeFl*fadeFl))
    # Build the ImageMagick call: zoom about (w/2, h*yCent), crop back to the
    # original resolution, darken/de-contrast by `fade`, resize by `sc`.
    cmd = "convert " + pngPath + (" -distort ScaleRotateTranslate '%d,%d %f 0' -crop " % (res[0]/2, res[1]*yCent, frSc)) + cropStr + " -brightness-contrast -" + fade + ",-" + fade + " -resize " + str(100*sc) + "% zm/" + leaf
    #print "fr", fr, ("prog %.2f" % prog), ":", "*"*int(100*prog)
    print "fr", fr, "prog", prog, ":", cmd
    os.system(cmd)
|
# This file is part of the calculator_oop.py Task
# import Calculator class so we can inherit from it
from calculator_oop import Calculator
import math
# Create class that inherits from Calculator
class FuncCalculator(Calculator):
    """Calculator extended with a few basic area formulas."""

    def area_of_circle(self, radius):
        """Area of a circle (pi * r^2), rounded to 2 decimal places."""
        return round(math.pi * radius ** 2, 2)

    def area_of_square(self, side):
        """Area of a square with the given side length."""
        return side * side

    def area_of_triangle(self, height, base):
        """Area of a triangle from its height and base."""
        return height * base / 2
# Quick demonstration of the subclass plus one inherited method.
functional = FuncCalculator()
print(functional.area_of_circle(5))
print(functional.area_of_square(3))
print(functional.area_of_triangle(5, 8))
# Methods inherited from Calculator class still work, but aren't automatically ran because of __name__ on other file
print(functional.Add(1, 5))
|
#Măriuca ţine evidenţa iepurilor din crescătorie. Ea îşi notează câţi iepuri sunt la
#începutul fiecărei luni, câţi au murit şi câţi s-au născut în cursul fiecăei luni. Puteţi să
#realizaţi un program care, primind aceste date, să afişeze la sfârşitul fiecărei luni câţi
#iepuri sunt în crescătorie? Exemplu : Date de intrare : nr. Iepuri la început de luna 10
#nr. iepuri morti 2 nr. iepuri nascuti 6 Date de ieşire : 14 iepuri.
num_inc=int(input("Dati numarul de iepuri la inceput de luna:"))
num_mor=int(input("Dati numarul de iepuri morti:"))
num_nas=int(input("Dati numarul de iepuri ce s-au nascut:"))
print("La sfarsit de luna in crescatorie sunt",num_inc-num_mor+num_nas,"iepuri") |
#George West
#14-10-14
#stars
# Build one row of '*' characters, then print it once per requested row.
number = int(input("How many stars do you want per row? "))
rows = int(input("How many rows do you want? "))
list1=''
# accumulate `number` stars into the row string
for count in range(number):
    list1= list1 + '*'
# emit the finished row `rows` times
for count in range(rows):
    print(list1)
|
from django.test import TestCase
from django.urls import reverse
from project_core.tests import database_population
class ChangelogTest(TestCase):
    """Smoke test for the logged-in changelog page."""

    def setUp(self):
        # management client logged in once, shared by all test methods
        self._client_management = database_population.create_management_logged_client()

    def test_get(self):
        url = reverse('logged-changelog')
        response = self._client_management.get(url)
        self.assertEqual(response.status_code, 200)
        for expected in ('Version deployed', 'Changelog'):
            self.assertContains(response, expected)
|
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
build_options = dict(packages=[], excludes=[])

# MSI installer: put the tool on PATH for every user of the machine.
msi_options = dict(
    add_to_path=True,
    all_users=True,
)

base = 'Console'

executables = [
    Executable('nitropy.py', base=base),
]

setup(
    name='pynitrokey',
    version='0.4.0',
    description='Nitrokey Python Tools',
    options=dict(build_exe=build_options, bdist_msi=msi_options),
    executables=executables,
)
|
# coding=utf-8
from typing import Text, List, Any, Optional
from abc import ABCMeta
from modelscript.base.issues import (
Issue,
LocalizedSourceIssue,
Level,
WithIssueList,
IssueBox)
import re
from modelscript.base.annotations import (
Annotations
)
DEBUG = 0
#TODO:4 The type ModelElement should be better defined
# Currently classes inherits from SourceElements which
# is not really appropriate.
class ModelElementIssue(Issue):
    """Issue attached to a model element.

    Depending on whether the location element carries a line number, this
    wraps either a plain Issue (unlocated) or a LocalizedSourceIssue
    (located), and delegates origin/message/level/str to that wrapped issue.

    NOTE(review): Issue.__init__ is deliberately not called -- this object
    acts as a thin proxy over `actualIssue`; confirm Issue tolerates that.
    """

    modelElement: 'ModelElement'      # element the issue is about
    locationElement: 'ModelElement'   # element used to locate the issue
    actualIssue: Issue                # the wrapped, concrete issue

    def __init__(self,
                 modelElement: 'ModelElement',
                 level: Level,
                 message: str,
                 code=None,
                 locationElement: Optional['ModelElement'] = None) -> None:
        self.modelElement = modelElement
        # default the location to the element itself
        self.locationElement = (
            locationElement if locationElement is not None
            else modelElement)
        if DEBUG >= 2:
            print(('ISM: %s ' % self.locationElement))
        if hasattr(self.locationElement, 'lineNo'):
            line_no = self.locationElement.lineNo
        else:
            line_no = None
        if line_no is None:
            if DEBUG >= 1:
                print(('ISM: Unlocated Model Issue %s' % message))
            issue = Issue(
                origin=modelElement.model,
                code=code,
                level=level,
                message=message)
        else:
            if DEBUG >= 1:
                print(('ISM: Localized Model Issue at %s %s' % (
                    line_no,
                    message)))
            issue = LocalizedSourceIssue(
                code=code,
                sourceFile=self.locationElement.model.source,
                level=level,
                message=message,
                line=line_no,
            )
        self.actualIssue = issue

    @property
    def origin(self):
        return self.actualIssue.origin

    @property
    def message(self):
        return self.actualIssue.message

    @property
    def level(self):
        return self.actualIssue.level

    def str(self,
            pattern=None,
            styled=False):  # not used, but in subclasses
        """Delegate string formatting to the wrapped issue."""
        return self.actualIssue.str(
            pattern=pattern,
            styled=styled)
class WithIssueModel(WithIssueList, metaclass=ABCMeta):
    """Issue-list mixin that registers its issue box with the megamodel."""

    def __init__(self,
                 parents: List[IssueBox] = ()) -> None:
        super().__init__(parents=parents)
        # local import avoids a circular dependency with the megamodel package
        from modelscript.megamodels import Megamodel
        Megamodel.registerIssueBox(self._issueBox)
|
#!/usr/bin/env python
"""Run doctests"""
import doctest
import re
import sys
import unittest
from . import engine, fetchers
# From https://dirkjan.ochtman.nl/writing/2014/07/06/single-source-python-23-doctests.html
class Py23DocChecker(doctest.OutputChecker):
    """Doctest output checker that accepts Python-2 u-prefixed literals."""
    #pylint:disable=no-init

    def check_output(self, want, got, optionflags):
        if sys.version_info[0] > 2:
            # On Python 3 strip the u prefix from unicode reprs in the
            # expected output so Python-2-era doctests still match.
            for pattern, repl in (("u'(.*?)'", "'\\1'"), ('u"(.*?)"', '"\\1"')):
                want = re.sub(pattern, repl, want)
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
def load_tests(loader, tests, ignore):  #pylint:disable=unused-argument
    """unittest hook: add the doctest suites of engine and fetchers."""
    checker = Py23DocChecker()
    for module in (engine, fetchers):
        tests.addTests(doctest.DocTestSuite(module, checker=checker))
    return tests

# keep test discovery from treating the loader itself as a test
load_tests.__test__ = False

if __name__ == '__main__':
    unittest.main()
|
import sys, os
import time
import urllib2
import simplejson
sys.path.append('/home/mednet/build')
os.environ['DJANGO_SETTINGS_MODULE'] ='quicksms.settings'
from quicksms.sms.models import Incoming,Outgoing,Pull
import pygsm
from datetime import datetime
modem = pygsm.GsmModem(port="/dev/ttyUSB0", baudrate=115200)
print "loaded modem"
while True:
# find the last time that the pull occured
# check for the special case that no pulls occured
ps = Pull.objects.all().order_by('pull_date')
if ps.count() == 0:
pull_date = datetime.now()
else:
pull_date = ps.pull_date
# send the incoming ( sms to be sent to kapab ) messages
msg = modem.next_message()
while msg:
print msg
#outgoing = Outgoing.objects.filter(date_sent=None)
#print outgoing
#for o in outgoing:
# print o
# modem.send_sms("+1%s" % (o.sender), o.text)
# o.sent_date = datetime.now()
# o.save()
# download new messages from other website
url_file = urllib2.urlopen("http://haiti.opensgi.net/mednet/api/0.1/rest/outsms/")
json = url_file.read()
obj = simplejson.loads(json)
print obj
#if Outgoing.objects.filter(
time.sleep(2)
|
from tools import shell_cmd
import json
from log import logger
import os
from config import config
import ConfigParser
def getremote_cpu_model(ip):
    """Push the cpu-probe script to *ip* and run it there, producing the
    remote cpu json file."""
    copy_cmd = "scp %s/bin/get_cpu_mode.py %s:/tmp/" % (os.getcwd(), ip)
    probe_cmd = "python /tmp/get_cpu_mode.py %s " % ip
    shell_cmd.shell_run(copy_cmd, exec_mode='localhost')
    shell_cmd.shell_run(probe_cmd, host=ip, exec_mode='remote')
    logger.debug(config.FILE_PATH["CPU_JSON_PATH"] % ip + "create file successfully")
def getremote_nova_conf(ip):
    """Copy /etc/nova/nova.conf from *ip* into the configured local path."""
    local_path = config.FILE_PATH["NOVA_PATH"] % ip
    shell_cmd.shell_run("scp %s:/etc/nova/nova.conf %s " % (ip, local_path),
                        exec_mode="localhost")
    logger.debug(local_path + " create successfully ")
def compare_cpu_model(ip1, ip2):
    """Fetch both hosts' cpu json files and compare their cpu models.

    Returns (models_equal, [cpu_dict_ip1, cpu_dict_ip2]).
    """
    cpu_list = []
    for host in (ip1, ip2):
        getremote_cpu_model(host)
        file_path = config.FILE_PATH["CPU_JSON_PATH"] % host
        shell_cmd.shell_run("scp %s:/%s /tmp/" % (host, file_path),
                            exec_mode='localhost')
        with open(file_path, "r") as load_f:
            cpu_list.append(json.load(load_f))
    logger.debug(cpu_list)
    return cpu_list[0]["cpu_model"] == cpu_list[1]["cpu_model"], cpu_list
def compare_nova_conf(ip1, ip2):
    """Check that two hosts' nova cpu_mode settings are compatible.

    Returns True when the two hosts can safely share workloads from a cpu
    configuration point of view, False otherwise.
    """
    host_cpu_flag, host_cpu_model = compare_cpu_model(ip1, ip2)
    nova_dict = {}
    for ip in [ip1, ip2]:
        getremote_nova_conf(ip)
        conf = ConfigParser.ConfigParser()
        conf.read(config.FILE_PATH["NOVA_PATH"] % ip)
        nova_dict[ip] = conf.get("libvirt", "cpu_mode")
    if host_cpu_flag:
        if nova_dict[ip1] == nova_dict[ip2]:
            logger.debug("host cpu model equal")
            return True
        # BUGFIX: the original fell off the end here and returned None when
        # the host cpus matched but the nova configs differed.
        logger.error("cpu model is Different, please modify nova_conf")
        return False
    # host cpus differ: identical non-passthrough modes are still acceptable
    if nova_dict[ip1] == nova_dict[ip2] and not nova_dict[ip1] == "host-passthrough":
        return True
    logger.debug(host_cpu_model)
    logger.error("cpu model is Different, please modify nova_conf")
    return False
|
# NOTE(review): bare dict expression, not assigned to a name -- presumably
# this file is read and literal_eval'ed by a loader script; confirm against
# the consumer.
{
    'verbose': True,
    'from_pickle': True,
    # pickled training data produced by an earlier run
    'pickle_data': 'images-50000-(20, 20)-2016-11-28 10-05-07.471249.p',
    # NOTE(review): '\T' is not an escape sequence, so the value is literally
    # "\Train", but a raw string or forward slash would be less fragile.
    'folder': '\Train',
    'img_size': (20, 20),
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-21 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen the company.employees choice list."""

    dependencies = [
        ('company', '0073_auto_20180709_1001'),
    ]

    operations = [
        # Re-declare `employees` as an optional CharField with fixed
        # headcount-range choices (value stored, human label shown).
        migrations.AlterField(
            model_name='company',
            name='employees',
            field=models.CharField(blank=True, choices=[('1-10', '1-10'), ('11-50', '11-50'), ('51-200', '51-200'), ('201-500', '201-500'), ('501-1000', '501-1,000'), ('1001-10000', '1,001-10,000'), ('10001+', '10,001+')], default='', max_length=20),
        ),
    ]
|
import click
# import pandas as pd
# from datetime import datetime
# from faker import Faker
from snakeeyes.app import create_app
from snakeeyes.extensions import db
from snakeeyes.blueprints.contact2.models import Projects
from snakeeyes.blueprints.user2.models import User2
# Create an app context for the database connection.
app = create_app()
db.app = app
# fake = Faker()
def _log_status(count, model_label):
    """
    Echo how many records were created for a model.

    :param count: Amount created
    :type count: int
    :param model_label: Name of the model
    :type model_label: str
    :return: None
    """
    message = 'Created {0} {1}'.format(count, model_label)
    click.echo(message)
    return None
def _bulk_insert(model, data, label):
    """
    Bulk insert data to a specific model and log it. This is much more
    efficient than adding 1 row at a time in a loop.

    Existing rows for the model are always deleted first.

    :param model: Model being affected
    :type model: SQLAlchemy
    :param data: Data to be saved
    :type data: list
    :param label: Label for the output
    :type label: str
    :return: None
    """
    with app.app_context():
        model.query.delete()
        db.session.commit()
        db.engine.execute(model.__table__.insert(), data)
        _log_status(model.query.count(), label)
    return None
# Root command group; subcommands are attached below via cli.add_command().
@click.group()
def cli():
    """ Add items to the database. """
    pass
@click.command()
def recommend():
    """
    Match projects to employees with identical skills and echo the pairs.

    For every (project, employee) combination whose `skills` strings are
    exactly equal, collect [project.email, employee.email].
    """
    recommender = []
    projects = Projects.query \
        .order_by(Projects.created_on.desc())
    employees = User2.query \
        .order_by(User2.created_on.desc())
    # Naive cross-join; fine for small tables.
    # NOTE(review): matching requires the skill strings to be identical --
    # confirm whether partial-overlap scoring (as in the removed CSV
    # prototype) is eventually wanted.
    for project in projects:
        for employee in employees:
            if project.skills == employee.skills:
                recommender.append([project.email, employee.email])
    click.echo('Recommendations {0}'.format(recommender))
@click.command()
@click.pass_context
def all(ctx):
    """
    Generate all data.

    :param ctx:
    :return: None
    """
    # currently `recommend` is the only generation step
    ctx.invoke(recommend)
    return None
# Register the subcommands on the group so `cli` exposes them.
cli.add_command(recommend)
cli.add_command(all)
|
# Read groups of "name item item ..." records until a 0 sentinel; for every
# distinct item (sorted) print the names of the records that mention it.
while True :
    byk = int(input())
    if byk==0:
        break
    hls = []    # distinct items seen in this test case
    hls1 = []   # raw input lines
    for i in range (byk):
        kl = input()
        hls1.append(kl)
        data = kl.split(" ")
        for j in range(len(data)):
            if j ==0 :
                continue
            if data[j] not in hls :
                hls.append(data[j])
    hls.sort()
    hls1.sort()
    for i in range(len(hls)):
        print(hls[i],end=" ")
        # NOTE(review): `in` does substring matching on the whole line, so an
        # item occurring inside a *name* would also match -- confirm inputs.
        for j in range(len(hls1)):
            if hls[i] in hls1[j]:
                print(hls1[j].split(" ")[0],end=" ")
        print("")
    print("")
|
'''
(Translated from Russian) Questions of the form
    "What is the product of 4 and 9?"
are shown one at a time.  The factors (numbers 2, 3, ..., 9) are chosen at
random with randint().  The user enters an answer, which is judged right or
wrong; the number of correct answers is counted and the final total shown.
'''
# TODO (original note): error correction still to be done
from random import randint
# NOTE(review): the docstring promises 20 questions with factors 2..9, but
# the code asks n = 10 questions with randint(1, 10) -- confirm intent.
n = 10
t = 0  # correct answers
f = 0  # wrong answers
for k in range(n):
    x = randint(1, 10)
    y = randint(1, 10)
    print('what is result:',x,'*',y,'= ', end='')
    z = int(input())
    if z == x * y:
        t += 1
        print('% 6d '% z,'-> OK')
        # running tally is printed after every correct answer
        print(t,'of',n)
    else:
        f += 1
        print('all is BAD...',z,' - NO')
s= round((t / n) * 100, 1)
print('final score:',s,'%', end='')
# NOTE(review): a score of exactly 75 prints neither GOOD nor BAD.
if s > 75:
    print(' GOOD')
elif s < 75:
    print(' BAD')
import os
import re
import requests
import threading
import time
# url = 'https://www.77nt.com/50750/'
# url = 'https://www.77nt.com/50750/12068063.html'
# url = 'https://www.77nt.com/107094/34439391.html'
# Shared worker state: urls already fetched/claimed, guarded by `lock`.
text_index_list = []
lock = threading.Lock()
def get_date(url):
    """Fetch *url* and return the response body decoded as text."""
    response = requests.get(url)
    page = response.content.decode()
    # remember that this url has been fetched
    text_index_list.append(url)
    return page
#
#
def get_topic(index_url, index_html):
    """Return absolute chapter urls scraped from the first <dl> block of the
    novel's index page."""
    # parenthesised group returns only the <dl> contents
    listing = re.findall(r'<dl>(.*?)</dl>', index_html, re.S)[0]
    return [index_url + href
            for href in re.findall(r'href="(.*?)"', listing, re.S)]
# pycharm 遇到 \r 会回到开头,若是没有\n配合,会覆盖前面的内容
def get_article(article_html, index):
    """Extract (chapter title, body text) from a chapter page.

    The title is the <h1> text, stripped of filesystem-unsafe and site
    watermark characters, prefixed with the 1-based chapter number.  When
    the body cannot be located the placeholder text is returned instead.
    """
    title = re.search(r'<h1>(.*)</h1>', article_html, re.S).group(1)
    title = re.sub(r'[/\\:*?"<>|《》7nt.com]', '', title)
    title = re.search(r'[\u4e00-\u9fa5]+\s(.*)', title, re.S).group(1)
    title = str(index + 1) + '-' + title
    try:
        body = re.findall(r'<div class="con_show_l"><script type="text/javascript">show_d\(\);</script></div>('
                          r'.*?)<div', article_html, re.S)[0]
    except Exception as e:
        print(title + '----出错----')
        return title, '无内容'
    for old, new in (('\r<br />', ''), ('<br />', ''), ('&nbsp;&nbsp;&nbsp;&nbsp;', '\r\n')):
        body = body.replace(old, new)
    return title, body
def save(chapter, article, name, i):
    """Write one chapter to ../小说/<name>/<chapter>.txt, skipping chapters
    that already exist on disk.  `i` is accepted but unused."""
    if not os.path.exists(f"../小说/{name}"):
        os.mkdir(f"../小说/{name}")
    # print(chapter)
    if not os.path.exists(os.path.join(f"../小说/{name}", chapter + '.txt')):
        with open(os.path.join(f"../小说/{name}", chapter + '.txt'), 'w+', encoding='utf-8') as f:
            f.write(chapter)
            f.write('\r\n')
            f.write(article)
def control(name, url):
    """Worker entry point: walk the chapter list of *url* and download every
    chapter not yet claimed by another thread, saving under *name*."""
    index_html = get_date(url)
    top_list = get_topic(url, index_html)
    for i in top_list:
        index = top_list.index(i)  # chapter position, used for ordering
        # BUGFIX: claim the url while holding the lock.  The original tested
        # membership *outside* the lock, so two threads could both pass the
        # check before either appended (check-then-act race) and download the
        # same chapter twice.
        with lock:
            if i in text_index_list:
                continue
            text_index_list.append(i)
        article_html = get_date(i)
        chapter_name, text_block = get_article(article_html, index)
        save(chapter_name, text_block, name, i)
if __name__ == '__main__':
    # (novel title, index url) handed to every worker
    Aim = ('轮回乐园', 'https://www.77nt.com/98380/')
    # Five daemon workers race over the same chapter list; text_index_list
    # plus `lock` is what keeps them from fetching a chapter twice.
    sub_t1 = threading.Thread(target=control, args=Aim, daemon=True)
    sub_t2 = threading.Thread(target=control, args=Aim, daemon=True)
    sub_t3 = threading.Thread(target=control, args=Aim, daemon=True)
    sub_t4 = threading.Thread(target=control, args=Aim, daemon=True)
    sub_t5 = threading.Thread(target=control, args=Aim, daemon=True)
    print("采集开始")
    sub_t1.start()
    sub_t2.start()
    sub_t3.start()
    sub_t4.start()
    sub_t5.start()
    sub_t1.join()
    sub_t2.join()
    sub_t3.join()
    sub_t4.join()
    sub_t5.join()
    print("采集完成")
# _*_ coding: utf-8 _*_
__author__ = 'onewei'
__date__ = '2018/1/31 6:41'
import hashlib
def get_md5(url):
    """Return the hex MD5 digest of *url* (str is utf-8 encoded first)."""
    data = url.encode("utf-8") if isinstance(url, str) else url
    digest = hashlib.md5()
    digest.update(data)
    return digest.hexdigest()

if __name__ == '__main__':
    print(get_md5("http://jobbole.com"))
|
"""
!sudo ./darknet detect cfg/yolov3.cfg yolov3.weights data/dog.jpg
!sudo ./darknet detect cfg/yolov3-tiny.cfg yolov3-tiny_final.weights data/1.jpg
from google.colab import drive
drive.mount('/content/drive')
!cd /content/drive/My Drive
!sudo ./darknet detect cfg/yolov3-tiny.cfg yolov3-tiny_final.weights data/2.jpg -dont_show
!sudo ./darknet detector demo Hardhat/hardhat.data Hardhat/yolov3-tiny.cfg yolov3-tiny_final.weights Hardhat/engine4.mp4 -out_filename Hardhat/result2.avi -dont_show -thresh 0.05
import cv2 |
# -*- coding: utf-8 -*-
import os
import shutil
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
from xml.sax import saxutils
from xml.sax import ContentHandler
from xml.sax.saxutils import XMLGenerator
from xml.sax.saxutils import escape
from dicht_trefw import add_jaar, trefwoordenlijst, jarenlijst
from dicht_datapad import xmlpad
# Template for a brand-new year file: an empty poem collection whose
# <laatste> marker (last-used id) starts at 0.
nieuwjaarxml = """\
<?xml version='1.0' encoding='iso-8859-1' ?>
<?xml-stylesheet href='http://dicht.magiokis.nl/dicht.css' type='text/css' ?>
<gedichten>
<laatste id='0' />
</gedichten>
"""
class FindList(ContentHandler):
    """SAX handler that collects [id, titel] pairs of poems.

    With item=None every poem in the document is collected; otherwise only
    poems whose title (seltitel=True) and/or text (seltext=True) contains
    the search string, case-insensitively.  When a poem has no <titel>, its
    first <regel> is used as the title, wrapped in parentheses.

    (The original started with three consecutive bare strings of which only
    the first was actually a docstring; they are merged here.)
    """

    def __init__(self, item=None, seltitel=False, seltext=False):
        if item is None:
            self.geef_alles = True
        else:
            self.search_item = item
            self.sel_titel = seltitel
            self.sel_tekst = seltext
            self.geef_alles = False
        # Initialize the flags to false
        self.id_titels = []
        self.titel = self.tekst = ""
        self.in_titel = self.in_tekst = False
        self.in_trefwoord = self.in_regel = False
        self.founditem = self.itemfound = False

    def startElement(self, name, attrs):
        if name == 'gedicht':
            # reset per-poem state
            item = attrs.get('id', None)
            self.deze_item = [item]
            self.titel = self.tekst = ""
            self.in_titel = self.in_tekst = False
            self.in_trefwoord = self.in_regel = False
            self.got_titel = False
        elif name == 'titel':
            self.in_titel = True
            self.got_titel = True
        elif name == 'tekst':
            self.in_tekst = True
        elif name == 'regel':
            self.in_regel = True
            self.regel = ""

    def characters(self, ch):
        if self.in_titel:
            self.titel += ch
        if self.in_tekst:
            self.tekst += ch
        elif self.in_regel:
            self.regel += ch

    def endElement(self, name):
        if name == 'gedicht':
            if self.geef_alles:
                self.deze_item.append(self.titel)
                self.id_titels.append(self.deze_item)
            else:
                oktoappend = False
                if self.sel_titel:
                    if self.titel != "":
                        if self.search_item.upper() in self.titel.upper():
                            oktoappend = True
                if self.sel_tekst:
                    if self.tekst != "":
                        if self.search_item.upper() in self.tekst.upper():
                            oktoappend = True
                if oktoappend:
                    self.deze_item.append(self.titel)
                    self.id_titels.append(self.deze_item)
        elif name == 'titel':
            if self.in_titel:
                self.in_titel = False
        elif name == 'tekst':
            if self.in_tekst:
                self.in_tekst = False
        elif name == 'regel':
            if self.in_regel:
                self.in_regel = False
                # BUGFIX: the original also set got_titel in
                # startElement('regel'), so this fallback never fired and
                # untitled poems kept an empty title; use the *first* line
                # as the title, as the original comments intended.
                if not self.got_titel:
                    self.titel = "(" + self.regel + ")"
                    self.got_titel = True
class DichtLijst(object):
    """List all poems of one year, or all poems matching a search text.

    The result is self.id_titels: a list of [jaar, id, titel] entries.
    `type` selects where to search: 'selTitel' (title), 'selTekst' (text)
    or 'selBeide' (both); with item=None everything is listed.
    (Translated from the original Dutch docstring.)
    """
    # NOTE(review): the parameter name `type` shadows the builtin; kept as-is
    # for backward compatibility with existing keyword callers.
    def __init__(self, jaar=None, item=None, type=None):
        id_titels = []
        self.search_item = item
        self.search_type = type
        if jaar is not None:  # item and type should be None
            if item is None and type is None:
                self.fn = os.path.join(xmlpad, 'Dicht_{}.xml'.format(jaar))
                self.parse()
                for y in self.item_list.id_titels:
                    y.insert(0, jaar)
                    id_titels.append(y)
        else:
            # no year given: walk every known year file
            dh = jarenlijst()
            if len(dh) > 0:
                for x in dh:
                    self.fn = os.path.join(xmlpad, 'Dicht_{}.xml'.format(x))
                    self.parse()
                    for y in self.item_list.id_titels:
                        y.insert(0, x)
                        id_titels.append(y)
        # copy into the public attribute (the encode calls are a leftover
        # from the Python 2 version)
        self.id_titels = []
        for x in id_titels:
            e = []
            for y in x:
                e.append(y)  # .encode('ISO-8859-1'))
            self.id_titels.append(e)

    def parse(self):
        # Parse self.fn with a FindList handler matching the search type.
        parser = make_parser()
        parser.setFeature(feature_namespaces, 0)
        # NOTE(review): an unrecognised search_type leaves `dh` undefined and
        # raises NameError below -- confirm callers only pass the three
        # documented values.
        if self.search_item == None:
            dh = FindList()
        elif self.search_type == "selTitel":
            dh = FindList(self.search_item, seltitel=True)
        elif self.search_type == "selTekst":
            dh = FindList(self.search_item, seltext=True)
        elif self.search_type == "selBeide":
            dh = FindList(self.search_item, seltitel=True, seltext=True)
        parser.setContentHandler(dh)
        parser.parse(self.fn)
        self.item_list = dh
class FindItem(ContentHandler):
    "SAX handler holding the data of one poem (by id): titel, tekst, gedicht"
    def __init__(self, item):
        # item: the poem id (string) to look for
        self.search_item = item
        # Initialize the flags to false
        self.in_titel = self.in_tekst = self.in_regel = False
        self.titel = self.tekst = ""
        self.id_titels = []
        ## self.trefwoorden = []
        self.gedicht = []
        # founditem: currently inside the wanted <gedicht>;
        # itemfound: the wanted poem was seen at least once
        self.founditem = self.itemfound = False

    def startElement(self, name, attrs):
        if name == 'gedicht':
            item = attrs.get('id', None)
            if item == self.search_item:
                self.founditem = True
        elif name == 'titel':
            if self.founditem:
                self.in_titel = True
                self.titel = ""
        elif name == 'tekst':
            if self.founditem:
                self.in_tekst = True
                self.tekst = ""
        elif name == 'couplet':
            if self.founditem:
                # couplet boundaries become empty strings in self.gedicht
                if len(self.gedicht) > 0:
                    self.gedicht.append('')
        elif name == 'regel':
            if self.founditem:
                self.in_regel = True
                self.regel = ""

    def characters(self, ch):
        if self.in_titel:
            self.titel += ch
        elif self.in_tekst:
            # collapse leading whitespace of continuation chunks
            if ch[:1] == " ":
                self.tekst += "" + ch.strip()
            else:
                self.tekst += ch
        ## elif self.inTrefwoordContent:
        ##     self.Trefwoord = self.Trefwoord + ch
        elif self.in_regel:
            self.regel += ch

    def endElement(self, name):
        if name == 'gedicht':
            if self.founditem:
                self.itemfound = True
                self.founditem = False
        elif name == 'titel':
            if self.in_titel:
                self.in_titel = False
        elif name == 'tekst':
            if self.in_tekst:
                self.in_tekst = False
                # strip surrounding blank lines from the collected text
                if self.tekst[0] == "\n":
                    self.tekst = self.tekst[1:]
                self.tekst = self.tekst.strip()
                # self.Tekst = self.Tekst.rstrip()
                if self.tekst[-1] == "\n":
                    self.tekst = self.tekst[:-1]
        ## elif name == 'trefwoord':
        ##     if self.in_trefwoord:
        ##         self.in_trefwoord = 0
        ##         self.trefwoorden.append(self.trefwoord)
        elif name == 'regel':
            if self.in_regel:
                self.in_regel = 0
                self.gedicht.append(self.regel)
class UpdateItem(XMLGenerator):
"denktekst updaten"
# aan het eind zit een element genaamd laatste. Als het id van de tekst hoger is dan deze, dan laatste aanpassen.
"schrijf tekst weg in XML-document"
def __init__(self, item):
self.dh = item
self.search_item = self.dh.id
self.fh = open(self.dh.fn,'w')
self.founditem = self.itemfound = False
self.dontwrite = False
XMLGenerator.__init__(self,self.fh)
def startElement(self, name, attrs):
#-- kijk of we met de te wijzigen tekst bezig zijn
if name == 'gedicht':
item = attrs.get('id', None)
if item == str(self.search_item):
self.founditem = self.itemfound = True
elif name == 'laatste':
self.laatste = attrs.get('id', None)
#-- xml element (door)schrijven
if not self.founditem:
if name != 'laatste':
XMLGenerator.startElement(self, name, attrs)
else:
if name == 'gedicht':
XMLGenerator.startElement(self, name, attrs)
def characters(self, ch):
if not self.founditem:
if not self.dontwrite:
XMLGenerator.characters(self,ch)
def endElement(self, name):
if name == 'laatste':
dontwrite = False
elif name == 'gedichten':
if not self.itemfound:
self.startElement("gedicht", {"id": self.dh.id})
self.endElement("gedicht")
self._out.write("\n ")
self.laatste = self.dh.id
self._out.write(' <laatste id="%s" />\n' % self.laatste)
self._out.write('</gedichten>\n')
elif name == 'gedicht':
if not self.founditem:
self._out.write('</gedicht>')
else:
self._out.write("\n")
if self.dh.titel != "":
self._out.write(' <titel>%s</titel>\n' % self.dh.titel)
if self.dh.tekst != "":
self._out.write(' <tekst>\n%s\n </tekst>\n' % self.dh.tekst)
if len(self.dh.gedicht) > 0:
self._out.write(' <couplet>\n')
for x in self.dh.gedicht:
if x == "":
self._out.write(' </couplet>\n')
self._out.write(' <couplet>\n')
else:
self._out.write(' <regel>%s</regel>\n' % x)
self._out.write(' </couplet>\n')
## for x in self.dh.trefwoorden:
## self._out.write(' <trefwoord>%s</trefwoord>\n' % x)
self._out.write(' </gedicht>')
self.founditem = False
elif not self.founditem:
XMLGenerator.endElement(self, name)
def endDocument(self):
self.fh.close()
class FindLaatste(ContentHandler):
    """SAX handler that grabs the id of the <laatste> marker element."""

    def __init__(self):
        # default when the document contains no <laatste> element
        self.laatste = 0

    def startElement(self, name, attrs):
        if name != 'laatste':
            return
        self.laatste = attrs.get('id', None)
class DichtItem(object):
    """All data of one 'gedicht' item.

    Looks up a poem by id in the year file and exposes titel, tekst, the
    poem lines (gedicht) and its trefwoorden (keywords).
    (Translated from the original Dutch docstring.)
    """
    def __init__(self, jaar, id_="0"):
        self.fn = os.path.join(xmlpad, 'Dicht_{}.xml'.format(jaar))
        self.jaar = jaar
        self.id = id_
        if id_ == "0":
            # id "0" means: create a new poem with the next free id
            self.new()
        # backup and scratch file names derived from the year file
        self.fno = '_old'.join(os.path.splitext(self.fn))
        self.fnn = '_new'.join(os.path.splitext(self.fn))
        self.titel = ""
        self.trefwoorden = []
        self.tekst = ""
        self.gedicht = []
        self.found = 0

    def new(self):
        # Set self.id to one past the highest id recorded in the file.
        parser = make_parser()
        parser.setFeature(feature_namespaces, 0)
        dh = FindLaatste()
        parser.setContentHandler(dh)
        parser.parse(self.fn)
        self.id = str(int(dh.laatste) + 1)

    def read(self):
        # Load titel/tekst/gedicht (and trefwoorden) for self.id.
        parser = make_parser()
        parser.setFeature(feature_namespaces, 0)
        dh = FindItem(str(self.id))
        parser.setContentHandler(dh)
        parser.parse(self.fn)
        self.found = dh.itemfound
        if self.found:
            self.titel = dh.titel  # .encode('ISO-8859-1')
            self.tekst = dh.tekst  # .encode('ISO-8859-1')
            for x in dh.gedicht:
                self.gedicht.append(x)  # .encode('ISO-8859-1'))
            # keywords are stored with the keyword, not with the poem, so we
            # have to fetch a keyword list for this (jaar, id)
            for x in trefwoordenlijst((self.jaar, self.id))[1]:
                self.trefwoorden.append(x)  # .encode('ISO-8859-1'))

    def write(self):
        # Back up the file, then stream the backup through UpdateItem so the
        # original name receives the updated document.
        shutil.copyfile(self.fn, self.fno)
        parser = make_parser()
        parser.setFeature(feature_namespaces, 0)
        dh = UpdateItem(self)
        parser.setContentHandler(dh)
        parser.parse(self.fno)

    def add_trefw(self, item):
        "add a keyword to self.trefwoorden"
        self.trefwoorden.append(item)

    def rem_trefw(self, item):
        "remove a keyword from self.trefwoorden (silently ignore absentees)"
        try:
            self.trefwoorden.remove(item)
        except ValueError:
            pass

    ## def wijzig_gedicht(self, item):
    ##     self.gedicht = []
    ##     for x in item:
    ##         self.gedicht.append(x)
def nieuw_jaar(jaar):
    """Create an empty poems file for *jaar* and register the year.

    Does nothing when the year file already exists.
    """
    fn = os.path.join(xmlpad, 'Dicht_{}.xml'.format(jaar))
    if os.path.exists(fn):
        return
    with open(fn, 'w') as f:
        f.write(nieuwjaarxml)
    add_jaar(jaar)
|
# Scratch exploration: peel a chained Java call apart segment by segment.
# The original repeated the slicing three times by hand and, due to a
# copy/paste slip, printed the second and third method names with the label
# "k1"; the loop below emits the correct k2/k3 labels.
detected_called_method = "cloned.put(buffer.duplicate().append('a'));"

rest = detected_called_method
for step in range(1, 4):
    dot = rest.find('.') + 1          # index just past the next '.'
    print('n%d: %d' % (step, dot))
    rest = rest[dot:]                 # drop everything up to that '.'
    paren = rest.find('(') + 1
    print('m%d: %s' % (step, rest))
    print('k%d: %s' % (step, rest[:paren - 1]))   # bare method name

detected_called_method2 = "cloned.put(buffer.duplicate().append('a'));"
l = detected_called_method2.split('.')
print(l)
print(detected_called_method2.count('.'))
|
from django.shortcuts import get_object_or_404

from rest_framework import generics

from .models import City
from .serializers import CitySerializer
class CityListIndex(generics.ListCreateAPIView):
    """GET: list all cities; POST: create a new one."""
    queryset = City.objects.all()
    serializer_class = CitySerializer
class CityElementShow(generics.RetrieveAPIView):
    """GET: return a single City looked up by primary key."""
    serializer_class = CitySerializer
    lookup_field = 'pk'

    # BUGFIX: DRF calls get_object() (snake_case); the original defined
    # getObject, which DRF never invoked, and it called self.kwargs as a
    # function even though kwargs is a dict.
    def get_object(self):
        pk = self.kwargs['pk']
        return get_object_or_404(City, pk=pk)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.