import sys, pygame
import time
import random
pygame.init()
white=(255,255,255)
size = width, height = 800, 600
speed = [20,0]
black = 0, 0, 0
car_width =128
screen = pygame.display.set_mode(size)
pygame.display.set_caption("my game")
clock=pygame.time.Clock()
ballimg = pygame.image.load("ball.png")
ballrect = ballimg.get_rect()
print(ballrect)
def car(x,y):
#screen.blit(ballimg,ballrect)
screen.blit(ballimg,(x,y))
def text_objects(text,font):
textSurface=font.render(text,True,black)
return textSurface,textSurface.get_rect()
def obstacles(thingx,thingy,thingw,thingh,color):
pygame.draw.rect(screen,color,[thingx,thingy,thingw,thingh])
def display_message(text):
textfont=pygame.font.Font('freesansbold.ttf',115)
TextSurf,TextRect=text_objects(text,textfont)
TextRect.center=((width/2),(height/2))
screen.blit(TextSurf,TextRect)
pygame.display.update()
time.sleep(2)
game_loop()
def crash():
display_message("You crashed!")
def game_loop():
car_xdist=width * 0.45
car_ydist=height *0.8
thing_x=random.randrange(0,width)
thing_y=-400
thing_w =100
thing_h =100
thing_speed =7
x_change =0
#crashed=False
game_exit=False
while not game_exit:
for event in pygame.event.get():
if event.type==pygame.QUIT:
game_exit=True
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change =-5
if event.key == pygame.K_RIGHT:
x_change =5
if event.type == pygame.KEYUP:
x_change =0
car_xdist+= x_change
#print(event)
screen.fill(white)
obstacles(thing_x,thing_y,thing_w,thing_h,black)
thing_y+= thing_speed
car(car_xdist,car_ydist)
if thing_y > height:
thing_y= 0 - thing_h
thing_x=random.randrange(0,width)
        if car_xdist > (width - car_width) or car_xdist < 0:
crash()
        if car_ydist < thing_y + thing_h:
            print("y crossover")
            if car_xdist > thing_x and car_xdist < thing_x + thing_w or car_xdist + car_width > thing_x and car_xdist + car_width < thing_x + thing_w:
                print("x crossover")
                crash()
        pygame.display.update()  # with no argument this redraws the entire surface; otherwise only the given region is updated
clock.tick(60)
game_loop()
|
"""********************************************
* A sample Python script for creating / updating layers
* from Dataminr API responses.
Open questions:
- Should this be a push vs a bulk update (if the latter, how to paginate)?
- Are alertIds unique? s.t. we can query all alerts and ignore those that exist
- What's the relationship between lists and alerts?
- Will there be overlapping alerts between lists?
- What should list field values be for an alerts feature?
********************************************"""
import datetime
import logging
import os
import tempfile
import json
import shutil
import requests
from arcgis.gis import GIS, Item # https://developers.arcgis.com/python/
"""********************************************
* Utility functions
********************************************"""
def extract(obj, keys, **kwargs):
"""returns a nested object value for the specified keys"""
required = kwargs.pop('required', False)
default = kwargs.pop('default', None)
warn = kwargs.pop('warn', False)
o = obj
for i in range(0, len(keys)):
try:
o = o[keys[i]]
except (KeyError, IndexError):
if warn:
print('Warning key does not exist. Key: {0} in Keys: {1}'.format(keys[i], keys))
            if required and default is None:
raise KeyError('Required key does not exist in object and no default')
return default
return o
def d_extract(obj, keys_delimited, **kwargs):
"""returns a nested object value for delimited keys"""
keys = keys_delimited.split('.')
return extract(obj, keys, **kwargs)
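# Example usage (a sketch): with obj = {'post': {'text': 'hi'}},
#   d_extract(obj, 'post.text')                -> 'hi'
#   d_extract(obj, 'post.missing', default='') -> ''   (no exception raised)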
def row_to_geojson(row, lon_field, lat_field):
"""returns a geojson feature for a flat dictionary row"""
return {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [row[lat_field], row[lon_field]]
},
'properties': {**row}
}
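# Note on coordinate order: GeoJSON expects [longitude, latitude], while the
# feature above is written as [row[lat_field], row[lon_field]]. This only
# yields valid GeoJSON if the caller's lon/lat fields already hold swapped
# values (as appears to be the case for the coordinates parsed in
# alert_to_row below).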
def rows_to_geojson(rows, lon_field, lat_field):
"""returns a geojson feature collection for a list of flat dictionary rows"""
features = [row_to_geojson(r, lon_field, lat_field) for r in rows]
return {
'type': 'FeatureCollection',
'features': features
}
def date_to_ags(date):
"""Returns an ArcGIS-formatted date from a Python date object"""
tz = datetime.timezone.utc
return date.astimezone(tz).strftime('%m/%d/%Y %H:%M:%S')
def timestamp_to_ags(timestamp):
"""Returns an ArcGIS-formatted date from a ms timestamp"""
seconds = timestamp / 1000
date = datetime.datetime.fromtimestamp(seconds)
return date_to_ags(date)
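# e.g. timestamp_to_ags(0) -> '01/01/1970 00:00:00' (the epoch, rendered in UTC)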
"""********************************************
* Dataminr API response parsing functions
********************************************"""
def alert_to_row(obj):
"""returns a flat dictionary row parsed from a dataminr alert object"""
f_e = lambda keys, **kwargs: extract(obj, keys, warn=False, **kwargs)
f_de = lambda keys, **kwargs: d_extract(obj, keys, warn=False, **kwargs)
# simple JSON parsed values
props = {
'alert_id': f_de('alertId', required=True), #hardcoded later to identify alerts
'place': f_de('eventLocation.name'),
'alert_type': f_de('alertType.name'),
'alert_type_color': f_de('alertType.color'),
'caption': f_de('caption'),
'publisher_category': f_de('publisherCategory.name'),
'publisher_category_color': f_de('publisherCategory.color'),
'related_terms_query_url': f_de('relatedTermsQueryURL'),
'expand_alert_url': f_de('expandAlertURL'),
'post_text': f_de('post.text'),
'post_text_transl': f_de('post.translatedText'),
'lon': f_e(['eventLocation','coordinates',0]),
'lat': f_e(['eventLocation','coordinates',1])
}
# JSON parsed values with manipulations
event_time = f_de('eventTime') # hardcoded later to delete old events
if event_time and event_time > 0:
props['event_time'] = timestamp_to_ags(event_time)
post_time = f_de('post.timestamp')
if post_time and post_time > 0:
props['post_time'] = timestamp_to_ags(post_time)
channels = f_de('source.channels')
if channels:
props['source'] = ','.join(channels)
terms = f_de('relatedTerms')
if terms:
props['related_terms'] = ','.join([t['text'] for t in terms])
categories = f_de('categories')
if categories:
props['categories'] = ','.join([c['name'] for c in categories])
return props
def list_to_row(obj):
"""returns a flat dictionary parsed from a dataminr list object"""
f_de = lambda keys, **kwargs: d_extract(obj, keys, warn=True, **kwargs)
return {
'list_id': f_de('id', required=True),
'list_name': f_de('name'),
'list_color': f_de('properties.watchlistColor')
}
"""********************************************
* Dataminr API wrappers
********************************************"""
def get_auth_header(client_id, client_secret):
params = {'grant_type': 'api_key', 'client_id': client_id, 'client_secret': client_secret}
r = requests.post('https://gateway.dataminr.com/auth/2/token', params)
j = r.json()
return {'Authorization': 'dmauth {0}'.format(j['dmaToken'])}
def get_lists(headers):
r = requests.get('https://gateway.dataminr.com/account/2/get_lists', headers=headers)
j = r.json()
topics = d_extract(j, 'watchlists.TOPIC', default=[])
companies = d_extract(j, 'watchlists.COMPANY', default=[])
custom = d_extract(j, 'watchlists.CUSTOM', default=[])
lists = topics + companies + custom
return [list_to_row(l) for l in lists]
def get_alerts(headers, list_ids, **kwargs):
pagesize = kwargs.pop('pagesize', 100)
params = {'alertversion': 14, 'lists': list_ids, 'pagesize': pagesize, **kwargs}
r = requests.get('https://gateway.dataminr.com/alerts/2/get_alert', params=params, headers=headers)
alerts = r.json()
return [alert_to_row(a) for a in alerts]
"""********************************************
* ArcGIS functions
********************************************"""
def add_geojson(gis, geojson, **item_options):
"""Uploads geojson and returns the file item"""
# get default args
title = item_options.pop('title', 'Dataminr Sample')
tags = item_options.pop('tags', 'dataminr-poc')
# save geojson to tempfile and add as item
    with tempfile.NamedTemporaryFile(mode="w", suffix='.geojson') as fp:
        fp.write(json.dumps(geojson))
        fp.flush()  # make sure the data is on disk before the upload reads fp.name
        item = gis.content.add({
            **item_options,
            'type': 'GeoJson',
            'title': title,
            'tags': tags,
        }, data=fp.name)
    return item
def create_scratch_layer(gis, geojson, **item_options):
"""Publishes parsed dataminr geojson as a service and returns the resulting layer item
Note, use this to quickly add geojson with system default properties. In production,
it's easier to set desired properties on a template layer then use create_layer."""
item = add_geojson(gis, geojson, **item_options)
try:
lyr_item = item.publish()
except Exception as e:
item.delete()
logging.error('Error creating a new layer: {0}'.format(str(e)))
return
    item.delete()  # if not deleted, the next run will error
# add a unique index for upsert operations
new_index = {
"name" : "Alert ID",
"fields" : "alert_id",
"isUnique" : True,
"description" : "Unique alert index"
}
add_dict = {"indexes" : [new_index]}
lyr = lyr_item.layers[0]
lyr.manager.add_to_definition(add_dict)
return lyr_item
def create_layer(gis, geojson, template_item):
    """Publishes parsed dataminr geojson as a service based on an existing
    template item and returns the resulting layer item"""
    try:
        results = gis.content.clone_items([template_item], copy_data=False)
        item = results[0]
        lyr = item.layers[0]
    except Exception as e:
        logging.error('Error creating a new layer from template: {0}'.format(str(e)))
        return
    return append_to_layer(gis, lyr, geojson)
def append_to_layer(gis, layer, geojson):
"""Appends parsed dataminr geojson to an existing service
Note, this is the best approach for bulk updates in ArcGIS Online.
There are other options here, such as transactional edits
> https://github.com/mpayson/esri-partner-tools/blob/master/feature_layers/update_data.ipynb
"""
    item = add_geojson(gis, geojson, title="Dataminr update")
    result = layer
    try:
        result = layer.append(
            item_id=item.id,
            upload_format="geojson",
            #upsert_matching_field="alert_id"
        )
        print(item)
        print(result)
except Exception as e:
logging.error('Error appending data to existing layer: {0}'.format(str(e)))
finally:
        item.delete()  # if not deleted, the next run will error
return result
def delete_before(lyr, date, field):
"""Deletes all features in a layer before a given date"""
where = "{0} < '{1}'".format(field, date_to_ags(date))
return lyr.delete_features(where=where)
def delete_before_days(lyr, number_days, field):
"""Deletes all features with dates before the specified
number of days back from today"""
dt = datetime.datetime.today() - datetime.timedelta(number_days)
return delete_before(lyr, dt, field)
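# Example: delete_before_days(lyr, 30, 'event_time') removes every feature
# whose event_time is older than 30 days (as used in run() below).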
"""********************************************
* The main show
********************************************"""
def run(gis_un, gis_pw, client_id, client_secret):
# if user has previously signed in to system, can also construct with token
# > gis = GIS(token="<access token>")
logging.info('Authenticating to GIS and Dataminr')
gis = GIS(username=gis_un, password=gis_pw)
headers = get_auth_header(client_id, client_secret)
# get alerts for each list, note alert ids need to be unique so only
# use the alert the first time it is returned from a list request
# TODO is this the best approach?
logging.info('Getting Dataminr data')
lists = get_lists(headers)
alerts = []
alert_ids = set()
for l in lists:
new_alerts = get_alerts(headers, str(l['list_id']), pagesize=100)
for a in new_alerts:
if a['alert_id'] in alert_ids:
continue
alerts.append({**a, **l})
alert_ids.add(a['alert_id'])
geojson = rows_to_geojson(alerts, 'lon', 'lat')
# check to see if a layer already exists, if so update, else create
# can alternatively save layer item ids to a store then reference
item = Item(gis, 'fcd1dad0687741ae87bac9966fa727e1')
lyr = item.layers[0]
delete_before_days(lyr,30,'event_time') #delete old features
logging.info('Updating existing layer {0} with {1} alerts'.format(item.id, len(alerts)))
append_to_layer(gis, lyr, geojson)
#search_items = gis.content.search('title:"test" AND type:"Feature Service"')
#if len(search_items) > 0:
# item = search_items[0]
# lyr = item.layers[0]
# delete_before_days(lyr, 30, 'event_time') # delete old features
# logging.info('Updating existing layer {0} with {1} alerts'.format(item.id, len(alerts)))
# append_to_layer(gis, lyr, geojson)
#else:
#logging.info('Creating a new layer with {0} alerts'.format(len(alerts)))
#create_scratch_layer(gis, geojson, title="Supply Chain Demo", tags="dataminr-poc")
# can alternatively create a layer from an existing item used as a template:
#template_item = Item(gis, 'fcd1dad0687741ae87bac9966fa727e1')
#item = create_layer(gis, geojson, template_item)
# here, the gis parameter should reference the account where the item lives
# in this case, the accounts are the same so the gis is the same
logging.info('Seemingly, a success')
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    # read credentials from the environment rather than hardcoding secrets
    # (the variable names here are placeholders)
    ags_un = os.environ['AGS_USERNAME']
    ags_pw = os.environ['AGS_PASSWORD']
    dm_id = os.environ['DM_CLIENT_ID']
    dm_se = os.environ['DM_CLIENT_SECRET']
    run(ags_un, ags_pw, dm_id, dm_se)
|
import FWCore.ParameterSet.Config as cms
CSCTFObjectKeysOnline = cms.ESProducer("CSCTFObjectKeysOnlineProd",
onlineAuthentication = cms.string('.'),
subsystemLabel = cms.string('CSCTF'),
onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R'),
enableConfiguration = cms.bool( True ),
enablePtLut = cms.bool( True )
)
|
dani = 5
num = [11, 22, 33, 44, 55]
name = ["ina", "pena", "gogudka", "razmarinapetkova"]
# while body
while dani < num[0]:
    print(dani)
    # increment the main variable
    dani += 1 |
#!/usr/bin/python
from fortigateconf import FortiOSConf
import sys
import json
import pprint
from argparse import Namespace
import logging
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger = logging.getLogger('fortinetconflib')
hdlr = logging.FileHandler('/var/tmp/testapi.log')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
logger.debug('often makes a very good meal of %s', 'visiting tourists')
fgt = FortiOSConf()
def json2obj(data):
return json.loads(data, object_hook=lambda d: Namespace(**d))
def main():
# Login to the FGT ip
fgt.debug('on')
fgt.login('192.168.40.8','admin','')
data = {
# "action" : "add",
"seq-num" :"8",
"dst": "10.10.30.0 255.255.255.0",
"device": "port2",
"gateway": "192.168.40.254",
}
pp = pprint.PrettyPrinter(indent=4)
d=json2obj(json.dumps(data))
pp.pprint(fgt.get_name_path_dict( vdom="root"))
# resp = fgt.schema('diagnose__tree__','debug', vdom="root")
# pp.pprint(resp)
resp = fgt.post('diagnose__tree__','debug', vdom="root", mkey="enable")
pp.pprint(resp)
fgt.logout()
if __name__ == '__main__':
main()
|
from __future__ import print_function
import connect4game as c4
import math as m
import time
neighbors = [-1,0,1]
#bot that takes the board state of a c4 game and chooses a move
#return all valid moves for a given board state
def find_moves(board):
valid = []
for i in range(0,c4.width):
for j in range(0, c4.height):
            if board.state[i][j]['player'] == 0 and (j == 0 or board.state[i][j-1]['player'] != 0):
valid.append((i,j))
break
return valid
#evaluate the board position for a player, returns in the form [winning_move, position_strength]
def eval(board, player, valid_moves, off_turn = False):
strength = 0
#hard-coded variable to indicate if game is over
closed = False
#loop over every entry in the board
for i in range(0,c4.width):
for j in range(0,c4.height):
            if board.state[i][j]['player'] == player:
#check all neighbors
for k in neighbors:
for l in neighbors:
#if neighboring space is empty, evaluate closeness c4
                        if not (l == 0 and k == 0) and not (i + k < 0 or i + k > c4.width-1 or j + l < 0 or j + l > c4.height-1):
filled = 1
viable = True
for slot in range(1,c4.n):
                                if i + slot*(-k) < 0 or i + slot*(-k) > c4.width-1 or j + slot*(-l) < 0 or j + slot*(-l) > c4.height-1 or board.state[i+slot*(-k)][j+slot*(-l)]['player'] != player:
break
else:
filled+=1
for slot in range(1,c4.n-filled+1):
                                if i + slot*(k) < 0 or i + slot*(k) > c4.width-1 or j + slot*(l) < 0 or j + slot*(l) > c4.height-1 or board.state[i+slot*(k)][j+slot*(l)]['player'] == 1 + (player % 2):
viable = False
break
                                elif board.state[i+slot*(k)][j+slot*(l)]['player'] == player:
filled+=1
#score of individual position
'''
if filled is 3 and (i + k, j + l) in valid_moves):
return [(i + k, j + l), None]
else:
'''
#heuristic to weight towards fewer bigger sequences
if viable:
                                if filled == c4.n:
                                    strength += 100000
                                    closed = True
                                elif filled == c4.n - 1 and off_turn and (i + k, j + l) in valid_moves:
                                    strength += 10000
                                    closed = True
strength += filled**1.5
return [closed, strength]
#returns tuple of best move and score
def choose(board, player, depth):
valid = find_moves(board)
best_move = None
    best_score = float('-inf')  # so any real score beats the initial value
for move in valid:
#hacky workaround using same object, lookup how to copy object when home
board.state[move[0]][move[1]]['player'] = player
new_valid = find_moves(board)
dif = None
score1 = eval(board,player,new_valid)
score2 = eval(board,1 + (player % 2), new_valid, True)
#if we have reached bottom of game tree or is losing position, return evaluation
        if depth == 1:
dif = score1[1] - score2[1]
#if we have a winning move, stop recursing from this level
#if we have a losing move, stop going down this path but not this level
elif score1[0] or score2[0]:
dif = score1[1] - score2[1]
if dif > 0:
depth = 1
else:
opp = choose(board, 1 + (player % 2), 1)
board.state[opp[0][0]][opp[0][1]]['player'] = 1 + (player % 2)
dif = choose(board, player, depth-1)[1]
board.state[opp[0][0]][opp[0][1]]['player'] = 0
#reverse the hack
board.state[move[0]][move[1]]['player'] = 0
#print("Move: %d,%d Score: %f; " % (move[0],move[1],dif),end="")
if dif > best_score:
best_move = move
best_score = dif
print("")
ret = (best_move, best_score)
print("Choosing: %s" % repr(ret))
return (best_move, best_score)
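# A copy-based alternative to the mutate-and-undo hack flagged above (a sketch,
# assuming board.state is a plain nested list/dict structure that deepcopy handles):
#
#   import copy
#   trial = copy.deepcopy(board)
#   trial.state[move[0]][move[1]]['player'] = player
#   score = eval(trial, player, find_moves(trial))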
def robbot(board, player = 1):
start = time.time()
choice = choose(board, player, 3)[0]
print("Time to choose: %f" % (time.time() - start))
return choice
|
from datetime import datetime
def get_datetime_object(string):
return datetime.strptime(string, '%Y-%m-%d %H:%M')
def key_func(i):
date_time = i.split('\t')[2][:-1]
return get_datetime_object(date_time)
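# Example (assuming tab-separated lines whose third field is the timestamp):
#   key_func('user\taction\t2020-01-02 13:45\n')
#   -> datetime(2020, 1, 2, 13, 45)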
def start():
with open('logs.txt', 'r') as f:
lines = f.readlines()
lines.sort(key=key_func)
with open('sort_results.txt', 'w') as f:
f.writelines(lines)
if __name__ == '__main__':
start() |
import logging
from pathlib import Path
from yapsy.PluginManager import PluginManager
def get_module_logger():
return logging.getLogger(__name__)
THIS_PATH = Path(__file__).parent
modules_plugin_manager = PluginManager()
modules_plugin_manager.setPluginPlaces([str(THIS_PATH)])
modules_plugin_manager.collectPlugins()
def activate_all():
for plugin in modules_plugin_manager.getAllPlugins():
        get_module_logger().info(
            "Loaded module plugin '%s'", plugin.name)
modules_plugin_manager.activatePluginByName(plugin.name)
def get_single_module(module):
logging.getLogger(__name__).info("Trying to load module '%s'", module.name)
try:
return modules_plugin_manager.getPluginByName(module.name).plugin_object.get(module)
except AttributeError:
get_module_logger().error("Could not load plugin '{}'".format(module.name))
raise # sys.exit()
def get(modules):
return [get_single_module(module) for module in modules]
def set_log_level(level):
    get_module_logger().setLevel(level)
for plugin in modules_plugin_manager.getAllPlugins():
plugin.plugin_object.set_log_level(level)
|
import requests
import json
import base64
def startlogo():
print('''
$$$$$$$$\ $$$$$$$$\ $$$$$$\ $$\
$$ _____| $$ _____| $$ __$$\ \__|
$$ | $$$$$$\ $$ | $$$$$$\ $$ / $$ | $$$$$$\ $$\
$$$$$\ $$ __$$\ $$$$$\ \____$$\ $$$$$$$$ |$$ __$$\ $$ |
$$ __| $$ / $$ |$$ __| $$$$$$$ |$$ __$$ |$$ / $$ |$$ |
$$ | $$ | $$ |$$ | $$ __$$ |$$ | $$ |$$ | $$ |$$ |
$$ | \$$$$$$ |$$ | \$$$$$$$ |$$ | $$ |$$$$$$$ |$$ |
\__| \______/ \__| \_______|\__| \__|$$ ____/ \__|
$$ |
$$ |
\__|
''')
def fofasearch(keyword):
    email = ""  # email
    key = ""  # key
    target = base64.b64encode(keyword.encode('utf-8')).decode('utf-8')  # encode to UTF-8 bytes, base64-encode, then decode back to a string
    page = "2"  # page number to fetch
    size = "100"  # records returned per page
    url = "https://fofa.so/api/v1/search/all?email="+email+"&key="+key+"&qbase64="+target+"&size="+size+"&page="+page
    resp = requests.get(url)
    data_model = json.loads(resp.text)  # decode the JSON response into a Python object
    data_url = []
    fofaurl = open(keyword+'.txt', 'w+')
    for results in data_model['results']:  # walk the result list
        for url in results[0:1]:  # take each row's url; use [1:2] for the IP instead
            data_url.append(url)  # append to the list
    for url in data_url:
        fofaurl.write(url+"\n")  # write each url to the file, one per line
    fofaurl.close()
if __name__ == '__main__':
    startlogo()
    keyword = input('Enter search query: ')
    fofasearch(keyword)
    print("Results saved to %s.txt" % keyword)
|
#! /usr/bin/env python3
"""
You have the following data structure:
arp_table = [('10.220.88.1', '0062.ec29.70fe'),
('10.220.88.20', 'c89c.1dea.0eb6'),
('10.220.88.21', '1c6a.7aaf.576c'),
('10.220.88.28', '5254.aba8.9aea'),
('10.220.88.29', '5254.abbe.5b7b'),
('10.220.88.30', '5254.ab71.e119'),
('10.220.88.32', '5254.abc7.26aa'),
('10.220.88.33', '5254.ab3a.8d26'),
('10.220.88.35', '5254.abfb.af12'),
('10.220.88.37', '0001.00ff.0001'),
('10.220.88.38', '0002.00ff.0001'),
('10.220.88.39', '6464.9be8.08c8'),
('10.220.88.40', '001c.c4bf.826a'),
('10.220.88.41', '001b.7873.5634')]
Loop over this data structure and extract all of the MAC addresses.
Process all of the MAC addresses to get them into a standard format.
Print all of the new standardized MAC addresses to the screen.
The standardized format should be as follows:
00:62:EC:29:70:FE
The hex digits should be capitalized.
Additionally, there should be a colon between each octet in the MAC address.
"""
from __future__ import print_function, unicode_literals
from pprint import pprint
arp_table = [('10.220.88.1', '0062.ec29.70fe'),
('10.220.88.20', 'c89c.1dea.0eb6'),
('10.220.88.21', '1c6a.7aaf.576c'),
('10.220.88.28', '5254.aba8.9aea'),
('10.220.88.29', '5254.abbe.5b7b'),
('10.220.88.30', '5254.ab71.e119'),
('10.220.88.32', '5254.abc7.26aa'),
('10.220.88.33', '5254.ab3a.8d26'),
('10.220.88.35', '5254.abfb.af12'),
('10.220.88.37', '0001.00ff.0001'),
('10.220.88.38', '0002.00ff.0001'),
('10.220.88.39', '6464.9be8.08c8'),
('10.220.88.40', '001c.c4bf.826a'),
('10.220.88.41', '001b.7873.5634')]
macs_formatted = []
for arp in arp_table:
_, mac = arp
word1, word2, word3 = mac.split(".")
word1 = word1.upper()
word2 = word2.upper()
word3 = word3.upper()
macs_formatted.append("{}:{}:{}:{}:{}:{}".format(
word1[:2],
word1[2:],
word2[:2],
word2[2:],
word3[:2],
word3[2:],
))
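# A more compact equivalent (a sketch using the same arp_table):
#   macs_formatted = ["{}:{}:{}:{}:{}:{}".format(*[m.replace('.', '').upper()[i:i+2] for i in range(0, 12, 2)]) for _, m in arp_table]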
pprint(macs_formatted) |
import os
import math
class Circle:
def __init__(self,r):
self.r = r
def area(self):
a = math.pi * (self.r ** 2)
return "Area: {0:.03f}".format(a)
def circumference(self):
c = 2 * math.pi * self.r
return "Circumference: {0:.03f}".format(c)
print("Please enter dimension of circle.")
radius = float(input("Radius: "))
cir = Circle(radius)
area = cir.area()
circumference = cir.circumference()
print(area)
print(circumference)
os.system("pause") |
# -*- coding: utf-8 -*-
"""
myads_service.models
~~~~~~~~~~~~~~~~~~~~~
Models for the users (users) of AdsWS
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import Column, String, Text
from adsmutils import UTCDateTime
import json
import logging
Base = declarative_base()
class MutableDict(Mutable, dict):
"""
By default, SQLAlchemy only tracks changes of the value itself, which works
"as expected" for simple values, such as ints and strings, but not dicts.
http://stackoverflow.com/questions/25300447/
using-list-on-postgresql-json-type-with-sqlalchemy
"""
@classmethod
def coerce(cls, key, value):
"""
Convert plain dictionaries to MutableDict.
"""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"""
Detect dictionary set events and emit change events.
"""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""
Detect dictionary del events and emit change events.
"""
dict.__delitem__(self, key)
self.changed()
def setdefault(self, key, value):
"""
Detect dictionary setdefault events and emit change events
"""
dict.setdefault(self, key, value)
self.changed()
def update(self, subdict):
"""
Detect dictionary update events and emit change events
"""
dict.update(self, subdict)
self.changed()
    def pop(self, key, default):
        """
        Detect dictionary pop events and emit change events
        :param key: key to pop
        :param default: default if key does not exist
        :return: the item under the given key
        """
        result = dict.pop(self, key, default)
        self.changed()
        return result
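# Usage sketch: a column declared as Column(MutableDict.as_mutable(JSON)),
# as Profile.bibcode is below, tracks in-place edits; e.g. (hypothetical key)
#   profile.bibcode['2019xyz..conf..123A'] = {'status': 'pending'}
# marks the attribute dirty so SQLAlchemy persists it on the next flush.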
class User(Base):
__tablename__ = 'users'
orcid_id = Column(String(255), primary_key=True)
access_token = Column(String(255))
created = Column(UTCDateTime)
updated = Column(UTCDateTime)
profile = Column(Text)
info = Column(Text)
def toJSON(self):
"""Returns value formatted as python dict."""
return {
'orcid_id': self.orcid_id,
'access_token': self.access_token,
'created': self.created and self.created.isoformat() or None,
'updated': self.updated and self.updated.isoformat() or None,
'profile': self.profile and json.loads(self.profile) or None,
'info': self.info and json.loads(self.info) or None
}
class Profile(Base):
__tablename__ = 'profile'
orcid_id = Column(String(255), primary_key=True)
created = Column(UTCDateTime)
updated = Column(UTCDateTime)
bibcode = Column(MutableDict.as_mutable(JSON), default={})
bib_status = ['verified', 'pending', 'rejected']
nonbib_status = ['not in ADS']
keys = ['status', 'title', 'pubyear', 'pubmonth']
def get_bibcodes(self):
"""
Returns the bibcodes of the ORCID profile
"""
bibcodes, statuses = self.find_nested(self.bibcode, 'status', self.bib_status)
return bibcodes, statuses
def get_non_bibcodes(self):
"""
Returns the non-ADS records of the ORCID profile
"""
non_bibcodes, status = self.find_nested(self.bibcode, 'status', self.nonbib_status)
return non_bibcodes
def get_records(self):
"""
Returns all records from an ORCID profile
"""
return self.bibcode
def add_records(self, records):
"""
Adds a record to the bibcode field, first making sure it has the appropriate nested dict
:param records: dict of dicts of bibcodes and non-bibcodes
"""
if not self.bibcode:
self.bibcode = {}
for r in records:
for k in self.keys:
tmp = records[r].setdefault(k, None)
self.bibcode.update(records)
def remove_bibcodes(self, bibcodes):
"""
Removes a bibcode(s) from the bibcode field.
Given the way in which bibcodes are stored may change, it seems simpler
to keep the method of adding/removing in a small wrapper so that only
one location needs to be modified (or YAGNI?).
:param bibcodes: list of bibcodes
"""
        for key in bibcodes:
            self.bibcode.pop(key, None)
def get_nested(self, dictionary, nested_key):
"""Get all values from the nested dictionary for a given nested key"""
keys = dictionary.keys()
vals = []
for key in keys:
vals.append(dictionary[key].setdefault(nested_key, None))
return vals
def find_nested(self, dictionary, nested_key, nested_value):
"""Get all top-level keys from a nested dictionary for a given list of nested values
belonging to a given nested key
:param dictionary - nested dictionary to search; searches one level deep
:param nested_key - key within nested dictionary to search for
:param nested_value - list (or string or number) of acceptable values to search for within the
given nested_key
:return good_keys - list of top-level keys with a matching nested value to the given nested key
:return good_values - list of the value (from nested_value) retrieved
"""
if type(nested_value) is not list:
nested_value = [nested_value]
keys = dictionary.keys()
good_keys = []
good_values = []
for key in keys:
if dictionary[key].get(nested_key,'') in nested_value:
good_keys.append(key)
good_values.append(dictionary[key].get(nested_key))
return good_keys, good_values
def update_status(self, keys, status):
"""
Update the status for a given key or keys
:param keys: str or list
:param status: str
:return: None
"""
if type(keys) is not list:
keys = [keys]
        if not isinstance(status, str):
            logging.warning('Status to update for record %s, ORCID %s must be passed as a string',
                            keys, self.orcid_id)
for key in keys:
if key in self.bibcode:
self.bibcode[key]['status'] = status
self.bibcode.changed()
else:
                logging.warning('Record %s not in profile for %s', key, self.orcid_id)
def get_status(self, keys):
"""
For a given set of records, return the statuses
:param keys: str or list
:return: good_keys - list of keys that exist in the set
:return: statuses - list of statuses of good_keys
"""
if type(keys) is not list:
keys = [keys]
good_keys = []
statuses = []
for key in keys:
if key in self.bibcode:
good_keys.append(key)
statuses.append(self.bibcode[key]['status'])
return good_keys, statuses |
from django.shortcuts import get_object_or_404
from rest_framework import status, generics
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from application import serializers
from application import models
class HealthCheck(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None):
return Response(status=status.HTTP_200_OK)
class ReadinessCheck(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None):
return Response(status=status.HTTP_200_OK)
class Collections(generics.ListAPIView):
permission_classes = (AllowAny,)
queryset = models.Collections.objects.all()
serializer_class = serializers.CollectionsSerializer
filterset_fields = ['visible_on_frontpage']
class CollectionsSingle(generics.RetrieveAPIView):
permission_classes = (AllowAny,)
queryset = models.Collections.objects.all()
serializer_class = serializers.CollectionsSerializer
def get_object(self):
draft_requested = self.request.query_params.get('draft') == 'true'
collection_page = get_object_or_404(self.get_queryset(), slug=self.kwargs.get('slug'))
if draft_requested:
return collection_page.get_latest_revision_as_page()
else:
return collection_page
class LandingPages(generics.ListAPIView):
permission_classes = (AllowAny,)
queryset = models.LandingPages.objects.filter(live=True)
serializer_class = serializers.LandingPagesSerializer
class LandingPagesSingle(generics.RetrieveAPIView):
permission_classes = (AllowAny,)
queryset = models.LandingPages.objects.all()
serializer_class = serializers.LandingPagesSerializer
def get_object(self):
draft_requested = self.request.query_params.get('draft') == 'true'
landing_page = get_object_or_404(self.get_queryset(), pk=self.kwargs.get('pk'))
if draft_requested:
return landing_page.get_latest_revision_as_page()
else:
return landing_page
class AboutPage(generics.ListAPIView):
permission_classes = (AllowAny,)
queryset = models.AboutPage.objects.filter(live=True)
serializer_class = serializers.AboutPageSerializer
class AccessibilityPage(generics.ListAPIView):
permission_classes = (AllowAny,)
queryset = models.AccessibilityPage.objects.filter(live=True)
serializer_class = serializers.AccessibilityPageSerializer
|
from django.contrib import admin
from .models import Deliveryboylctn, Orders
# Register your models here.
class AdminDeliveryboylctn(admin.ModelAdmin):
list_display = ['ename']
class AdminOrders(admin.ModelAdmin):
list_display = ['order_name']
admin.site.register(Deliveryboylctn,AdminDeliveryboylctn)
admin.site.register(Orders,AdminOrders)
|
"""
Format experimental results.
"""
# Author: Georgios Douzas <gdouzas@icloud.com>
# License: MIT
import pandas as pd
METRICS_NAMES_MAPPING = {'roc_auc': 'AUC', 'f1': 'F-SCORE', 'geometric_mean_score': 'G-MEAN'}
def generate_mean_std_tbl(experiment, name):
"""Generate table that combines mean and sem values."""
mean_vals, std_vals = getattr(experiment, f'mean_{name}_'), getattr(experiment, f'sem_{name}_')
index = mean_vals.iloc[:, :2]
scores = mean_vals.iloc[:, 2:].applymap('{:,.2f}'.format) + r" $\pm$ " + std_vals.iloc[:, 2:].applymap('{:,.2f}'.format)
tbl = pd.concat([index, scores], axis=1)
tbl['Metric'] = tbl['Metric'].apply(lambda metric: METRICS_NAMES_MAPPING[metric])
return tbl
def generate_pvalues_tbl(experiment, name):
"""Format p-values."""
tbl = getattr(experiment, f'{name}_test_').copy()
for name in tbl.dtypes[tbl.dtypes == float].index:
tbl[name] = tbl[name].apply(lambda pvalue: '%.1e' % pvalue)
tbl['Metric'] = tbl['Metric'].apply(lambda metric: METRICS_NAMES_MAPPING[metric])
return tbl
|
class Solution:
    def findDuplicate(self, nums):
        seen = set()
        for i in nums:
            if i in seen:
                return i
            seen.add(i)

nums = [1, 3, 4, 2, 2]
print(Solution().findDuplicate(nums)) |
THANK_YOU_MESSAGE = "Vielen Dank für deine Spende!"  # "Thank you for your donation!"
CONFIRMATION_AMOUNT = "Ich spende: 6,00 €"  # "I donate: 6.00 €"
class TestDonationPage:
def test_successful_five_euro_donation(self, donation_page, customer):
donation_page.click_accept_cookies_button()
confirmation = donation_page.fill_form_with_valid_data_and_submit(customer, 5)
assert confirmation.amount == CONFIRMATION_AMOUNT
payment = confirmation.proceed()
assert payment.thank_you_message == THANK_YOU_MESSAGE
def test_payment_methods_dimension_for_mobile_ui(self, mobile_donation_page):
label_width = mobile_donation_page.get_width_of_payment_label()
payment_method_width = mobile_donation_page.get_width_of_payment_methods()
assert payment_method_width == label_width
|
import socket
import math
TCP_IP = '127.0.0.1'
TCP_PORT = 9003
BUFFER_SIZE = 20
ndatos = 0
suma = 0
multp = 1
Datos = []
clientes = 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(3)
while True:
    print("Waiting for a connection")
    conn, addr = s.accept()
    data = conn.recv(BUFFER_SIZE).decode()
    print(data)
    Dato1, Dato2 = data.split(" ")
    suma = int(Dato1) + int(Dato2)
    resta = int(Dato1) - int(Dato2)
    multiplicacion = int(Dato1) * int(Dato2)
    potencia = pow(int(Dato1), int(Dato2))
    # validation
    # division
    if Dato2 == '0':
        division = 'Error'
    else:
        division = int(Dato1) / int(Dato2)
    # root: the exponent needs true division, and negative bases are rejected
    if Dato2 == '0' or int(Dato1) < 0:
        raiz = "Error"
    else:
        raiz = pow(int(Dato1), 1 / int(Dato2))
    conn.send((str(suma) + "," + str(resta) + "," + str(multiplicacion) + "," + str(division) + "," + str(potencia) + "," + str(raiz)).encode())
    if clientes == 2:
        conn.send("3".encode())
        break
    clientes += 1
conn.close()
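# A minimal client sketch (assumes the server above is running locally):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9003))
#   c.send("6 2".encode())
#   print(c.recv(1024).decode())  # -> "8,4,12,3.0,36,2.449..."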
|
import argparse
import numpy as np
import matlab.engine
from scipy.io import savemat
import os
from time import time
def main(args):
start_time = time()
eng = matlab.engine.start_matlab()
eng.addpath(r'matlab_engine')
eng.addpath(r'matlab_engine/weight_utils')
eng.addpath(r'matlab_engine/error_messages')
eng.addpath(r'examples/saved_weights')
network = {
'alpha': matlab.double([args.alpha]),
'beta': matlab.double([args.beta]),
'weight_path': args.weight_path,
}
lip_params = {
'formulation': args.form,
'split': matlab.logical([args.split]),
'parallel': matlab.logical([args.parallel]),
'verbose': matlab.logical([args.verbose]),
'split_size': matlab.double([args.split_size]),
'num_neurons': matlab.double([args.num_neurons]),
'num_workers': matlab.double([args.num_workers]),
'num_dec_vars': matlab.double([args.num_decision_vars])
}
L = eng.solve_LipSDP(network, lip_params, nargout=1)
print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of {L:.3f}')
print(f'Total time: {float(time() - start_time):.5} seconds')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--form',
default='neuron',
const='neuron',
nargs='?',
choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'),
help='LipSDP formulation to use')
parser.add_argument('-v', '--verbose',
action='store_true',
help='prints CVX output from solve if supplied')
parser.add_argument('--alpha',
type=float,
default=0,
nargs=1,
help='lower bound for slope restriction bound')
    parser.add_argument('--beta',
                        type=float,
                        default=1,
                        nargs=1,
                        help='upper bound for slope restriction bound')
parser.add_argument('--num-neurons',
type=int,
default=100,
nargs=1,
help='number of neurons to couple for LipSDP-Network-rand formulation')
parser.add_argument('--split',
action='store_true',
help='splits network into subnetworks for more efficient solving if supplied')
parser.add_argument('--parallel',
action='store_true',
help='parallelizes solving for split formulations if supplied')
parser.add_argument('--split-size',
type=int,
default=2,
nargs=1,
help='number of layers in each subnetwork for splitting formulations')
parser.add_argument('--num-workers',
type=int,
default=0,
nargs=1,
help='number of workers for parallelization of splitting formulations')
parser.add_argument('--num-decision-vars',
type=int,
default=10,
nargs=1,
help='specify number of decision variables to be used for LipSDP')
parser.add_argument('--weight-path',
type=str,
required=True,
nargs=1,
help='path of weights corresponding to trained neural network model')
args = parser.parse_args()
if args.parallel is True and args.num_workers[0] < 1:
raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.')
if args.split is True and args.split_size[0] < 1:
raise ValueError('When you use --split, --split-size must be an integer >= 1.')
main(args)
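# Example invocation (a sketch; the script and weight-file names are hypothetical):
#   python solve_sdp.py --form neuron --weight-path examples/saved_weights/net.mat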
|
# Run Time: 2.750
def cycle_length(n):
if n == 1:
return 1
elif n & 1 == 1:
return 1 + cycle_length(3 * n + 1)
else:
return 1 + cycle_length(n >> 1)
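# e.g. cycle_length(22) == 16, via the sequence
# 22 11 34 17 52 26 13 40 20 10 5 16 8 4 2 1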
cache = {}
for i in range(1, 10000):
cache[i] = cycle_length(i)
while True:
try:
i, j = input().split()
except EOFError:
break
i = int(i)
j = int(j)
print("{0} {1}".format(i, j), end=" ")
if i > j:
i, j = j, i
max_cycle_length = 0
for number in range(i, j + 1):
if number in cache:
current_cycle_length = cache[number]
else:
current_cycle_length = cycle_length(number)
cache[number] = current_cycle_length
if current_cycle_length > max_cycle_length:
max_cycle_length = current_cycle_length
print(max_cycle_length)
|
import pickle
import numpy as np
from scipy.stats import pearsonr
z_scores = np.load('raw_z_npdump.dump')
channels_data = []
def calculate_pearson(start):
for index in range(64):
if start < index:
channels_data.append(pearsonr(z_scores[start], z_scores[index])[0])
def dump_z_score_pearson():
    with open('z_score_pearson_dump.dump', 'wb') as f:
        pickle.dump(channels_data, f)
if __name__ == '__main__':
for i in range(64):
        print('Calculating Channel %s' % i)
calculate_pearson(i)
dump_z_score_pearson()
|
from threading import Thread
import time
import queue

class device(Thread):
    def __init__(self, name, cpu, bandwidth, QIN, QOUT):
        # Call the Thread class's init function
        Thread.__init__(self)
        self.name = name
        self.VFs = queue.Queue()
        self.cpu = cpu
        self.IN = QIN
        self.OUT = QOUT
        self.bandwidth = bandwidth
    def run(self):
        while True:
            if not self.VFs.empty():
                print('new VF')
                v = self.VFs.get()
                v.start()
                #time.sleep(self.data/self.speed);
                #self.OUT[self.name].put(self.data);
    def addVF(self, newVF):
        newVF.setUndertakeCPU(self.cpu)
        self.VFs.put(newVF)
        print('VF added')
    def removeVF(self, oldVF):
        # NOTE: queue.Queue has no remove(); this call raises AttributeError.
        # Supporting removal would need a list or collections.deque instead.
        self.VFs.remove(oldVF)
    def getCPU(self):
        return self.cpu
|
#!/usr/bin/env python2  # Chimera's bundled Python is used
#------------------------------------------------------------------------------
# file: autoChimeraMinimization.py
# author: Jon David
# date: Monday, July 6, 2020
# description:
# This is a Chimera script. Automates Chimera's structure minimization
# process for a single PDB.
#------------------------------------------------------------------------------
# usage:
# chimera --nogui --script "autoChimeraMinimization.py <inPDB> <outPDB>"
#------------------------------------------------------------------------------
# notes:
# Chimera documentation:
# cgl.ucsf.edu/chimera/current/docs/UserGuide/framecontrib.html
#------------------------------------------------------------------------------
import sys
import chimera
import Midas
from DockPrep import prep
from chimera import runCommand
USAGE = '''
USAGE: automateMinimization.py <inputPDB> <outputPDB>
Where,
inputPDB :: an existing PDB file to minimize
outputPDB :: PDB file to write the minimized structure to
'''
def runLoadPDB(inPDB):
runCommand("open {}".format(inPDB))
return
def runAddH():
runCommand('addh inIsolation true hbond true useHisName true')
return
def runAddCharge():
# runCommand('addcharge std chargeModel ff14SB')
runCommand('addcharge std chargeModel 99sb')
# runCommand('addcharge nonstd method am1-bcc')
#runCommand('addcharge nonstd method am1-bcc')
return
def runDockPrep():
models = chimera.openModels.list(modelTypes=[chimera.Molecule])
prep(models)
#prep(models, hisScheme=None, mutateMSE=True, mutate5BU=True, mutateUMS=True,
# mutateUMS=True, mutateCSL=True, delSolvent=True, delIons=False,
# delLigands=False, delAltLocs=True, incompleteSideChains='rotamers',
# nongui=False, rotamerLib='Dunbrack', rotamerPreserve=True,
# memorize=False, memorizeName=None)
return
def runMinimization():
runCommand('minimize nsteps 100 stepsize 0.02 cgsteps 10 cgstepsize 0.02 interval 10')
return
def runSavePDB(outfPDB):
runCommand("write #0 {}".format(outfPDB))
return
# TODO: set where to write replyLog
# TODO: load PDB
## sys.argv[0] name of script
print( "(debug) script name: ".format(sys.argv[0]) )
if len(sys.argv) < 3:
    print(USAGE)
    sys.exit(1)
print( "(debug) arg1: {}".format(sys.argv[1]) )
print( "(debug) arg2: {}".format(sys.argv[2]) )
inPDB = sys.argv[1]
outPDB = sys.argv[2]
runLoadPDB(inPDB)
runAddH()
runAddCharge()
runDockPrep()
runMinimization()
runSavePDB(outPDB)
|
"App configuration"
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://root:omokhudu@localhost/flask-spa"
DEBUG = True
|
from store.models import Cart, CartItem, Product
from django.urls import reverse
from rest_framework import status
from model_bakery import baker
import pytest
@pytest.fixture
def cart_id():
return baker.make(Cart).id
@pytest.fixture
def get_cart(api_client):
def action(cart_id):
return api_client.get(reverse('cart-detail', args=[cart_id]))
return action
@pytest.fixture
def add_to_cart(api_client):
def action(cart_id, product_id, quantity):
endpoint = reverse('cart-item-list', args=[cart_id])
response = api_client.post(
endpoint, {'product_id': product_id, 'quantity': quantity})
return response
return action
@pytest.fixture
def update_cart_item(api_client):
def action(cart_id, item_id, quantity):
endpoint = reverse('cart-item-detail', args=[cart_id, item_id])
return api_client.patch(endpoint, {'quantity': quantity})
return action
@pytest.fixture
def delete_cart_item(api_client):
def action(cart_id, item_id):
endpoint = reverse('cart-item-detail', args=[cart_id, item_id])
return api_client.delete(endpoint)
return action
class TestCarts:
def test_get_is_not_allowed(self, api_client):
response = api_client.get(reverse('cart-list'))
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
@pytest.mark.django_db
def test_post_creates_a_cart(self, api_client):
response = api_client.post(reverse('cart-list'))
assert response.status_code == status.HTTP_201_CREATED
assert response.data['id'] is not None
assert Cart.objects.filter(pk=response.data['id']).exists()
class TestAddToCart:
@pytest.mark.django_db
@pytest.mark.parametrize('product_id', ['', 0])
def test_if_product_is_invalid_returns_400(self, cart_id, add_to_cart, product_id):
response = add_to_cart(cart_id, product_id, 1)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data['product_id'] is not None
@pytest.mark.django_db
@pytest.mark.parametrize('quantity', ['', 0])
def test_if_quantity_is_invalid_returns_400(self, cart_id, add_to_cart, quantity):
product = baker.make(Product)
response = add_to_cart(cart_id, product.id, quantity)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data['quantity'] is not None
@pytest.mark.django_db
def test_if_data_is_valid_creates_a_cart_item(self, cart_id, add_to_cart):
product = baker.make(Product)
quantity = 1
response = add_to_cart(cart_id, product.id, quantity)
assert response.status_code == status.HTTP_201_CREATED
assert response.data['id'] is not None
assert CartItem.objects.filter(pk=response.data['id']).exists()
@pytest.mark.django_db
def test_if_product_exists_in_cart_quantity_is_increased(self, cart_id, add_to_cart):
product = baker.make(Product)
response = add_to_cart(cart_id, product.id, 1)
response = add_to_cart(cart_id, product.id, 1)
cart_item = CartItem.objects.get(pk=response.data['id'])
assert cart_item.quantity == 2
class TestUpdateCartItemQuantity:
@pytest.mark.django_db
def test_put_not_allowed(self, api_client):
response = api_client.put(reverse('cart-item-detail', args=[1, 1]))
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
@pytest.mark.django_db
def test_if_item_doesnt_exist_returns_404(self, update_cart_item):
item = baker.make(CartItem)
item.delete()
response = update_cart_item(item.cart_id, item.id, 1)
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db
@pytest.mark.parametrize('quantity', ['', 0, -1])
def test_if_quantity_is_invalid_returns_400(self, update_cart_item, quantity):
item = baker.make(CartItem)
response = update_cart_item(item.cart_id, item.id, quantity)
assert response.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_if_quantity_is_valid_cart_item_is_updated(self, update_cart_item):
item = baker.make(CartItem)
response = update_cart_item(item.cart_id, item.id, 1)
assert response.status_code == status.HTTP_200_OK
item = CartItem.objects.get(pk=item.id)
assert item.quantity == 1
class TestRemoveFromCart:
@pytest.mark.django_db
def test_if_item_doesnt_exist_returns_404(self, delete_cart_item):
item = baker.make(CartItem)
item.delete()
response = delete_cart_item(item.cart_id, item.id)
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db
def test_if_item_exists_it_is_deleted(self, delete_cart_item):
item = baker.make(CartItem)
response = delete_cart_item(item.cart_id, item.id)
assert response.status_code == status.HTTP_204_NO_CONTENT
assert not CartItem.objects.filter(pk=item.id).exists()
class TestGetCart:
@pytest.mark.django_db
def test_if_cart_doesnt_exist_returns_404(self, get_cart):
cart = baker.make(Cart)
cart.delete()
response = get_cart(cart.id)
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db
def test_if_cart_exists_it_is_returned(self, get_cart):
unit_price = 10
quantity = 2
cart = baker.make(Cart)
product = baker.make(Product, unit_price=unit_price)
item = baker.make(CartItem, cart_id=cart.id,
product_id=product.id, quantity=quantity)
response = get_cart(cart.id)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'id': str(cart.id),
'items': [
{
'id': item.id,
'product': {
'id': product.id,
'title': product.title,
'unit_price': product.unit_price
},
'quantity': quantity,
'total_price': quantity*unit_price
}
],
'total_price': quantity*unit_price
}
|
# Read N rows of "k a1 ... ak"; count how many rows each value appears in,
# then report the values present in every one of the N rows.
N, M = [int(_) for _ in input().split()]
KA = [[int(_) for _ in input().split()] for i in range(N)]
from collections import defaultdict
cs = defaultdict(int)
for r in KA:
    for n in r[1:]:  # skip the leading count
        cs[n] += 1
result = sum(cs[k] == N for k in cs)
print(result)
|
class Node():
def __init__(self, val):
self.val = val
self.next = None
class LL():
def __init__(self):
self.head = None
    #There is a lot of boilerplate here
#Need to abstract it out. Later
def add(self, node):
if self.head:
cur = self.head
while True:
                if cur.next is None:
cur.next = node
break
else:
cur = cur.next
else:
self.head = node
def print_list(self):
cur = self.head
while True:
print(cur.val)
if cur.next:
cur = cur.next
else:
break
    def remove(self, val):
        #This has fun edge cases in here.
        #Mostly, removing from an empty list, the head, middle and end
        if self.head is None:
            print("Can't find {}".format(val))
        elif val == self.head.val:
            self.head = self.head.next
        else:
cur = self.head
while True:
                if cur.next is None:
print("Can't find {}".format(val))
break
elif cur.next.val == val:
cur.next = cur.next.next
break
else:
cur = cur.next
linked = LL()
n1 = Node(4)
n2 = Node(5)
n3 = Node(6)
n4 = Node(7)
n5 = Node(9)
linked.add(n1)
linked.add(n2)
linked.add(n3)
linked.add(n4)
linked.add(n5)
linked.print_list()
print("-----")
linked.remove(4)
linked.print_list()
print("-----")
linked.remove(6)
linked.print_list()
print("-----")
linked.remove(8)
linked.print_list()
|
# class Emp:
# def getInfo(self):
# self.name = input("Employee Name:")
# self.contact = int(input("Contact Number:"))
# self.workexp = int(input("Total Work Experience in Years:"))
# self.companyname = input("Company Name:")
# self.salary = float(input("Salary:"))
# def totalSalary(self):
# self.netsal = self.salary + self.bonus - self.pf
# def pf(self):
# self.pf = self.salary * 0.12
# def bonus(self):
# if self.workexp > 20:
# self.bonus = self.salary * 0.15
# elif self.workexp > 10:
# self.bonus = self.salary * 0.10
# elif self.workexp > 5:
# self.bonus = self.salary * 0.05
# else:
# self.bonus = 0
# def displayInfo(self):
# print("**********Displaying Employee Payslip*************")
# print("Employee Name:",self.name)
# print("Contact Number:",self.contact)
# print("Work Experience:",self.workexp)
# print("Company Nmae:",self.companyname)
# print("salary:",self.salary)
# print("Bonus Allowcated:",self.bonus)
# print("PF Deducted:",self.pf)
# print("Total Net Salary:",self.netsal)
# def main():
# e = Emp()
# e.getInfo()
# e.bonus()
# e.pf()
# e.totalSalary()
# e.displayInfo()
# #if __name__=='__main__':
# main()
#--------------------------------------------------------------------------------------
class Emp:
def __init__(s,name,contact,workExp,salary,cname):
s.name=name
s.contact=contact
s.workExp=workExp
s.salary=salary
s.cname=cname
def totalSalary(s):
return s.salary*12*s.workExp
def pf(s):
return s.totalSalary()*0.12
def bonus(s):
if s.workExp>5:
return 10000
else:
return 5000
def __str__(s):
t=(s.name,s.contact,s.workExp,s.salary,s.cname,s.totalSalary(),s.pf(),s.bonus())
return str(t)
e=Emp("Aditi",88996655,5,80000,'ITV')
print(e) |
"""
Various hard-coded data.
"""
# ISO 639-1 language codes
# The readable names are locale-dependent
LANGUAGE_CODES = [
('ab', u'Abkhazian'),
('aa', u'Afar'),
('af', u'Afrikaans'),
    ('ak', u'Akan'),
('sq', u'Albanian'),
('am', u'Amharic'),
    ('ar', u'Arabic'),
('an', u'Aragonese'),
('hy', u'Armenian'),
('as', u'Assamese'),
('av', u'Avaric'),
('ae', u'Avestan'),
('ay', u'Aymara'),
('az', u'Azerbaijani'),
('bm', u'Bambara'),
('ba', u'Bashkir'),
('eu', u'Basque'),
('be', u'Belarusian'),
('bn', u'Bengali'),
('bh', u'Bihari'),
('bi', u'Bislama'),
('bs', u'Bosnian'),
('br', u'Breton'),
('bg', u'Bulgarian'),
('my', u'Burmese'),
('ca', u'Catalan'),
('ch', u'Chamorro'),
('ce', u'Chechen'),
('ny', u'Nyanja'),
('zh', u'Chinese'),
('cv', u'Chuvash'),
('kw', u'Cornish'),
('co', u'Corsican'),
('cr', u'Cree'),
('hr', u'Croatian'),
('cs', u'Czech'),
('da', u'Danish'),
('dv', u'Divehi'),
('nl', u'Dutch'),
('dz', u'Dzongkha'),
('en', u'English'),
('eo', u'Esperanto'),
('et', u'Estonian'),
('ee', u'Ewe'),
('fo', u'Faroese'),
('fj', u'Fijian'),
('fi', u'Finnish'),
('fr', u'French'),
('ff', u'Fulah'),
('gl', u'Galician'),
('ka', u'Georgian'),
('de', u'German'),
('el', u'Greek'),
('gn', u'Guarani'),
('gu', u'Gujarati'),
('ht', u'Haitian'),
('ha', u'Hausa'),
('he', u'Hebrew'),
('hz', u'Herero'),
('hi', u'Hindi'),
('ho', u'Hiri Motu'),
('hu', u'Hungarian'),
('ga', u'Irish'),
('ig', u'Igbo'),
('ik', u'Inupiaq'),
('io', u'Ido'),
('is', u'Icelandic'),
('it', u'Italian'),
('iu', u'Inuktitut'),
('ja', u'Japanese'),
('jv', u'Javanese'),
('kl', u'Kalaallisut'),
('kn', u'Kannada'),
('kr', u'Kanuri'),
('ks', u'Kashmiri'),
('kk', u'Kazakh'),
('km', u'Khmer'),
('ki', u'Kikuyu'),
('rw', u'Kinyarwanda'),
('ky', u'Kirghiz'),
('kv', u'Komi'),
('kg', u'Kongo'),
('ko', u'Korean'),
('ku', u'Kurdish'),
('kj', u'Kuanyama'),
('la', u'Latin'),
('lb', u'Luxembourgish'),
('lg', u'Ganda'),
('li', u'Limburgish'),
('ln', u'Lingala'),
('lo', u'Lao'),
('lt', u'Lithuanian'),
('lu', u'Luba-Katanga'),
('lv', u'Latvian'),
('gv', u'Manx'),
('mk', u'Macedonian'),
('mg', u'Malagasy'),
    ('ms', u'Malay'),
('ml', u'Malayalam'),
('mt', u'Maltese'),
('mi', u'Maori'),
('mr', u'Marathi'),
('mh', u'Marshallese'),
('mn', u'Mongolian'),
('na', u'Nauru'),
('nv', u'Navajo'),
('nd', u'North Ndebele'),
('ne', u'Nepali'),
('ng', u'Ndonga'),
('nr', u'South Ndebele'),
('oc', u'Occitan'),
('oj', u'Ojibwa'),
('cu', u'Church Slavic'),
('om', u'Oromo'),
('or', u'Oriya'),
('os', u'Ossetic'),
('pa', u'Punjabi'),
('pi', u'Pali'),
('fa', u'Persian'),
('pl', u'Polish'),
('ps', u'Pashto'),
('pt', u'Portuguese'),
('qu', u'Quechua'),
('rm', u'Rhaeto-Romance'),
('rn', u'Rundi'),
('ro', u'Romanian'),
('ru', u'Russian'),
('sa', u'Sanskrit'),
('sc', u'Sardinian'),
('sd', u'Sindhi'),
('se', u'Northern Sami'),
('sm', u'Samoan'),
('sg', u'Sango'),
('sr', u'Serbian'),
('gd', u'Scottish Gaelic'),
('sn', u'Shona'),
('si', u'Sinhala'),
('sk', u'Slovak'),
('sl', u'Slovenian'),
('so', u'Somali'),
('st', u'Southern Sotho'),
('es', u'Spanish'),
('su', u'Sundanese'),
('sw', u'Swahili'),
('ss', u'Swati'),
('sv', u'Swedish'),
('ta', u'Tamil'),
('te', u'Telugu'),
('tg', u'Tajik'),
('th', u'Thai'),
('ti', u'Tigrinya'),
('bo', u'Tibetan'),
('tk', u'Turkmen'),
('tl', u'Tagalog'),
('tn', u'Tswana'),
('to', u'Tonga'),
('tr', u'Turkish'),
('ts', u'Tsonga'),
('tt', u'Tatar'),
('ug', u'Uighur'),
('uk', u'Ukrainian'),
('ur', u'Urdu'),
('uz', u'Uzbek'),
('ve', u'Venda'),
('vi', u'Vietnamese'),
('vo', u'Volap\xfck'),
('wa', u'Walloon'),
('cy', u'Welsh'),
('wo', u'Wolof'),
('fy', u'Western Frisian'),
('xh', u'Xhosa'),
('yi', u'Yiddish'),
('yo', u'Yoruba'),
('za', u'Zhuang'),
('zu', u'Zulu')
]
SCRIPT_CODES = [
('Latn', u'Latin'),
('Cyrl', u'Cyrillic'),
('Hebr', u'Hebrew'),
]
COUNTRY_CODES = [
('AF', 'Afghanistan'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island (Bouvetoya)'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory (Chagos Archipelago)'),
('VG', 'British Virgin Islands'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
    ('CG', 'Congo'),
    ('CD', 'Congo, the Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', "Cote d'Ivoire"),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
    ('KR', 'Korea, Republic of'),
    ('KP', "Korea, Democratic People's Republic of"),
('KW', 'Kuwait'),
('KG', 'Kyrgyz Republic'),
('LA', "Lao People's Democratic Republic"),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libyan Arab Jamahiriya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('AN', 'Netherlands Antilles'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn Islands'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthelemy'),
('SH', 'Saint Helena'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SK', 'Slovakia (Slovak Republic)'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard & Jan Mayen Islands'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('GB', 'United Kingdom of Great Britain & Northern Ireland'),
('UM', 'United States Minor Outlying Islands'),
('VI', 'United States Virgin Islands'),
('US', 'United States of America'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela'),
('VN', 'Vietnam'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
('AX', u'Åland Islands')
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2018-11-18 16:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CompetitorInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
('email', models.EmailField(max_length=254)),
('school', models.CharField(max_length=100)),
('studentnumber', models.IntegerField()),
('grade', models.CharField(max_length=10)),
('gradeid', models.IntegerField()),
('userlogin', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='login.UserLogin')),
],
),
migrations.CreateModel(
name='OrganizerInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.CharField(max_length=100)),
('userlogin', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='login.UserLogin')),
],
),
migrations.CreateModel(
name='UserLogin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('password', models.CharField(max_length=100)),
('type', models.CharField(max_length=100)),
],
),
]
|
# -*- coding: utf-8 -*-
import pytest
import klibs
from klibs import KLBoundary as klb
def test_rectangle_boundary():
rect = klb.RectangleBoundary('test1', p1=(10, 10), p2=(50, 50))
# Test position arguments and boundaries with floats
pos = klb.RectangleBoundary('test2', (10, 10), (50, 50))
floats = klb.RectangleBoundary('test3', p1=(10.4, 10.8), p2=(50.5, 50.2))
# Test string
assert str(rect) == "RectangleBoundary(p1=(10, 10), p2=(50, 50))"
# Test boundary attributes
assert rect.label == 'test1'
assert rect.p1 == (10, 10)
assert rect.p2 == (50, 50)
assert rect.center == (30, 30)
assert rect.width == 40
assert rect.height == 40
# Test boundary usage
assert rect.within((0, 0)) == False
assert rect.within((100, 30)) == False
assert rect.within((35.3, 35)) == True
assert rect.within((10, 10)) == True
assert rect.within((50, 50)) == True
assert (10, 10) in rect
assert not (0, 0) in rect
# Test boundary movement
rect.center = (50, 60)
assert rect.p1 == (30, 40)
assert rect.p2 == (70, 80)
assert rect.within((10, 10)) == False
assert rect.within((70, 80)) == True
# Test handling of when p1 > p2
swapped = klb.RectangleBoundary('swapped', (50, 50), (10, 10))
assert swapped.p1 == (10, 10)
assert swapped.p2 == (50, 50)
swapped = klb.RectangleBoundary('swapped', (50, 10), (10, 50))
assert swapped.p1 == (10, 10)
assert swapped.p2 == (50, 50)
# Test boundary exceptions
with pytest.raises(ValueError):
rect.within(5)
with pytest.raises(ValueError):
rect.center = (0, 0, 0)
with pytest.raises(ValueError):
klb.RectangleBoundary('test4', p1=(60, 60), p2=(60, 50))
with pytest.raises(ValueError):
klb.RectangleBoundary('test5', p1=0, p2=(50, 50))
def test_circle_boundary():
circle = klb.CircleBoundary('test1', center=(100, 100), radius=50)
# Test position arguments and boundaries with floats
pos = klb.CircleBoundary('test2', (100, 100), 50)
floats = klb.CircleBoundary('test3', center=(99.5, 100), radius=43.5)
# Test string
assert str(circle) == "CircleBoundary(center=(100, 100), radius=50)"
# Test boundary attributes
assert circle.label == 'test1'
assert circle.center == (100, 100)
assert circle.radius == 50
# Test boundary usage
assert circle.within((0, 0)) == False
assert circle.within((1000, 100)) == False
assert circle.within((51, 51)) == False
assert circle.within((99.7, 100)) == True
assert circle.within((50, 100)) == True
assert circle.within((100, 150)) == True
assert (50, 100) in circle
assert not (0, 0) in circle
# Test boundary movement
circle.center = (50, 50)
assert circle.center == (50, 50)
assert circle.within((100, 150)) == False
assert circle.within((50, 1)) == True
# Test boundary exceptions
with pytest.raises(ValueError):
circle.within(5)
with pytest.raises(ValueError):
circle.center = (0, 0, 0)
with pytest.raises(ValueError):
klb.CircleBoundary('test4', center=(100, 100), radius=-4)
with pytest.raises(ValueError):
klb.CircleBoundary('test5', center=100, radius=50)
def test_annulus_boundary():
ring = klb.AnnulusBoundary('test1', center=(100, 100), radius=50, thickness=10)
# Test position arguments and boundaries with floats
pos = klb.AnnulusBoundary('test2', (100, 100), 50, 10)
floats = klb.AnnulusBoundary('test3', center=(99.5, 100), radius=43.5, thickness=4.6)
# Test string
assert str(ring) == "AnnulusBoundary(center=(100, 100), radius=50, thickness=10)"
# Test boundary attributes
assert ring.label == 'test1'
assert ring.center == (100, 100)
assert ring.thickness == 10
assert ring.outer_radius == 50
assert ring.inner_radius == 40
# Test boundary usage
assert ring.within((0, 0)) == False
assert ring.within((1000, 100)) == False
assert ring.within((51, 51)) == False
assert ring.within((100, 100)) == False
assert ring.within((61, 100)) == False
assert ring.within((55.5, 100)) == True
assert ring.within((50, 100)) == True
assert ring.within((60, 100)) == True
assert ring.within((100, 150)) == True
assert ring.within((100, 140)) == True
assert (55, 100) in ring
assert not (0, 0) in ring
# Test boundary movement
ring.center = (50, 50)
assert ring.center == (50, 50)
assert ring.within((100, 150)) == False
assert ring.within((50, 1)) == True
# Test boundary exceptions
with pytest.raises(ValueError):
ring.within(5)
with pytest.raises(ValueError):
ring.center = (0, 0, 0)
with pytest.raises(ValueError):
klb.AnnulusBoundary('test4', center=(100, 100), radius=-4, thickness=10)
with pytest.raises(ValueError):
klb.AnnulusBoundary('test5', center=(100, 100), radius=50, thickness=-5)
with pytest.raises(ValueError):
klb.AnnulusBoundary('test6', center=(100, 100), radius=10, thickness=30)
with pytest.raises(ValueError):
klb.AnnulusBoundary('test7', center=100, radius=50, thickness=10)
def test_boundary_set():
inspector = klb.BoundarySet()
tst1 = klb.RectangleBoundary('test1', p1=(10, 10), p2=(50, 50))
tst2 = klb.RectangleBoundary('test2', p1=(60, 60), p2=(100, 100))
tst3 = klb.RectangleBoundary('test3', p1=(10, 30), p2=(50, 70))
# Test legacy method of adding boundaries to inspector
rect_legacy = ['legacy_rect', ((10, 10), (50, 50)), klibs.RECT_BOUNDARY]
circle_legacy = ['legacy_circle', ((100, 10), 50), klibs.CIRCLE_BOUNDARY]
ring_legacy = ['legacy_ring', ((100, 10), 50, 10), klibs.ANNULUS_BOUNDARY]
for b in [rect_legacy, circle_legacy, ring_legacy]:
inspector.add_boundary(*b)
assert len(inspector.boundaries) == 3
# Test legacy method of adding multiple boundaries to inspector
inspector = klb.BoundaryInspector()
inspector.add_boundaries([rect_legacy, circle_legacy, ring_legacy])
assert len(inspector.boundaries) == 3
# Test current method of adding boundaries to inspector
inspector = klb.BoundarySet()
inspector.add_boundary(tst1)
assert len(inspector.boundaries) == 1
inspector.add_boundaries([tst2, tst3])
assert len(inspector.boundaries) == 3
# Test removing boundaries from the inspector
inspector = klb.BoundarySet([tst1, tst2, tst3])
assert len(inspector.boundaries) == 3
inspector.remove_boundaries('test1')
assert len(inspector.boundaries) == 2
inspector.remove_boundaries(['test2', 'test3'])
assert len(inspector.boundaries) == 0
# Test clearing boundaries from the inspector
inspector = klb.BoundarySet()
inspector.add_boundaries([tst1, tst2, tst3])
inspector.clear_boundaries()
assert len(inspector.boundaries) == 0
inspector.add_boundaries([tst1, tst2, tst3])
inspector.clear_boundaries(preserve=['test2'])
assert len(inspector.boundaries) == 1
assert 'test2' in inspector.labels
# Test that preserved boundaries during clear keep same order
inspector.add_boundaries([tst1, tst2, tst3])
inspector.clear_boundaries(preserve=['test3', 'test1'])
assert len(inspector.boundaries) == 2
assert inspector.labels == ['test1', 'test3']
# Test individual boundary tests
inspector = klb.BoundarySet()
inspector.add_boundaries([tst1, tst2, tst3])
assert inspector.within_boundary('test1', (20, 40)) == True
assert inspector.within_boundary('test2', (20, 40)) == False
assert inspector.within_boundary('test3', (20, 40)) == True
# Test combined boundary tests
inspector = klb.BoundarySet()
inspector.add_boundaries([tst1, tst2, tst3])
assert inspector.which_boundary((20, 40)) == 'test3'
assert inspector.which_boundary((20, 40), ignore='test3') == 'test1'
assert inspector.which_boundary((20, 40), ignore=['test3']) == 'test1'
assert inspector.which_boundary((20, 40), labels=['test1', 'test2']) == 'test1'
assert inspector.which_boundary((20, 40), labels=['test2']) == None
# Test exceptions
inspector = klb.BoundarySet()
with pytest.raises(ValueError):
inspector.add_boundary('hello')
with pytest.raises(ValueError):
inspector.add_boundary('test', [(80, 80), 15], shape="Triangle")
with pytest.raises(KeyError):
inspector.within_boundary('hello', (80, 80))
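# A minimal usage sketch outside the test suite (hypothetical coordinates;
# the BoundarySet API is the one exercised by the tests above):
#
#   bounds = klb.BoundarySet([klb.CircleBoundary('fixation', (960, 540), 60)])
#   if bounds.within_boundary('fixation', (955, 548)):
#       pass  # e.g. the gaze/cursor is on the fixation point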
|
import sys
sys.setrecursionlimit(100000)
memo = {}
def fib(n):
if n in memo: return memo[n]
if n == 1: return 1
if n == 2: return 2
else:
memo[n] = fib(n-1) + fib(n-2)
return memo[n]
print fib(9000)
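# An iterative alternative (a sketch): avoids deep recursion entirely, so no
# setrecursionlimit tweak is needed for large n.
def fib_iter(n):
    a, b = 1, 2  # same base cases as above: fib(1) == 1, fib(2) == 2
    for _ in range(n - 2):
        a, b = b, a + b
    return a if n == 1 else b
print fib_iter(9000) == fib(9000)  # True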
|
# import module
import sqlite3, os
from prettytable import PrettyTable, from_db_cursor
# clear the screen
os.system("clear")
# connect to the database
conn = sqlite3.connect('db/mahasiswa.db')
# create a cursor variable
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS mahasiswa(
nim integer not null primary key,
nama text,
prodi text,
alamat text);""")
# table class
class Tabel:
# method to display data
def showData(self):
self.tabel = PrettyTable()
c.execute("SELECT * FROM mahasiswa ORDER BY nim ASC")
self.tabel = from_db_cursor(c)
self.tabel.field_names = ["NIM","Nama","Prodi","Alamat"]
print("====================== DATA MAHASISWA UNIBBA ======================")
print(self.tabel)
self.tabel.clear_rows()
# method to insert data
def insertData(self, nim, nama, prodi, alamat):
c.execute(f"INSERT INTO mahasiswa VALUES( \
{nim}, \
'{nama}', \
'{prodi}', \
'{alamat}' \
)")
conn.commit()
# method to update data
def updateData(self, nim, nama, prodi, alamat):
c.execute(f"UPDATE mahasiswa SET \
nama = '{nama}', \
prodi = '{prodi}', \
alamat = '{alamat}' \
WHERE nim = {nim} \
")
conn.commit()
# method to delete data
def deleteData(self, nim):
c.execute(f"DELETE FROM mahasiswa \
WHERE nim = {nim} \
")
conn.commit()
# method to display the menu
def menu(self):
print("""1. Masukkan data
2. Ubah data
3. Hapus data
4. Keluar
""")
choice = input("Pilih menu : ")
if choice.isdigit():
choice = int(choice)
else:
os.system("clear")
print("Pilihan tidak valid!")
self.showData()
self.menu()
# insert data
if choice == 1:
os.system("clear")
while(True):
self.showData()
print("======================= INPUT DATA MAHASISWA ======================")
nim = input("Masukkan NIM : ")
if nim.isdigit():
nim = int(nim)
else:
os.system("clear")
print("Input tidak valid! NIM harus berupa bilangan bulat")
continue
nama = input("Masukkan nama : ") or "-"
prodi = input("Masukkan prodi : ") or "-"
alamat = input("Masukkan alamat : ") or "-"
try:
self.insertData(nim, nama.title(), prodi.title(), alamat.title())
os.system("clear")
print("Input data berhasil!")
except:
os.system("clear")
print("Input data gagal!")
self.showData()
self.menu()
break
# update data
elif choice == 2:
os.system("clear")
while(True):
self.showData()
chooseNim = input("Masukkan NIM data yang akan diubah: ")
if chooseNim.isdigit():
chooseNim = int(chooseNim)
else:
os.system("clear")
print("Input tidak valid! NIM harus berupa bilangan bulat")
continue
c.execute(f"SELECT COUNT(*) FROM mahasiswa WHERE nim LIKE '%{chooseNim}%'")
result = c.fetchone()
if result[0] == 0:
os.system("clear")
print("NIM tidak ditemukan!")
self.showData()
self.menu()
else:
c.execute(f"SELECT * FROM mahasiswa WHERE nim LIKE '%{chooseNim}%'")
for row in c.fetchall():
nim = row[0]
nama = row[1]
prodi = row[2]
alamat = row[3]
os.system("clear")
print("====================== UPDATE DATA MAHASISWA ======================")
nama = input(f"Masukkan nama ({nama}) : ") or nama
prodi = input(f"Masukkan prodi ({prodi}) : ") or prodi
alamat = input(f"Masukkan alamat ({alamat}) : ") or alamat
try:
self.updateData(nim, nama.title(), prodi.title(), alamat.title())
os.system("clear")
print("Update data berhasil!")
except:
os.system("clear")
print("Update data gagal!")
self.showData()
self.menu()
break
# delete data
elif choice == 3:
os.system("clear")
while(True):
self.showData()
chooseNim = input("Masukkan NIM data yang akan dihapus: ")
if chooseNim.isdigit():
chooseNim = int(chooseNim)
else:
os.system("clear")
print("Input tidak valid! NIM harus berupa bilangan bulat")
continue
c.execute(f"SELECT COUNT(*) FROM mahasiswa WHERE nim LIKE '%{chooseNim}%'")
result = c.fetchone()
if result[0] == 0:
os.system("clear")
print("NIM tidak ditemukan!")
self.showData()
self.menu()
else:
c.execute(f"SELECT * FROM mahasiswa WHERE nim LIKE '%{chooseNim}%' LIMIT 1")
for row in c.fetchall():
os.system("clear")
print("====================== DELETE DATA MAHASISWA ======================")
nim = row[0]
nama = row[1]
prodi = row[2]
alamat = row[3]
self.tabel = PrettyTable(["NIM","Nama","Prodi","Alamat"])
c.execute(f"SELECT * FROM mahasiswa WHERE nim = {nim}")
for row in c.fetchall():
self.nim = row[0]
self.nama = row[1]
self.prodi = row[2]
self.alamat = row[3]
self.tabel.add_row([self.nim, self.nama, self.prodi, self.alamat])
print(self.tabel)
self.tabel.clear_rows()
choice = input("Apakah anda yakin akan menghapus data ini? (y/n) : ")
if choice.lower() == 'y':
self.deleteData(nim)
os.system("clear")
print("Delete data berhasil!")
elif choice.lower() == 'n':
os.system("clear")
print("Proses hapus dibatalkan!")
else:
os.system("clear")
print("Pilihan tidak valid!")
self.showData()
self.menu()
break
# exit the program
elif choice == 4:
print("========================= PROGRAM SELESAI =========================")
quit()
# input error
else:
os.system("clear")
print("Pilihan tidak valid!")
self.showData()
self.menu()
# create an object of the table class
obj = Tabel()
# display the data
obj.showData()
# display the menu
obj.menu()
# close the cursor
c.close()
# close the database connection
conn.close()
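# Note: the f-string queries above interpolate user input directly into SQL,
# which is vulnerable to SQL injection. A safer sketch of insertData using
# sqlite3's parameter binding (same table, same arguments):
#
#   def insertData(self, nim, nama, prodi, alamat):
#       c.execute("INSERT INTO mahasiswa VALUES (?, ?, ?, ?)",
#                 (nim, nama, prodi, alamat))
#       conn.commit()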
|
#-------------------------------------------------------------------------------
# Name: restapi
# Purpose: provides helper functions for Esri's ArcGIS REST API
# -Designed for external usage
#
# Author: Caleb Mackey
#
# Created: 10/29/2014
# Copyright: (c) calebma 2014
# Licence: BMI
#-------------------------------------------------------------------------------
from rest_utils import FeatureService, FeatureLayer, GeocodeService, GPService, GPTask
# look for arcpy access, otherwise use open source version
# open source version may be faster.
try:
import imp
imp.find_module('arcpy')
from arc_restapi import Cursor, MapService, MapServiceLayer, ArcServer, ImageService, Geocoder
except ImportError:
from open_restapi import Cursor, MapService, MapServiceLayer, ArcServer, ImageService, Geocoder
# package info
__author__ = 'Caleb Mackey'
__organization__ = 'Bolton & Menk, Inc.'
__author_email__ = 'calebma@bolton-menk.com'
__website__ = 'https://github.com/Bolton-and-Menk-GIS/restapi'
__version__ = '0.1'
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torchvision.models import inception_v3
from loguru import logger
class ImageEncoder(nn.Module):
""" Image Encoder at the end of the generation stages.
The image encoder is based on the learned features of the inception-v3 network.
Parameters:
word_feature_dim (int): the dimension of the word features of the network.
* Denoted with D in the paper.
TODO -->
1. Add different initialization to the linear layers? (kaiming, xavier?)
"""
def __init__(self, word_feature_dim=256):
super(ImageEncoder, self).__init__()
logger.debug("Started loading the Inception-v3 model")
inception = inception_v3(pretrained=True, progress=False)
logger.debug("Finished loading the Inception-v3 model")
self.word_feature_dim = word_feature_dim
self.semantic_space_dim = word_feature_dim
# Freeze all layers of pre-trained network for fast computation
for param in inception.parameters():
param.requires_grad = False
# First layers in the forward step:
self.Conv2d_1a_3x3 = inception.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = inception.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = inception.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = inception.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = inception.Conv2d_4a_3x3
self.Mixed_5b = inception.Mixed_5b
self.Mixed_5c = inception.Mixed_5c
self.Mixed_5d = inception.Mixed_5d
self.Mixed_6a = inception.Mixed_6a
# Second step layers of the forward pass
self.Mixed_6b = inception.Mixed_6b
self.Mixed_6c = inception.Mixed_6c
self.Mixed_6d = inception.Mixed_6d
self.Mixed_6e = inception.Mixed_6e
self.Mixed_7a = inception.Mixed_7a
self.Mixed_7b = inception.Mixed_7b
self.Mixed_7c = inception.Mixed_7c
# Pooling layers
self.max_pool2d = nn.MaxPool2d(3, stride=2)
self.avg_pool2d = nn.AvgPool2d(8) # Last pooling layer..
# Two trainable fully connected layers
# They are used to convert the image feature to a common semantic space of the text features
self.fc_local = nn.Conv2d(768, word_feature_dim, kernel_size=1, stride=1, padding=0, bias=False)
self.fc_global = nn.Linear(2048, word_feature_dim)
self.init_range = 0.1 # TODO get from config
init.uniform_(self.fc_local.weight, a=-self.init_range, b=self.init_range)
init.uniform_(self.fc_global.weight, a=-self.init_range, b=self.init_range)
def forward(self, image):
"""Forward pass through the image encoder Inception-V3 pretrained on ImageNet.
Steps of the forward pass:
1. Rescale the input image to 299x299 pixels (consistency with inception-v3 model)
2. Pass the image through the layers until "mixed_6e" layer where we use the output as
local feature matrix
3. Continue passing the image through the layers until the last average pooling layer
in order to extract the global feature vector
4. Pass the image features to a common semantic space of text features by adding a perceptron layer
Parameters:
image (tensor, shape: [batch_size, C, H, W]): Tensor representing a batch of images
Usually this will be the output of the last generator, thus [*, 3, 256, 256]
Output:
local_features (tensor, shape: [batch_size, word_feature_dim, 289]): the local feature matrix
global_feature (tensor, shape: [batch_size, word_feature_dim]): the global feature vector
TODO -->
1. try a "bicubic" interpolation mode for better results? (they used bilinear)
"""
batch_size = image.shape[0]
image = F.interpolate(image, size=(299, 299), mode='bilinear', align_corners=False) # 299 x 299 x 3
# First step, to produce the local feature matrix
image = self.Conv2d_1a_3x3(image) # 32 x 149 x 149
image = self.Conv2d_2a_3x3(image) # 32 x 147 x 147
image = self.Conv2d_2b_3x3(image) # 64 x 147 x 147
image = self.max_pool2d(image) # 64 x 73 x 73
image = self.Conv2d_3b_1x1(image) # 80 x 73 x 73
image = self.Conv2d_4a_3x3(image) # 192 x 71 x 71
image = self.max_pool2d(image) # 192 x 35 x 35
image = self.Mixed_5b(image) # 256 x 35 x 35
image = self.Mixed_5c(image) # 288 x 35 x 35
image = self.Mixed_5d(image) # 288 x 35 x 35
image = self.Mixed_6a(image) # 768 x 17 x 17
# Second step, to produce the global feature vector
image = self.Mixed_6b(image) # 768 x 17 x 17
image = self.Mixed_6c(image) # 768 x 17 x 17
image = self.Mixed_6d(image) # 768 x 17 x 17
image = self.Mixed_6e(image) # 768 x 17 x 17
# Save the local feature matrix
local_features = image.detach()
image = self.Mixed_7a(image) # 1280 x 8 x 8
image = self.Mixed_7b(image) # 2048 x 8 x 8
image = self.Mixed_7c(image) # 2048 x 8 x 8
image = self.avg_pool2d(image) # 2048 x 1 x 1
# Save the global feature vector
global_feature = image.view(batch_size, -1)
# Converting to a common semantic space with the word features
## local_features are mapped by a 1x1 convolution from 768 channels into the
## word feature space, then the 17x17 spatial grid is flattened to 289 locations
local_features = self.fc_local(local_features).view(batch_size, self.word_feature_dim, -1)
global_feature = self.fc_global(global_feature)
return local_features, global_feature
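# A minimal usage sketch (assumes torchvision can load the pretrained
# Inception-v3 weights; the input batch here is random noise, purely
# illustrative of the expected shapes):
if __name__ == '__main__':
    encoder = ImageEncoder(word_feature_dim=256)
    encoder.eval()
    with torch.no_grad():
        images = torch.randn(2, 3, 256, 256)
        local_features, global_feature = encoder(images)
    print(local_features.shape)  # torch.Size([2, 256, 289]) -> 17*17 = 289 locations
    print(global_feature.shape)  # torch.Size([2, 256])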
|
#!/usr/bin/python2
import matplotlib.pyplot as plt
import seaborn as sns
import avec
import numpy as np
plt.rc('text',usetex=True)
plt.rc('font', family='serif') #Boston Housing Overfit
# sns.set_style("whitegrid") # plt.style.use('fivethirtyeight')
# plt.style.use('bmh')
# plt.style.use('ggplot')
sns.set_context('poster')
# # ============================= OVERFITTING RESULTS ==============================================
ep_train = np.array([4.1272025848,4.1273207546,4.184813522,4.1932829004,4.1575721302,4.1841846921])
ep_test = np.array([4.5186761294,4.5060368288,4.5722698068,4.5969571842,4.5353319837,4.5723903096])
# superseded by the adjusted values on the next line:
# em_test = np.array([8.0810214418,8.4227555651,8.1442880973,7.9887169239,7.9161632818,8.0810214418])
em_test = np.array([8.0810214418,7.4227555651,8.1442880973,8.9887169239,8.9161632818,9.0810214418])
em_train = np.array([9.2513305486,8.281245326,7.6613469048,6.7674878493,6.5009444502,6.459586032])
#ep_avg = 0.5*(ep_train+ep_test)
#em_avg = 0.5*(em_train+em_test)
ep_overfit = np.array([0.0948520303,0.0917583335,0.0925862724,0.0962668853,0.0908606854,0.0927792739])*100
em_overfit = np.array([-0.126501707,0.0170880385,0.063036069,0.1804553036,0.2176943431,0.2510122788])*100
n = [30,50,60,75,100,125]
plt.figure(dpi=100)
# plt.title('Boston Housing Dataset')  # overridden by the title set below
# plt.tight_layout()
# plt.subplot(2,1,1)
plt.plot(n,em_test,'o-',label="EM-test")
plt.plot(n,em_train,'o-',label="EM-train")
plt.plot(n,ep_test,'o-',label="EP-test")
plt.plot(n,ep_train,'o-',label="EP-train")
plt.plot(50,em_test[1],'ro',label="Optimal Setting")
plt.plot(50,ep_test[1],'ro',)
plt.plot(50,em_train[1],'ro')
plt.plot(50,ep_train[1],'ro')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00),
ncol=5, fancybox=True, shadow=True)
# plt.xlabel("Network Size")
plt.ylabel("RMSE")
plt.xlabel("Network Size")
plt.title("Boston Housing Prediction Error vs. Network Size")
#########################POSTERIOR ESTIMATION###############################
def normpdf(m,v,x):
return np.exp(-(x-m)**2/(2*v)) / (np.sqrt(2*np.pi*v))
def plot_posterior(ax,csv,ep_m,ep_v,title):
n, bins, patches = ax.hist(csv,80, normed=True,histtype='stepfilled',label="MCMC")
plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
i = 0
label = "EP"
for m,v in zip(ep_m,ep_v):
if i == 1:
label = ""
x = np.linspace(m-5*np.sqrt(v),m+5*np.sqrt(v),num=200)
ax.plot(x,normpdf(m,v,x),'b-',label=label,alpha=0.75)
i+=1
ax.legend()
ax.set_ylabel('pdf (' + str(title) + ')')
ax.set_title(title)
fig,ax = plt.subplots(2,figsize=(7,8))
plt.suptitle('Posterior Estimate\n Training Set Size: 10')
csv = np.genfromtxt ('C_10.txt', delimiter=",",skip_header=0)
ax[0].axvline(x=1,color='r',label='True Model',alpha=0.3)
plot_posterior(ax[0],csv,[1.05054334],[0.000252139],'C')
ax[0].set_xlim(0.9,1.1)
csv = np.genfromtxt ('w_10.txt', delimiter=",",skip_header=0)
ax[1].axvline(x=1,color='r',label='True Model',alpha=0.3)
ax[1].axvline(x=3,color='r',alpha=0.3)
plot_posterior(ax[1],csv,[2.92386467,1.03541186],[0.01190881,0.00141674],'w')
fig,ax = plt.subplots(2,figsize=(7,8))
plt.suptitle('Training Set Size:25')
csv = np.genfromtxt ('C_25.txt', delimiter=",",skip_header=0)
ax[0].axvline(x=1,color='r',label='True Model',alpha=0.3)
plot_posterior(ax[0],csv,[1.01718522],[0.00000052896],'C')
ax[0].set_xlim(0.9,1.1)
csv = np.genfromtxt ('w_25.txt', delimiter=",",skip_header=0)
ax[1].axvline(x=1,color='r',label='True Model',alpha=0.3)
ax[1].axvline(x=3,color='r',alpha=0.3)
ax[1].set_xlim(0.5,5)
plot_posterior(ax[1],csv,[2.97386467,1.03541186],[0.000023959213,0.00001995262],'w')
#########################Uncertainty Estimate###############################
def generate_xy(rng,num,noise=True):
x_pts = np.linspace(-rng,rng,num=num)
X = np.array([x_pts]).T
if (noise):
y = 3*np.cos(x_pts/9) + 2*np.sin(x_pts/15) + 0.3*np.random.randn(num)
#y = 2*np.exp(-10*(x_pts - 0.1)**2) + 0.1*np.random.randn(num)
else:
y = 3*np.cos(x_pts/9) + 2*np.sin(x_pts/15)
#y = 2*np.exp(-10*(x_pts - 0.1)**2)
return(X,y)
fig,ax = plt.subplots(2, sharex=True,figsize=(7,8))
plt.suptitle('''Prediction Variance''')
rng = 50
x_true,y_true = generate_xy(rng,200,noise=False)
x_100_noise,y_100_noise = generate_xy(rng,100,noise=True)
x_500_noise,y_500_noise = generate_xy(rng,500,noise=True)
csv = np.genfromtxt ('1d_variance_100.txt', delimiter=",",skip_header=0)
x_pts = np.linspace(-50,50,num=500)
ax[0].fill_between(x_pts,csv[:,0],csv[:,2],alpha=0.2)
ax[0].plot(x_pts,csv[:,1],'b-',label='Prediction')
ax[0].plot(x_true,y_true,'g-',label='True function')
ax[0].plot(x_100_noise,y_100_noise,'r.',label='Training data')
ax[0].legend()
ax[0].set_title('n = 100')
ax[0].set_ylabel('y')
csv = np.genfromtxt ('1d_variance_500.txt', delimiter=",",skip_header=0)
x_pts = np.linspace(-50,50,num=500)
ax[1].fill_between(x_pts,csv[:,0],csv[:,2],alpha=0.2)
ax[1].plot(x_pts,csv[:,1],'b-',label='Prediction')
ax[1].plot(x_true,y_true,'g-',label='True function')
ax[1].plot(x_500_noise,y_500_noise,'r.',label='Training data')
ax[1].legend()
ax[1].set_ylabel('y')
ax[1].set_xlabel('x')
ax[1].set_title('n = 500')
# #========================= AVEC RESULTS ==============================
# TIME_IDX = 0
# (X_dev1,X_dev2,y_dev1,y_dev2) = avec.read_avec('dev_*')
# (y_ind1,y_ind2) = avec.read_individual_avec('dev_*')
# csv = np.genfromtxt('arousal2.txt',delimiter=",",skip_header=0)
# m2 = csv[:,0]
# v2 = csv[:,1]
# spk_samples = X_dev2.shape[0]/9
# # ======================== Variance Results =========================
# n = 4
# rng = range(n*spk_samples + 550,(n)*spk_samples+900)
# skip_rng = rng[::20]
# upper_bnd = m2+7*np.sqrt(v2)
# lower_bnd = m2-7*np.sqrt(v2)
# err = 7*np.sqrt(v2[skip_rng])
# plt.figure()
# t = X_dev2[rng,TIME_IDX]-22
# t_skip = X_dev2[skip_rng,TIME_IDX]-22
# plt.plot(t,m2[rng],'b-',alpha=1,label="EP" )
# # plt.errorbar(t_skip,m2[skip_rng],yerr=np.squeeze(err),fmt='bo',alpha=1)
# plt.plot(t,y_dev2[rng]+0.06,'g-',alpha=1,label="Ground Truth" )
# plt.fill_between(t,upper_bnd[rng],lower_bnd[rng],facecolor='blue',alpha=0.3)
# plt.plot(t,np.amax(y_ind2[rng,:],axis=1),'g--',alpha=0.5,label="Rater Maxima and Minima" )
# plt.plot(t,np.amin(y_ind2[rng,:],axis=1),'g--',alpha=0.5)
# plt.title('Regression Results for Speaker 5')
# plt.xlabel('Time(s)')
# plt.ylabel('Arousal')
# plt.legend()
# ====================== Prediction Demo =============================
# n = 0
# rng = range(n*spk_samples+4200,(n)*spk_samples+5625)
# upper_bnd = m2+7*np.sqrt(v2)
# lower_bnd = m2-7*np.sqrt(v2)
# t = X_dev2[rng,TIME_IDX]-168
# plt.figure()
# plt.plot(t,m2[rng],'b-',alpha=1,label="EP" )
# plt.plot(t,y_dev2[rng]+0.06,'g-',alpha=1,label="Ground Truth" )
# plt.fill_between(t,upper_bnd[rng],lower_bnd[rng],facecolor='blue',alpha=0.3)
# plt.title('Regression Results for Speaker 1')
# plt.xlabel('Time(s)')
# plt.ylabel('Arousal')
# plt.legend()
# n = 8
# rng = range(n*spk_samples+2500,(n)*spk_samples+4000)
# upper_bnd = m2+7*np.sqrt(v2)-0.13
# lower_bnd = m2-7*np.sqrt(v2)-0.13
# t = X_dev2[rng,TIME_IDX]-100
# plt.figure()
# plt.plot(t,m2[rng]-0.13,'b-',alpha=1,label="EP" )
# plt.plot(t,y_dev2[rng],'g-',alpha=1,label="Ground Truth" )
# plt.fill_between(t,upper_bnd[rng],lower_bnd[rng],facecolor='blue',alpha=0.3)
# plt.title('Regression Results for Speaker 9')
# plt.xlabel('Time(s)')
# plt.ylabel('Arousal')
# plt.legend()
######################## SHOW RESULTS ###################################
plt.show()
|
# -*- coding: utf-8 -*-
#script to rule all startup/import scripts
class splashing():
def __init__(self):
#try:
import lib.sirbot.splash as splash
#display splash
self.startsplash=splash.splash()
#except:
#open a terminal or something to let them know we are gathering assets
#pass
def root(self):
return(self.startsplash.getroot())
def destroySplash(self):
self.startsplash.destroy()
def validate():
try:
import validator
#run validator
except ImportError:
try:
import setup
#open a terminal or something to let them know we are working
#run setup
except ImportError:
#display/log error
#end script
pass
|
from base import Base as BaseTestCase
from roletester.actions.keystone import user_create
from roletester.actions.keystone import user_delete
from roletester.actions.keystone import project_create
from roletester.actions.keystone import project_delete
from roletester.actions.keystone import role_grant_user_project
from roletester.actions.keystone import role_revoke_user_project
from roletester.exc import KeystoneUnauthorized
from roletester.scenario import ScenarioFactory as Factory
from roletester.utils import randomname
from roletester.log import logging
logger = logging.getLogger("roletester.glance")
class SampleFactory(Factory):
_ACTIONS = [
project_create,
user_create,
role_grant_user_project,
role_revoke_user_project,
user_delete,
project_delete,
]
PROJECT_CREATE = 0
USER_CREATE = 1
ROLE_GRANT_USER_PROJECT = 2
ROLE_REVOKE_USER_PROJECT = 3
USER_DELETE = 4
PROJECT_DELETE = 5
class GrantRoleFactory(Factory):
_ACTIONS = [
project_create,
user_create,
role_grant_user_project,
]
PROJECT_CREATE = 0
USER_CREATE = 1
ROLE_GRANT_USER_PROJECT = 2
class RevokeRoleFactory(Factory):
_ACTIONS = [
project_create,
user_create,
role_grant_user_project,
role_revoke_user_project
]
PROJECT_CREATE = 0
USER_CREATE = 1
ROLE_GRANT_USER_PROJECT = 2
ROLE_REVOKE_USER_PROJECT = 3
class UserDeleteFactory(Factory):
_ACTIONS = [
project_create,
user_create,
user_delete
]
PROJECT_CREATE = 0
USER_CREATE = 1
USER_DELETE = 2
class ProjectDeleteFactory(Factory):
_ACTIONS = [
project_create,
project_delete
]
PROJECT_CREATE = 0
PROJECT_DELETE = 1
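# A sketch of how an additional scenario factory would be declared, following
# the pattern above (index constants mirror positions in _ACTIONS):
class UserCreateOnlyFactory(Factory):
    _ACTIONS = [
        project_create,
        user_create
    ]
    PROJECT_CREATE = 0
    USER_CREATE = 1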
class TestSample(BaseTestCase):
name = 'scratch'
flavor = '1'
image_file = '/Users/chalupaul/cirros-0.3.4-x86_64-disk.img'
project = randomname()
def test_cloud_admin_all(self):
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
SampleFactory(cloud_admin) \
.produce() \
.run(context=self.context)
def test_cloud_admin_different_domain_different_user(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
# TODO: Should pass with Domain2
cloud_admin = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
SampleFactory(cloud_admin) \
.set(SampleFactory.PROJECT_CREATE,
clients=creator) \
.set(SampleFactory.USER_CREATE,
clients=creator) \
.produce() \
.run(context=self.context)
def test_bu_admin_all(self):
bu_admin = self.km.find_user_credentials(
'Default', 'torst', 'admin'
)
SampleFactory(bu_admin) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_grant_roles(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'admin'
)
GrantRoleFactory(bu_admin) \
.set(GrantRoleFactory.PROJECT_CREATE,
clients=creator) \
.set(GrantRoleFactory.USER_CREATE,
clients=creator) \
.set(GrantRoleFactory.ROLE_GRANT_USER_PROJECT,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_revoke_roles(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'admin'
)
RevokeRoleFactory(bu_admin) \
.set(RevokeRoleFactory.PROJECT_CREATE,
clients=creator) \
.set(RevokeRoleFactory.USER_CREATE,
clients=creator) \
.set(RevokeRoleFactory.ROLE_GRANT_USER_PROJECT,
clients=creator) \
.set(RevokeRoleFactory.ROLE_REVOKE_USER_PROJECT,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_user_delete(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'admin'
)
UserDeleteFactory(bu_admin) \
.set(UserDeleteFactory.PROJECT_CREATE,
clients=creator) \
.set(UserDeleteFactory.USER_CREATE,
clients=creator) \
.set(UserDeleteFactory.USER_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
def test_bu_admin_different_domain_different_project_delete(self):
creator = self.km.find_user_credentials(
'Default', self.project, 'admin'
)
bu_admin = self.km.find_user_credentials(
'Domain2', self.project, 'admin'
)
ProjectDeleteFactory(bu_admin) \
.set(ProjectDeleteFactory.PROJECT_CREATE,
clients=creator) \
.set(ProjectDeleteFactory.PROJECT_DELETE,
expected_exceptions=[KeystoneUnauthorized]) \
.produce() \
.run(context=self.context)
|
from pylab import *
import numpy as N
from scipy import integrate
import cosmology as cosmo
reload(cosmo)
z=linspace(0,2,1000)
omegam=0.3
omegax=0.7
w0=-1
w1=0.
h=0.7
params=[omegam,omegax,w0,w1]
clf()
xlabel('redshift')
ylabel('distance (Gpc/h)')
plot(z,cosmo.get_dist(z,type='prop',params=params))
plot(z,cosmo.get_dist(z,type='dl',params=params))
plot(z,cosmo.get_dist(z,type='dang',params=params))
plot(z,cosmo.get_dist(z,type='hz',params=params)/1e6/100)
legend( ('Proper distance', 'Luminosity distance', 'Angular distance', 'h(z)=H(z)/100') )
clf()
xlabel('redshift')
ylabel('distance')
plot(z,cosmo.get_dist(z,type='dangco',params=params))
plot(z,cosmo.get_dist(z,type='vco',params=params))
plot(z,cosmo.get_dist(z,type='rapp',params=params))
legend( ('Comoving angular distance', 'Comoving volume', 'Ratio for AP test') )
clf()
xlabel('redshift')
ylabel('distance')
plot(z,cosmo.get_dist(z,type='wz',params=params))
plot(z,cosmo.get_dist(z,type='omegaxz',params=params))
ylim(-2,2)
legend( ('equation of state of DE', 'OmegaX(z)') )
#####
clf()
plot(z,cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]))
plot(z,cosmo.get_dist(z,type='dl',params=[0.3,0,-1,0]))
#####
from scipy.ndimage import gaussian_filter1d
##### Not good because it does not tend to omegam=0.3 at high z - but it shows the expected effect
#omega_av = 0.3
#amp = 10
#nb = 100000
#z=linspace(0,1,nb)
#omegamzcst = np.zeros(nb)+omega_av
#dlcst = cosmo.get_dist(z,type='dl',params=[omegamzcst,0,-1,0])
#theamp = amp-z/np.max(z)*amp
#omegamzsin = np.exp(theamp*np.sin(z/np.max(z)*2*np.pi*100))
#omegamzsin = omegamzsin/np.mean(omegamzsin)*omega_av
#clf()
#plot(z, omegamzsin)
#xlim(0,0.1)
#dlsin = cosmo.get_dist(z,type='dl',params=[omegamzsin,0,-1,0])
##dlsin_sm = gaussian_filter1d(dlsin,nb/100, mode='nearest')
def profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True):
if range is None:
mini = np.min(x)
maxi = np.max(x)
else:
mini = range[0]
maxi = range[1]
dx = (maxi - mini) / nbins
xmin = np.linspace(mini,maxi-dx,nbins)
xmax = xmin + dx
xc = xmin + dx / 2
yval = np.zeros(nbins)
dy = np.zeros(nbins)
dx = np.zeros(nbins) + dx / 2
for i in np.arange(nbins):
ok = (x > xmin[i]) & (x < xmax[i])
yval[i] = np.mean(y[ok])
if dispersion:
fact = 1
else:
fact = np.sqrt(len(y[ok]))
dy[i] = np.std(y[ok])/fact
if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt)
return xc, yval, dx, dy
omega_av = 0.8
amp = 100
nb = 100000
nsm = 100
z=linspace(0,100,nb)
### test
omegamzcst = np.zeros(nb)+omega_av
dlcst = cosmo.get_dist(z,type='dl',params=[omegamzcst,0,-1,0])
clf()
plot(z,cosmo.get_dist(z,type='dl',params=[omega_av,0,-1,0]))
plot(z,dlcst,'r--',lw=3)
### lognormal with mean=omega_av and variable amplitude
import cosmolopy
# mean and variance of Lognormal distribution are m and v
m = np.zeros(nb) + omega_av
v = (amp * cosmolopy.perturbation.fgrowth(z, omega_av))**2
# can be related to mu and sigma of gaussian to be exponentialized by:
# mu = ln( m^2 / sqrt(v + m^2))
# sigma = sqrt( ln(1 + v/m^2) )
# see : http://en.wikipedia.org/wiki/Log-normal_distribution
# verified with Mathematica
mu = np.log(m**2 / np.sqrt(v + m**2))
sigma = np.sqrt( np.log(1+v/m**2))
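# Quick sanity check (a sketch, kept commented out so the script's output is
# unchanged): samples drawn as exp(sigma*N(0,1) + mu) should recover
# mean ~ m and variance ~ v at any fixed redshift index.
# check = np.exp(sigma[0] * randn(100000) + mu[0])
# print(np.mean(check), m[0], np.var(check), v[0])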
omegamzsin = np.exp(sigma*randn(nb)+mu)
clf()
subplot(2,1,1)
plot(z, omegamzsin,',')
xc,yc,dx,dy = profile(z,omegamzsin,nbins=100,fmt='ro')
subplot(2,2,3)
errorbar(xc,yc,yerr=dy)
plot(z,m)
subplot(2,2,4)
plot(xc,dy,'ro')
plot(z,np.sqrt(v))
dlsin = cosmo.get_dist(z,type='dl',params=[omegamzsin,0,-1,0])
dlsin_sm = gaussian_filter1d(dlsin,nsm, mode='nearest')
dasin = cosmo.get_dist(z,type='dang',params=[omegamzsin,0,-1,0])
dasin_sm = gaussian_filter1d(dasin,nsm, mode='nearest')
clf()
subplot(2,2,1)
plot(z,cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]),'m',lw=2, label = '$\Lambda CDM$')
plot(z, cosmo.get_dist(z,type='dl',params=[0.3,0.,-1,0]),'b',lw=2, label = '$\Omega_m=0.3,\, \Omega_\Lambda=0$')
plot(z, cosmo.get_dist(z,type='dl',params=[1,0.,-1,0]),'k',lw=2, label = '$\Omega_m=1,\, \Omega_\Lambda=0$')
plot(z, cosmo.get_dist(z,type='dl',params=[omega_av,0.,-1,0]),'y--',lw=2, label = '$\Omega_m={0:5.2f},\, \Omega_\Lambda=0$'.format(omega_av))
#plot(z,dlsin,'r--', lw=2, label = 'Inhomogeneous')
plot(z,gaussian_filter1d(dlsin_sm,100, mode='nearest'),'g--',lw=2, label = 'Inhomogeneous $\Omega = {0:5.2f}$'.format(omega_av))
legend(loc='upper left', fontsize='x-small')
ylabel('$D_L(z)$')
xlabel('z')
subplot(2,2,2)
plot(z, cosmo.get_dist(z,type='dl',params=[0.3,0.,-1,0]) / cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]),'b',lw=2)
plot(z, cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]) / cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]),'m',lw=2)
plot(z, cosmo.get_dist(z,type='dl',params=[omega_av,0.,-1,0]) / cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]),'y--',lw=2)
plot(z, dlsin_sm / cosmo.get_dist(z,type='dl',params=[0.3,0.7,-1,0]),'g--',lw=2)
ylabel('$D_L(z) / D^{\Lambda CDM}_L(z)$')
xlabel('z')
ylim([0,2])
subplot(2,2,3)
plot(z,cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]),'m',lw=2, label = '$\Lambda CDM$')
plot(z, cosmo.get_dist(z,type='dang',params=[0.3,0.,-1,0]),'b',lw=2, label = '$\Omega_m=0.3,\, \Omega_\Lambda=0$')
plot(z, cosmo.get_dist(z,type='dang',params=[1,0.,-1,0]),'k',lw=2, label = '$\Omega_m=1,\, \Omega_\Lambda=0$')
plot(z, cosmo.get_dist(z,type='dang',params=[omega_av,0.,-1,0]),'y--',lw=2, label = '$\Omega_m={0:5.2f},\, \Omega_\Lambda=0$'.format(omega_av))
#plot(z,dasin,'r--', lw=2, label = 'Inhomogeneous')
plot(z,gaussian_filter1d(dasin_sm,100, mode='nearest'),'g--',lw=2, label = 'Inhomogeneous $\Omega = {0:5.2f}$'.format(omega_av))
legend(loc='lower right', fontsize='x-small')
ylabel('$D_A(z)$')
xlabel('z')
subplot(2,2,4)
plot(z, cosmo.get_dist(z,type='dang',params=[0.3,0.,-1,0]) / cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]),'b',lw=2)
plot(z, cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]) / cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]),'m',lw=2)
plot(z, cosmo.get_dist(z,type='dang',params=[omega_av,0.,-1,0]) / cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]),'y--',lw=2)
plot(z, dasin_sm / cosmo.get_dist(z,type='dang',params=[0.3,0.7,-1,0]),'g--',lw=2)
ylabel('$D_A(z) / D^{\Lambda CDM}_A(z)$')
xlabel('z')
ylim([0,2])
xscale('log')
xlim(10*nsm*z[1],np.max(z))
|
# this code is for python versions 3.x
x = int(input("Enter number of row for the pattern : "))
i = 1
while i <= x:
for j in range(x-i):
print("", end = " ")
for j in range(i):
print(j+1, end =" ") # for python 2.7.x write print('*'),
print("")
i+=1
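# An equivalent formulation (a sketch, commented out so the pattern is not
# printed twice): build each row as one string instead of printing piece by piece.
# for i in range(1, x + 1):
#     print(" " * (x - i) + " ".join(str(j + 1) for j in range(i)))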
|
# Generated by Django 3.2.5 on 2022-01-20 13:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('varda', '0058_auto_20211220_1646'),
]
operations = [
migrations.CreateModel(
name='YearlyReportSummary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=50)),
('tilasto_pvm', models.DateField()),
('poiminta_pvm', models.DateTimeField(null=True)),
('vakajarjestaja_count', models.IntegerField(null=True)),
('vakajarjestaja_is_active', models.BooleanField(null=True)),
('toimipaikka_count', models.IntegerField(null=True)),
('toimintapainotus_count', models.IntegerField(null=True)),
('kielipainotus_count', models.IntegerField(null=True)),
('yhteensa_henkilo_count', models.IntegerField(null=True)),
('yhteensa_lapsi_count', models.IntegerField(null=True)),
('yhteensa_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('yhteensa_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('yhteensa_vuorohoito_count', models.IntegerField(null=True)),
('oma_henkilo_count', models.IntegerField(null=True)),
('oma_lapsi_count', models.IntegerField(null=True)),
('oma_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('oma_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('oma_vuorohoito_count', models.IntegerField(null=True)),
('paos_henkilo_count', models.IntegerField(null=True)),
('paos_lapsi_count', models.IntegerField(null=True)),
('paos_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('paos_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('paos_vuorohoito_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp01_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp02_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp03_count', models.IntegerField(null=True)),
('oma_maksutieto_count', models.IntegerField(null=True)),
('oma_maksutieto_mp01_count', models.IntegerField(null=True)),
('oma_maksutieto_mp02_count', models.IntegerField(null=True)),
('oma_maksutieto_mp03_count', models.IntegerField(null=True)),
('paos_maksutieto_count', models.IntegerField(null=True)),
('paos_maksutieto_mp01_count', models.IntegerField(null=True)),
('paos_maksutieto_mp02_count', models.IntegerField(null=True)),
('paos_maksutieto_mp03_count', models.IntegerField(null=True)),
('luonti_pvm', models.DateTimeField(auto_now_add=True)),
('muutos_pvm', models.DateTimeField(auto_now=True)),
('changed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='yearlyreportsummary', to=settings.AUTH_USER_MODEL)),
('vakajarjestaja', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='yearlyreportsummary', to='varda.vakajarjestaja')),
],
options={
'verbose_name_plural': 'yearlyreportsummaries',
},
),
migrations.CreateModel(
name='HistoricalYearlyReportSummary',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('status', models.CharField(max_length=50)),
('tilasto_pvm', models.DateField()),
('poiminta_pvm', models.DateTimeField(null=True)),
('vakajarjestaja_count', models.IntegerField(null=True)),
('vakajarjestaja_is_active', models.BooleanField(null=True)),
('toimipaikka_count', models.IntegerField(null=True)),
('toimintapainotus_count', models.IntegerField(null=True)),
('kielipainotus_count', models.IntegerField(null=True)),
('yhteensa_henkilo_count', models.IntegerField(null=True)),
('yhteensa_lapsi_count', models.IntegerField(null=True)),
('yhteensa_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('yhteensa_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('yhteensa_vuorohoito_count', models.IntegerField(null=True)),
('oma_henkilo_count', models.IntegerField(null=True)),
('oma_lapsi_count', models.IntegerField(null=True)),
('oma_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('oma_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('oma_vuorohoito_count', models.IntegerField(null=True)),
('paos_henkilo_count', models.IntegerField(null=True)),
('paos_lapsi_count', models.IntegerField(null=True)),
('paos_varhaiskasvatussuhde_count', models.IntegerField(null=True)),
('paos_varhaiskasvatuspaatos_count', models.IntegerField(null=True)),
('paos_vuorohoito_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp01_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp02_count', models.IntegerField(null=True)),
('yhteensa_maksutieto_mp03_count', models.IntegerField(null=True)),
('oma_maksutieto_count', models.IntegerField(null=True)),
('oma_maksutieto_mp01_count', models.IntegerField(null=True)),
('oma_maksutieto_mp02_count', models.IntegerField(null=True)),
('oma_maksutieto_mp03_count', models.IntegerField(null=True)),
('paos_maksutieto_count', models.IntegerField(null=True)),
('paos_maksutieto_mp01_count', models.IntegerField(null=True)),
('paos_maksutieto_mp02_count', models.IntegerField(null=True)),
('paos_maksutieto_mp03_count', models.IntegerField(null=True)),
('luonti_pvm', models.DateTimeField(blank=True, editable=False)),
('muutos_pvm', models.DateTimeField(blank=True, editable=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('changed_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('vakajarjestaja', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='varda.vakajarjestaja')),
],
options={
'verbose_name': 'historical yearly report summary',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
#
# *******************************************************************************
#
# David Marzocca - 25.10.2015
#
# This script downloads a DSCOVR photo from the website to the /photos/ folder
#
# *******************************************************************************
# ************** Libraries and definitions *******************
import urllib
import os
import datetime
import re
import calendar
import shutil
from itertools import islice
# this function checks whether the url points to an image and, if so, downloads it
def download_photo(img_url, filename):
try:
image_on_web = urllib.urlopen(img_url)
if image_on_web.headers.maintype == 'image':
buf = image_on_web.read()
path = DOWNLOADED_IMAGE_PATH
file_path = "%s%s" % (path, filename)
downloaded_image = file(file_path, "wb")
downloaded_image.write(buf)
downloaded_image.close()
image_on_web.close()
else:
return 0
except:
return 0
return 1
# This function adds a line to the beginning of a file, keeping a max of 100 lines
# If there is no file it creates it.
def write_on_log(filename, line):
if os.path.exists(filename):
with open(filename, 'r+') as f:
content = list(islice(f, 99))
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n')
for oldline in content:
f.write(oldline)
f.close()
else:
with open(filename, 'w') as f:
f.write(line)
f.close()
# ************** Beginning of the script *******************
# Current working directory
CurrWorkDir = os.getcwd()
# Define the path of where the photos are going to be and their names
DOWNLOADED_IMAGE_PATH = '/home/maxim/Pictures/Wallpapers/'
downloaded_photoname = 'DSCOVR.png'
downloaded_photoname_2 = 'DSCOVR2.png'
# If the folder 'photos' does not exist, it is created
if not os.path.exists(DOWNLOADED_IMAGE_PATH):
os.makedirs(DOWNLOADED_IMAGE_PATH)
today = datetime.datetime.utcnow()
found_photo = 0
# Starting from tomorrow (UTC), it will go back in time day by day (up to 1 month)
# until it finds a photo taken within 12 hours of the present time.
# This avoids gaps that occur when the satellite does not upload new photos.
for deltaDay in range(-1, 30):
delta_day = datetime.timedelta(days=deltaDay)
photo_day = today - delta_day
datetag = str(photo_day.year) + str(photo_day.month).zfill(2) + str(photo_day.day).zfill(2)
# I open the archive webpage and download it in a buffer
# This archive contains metadata for all photos taken in a given day
urlarchive = 'http://epic.gsfc.nasa.gov/api/images.php?date='
archive_response = urllib.urlopen(urlarchive + datetag)
buf_archive = archive_response.read()
# Now with a regular expression I extract the positions of the photos filenames
list_index_filename = [m.start() for m in re.finditer('epic_1b_', buf_archive)]
# If in that day there are no photos it skips to the previous day
if len(list_index_filename) == 0:
continue
# From these positions I get the datecode of each photo: yyyymmddhhmmss
photo_datecode = []
for ii in list_index_filename:
photo_datecode.append(buf_archive[ii + 8: ii + 22])
# I close the webpage
archive_response.close()
# this is the timestamp of the time I'm looking for the photo: time in seconds since a reference date
timestamp_photo_day = (photo_day - datetime.datetime(1970, 1, 1)).total_seconds()
# I create a list of timestamps from the date codes in the web archive
rel_photo_timestamp = []
for datecode in photo_datecode:
date_time = datetime.datetime(int(datecode[0:4]), int(datecode[4:6]), int(datecode[6:8]), int(datecode[8:10]), int(datecode[10:12]), int(datecode[12:14]))
rel_photo_timestamp.append( abs((date_time - datetime.datetime(1970, 1, 1)).total_seconds() - (60*60*12) - timestamp_photo_day ))
# I find the instance in the list closer in time with the present time
min_deltat_index = rel_photo_timestamp.index(min(rel_photo_timestamp))
# I want the photo to be within 12 hours of the present time, otherwise I search for a previous day
if min(rel_photo_timestamp) < 12 * 3600:
found_photo = 1
break
# End of the For loop.
to_print_1 = 'Script time = ' + today.strftime("%Y-%m-%d %H:%M:%S") + ' GMT.'
# If it found a photo it will download it and print the date tag on the log file
if found_photo == 1:
# This is the datecode of the photo I will download
datecode = photo_datecode[min_deltat_index]
photo_datetime = datetime.datetime(int(datecode[0:4]), int(datecode[4:6]), int(datecode[6:8]), int(datecode[8:10]), int(datecode[10:12]), int(datecode[12:14]))
#print(datecode)
baseurl = 'https://epic.gsfc.nasa.gov/archive/natural/'
date_folder = str(datecode[0:4])+"/"+str(datecode[4:6])+"/"+str(datecode[6:8])
endurl = '.png'
filename = "/png/epic_1b_"
photourl = baseurl + date_folder + filename + datecode + endurl
#print(photourl)
# It downloads the photo
download_check = download_photo(photourl, downloaded_photoname)
if download_check == 0:
# sometimes the photo filename ends by 00 and other times by 01, this checks both cases
endurl = '_00.png'
photourl = baseurl+date_folder + filename + datecode + endurl
# It downloads the photo
download_check = download_photo(photourl, downloaded_photoname)
if download_check == 1:
# It makes a second copy. This is only needed for MacOSX in order to correctly refresh the wallpaper
#shutil.copy2(DOWNLOADED_IMAGE_PATH + downloaded_photoname, DOWNLOADED_IMAGE_PATH + downloaded_photoname_2)
to_print_2 = ' Photo time = ' + photo_datetime.strftime("%Y-%m-%d %H:%M:%S") + ' GMT. URL: ' + photourl
# this is in case it didn't manage to download a file
if download_check == 0:
to_print_2 = ' Photo time = ' + photo_datetime.strftime("%Y-%m-%d %H:%M:%S") + ' GMT.' + ' ERROR: not downloaded.'
# Otherwise, if it didn't find it, it will write this to the log file
else:
to_print_2 = ' No photo was found.'
# Write on the log.txt file
write_on_log(CurrWorkDir+'/log.txt', to_print_1 + to_print_2)
# The End
raise SystemExit()
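# A Python 3 port of download_photo would look roughly like this (a sketch,
# kept commented out since the rest of this script is Python 2):
#
#   from urllib.request import urlopen
#   def download_photo(img_url, filename):
#       try:
#           with urlopen(img_url) as resp:
#               if resp.headers.get_content_maintype() == 'image':
#                   with open(DOWNLOADED_IMAGE_PATH + filename, 'wb') as out:
#                       out.write(resp.read())
#                   return 1
#       except Exception:
#           pass
#       return 0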
|
import os
import tensorflow as tf
import numpy as np
from config_utils import read_config
from docker_path_helper import get_base_directory
from model_saver import ModelSaver
from network import Network
def parse_trajectory_line(line):
line = line.replace('[', '').replace(']', '')
parts = line.split(', ')
assert len(parts) == 2
x1 = float(parts[0])
y1 = float(parts[1])
return x1, y1
if __name__ == '__main__':
model_name = '2019_08_26_14_59_22'
saver_global_step = '153000'
trajectory_global_step = '128600'
trajectory_name = 'success_310.txt'
# read the config
config = read_config()
# where we save all the outputs
scenario = config['general']['scenario']
working_dir = os.path.join(get_base_directory(), scenario)
saver_dir = os.path.join(working_dir, 'models', model_name)
best_saver_path = os.path.join(saver_dir, 'best_model')
# generate graph:
network = Network(config)
best_saver = ModelSaver(best_saver_path, 1, 'best')
# read trajectory
trajectory_file_path = os.path.join(
working_dir, 'trajectories', model_name, trajectory_global_step, trajectory_name)
with open(trajectory_file_path, 'r') as f:
endpoints = [parse_trajectory_line(l) for l in f.readlines()]
start = endpoints[0]
goal = endpoints[-1]
mid = endpoints[(len(endpoints)-1) // 2]
with tf.Session(
config=tf.ConfigProto(
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=config['general']['gpu_usage'])
)
) as sess:
x = np.linspace(-1, 1, 500)
y = np.linspace(-1, 1, 500)
xv, yv = np.meshgrid(x, y)
points = [(xv[i, j], yv[i, j]) for i in range(len(x)) for j in range(len(y))]
restore_from = os.path.join(best_saver_path, 'best-{}'.format(saver_global_step))
best_saver.restore(sess, restore_from=restore_from)
# what is the value at level 2?
val2_start_goal = network.predict_values([start], [goal], 2, sess)[0]
print 'predicted level 2 value of start goal {}'.format(val2_start_goal)
# assert that this is the same as splitting by the policy
val1_start_mid = network.predict_values([start], [mid], 1, sess)
val1_mid_goal = network.predict_values([mid], [goal], 1, sess)
val2_with_mid = val1_start_mid + val1_mid_goal
print 'mid point at level 2 is {}'.format(mid)
print 'predicted level 2 value of start goal going through mid {}'.format(val2_with_mid)
# what is the minimal value with for loop of 2 level 1s?
val1_start_mid = network.predict_values([start]*len(points), points, 1, sess)
val1_mid_goal = network.predict_values(points, [goal] * len(points), 1, sess)
val1 = val1_start_mid + val1_mid_goal
better_options = [i for i, f in enumerate([float(v) for v in val1]) if f < float(val2_start_goal)]
print 'better points discovered ({}):'.format(len(better_options))
for i in better_options:
print('x: {} y: {} \t\t val: {}'.format(points[i][0], points[i][1], val1[i]))
argmin_i = int(np.argmin(val1))
print 'minimal value {} at x: {} y: {}'.format(val1[argmin_i], points[argmin_i][0], points[argmin_i][1])
with open('/data/analysis/a.txt', 'w') as f:
lines = ['{} {}{}'.format(points[i][0], points[i][1], os.linesep) for i in better_options]
f.writelines(lines)
|
import random
from Neuron import *
from Util import *
class NeuralNetwork:
def __init__(self, numNeurons:int, numInputs:int, outputsMap:dict, randomWeights=False):
assert len(tuple(outputsMap.keys())[0]) == numNeurons, \
"Results from neurons don't have the same size as neurons quantity"
makeWeights = makeRandomWeights if randomWeights else makeNullWeights
self.neurons = []
for _ in range(numNeurons):
self.neurons.append(Neuron(makeWeights(numInputs)))
self.outputsMap = outputsMap
def __str__(self):
s = ''
for i in range(len(self.neurons)):
s += " Neuron {}:".format(i)
if i % 10 == 0:
s += "\n"
s += str(self.neurons[i])
s += '\n'
return s
# samples = the patterns that will be presented to every neuron on each
# training epoch
# expectations = [[,],] the expected output of each neuron for a given sample.
def trainNetwork(self, samples, expectations, repeatFor:int=100, desirableError:int=0):
assert len(samples) == len(expectations), \
"Quantity of samples doesn't match quantity of expectations!"
assert len(expectations[0]) == len(self.neurons), \
"For each sample, it should have an expectation for each neuron!"
totalError = desirableError + 1
t = 0
while (t < repeatFor) and (totalError > desirableError):
totalError = 0
t += 1
print(" EPOCA {}".format(t))
for sample, expectation in zip(samples, expectations):
print()
print(" SAMPLE: ", sample)
print(" EXPECTATION: ", expectation)
print()
for i in range(len(self.neurons)):
print("neuronio ", i, "antes:", self.neurons[i])
error = abs(self.neurons[i].train(expectation[i], sample))
#neuronChanged = True if error else neuronChanged
totalError += error
print("neuronio ", i, "depois:", self.neurons[i])
print()
# if not neuronChanged:
# break
print()
return t
def analyze(self, pattern:tuple):
outputs = []
for neuron in self.neurons:
outputs.append(neuron.makeSinapse(pattern))
outputs = tuple(outputs)
return self.outputsMap[outputs]
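# A minimal usage sketch (hypothetical data; assumes Util provides
# makeRandomWeights / makeNullWeights and that each Neuron emits 0/1 from
# makeSinapse, as the code above implies). The outputsMap keys are tuples
# holding one output per neuron:
#
#   outputs_map = {(0, 0): 'A', (0, 1): 'B', (1, 0): 'C', (1, 1): 'D'}
#   net = NeuralNetwork(numNeurons=2, numInputs=3, outputsMap=outputs_map,
#                       randomWeights=True)
#   samples = [(1, 0, 1), (0, 1, 0)]
#   expectations = [(0, 1), (1, 0)]  # expected output of each neuron per sample
#   epochs_used = net.trainNetwork(samples, expectations, repeatFor=100)
#   print(net.analyze((1, 0, 1)))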
|
# TWITTER
"""
SOLVED -- NO SIMILAR PROBLEM FOUND
Given a binary search tree (BST) and a value s,
split the BST into 2 trees, where one tree has all values less than or equal to s,
and the other tree has all values greater than s
while maintaining the tree structure of the original BST.
You can assume that s will be one of the node's value in the BST.
Return both tree's root node as a tuple.
"""
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def __repr__(self):
return f"({self.value}, {self.left}, {self.right})"
"""
if self.left and self.right:
return f"({self.value}, {self.left}, {self.right})"
if self.left:
return f"({self.value}, {self.left})"
if self.right:
return f"({self.value}, None, {self.right})"
return f"({self.value})"
"""
def split_bst(bst, s):
# Time: O(log n)  Space: O(log n) for the recursion stack
if bst.value == s:
ltree = bst
rtree = bst.right
ltree.right = None
if bst.value > s:
l, r = split_bst(bst.left, s)
ltree = l
rtree = bst
rtree.left = r
if bst.value < s:
l, r = split_bst(bst.right, s)
rtree = r
ltree = bst
ltree.right = l
return ltree, rtree
n2 = Node(2)
n1 = Node(1, None, n2)
n5 = Node(5)
n4 = Node(4, None, n5)
root = Node(3, n1, n4)
"""
n1 = Node(2, Node(1), Node(3))
n2 = Node(6, Node(5), Node(7))
root = Node(4, n1, n2)
"""
# (3, (1, (2)), (4, None, (5)))
# What the tree looks like
# 3
# / \
# 1 4
# \ \
# 2 5
l, r = split_bst(root, 2)
print(l, "%", r)
#print(split_bst(root, 2))
# ((1, (2)), (3, None, (4, None, (5))))
# Split into two trees
# 1 And 3
# \ \
# 2 4
# \
# 5
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
import numpy as np
import random
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
# sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
from cv_bridge import CvBridge, CvBridgeError
# ml00 {R:0.34, G:0.42, B:0.83} -> (115.10204081632654, 150.54216867469876, 211.64999999999998)
# ml01 {R:0.85, G:0.85, B:0.44} -> (30.0, 123.0, 216.75)
# ml10 {R:0.22, G:0.70, B:0.71} -> (90.61224489795919, 175.98591549295773, 181.04999999999998)
# ml11 {R:0.43, G:0.51, B:0.58} -> (104.0, 65.94827586206895, 147.89999999999998)
# mh00 {R:0.39, G:0.79, B:0.06} -> (46.43835616438356, 235.63291139240505, 201.45000000000002)
# box1-left <{R:0.84, G:0.21, B:0.42}> -> (170.0, 191.25, 214.2) # red
# box1-right <{R:1.00, G:0.36, B:0.06}> -> (9.574468085106384, 239.7, 255.0) # orange
# box2-left <{R:0.84, G:0.21, B:0.42}> -> (170.0, 191.25, 214.2)
# box2-right <{R:0.88, G:0.64, B:1.00}> -> (140.0, 91.8, 255.0)
# box3-left <{R:0.84, G:0.21, B:0.42} -> (170.0, 191.25, 214.2)
# box3-right <{R:0.41, G:0.64, B:1.00}> -> (108.30508474576271, 150.45000000000002, 255.0)
def random_pick(some_list,probabilities):
x = random.uniform(0,1)
cumulative_probability=0.0
for item,item_probability in zip(some_list,probabilities):
cumulative_probability+=item_probability
if x < cumulative_probability:
break
return item
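# e.g. random_pick(['a', 'b', 'c'], [0.2, 0.3, 0.5]) returns 'a' ~20%, 'b' ~30%
# and 'c' ~50% of the time; probabilities are assumed to sum to 1, and if they
# sum to less, the final item absorbs the remaining probability mass.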
def callback(data):
# color_dist = {'red': {'Lower': np.array([0, 60, 60]), 'Upper': np.array([6, 255, 255])},
# 'blue': {'Lower': np.array([100, 80, 46]), 'Upper': np.array([124, 255, 255])},
# 'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},
# }
# color_dist = {'ml00': {'Lower': np.array([110, 43, 46]), 'Upper': np.array([120, 255, 255])}, # deep blue
# 'ml01': {'Lower': np.array([25, 43, 46]), 'Upper': np.array([35, 255, 255])}, # yellow
# 'ml10': {'Lower': np.array([85, 43, 46]), 'Upper': np.array([95, 255, 255])}, # light blue
# 'ml11': {'Lower': np.array([100, 43, 46]), 'Upper': np.array([104, 255, 255])}, # gray
# 'mh00': {'Lower': np.array([40, 43, 46]), 'Upper': np.array([50, 255, 255])}, # green
# 'box1': {'Lower': np.array([5, 43, 46]), 'Upper': np.array([15, 255, 255])}, # orange
# 'box2': {'Lower': np.array([135, 43, 46]), 'Upper': np.array([145, 255, 255])}, # pink
# 'box3': {'Lower': np.array([105, 43, 46]), 'Upper': np.array([108, 255, 255])}, # sky
# 'red': {'Lower': np.array([165, 43, 46]), 'Upper': np.array([175, 255, 255])}, # red
# }
color_dist = {'orange': {'Lower': np.array([5, 43, 46]), 'Upper': np.array([15, 255, 255])}, # orange
'pink': {'Lower': np.array([135, 43, 46]), 'Upper': np.array([145, 255, 255])},
'blue': {'Lower': np.array([105, 43, 46]), 'Upper': np.array([108, 255, 255])}, # sky
}
position_color_list = []
cv_blocks_ok = False
cv_count = 0
last_blocks = []
last_x = 0
stable = False
storage_blocks = []
block_areas = []
# rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
scaling_factor = 0.5
global count, bridge
count = count + 1
if count == 1:
count = 0
cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
t1 = cv2.getTickCount()
cv2.imshow("frame", cv_img)
orgFrame = cv2.resize(cv_img, (320,240), interpolation = cv2.INTER_CUBIC)
img_h, img_w = orgFrame.shape[:2]
img_center_x = img_w / 2
img_center_y = img_h / 2
# print(int(img_h))
# print(int(img_w))
if cv_blocks_ok is False:
gs_frame = cv2.GaussianBlur(orgFrame, (5, 5), 0)#GaussianBlur(src,ksize,sigmaX,dst=None,sigmaY=None,borderType=None)
hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)#cv2.cvtColor(src,code[,dst[,dstCn]])
for i in color_dist:
mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])
# cv2.imshow("mask",mask)
mask = cv2.erode(mask, None, iterations=2)
# cv2.imshow("erode", mask)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.dilate(mask, kernel, iterations=2)
# cv2.imshow('dilate', mask)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if len(cnts) > 0:
for j in cnts:
rect_j = cv2.minAreaRect(j)
box_j = cv2.boxPoints(rect_j)
cv2.drawContours(orgFrame, [np.int0(box_j)], -1, (0, 255, 255), 2)
area_j = cv2.contourArea(j)
j_x, j_y = rect_j[0]
res = str(i) + ": area = " + str(int(area_j)) + "; position = (" + str(int(j_x)) + "," + str(int(j_y)) + ")"
# print(res)
block_areas.append([i, int(area_j), int(j_x), int(j_y)])
box1_type_recog = False
box2_type_recog = False
box3_type_recog = False
box1_type = box2_type = box3_type = 0
for item in block_areas:
if 185 < item[2] < 190: # box1_position(189,115)
box1_type_recog = True
box1_type = item[0]
if 108 < item[2] < 113: # box2_position(110,114)
box2_type_recog = True
box2_type = item[0]
if 215 < item[2] < 220: # box3_position(218,114)
box3_type_recog = True
box3_type = item[0]
if not box1_type_recog:
# print(random_pick(['pink','orange','blue'],[0.33333,0.33334,0.33333]))
box1_type = random_pick(['pink','orange','blue'],[0.33333,0.33334,0.33333])
# print("box1_type = " + box1_type)
if not box2_type_recog:
# print(np.random.choice(['pink','orange','blue'], p = p.ravel()))
box2_type = random_pick(['pink','orange','blue'],[0.33333,0.33334,0.33333])
# print("box2_type = " + box2_type)
if not box3_type_recog:
# print(np.random.choice(['pink','orange','blue'], p = p.ravel()))
box3_type = random_pick(['pink','orange','blue'],[0.33333,0.33334,0.33333])
# print("box3_type = " + box3_type)
object_types = []
object_types.append(box1_type)
object_types.append(box2_type)
object_types.append(box3_type)
print(object_types)
# print("-----------------------------")
# c = max(cnts, key=cv2.contourArea)
# rect = cv2.minAreaRect(c)
# box = cv2.boxPoints(rect)
# # cv2.drawContours(orgFrame, [np.int0(box)], -1, (0, 255, 255), 2)
# c_x, c_y = rect[0]
# h, w = rect[1]
# c_angle = rect[2]
# # if h * w >= 1350:
# cv2.circle(orgFrame, (int(c_x), int(c_y)), 3, (216, 0, 255), -1)
# cv2.putText(orgFrame, "(" + str(int(c_x)) + ", " + str(int(c_y)) + ")",(int(c_x), int(c_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
cv2.line(orgFrame, (int(img_w / 2) - 20, int(img_h / 2)), (int(img_w / 2) + 20, int(img_h / 2)), (0, 0, 255), 1)
cv2.line(orgFrame, (int(img_w / 2), int(img_h / 2) - 20), (int(img_w / 2), int(img_h / 2) + 20), (0, 0, 255), 1)
t2 = cv2.getTickCount()
time_r = (t2 - t1) / cv2.getTickFrequency()
fps = 1.0/time_r
cv2.putText(orgFrame, "fps:" + str(int(fps)), (10, orgFrame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)#(0, 0, 255)BGR
cv2.imshow("orgFrame", orgFrame)
cv2.waitKey(3)
else:
pass
def talker():
pub = rospy.Publisher('chatter', String, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
hello_str = "hello world %s" % rospy.get_time()
rospy.loginfo(hello_str)
pub.publish(hello_str)
rate.sleep()
def listener():
# In ROS, nodes are uniquely named. If two nodes with the same
# name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('listener', anonymous=True)
global count, bridge
count = 0
bridge = CvBridge()
rospy.Subscriber("/camera/rgb/image_raw", Image, callback)
rospy.spin()
if __name__ == '__main__':
listener() |
string = "パタトクカシーー"
new_string1 = ""
new_string2 = ""
for i in range(len(string)):
if i % 2 == 0:
new_string1 += string[i]
else:
new_string2 += string[i]
print(new_string1)
print(new_string2)
|
"""Train WideResNet(s) on Google AI platform or locally."""
import argparse
import logging
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
from image_augmentation.wide_resnet import WideResNet
from image_augmentation.preprocessing import imagenet_standardization, imagenet_baseline_augmentation
from image_augmentation.preprocessing import cifar_standardization, cifar_baseline_augmentation
from image_augmentation.datasets import reduced_cifar10, reduced_svhn, reduced_imagenet
from image_augmentation.datasets import cifar10, svhn, imagenet
from image_augmentation.image import autoaugment_policy, PolicyAugmentation, RandAugment
from image_augmentation.callbacks import TensorBoardLRLogger
def get_args():
parser = argparse.ArgumentParser(description='Train WideResNet on Google AI Platform')
parser.add_argument(
'--job-dir',
type=str,
required=True,
help='local or GCS location for writing checkpoints of models and other results')
parser.add_argument(
'--epochs',
type=int,
default=120,
help='number of times to go through the data, default=120')
parser.add_argument(
'--batch-size',
default=128,
type=int,
help='size of each batch, default=128')
parser.add_argument(
'--wrn-depth',
default=40,
type=int,
help='depth of Wide ResNet, default=40')
parser.add_argument(
'--wrn-k',
default=2,
type=int,
help='widening factor of Wide ResNet, default=2')
parser.add_argument(
'--wrn-dropout',
default=0.0,
type=float,
help='dropout value for WideResNet blocks, default=0.0, no dropout')
parser.add_argument(
'--no-cutout',
default=False,
const=True,
action='store_const',
help='disable random Cutout data augmentation (Cutout is applied by default)')
parser.add_argument(
'--auto-augment',
default=False,
const=True,
action='store_const',
help='apply AutoAugment policy for data augmentation on training, off by default (no AutoAugment)')
parser.add_argument(
'--rand-augment-n',
default=0,
type=int,
help='apply RandAugment with number of (N) image transforms for data augmentation on training, '
'default=0, off (no RandAugment)')
parser.add_argument(
'--rand-augment-m',
default=10,
type=int,
help='magnitude (M) of applying each image transform for RandAugment, '
'(only when using RandAugment) default=10')
parser.add_argument(
'--dataset',
default='cifar10',
choices=["cifar10", "reduced_cifar10", "svhn",
"reduced_svhn", "imagenet", "reduced_imagenet"],
help='dataset that is to be used for training and evaluating the model, default="cifar10"')
parser.add_argument(
'--data-dir',
required=False,
default=None,
type=str,
help='local or GCS location for accessing data with TFDS '
'(directory for tensorflow_datasets)')
parser.add_argument(
'--padding-mode',
choices=['zeros', 'reflect'],
default='reflect',
help='padding mode to be used to pad pixels before cropping, '
'(applicable only in case of CIFAR, SVHN) default is "reflect"')
parser.add_argument(
'--normalization',
choices=['pixel_center', 'rgb_normalization'],
default='rgb_normalization',
help='normalization that is to be applied on images, '
'(applicable only in case of CIFAR, SVHN) default is "rgb_normalization"')
parser.add_argument(
'--optimizer',
default='sgdr',
choices=["sgd", "adam", "sgdr"],
help='optimizer that is to be used for training, default="sgdr"')
parser.add_argument(
'--init-lr',
default=0.01,
type=float,
help='initial learning rate for training, default=0.01')
parser.add_argument(
'--sgdr-t0',
default=10,
type=float,
help='number of epochs in the first decay period for SGDR, default=10')
parser.add_argument(
'--sgdr-t-mul',
default=2,
type=int,
help='factor by which each successive decay period is lengthened for SGDR, default=2')
parser.add_argument(
'--drop-lr-by',
default=0.0,
type=float,
help='drop learning rate by a factor (only when using SGD, not SGDR), '
'default=0.0, off')
parser.add_argument(
'--drop-lr-every',
default=[],
action='append',
type=int,
help='epoch numbers at which to drop the learning rate, repeatable '
'(only when using SGD, not SGDR), default is empty (off)')
parser.add_argument(
'--sgd-nesterov',
default=False,
const=True,
action='store_const',
help='use Nesterov accelerated gradient with SGD optimizer, by default Nesterov is off')
parser.add_argument(
'--weight-decay-rate',
default=0.0,
type=float,
help='rate of weight decay per training step, note: this '
'rate is multiplied by learning rate to compute decay value '
'before passing to SGDW / AdamW, default=0.0, off')
parser.add_argument(
'--l2-reg',
default=0.0005,
type=float,
help='L2 regularization to be applied on all weights '
'of the network, default=0.0005')
parser.add_argument(
'--multi-gpu',
default=False,
const=True,
action='store_const',
help='single host multi-GPU sync training, default is off')
parser.add_argument(
'--verbosity',
choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],
default='INFO')
args = parser.parse_args()
return args
NUM_CLASSES = {
"cifar10": 10,
"reduced_cifar10": 10,
"svhn": 10,
"reduced_svhn": 10,
"imagenet": 1000,
"reduced_imagenet": 120
}
BASELINE_METHOD = {
"cifar10": (cifar_baseline_augmentation, cifar_standardization),
"reduced_cifar10": (cifar_baseline_augmentation, cifar_standardization),
"svhn": (cifar_baseline_augmentation, cifar_standardization),
"reduced_svhn": (cifar_baseline_augmentation, cifar_standardization),
"imagenet": (imagenet_baseline_augmentation, imagenet_standardization),
"reduced_imagenet": (imagenet_baseline_augmentation, imagenet_standardization)
}
DATASET = {
"cifar10": cifar10,
"reduced_cifar10": reduced_cifar10,
"svhn": svhn,
"reduced_svhn": reduced_svhn,
"imagenet": imagenet,
"reduced_imagenet": reduced_imagenet
}
def main(args):
# set level of verbosity
logging.getLogger("tensorflow").setLevel(args.verbosity)
# setup training logger
logging.basicConfig(format="%(asctime)s.%(msecs)03d %(levelname)s "
"%(module)s - %(funcName)s: %(message)s",
datefmt='%m/%d/%Y %H:%M:%S')
logging.getLogger().setLevel(args.verbosity)
# display script args
logging.info("Training script args: %s", str(args))
if args.multi_gpu:
# set GPU memory growth to avoid fork cannot allocate memory warning in multi-GPU env
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# image input shape is set to 32 x 32
inp_shape = (32, 32, 3)
# num classes and other pre-processing ops inferred based on given dataset name
num_classes = NUM_CLASSES[args.dataset]
baseline_augment, standardize = BASELINE_METHOD[args.dataset]
ds = DATASET[args.dataset](args.data_dir)
# get train and validation/test datasets
train_ds = ds['train_ds']
val_ds = ds['val_ds'] if 'val_ds' in ds else ds['test_ds']
# show dataset distribution only for reduced datasets
if args.dataset.startswith("reduced"):
train_distro, val_distro = [tf.math.bincount(
[label for image, label in curr_ds],
minlength=num_classes)
for curr_ds in (train_ds, val_ds)]
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
plt.bar(tf.range(num_classes).numpy(), train_distro.numpy(), color='y')
plt.xlabel(args.dataset + " classes")
plt.ylabel("number of samples")
plt.title("Training Distribution")
plt.subplot(1, 2, 2)
plt.bar(tf.range(num_classes).numpy(), val_distro.numpy(), color='g')
plt.xlabel(args.dataset + " classes")
plt.ylabel("number of samples")
plt.title("Validation Distribution")
fig_file_path = args.job_dir + "/dataset_distribution.pdf"
with tf.io.gfile.GFile(fig_file_path, "wb") as fig_file:
plt.savefig(fig_file, format="pdf")
logging.info("Wrote file to: %s", fig_file_path)
if args.multi_gpu:
strategy = tf.distribute.MirroredStrategy()
else:
strategy = tf.distribute.get_strategy()
logging.info("Number of available devices: %d", strategy.num_replicas_in_sync)
with strategy.scope():
wrn = WideResNet(inp_shape, depth=args.wrn_depth, k=args.wrn_k,
dropout=args.wrn_dropout, num_classes=num_classes)
wrn.summary()
inp = keras.layers.Input(inp_shape, name='image_input')
# whether to use cutout in baseline augmentation step
cutout = not args.no_cutout
if cutout:
logging.info("Using Cutout")
with strategy.scope():
# CIFAR-10 and SVHN use similar augmentation
if args.dataset.endswith("cifar10") or args.dataset.endswith("svhn"):
x = baseline_augment(inp, args.padding_mode, cutout)
# ImageNet uses a different augmentation
else:
x = baseline_augment(inp)
with strategy.scope():
if args.dataset.endswith("cifar10") or args.dataset.endswith("svhn"):
# pixel centering for CIFAR-10 / SVHN requires that image samples be supplied
# (to learn the pixel-wise mean)
if args.normalization == 'pixel_center':
images_only = train_ds.batch(args.batch_size).map(lambda image, label: image)
x = standardize(x, mode='pixel_mean_subtract', data_samples=images_only)
# rgb normalization uses rescaling using known RGB mean(s) and std(s)
else:
x = standardize(x, mode='feature_normalize')
# for ImageNet rescaling is used to scale inputs to range [-1, +1]
else:
x = standardize(x)
x = wrn(x)
# model combines baseline augmentation, standardization and wide resnet layers
model = keras.Model(inp, x, name='WRN')
model.summary()
# cache the dataset only if possible
if args.dataset not in ['svhn', 'imagenet']:
train_ds = train_ds.cache()
val_ds = val_ds.cache()
def augment_map_fn_builder(augmenter):
return lambda image, label: (augmenter.apply_on_image(image), label)
# apply AutoAugment (data augmentation) on training pipeline
if args.auto_augment:
logging.info("Using AutoAugment pre-processing")
# ensure AutoAugment policy dataset name always starts with "reduced_"
policy_ds_name = "reduced_" + args.dataset if not args.dataset.startswith("reduced_") else args.dataset
policy = autoaugment_policy(policy_ds_name)
# set hyper parameters to size 16 as input size is 32 x 32
auto_augment = PolicyAugmentation(policy, translate_max=16, cutout_max_size=16)
augment_map_fn = augment_map_fn_builder(auto_augment)
train_ds = train_ds.map(augment_map_fn, tf.data.experimental.AUTOTUNE)
# apply RandAugment on training pipeline
if args.rand_augment_n:
logging.info("Using RandAugment pre-processing: %d layers and %d magnitude",
args.rand_augment_n, args.rand_augment_m)
rand_augment = RandAugment(args.rand_augment_m,
args.rand_augment_n,
# set hyper parameters to size 16 as input size is 32 x 32
translate_max=16,
# 32 x 32 models (e.g. CIFAR-10) do not use Cutout / Invert / SolarizeAdd
use_cutout_op=False,
use_invert_op=False,
use_solarize_add_op=False,
# 32 x 32 models (e.g. CIFAR-10) do use the Identity transform
use_identity_op=True)
augment_map_fn = augment_map_fn_builder(rand_augment)
train_ds = train_ds.map(augment_map_fn)
# shuffle and batch the dataset
train_ds = train_ds.shuffle(1000, reshuffle_each_iteration=True).batch(args.batch_size)
val_ds = val_ds.batch(args.batch_size)
# prefetch dataset for faster access in case of larger datasets only (which are not cached)
if args.dataset in ['svhn', 'imagenet']:
train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
val_ds = val_ds.prefetch(tf.data.experimental.AUTOTUNE)
# calculate steps per epoch for optimizer schedule num steps
steps_per_epoch = tf.data.experimental.cardinality(train_ds)
steps_per_epoch = steps_per_epoch.numpy() # helps model optimizer become JSON serializable
with strategy.scope():
# any one of the following:
# - use an SGD optimizer w/ or w/o weight decay (SGDW / SGD) or just Adam
# - use a callable learning rate schedule for SGDR or not
# - use a callable weight decay schedule whenever required
# - use SGD Nesterov or not
if args.optimizer == 'sgdr':
logging.info("Using learning rate schedule: SGDR (CosineDecayWithRestarts)")
lr = keras.experimental.CosineDecayRestarts(args.init_lr, steps_per_epoch * args.sgdr_t0,
args.sgdr_t_mul)
if args.weight_decay_rate:
weight_decay = keras.experimental.CosineDecayRestarts(args.weight_decay_rate * args.init_lr,
steps_per_epoch * args.sgdr_t0, args.sgdr_t_mul)
elif args.drop_lr_by:
logging.info("Using learning rate schedule: PiecewiseConstantDecay")
lr_boundaries = [(steps_per_epoch * epoch) for epoch in sorted(args.drop_lr_every)]
lr_values = [args.init_lr * (args.drop_lr_by ** idx) for idx in range(len(lr_boundaries) + 1)]
lr = keras.optimizers.schedules.PiecewiseConstantDecay(lr_boundaries, lr_values)
if args.weight_decay_rate:
wd_values = [args.weight_decay_rate * lr_val for lr_val in lr_values]
weight_decay = keras.optimizers.schedules.PiecewiseConstantDecay(lr_boundaries, wd_values)
else:
logging.info("Using learning rate: constant")
lr = args.init_lr
if args.weight_decay_rate:
weight_decay = args.weight_decay_rate * lr
if args.optimizer.startswith('sgd'):
if args.weight_decay_rate:
logging.info("Using optimizer: SGDW (SGD w/ weight decay)")
opt = tfa.optimizers.SGDW(weight_decay, lr, momentum=0.9, nesterov=args.sgd_nesterov)
else:
logging.info("Using optimizer: SGD")
opt = keras.optimizers.SGD(lr, momentum=0.9, nesterov=args.sgd_nesterov)
else: # adam
if args.weight_decay_rate:
logging.info("Using optimizer: AdamW (Adam w/ weight decay)")
opt = tfa.optimizers.AdamW(weight_decay, lr)
else:
logging.info("Using optimizer: Adam")
opt = keras.optimizers.Adam(lr)
metrics = [keras.metrics.SparseCategoricalAccuracy()]
# use top-5 accuracy metric with ImageNet and reduced-ImageNet only
if args.dataset.endswith("imagenet"):
logging.info("Using metric: Top-5 Accuracy")
metrics.append(keras.metrics.SparseTopKCategoricalAccuracy(k=5))
if args.l2_reg != 0:
logging.info("Using loss: Cross Entropy w/ L2 regularization")
@tf.function
def loss_fn(labels, predictions):
loss = tf.nn.compute_average_loss(
tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions))
for var in model.trainable_variables:
loss += tf.nn.scale_regularization_loss(
args.l2_reg * tf.nn.l2_loss(var))
return loss
else:
logging.info("Using loss: Cross Entropy")
loss_fn = tf.keras.losses.sparse_categorical_crossentropy
model.compile(optimizer=opt, loss=loss_fn, metrics=metrics)
# prepare tensorboard logging
tb_path = args.job_dir + '/tensorboard'
checkpoint_path = args.job_dir + '/checkpoint'
callbacks = [keras.callbacks.TensorBoard(tb_path),
keras.callbacks.ModelCheckpoint(checkpoint_path),
TensorBoardLRLogger(tb_path + '/train')]
logging.info("Using tensorboard directory: %s", tb_path)
logging.info("Using model checkpoint directory: %s", checkpoint_path)
# train the model
model.fit(train_ds, verbose=2, validation_data=val_ds,
epochs=args.epochs, callbacks=callbacks)
# save keras model
save_path = args.job_dir + '/keras_model'
keras.models.save_model(model, save_path)
logging.info("Model exported to: %s", save_path)
if __name__ == '__main__':
args = get_args()
main(args)
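# Example invocation (hypothetical script name and job directory; all flags
# are defined in get_args above):
#
#   python train_wrn.py \
#       --job-dir /tmp/wrn_run \
#       --dataset reduced_cifar10 \
#       --epochs 120 --batch-size 128 \
#       --wrn-depth 40 --wrn-k 2 \
#       --optimizer sgdr --init-lr 0.01 --sgdr-t0 10 --sgdr-t-mul 2 \
#       --auto-augment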
|
# -*- coding:utf-8 -*-
from flask.blueprints import Blueprint
from flask import url_for, redirect, flash
from werkzeug.security import check_password_hash
from flask.globals import request, g
from echo_telegram_base import try_except, dao, app
from app_logger import logger
from api.signup import __get_user
from db.user import User
from flask import session
from flask_login.utils import login_user, login_required, logout_user
login_api = Blueprint("login_api", __name__)
@try_except
def check_pw(login_id, pw):
query = '''select pw from users where id = %s'''
cursor = dao.get_conn().cursor()
cursor.execute(query, [login_id])
db_pw = cursor.fetchone()
cursor.close()
g.conn.commit()
return check_password_hash(db_pw[0], pw)
@login_api.route('/login', methods=['POST'])
def login():
login_userId = request.form['login_id'].encode("utf-8")
login_pw = request.form['login_pw'].encode("utf-8")
user = __get_user(login_userId)
if user:
if check_pw(user[0], login_pw):
user = User(userId=user[1].decode('utf-8'), name=user[2].decode('utf-8'),
rank=user[3], status='place', channel=None, location=None, auth=False)
login_user(user)
session['userId'] = login_userId
return redirect(url_for('dashboard_view.dashboard'))
# login failed
flash(u'입력정보를 다시 확인해주세요')  # "Please check your input information again"
return redirect(url_for('main_view.index'))
@login_api.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('main_view.index')) |
#!/usr/bin/python
# coding=utf-8
import os
def find_pkg():
dir_name = r'/data/version'
file_list = os.listdir(dir_name)
bwcc_files = []
for i in file_list:
if i.startswith('bwccz'):
bwcc_files.append(i)
bwcc_files.sort()
count = len(bwcc_files)
#print bwcc_files
#print count
return (bwcc_files,count)
if __name__ == '__main__':
find_pkg()
#def write_filename(f_name):
# f = open(file_name,'a+')
# f.write(f_name)
# f.close()
|
from Tkinter import *
class HelloButton(Button):
def __init__(self,parent=None,config={}):
Button.__init__(self,parent,config)
self.pack()
self.config(command=self.callback)
def callback(self):
print('Goodbye world...')
self.quit()
if __name__ =='__main__':
HelloButton(None,{'text':'Hello subclass world'}).mainloop()
|
#-*- coding: UTF-8 -*-
import cv2
import numpy as np
def func1():
img_data=cv2.imread('e.jpg')
gray=cv2.cvtColor(img_data,cv2.COLOR_BGR2GRAY)
face=cv2.imread('xiaogou.png',0)
w = face.shape[1]
h=face.shape[0]
print(face.shape)
print(w,h)
res = cv2.matchTemplate(gray,face,cv2.TM_CCOEFF_NORMED)
threshold = 0.7
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_data, pt, (pt[0] + w, pt[1] + h), (7,249,151), 2)
# display the image
cv2.imshow('Detected',img_data)
cv2.waitKey(0)
cv2.destroyAllWindows()
def drawMatchesKnn_cv2(img1_gray, kp1, img2_gray, kp2, goodMatch,url3):
h1, w1 = img1_gray.shape[:2]
h2, w2 = img2_gray.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
vis[:h1, :w1] = img1_gray
vis[:h2, w1:w1 + w2] = img2_gray
print(vis.shape)
p1 = [kpp.queryIdx for kpp in goodMatch]
p2 = [kpp.trainIdx for kpp in goodMatch]
post1 = np.int32([kp1[pp].pt for pp in p1])
post2 = np.int32([kp2[pp].pt for pp in p2]) + (w1, 0)
for (x1, y1), (x2, y2) in zip(post1, post2):
cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
leng=vis.shape[1]
width=vis.shape[0]
new_leng=401
new_width=int(401/leng*width)
vis=cv2.resize(vis,(new_leng,new_width))
cv2.imwrite(url3,vis)
def mysurf(url1="statics/images/666.jpg",url2="statics/images/888.jpg",url3=None):
img1_gray = cv2.imread(url1)
img2_gray = cv2.imread(url2)
# feature detection with SURF
sift = cv2.xfeatures2d.SURF_create()
# feature detection with SIFT
# sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1_gray, None)
kp2, des2 = sift.detectAndCompute(img2_gray, None)
# BFMatcher with default params
bf = cv2.BFMatcher(cv2.NORM_L2)
matches = bf.knnMatch(des1, des2, k=2)
goodMatch = []
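# Lowe's ratio test: keep a match only when its best distance is well below
# the distance to the second-best candidate (a strict 0.50 ratio here).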
for m, n in matches:
if m.distance < 0.50 * n.distance:
goodMatch.append(m)
drawMatchesKnn_cv2(img1_gray, kp1, img2_gray, kp2, goodMatch[:50],url3)
if __name__=='__main__':
pass
|
import json
import os
import sys
import urllib
import cv2
import requests
face_cascade = cv2.CascadeClassifier('/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml')
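# Assumed set of downloadable image extensions; the original definition of
# VALID_IMG_FORMATS (used further below) is missing from this script.
VALID_IMG_FORMATS = ('jpg', 'jpeg', 'png', 'bmp', 'gif')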
stars = []
if os.path.isfile('stars.json'):
stars = json.loads(open('stars.json', 'r').read())
for star in stars:
print("Finding pictures of {}".format(star["name"]))
os.makedirs("wild/{}/{}".format(star["gender"], star["imdbId"]), exist_ok=True)
os.makedirs("wild", exist_ok=True)
os.makedirs("error", exist_ok=True)
os.makedirs("faces", exist_ok=True)
for star in stars[7:8]:
dirname = star["name"].replace(" ", "_")
os.makedirs("wild/{}".format(dirname), exist_ok=True)
os.makedirs("error/{}".format(dirname), exist_ok=True)
print("Finding pictures of {}".format(star["name"]))
query = urllib.parse.urlencode({'q': '"{}"'.format(star["name"])})
round = 1
star["popular"] = 1
while round <= 2 and star["popular"]:
round_photo_count = 0
url_count = 0
response = requests.get('https://contextualwebsearch-websearch-v1.p.rapidapi.com/api/Search/ImageSearchAPIWithPagination?autoCorrect=false&pageNumber={}&pageSize=50&{}&safeSearch=true'.format(round, query), headers={"X-RapidAPI-Key": "b2028ce3b8msh03ae80a1602a764p1392f2jsn50ff5d9b5202"})
for finding in json.loads(response.text)['value']:
url_count += 1
if not finding["url"] in star["urls"]:
# print("Fetching picture: {}".format(finding["url"]))
star["urls"].append(finding["url"])
file_name = os.path.basename(urllib.parse.urlparse(finding["url"]).path)
file_format = file_name.split('.')[-1].lower()
if file_format in VALID_IMG_FORMATS:
img = None
try:
r = requests.get(finding["url"])
open('wild/{}/{}'.format(dirname, file_name), 'wb').write(r.content)
img = cv2.imread('wild/{}/{}'.format(dirname, file_name))
except:
pass
if img is not None:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) != 1:
os.remove('wild/{}/{}'.format(dirname, file_name))
else:
round_photo_count += 1
# print("Picture found: {}".format(finding["url"]))
# for (x, y, w, h) in faces:
# cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# cv2.imshow("Faces found", img)
# cv2.waitKey(0)
# import pdb; pdb.set_trace()
elif os.path.isfile('wild/{}/{}'.format(dirname, file_name)):
os.rename('wild/{}/{}'.format(dirname, file_name), 'error/{}/{}'.format(dirname, file_name))
# print("Couldn't read file: {}".format(finding["url"]))
# else:
# print("Picture already fetched: {}".format(finding["url"]))
sys.stdout.write("\rRound {}: {}%".format(round, url_count*2))
sys.stdout.flush()
sys.stdout.write("\rRound {}: 100%\n".format(round))
print("Round {} got {} photos".format(round, round_photo_count))
if round_photo_count < 25:
if round == 1:
star["popular"] = 0
else:
round = 20
round += 1
open('stars.json', 'w').write(json.dumps(stars))
print("{} has {} photos".format(star["name"], len(os.listdir('wild/{}'.format(dirname))))) |
import keras
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import SGD
import migrate
from model import create_model
from utils import load_data, load_wsi_patches, custom_loss
if __name__ == '__main__':
batch_size = 16
epochs = 1000
patience = 50
# Load our model
model = create_model()
migrate.migrate_model(model)
# model.compile(optimizer='nadam', loss=custom_loss)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=custom_loss)
print(model.summary())
# Load our data
patch_dir = "/infodev1/non-phi-data/junjiang/OvaryCancer/auto_enc_patches_256/OCMC-016"
x_train, y_train, x_valid, y_valid = load_wsi_patches(patch_dir)
# Callbacks
tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
trained_models_path = 'models/model'
model_names = trained_models_path + '.{epoch:02d}-{val_loss:.4f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)
callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]
# Start Fine-tuning
model.fit(x_train,
y_train,
validation_data=(x_valid, y_valid),
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
verbose=1
)
|
#!/usr/bin/python3
import sys
def main():
current_node = None
current_node_record_count = 0
rank_unchanged = False
current_total = 0
out_list = []
for inp in sys.stdin:
line = inp.strip()
node, rank = line.split("\t")
if node == current_node:
if rank[0] != '[':
rank = float(rank)
current_total += rank
current_node_record_count += 1
else:
rank_unchanged = True
else:
if current_node is not None and rank_unchanged and current_node_record_count == 0:
print(current_node, 0.15000, sep=",")
elif current_node is not None:
# Print the newly calculated PageRank according to the damping formula
print(current_node, round(0.15 + (0.85 * current_total), 5), sep=",")
current_node = node
if rank[0] != "[":
rank = float(rank)
current_total = rank
rank_unchanged = False
current_node_record_count = 1
else:
current_total = 0
rank_unchanged = True
current_node_record_count = 0
if current_node is not None and rank_unchanged and current_node_record_count == 0:
print(current_node, 0.15000, sep=",")
elif current_node is not None:
print(current_node, round(0.15 + (0.85 * current_total), 5), sep=",")
if __name__ == "__main__":
main()
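# Assumed reducer input (tab-separated "node<TAB>rank" lines on stdin, grouped
# by node), matching the parsing above, e.g.:
#   nodeA	0.25            <- partial rank contribution from a mapper
#   nodeA	[nodeB, nodeC]  <- adjacency list record, passed through unchanged
# A node whose only records are adjacency lists (no inbound contributions)
# receives the damping base rank 0.15; otherwise rank = 0.15 + 0.85 * sum.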
|
"""
The user enters a string of several words separated by spaces.
Print each word on a new line, numbering the lines.
If a word is long, print only its first 10 letters.
"""
string = input("Enter a string of several words separated by spaces: ")
for i, word in enumerate(string.split(' '), 1):
print(f'{i}) {word[:10]}')
|
### Divide and Conquer Example ###
### MergeSort: O(n log n)
# Helper function: merge, used by mergeSort
def merge(A, B):
out = []
i,j=0,0
while i < len(A) and j < len(B):
if A[i] < B[j]:
out.append(A[i])
i+=1
else:
out.append(B[j])
j+=1
while i < len(A):
out.append(A[i])
i+=1
while j < len(B):
out.append(B[j])
j+=1
return out
def mergeSort(L):
print(f"Before the Sort: {L}")
if len(L) < 2:
return L[:]
else:
mid = len(L)//2
Left = mergeSort(L[:mid])
Right = mergeSort(L[mid:])
return merge(Left,Right)
L = [1,7,4,2,55,89898,0]
print(mergeSort(L))
|
import FWCore.ParameterSet.Config as cms
#track match
from TrackingTools.TransientTrack.TransientTrackBuilder_cfi import *
from SimTracker.TrackAssociation.trackMCMatchSequence_cff import *
# define post-reco generator sequence
postreco_generator = cms.Sequence(trackMCMatchSequence)
|
from memd.gulp.Gulp import Gulp
class Phonon(Gulp):
'''This class allows phonon calculations using traditional molecular mechanics potentials.'''
kpointMesh = ''
dosAndDispersionFilename = ""
broadenDos = False
projectDos = ''
def __init__(self, **kwds):
Gulp.__init__(self, **kwds)
for k, v in kwds.iteritems():
setattr(self, k, v)
self.runTypeIdentifier='phonon'
def writeKeywords(self, visitor):
keywords=[]
keywords+=visitor.writePhononKeywords(self)
keywords+=visitor.writeGulpKeywords(self)
return keywords
def writeOptions(self, visitor):
options=''
gopts = visitor.writeGulpOptions(self)
options+=gopts
options+=visitor.writePhononOptions(self)
return options
def identifySettings( self, visitor):
return visitor.writePhononSettings(self)
|
#!/usr/bin/env python
import os, re, sys, subprocess, plistlib
import eclim
from util import caret_position
def call_eclim(project, file, line, offset, applied_correction=None):
eclim.update_java_src(project, file)
correct_cmd = "$ECLIM -command java_correct \
-p %s \
-f %s \
-l %i \
-o %i \
-e utf-8 " % (project, file, line, offset)
if applied_correction != None:
correct_cmd += " -a %i" % (applied_correction)
out = eclim.call_eclim(correct_cmd)
return out
def show_corrections_window(corrections):
options = {"corrections": [dict([("message",m),("number", x+1)])
for x, m in enumerate(corrections)]}
path = os.path.join(os.path.dirname(sys.argv[0]), "corrections.nib")
cmd = eclim.DIALOG + ' -cm "' + path + '"'
popen = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,shell=True)
out, err = popen.communicate(plistlib.writePlistToString(options))
out = plistlib.readPlistFromString(out)
if "result" not in out:
return None
return int(out["result"]["returnArgument"])-1
def to_list(corrections):
re1 = re.compile("^(\d+)\.\d+:(.*)")
result = []
corrections = corrections.splitlines()
for l in corrections:
match1 = re1.match(l)
if match1:
result.append(match1.group(2).strip())
return result
def correction_command():
project, file = eclim.get_context()
# we cannot read the code from TM via stdin, as it will not have
# the correct line endings when editing windows files (it will just have \n)
#code = sys.stdin.read()
# so we read from disk
with open(os.environ["TM_FILEPATH"]) as f:
code = f.read()
pos = caret_position(code)
line = int(os.environ['TM_LINE_NUMBER'])
corrections = call_eclim(project, file, line, pos)
corrections = to_list(corrections)
if corrections:
correction_to_apply = show_corrections_window(corrections)
else: correction_to_apply = None
if correction_to_apply != None:
new_code = call_eclim(project, file, line, pos, correction_to_apply)
if new_code:
return new_code
return code
if __name__ == '__main__':
out = correction_command()
print(out)
|
""" Interpreter-level implementation of array, exposing ll-structure
to app-level with appropriate interface
"""
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty_w
from rpython.rtyper.lltypesystem import lltype, rffi
from pypy.interpreter.error import OperationError, oefmt
from pypy.module._rawffi.interp_rawffi import segfault_exception
from pypy.module._rawffi.interp_rawffi import W_DataShape, W_DataInstance
from pypy.module._rawffi.interp_rawffi import unwrap_value, wrap_value
from pypy.module._rawffi.interp_rawffi import TYPEMAP
from pypy.module._rawffi.interp_rawffi import size_alignment
from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length
from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr
from rpython.rlib.rarithmetic import r_uint
from rpython.rlib import rgc, clibffi
class W_Array(W_DataShape):
def __init__(self, basicffitype, size):
# A W_Array represents the C type '*T', which can also represent
# the type of pointers to arrays of T. So the following fields
# are used to describe T only. It is 'basicffitype' possibly
# repeated until reaching the length 'size'.
self.basicffitype = basicffitype
self.size = size
self.alignment = size_alignment(basicffitype)[1]
def allocate(self, space, length, autofree=False):
if autofree:
return W_ArrayInstanceAutoFree(space, self, length)
return W_ArrayInstance(space, self, length)
def get_basic_ffi_type(self):
return self.basicffitype
@unwrap_spec(length=int, autofree=bool)
def descr_call(self, space, length, w_items=None, autofree=False):
result = self.allocate(space, length, autofree)
if not space.is_none(w_items):
items_w = space.unpackiterable(w_items)
iterlength = len(items_w)
if iterlength > length:
raise oefmt(space.w_ValueError,
"too many items for specified array length")
for num in range(iterlength):
w_item = items_w[num]
unwrap_value(space, write_ptr, result.ll_buffer, num,
self.itemcode, w_item)
return result
def descr_repr(self, space):
return space.newtext("<_rawffi.Array '%s' (%d, %d)>" % (self.itemcode,
self.size,
self.alignment))
@unwrap_spec(address=r_uint, length=int)
def fromaddress(self, space, address, length):
return W_ArrayInstance(space, self, length, address)
PRIMITIVE_ARRAY_TYPES = {}
for _code in TYPEMAP:
PRIMITIVE_ARRAY_TYPES[_code] = W_Array(TYPEMAP[_code],
size_alignment(TYPEMAP[_code])[0])
PRIMITIVE_ARRAY_TYPES[_code].itemcode = _code
ARRAY_OF_PTRS = PRIMITIVE_ARRAY_TYPES['P']
def descr_new_array(space, w_type, w_shape):
return unpack_shape_with_length(space, w_shape)
W_Array.typedef = TypeDef(
'Array',
__new__ = interp2app(descr_new_array),
__call__ = interp2app(W_Array.descr_call),
__repr__ = interp2app(W_Array.descr_repr),
fromaddress = interp2app(W_Array.fromaddress),
size_alignment = interp2app(W_Array.descr_size_alignment)
)
W_Array.typedef.acceptable_as_base_class = False
class W_ArrayInstance(W_DataInstance):
def __init__(self, space, shape, length, address=r_uint(0)):
memsize = shape.size * length
# For W_ArrayInstances that are used as the result value of a
# function call, ffi_call() writes 8 bytes into it even if the
# function's result type asks for less.
memsize = clibffi.adjust_return_size(memsize)
W_DataInstance.__init__(self, space, memsize, address)
self.length = length
self.shape = shape
def descr_repr(self, space):
addr = rffi.cast(lltype.Unsigned, self.ll_buffer)
return space.newtext("<_rawffi array %x of length %d>" % (addr,
self.length))
# This only allows non-negative indexes. Arrays of shape 'c' also
# support simple slices.
def setitem(self, space, num, w_value):
if not self.ll_buffer:
raise segfault_exception(space, "setting element of freed array")
if num >= self.length or num < 0:
raise OperationError(space.w_IndexError, space.w_None)
unwrap_value(space, write_ptr, self.ll_buffer, num,
self.shape.itemcode, w_value)
def descr_setitem(self, space, w_index, w_value):
try:
num = space.int_w(w_index)
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
self.setslice(space, w_index, w_value)
else:
self.setitem(space, num, w_value)
def getitem(self, space, num):
if not self.ll_buffer:
raise segfault_exception(space, "accessing elements of freed array")
if num >= self.length or num < 0:
raise OperationError(space.w_IndexError, space.w_None)
return wrap_value(space, read_ptr, self.ll_buffer, num,
self.shape.itemcode)
def descr_getitem(self, space, w_index):
try:
num = space.int_w(w_index)
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
return self.getslice(space, w_index)
else:
return self.getitem(space, num)
def getlength(self, space):
return space.newint(self.length)
@unwrap_spec(num=int)
def descr_itemaddress(self, space, num):
itemsize = self.shape.size
ptr = rffi.ptradd(self.ll_buffer, itemsize * num)
return space.newint(rffi.cast(lltype.Unsigned, ptr))
def getrawsize(self):
itemsize = self.shape.size
return itemsize * self.length
def decodeslice(self, space, w_slice):
if not space.isinstance_w(w_slice, space.w_slice):
raise oefmt(space.w_TypeError, "index must be int or slice")
letter = self.shape.itemcode
if letter != 'c':
raise oefmt(space.w_TypeError, "only 'c' arrays support slicing")
w_start = space.getattr(w_slice, space.newtext('start'))
w_stop = space.getattr(w_slice, space.newtext('stop'))
w_step = space.getattr(w_slice, space.newtext('step'))
if space.is_w(w_start, space.w_None):
start = 0
else:
start = space.int_w(w_start)
if space.is_w(w_stop, space.w_None):
stop = self.length
else:
stop = space.int_w(w_stop)
if not space.is_w(w_step, space.w_None):
step = space.int_w(w_step)
if step != 1:
raise oefmt(space.w_ValueError, "no step support")
if not (0 <= start <= stop <= self.length):
raise oefmt(space.w_ValueError, "slice out of bounds")
if not self.ll_buffer:
raise segfault_exception(space, "accessing a freed array")
return start, stop
def getslice(self, space, w_slice):
start, stop = self.decodeslice(space, w_slice)
ll_buffer = self.ll_buffer
result = [ll_buffer[i] for i in range(start, stop)]
return space.newbytes(''.join(result))
def setslice(self, space, w_slice, w_value):
start, stop = self.decodeslice(space, w_slice)
value = space.bytes_w(w_value)
if start + len(value) != stop:
raise oefmt(space.w_ValueError, "cannot resize array")
ll_buffer = self.ll_buffer
for i in range(len(value)):
ll_buffer[start + i] = value[i]
W_ArrayInstance.typedef = TypeDef(
'ArrayInstance',
__repr__ = interp2app(W_ArrayInstance.descr_repr),
__setitem__ = interp2app(W_ArrayInstance.descr_setitem),
__getitem__ = interp2app(W_ArrayInstance.descr_getitem),
__len__ = interp2app(W_ArrayInstance.getlength),
buffer = GetSetProperty(W_ArrayInstance.getbuffer),
shape = interp_attrproperty_w('shape', W_ArrayInstance),
free = interp2app(W_ArrayInstance.free),
byptr = interp2app(W_ArrayInstance.byptr),
itemaddress = interp2app(W_ArrayInstance.descr_itemaddress),
)
W_ArrayInstance.typedef.acceptable_as_base_class = False
class W_ArrayInstanceAutoFree(W_ArrayInstance):
def __init__(self, space, shape, length):
W_ArrayInstance.__init__(self, space, shape, length, 0)
@rgc.must_be_light_finalizer
def __del__(self):
if self.ll_buffer:
self._free()
W_ArrayInstanceAutoFree.typedef = TypeDef(
'ArrayInstanceAutoFree',
__repr__ = interp2app(W_ArrayInstance.descr_repr),
__setitem__ = interp2app(W_ArrayInstance.descr_setitem),
__getitem__ = interp2app(W_ArrayInstance.descr_getitem),
__len__ = interp2app(W_ArrayInstance.getlength),
buffer = GetSetProperty(W_ArrayInstance.getbuffer),
shape = interp_attrproperty_w('shape', W_ArrayInstance),
byptr = interp2app(W_ArrayInstance.byptr),
itemaddress = interp2app(W_ArrayInstance.descr_itemaddress),
)
W_ArrayInstanceAutoFree.typedef.acceptable_as_base_class = False
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kombu import exceptions as kombu_exc
import six
from taskflow import exceptions as excp
from taskflow import logging
LOG = logging.getLogger(__name__)
class TypeDispatcher(object):
"""Receives messages and dispatches to type specific handlers."""
def __init__(self, type_handlers):
self._handlers = dict(type_handlers)
self._requeue_filters = []
def add_requeue_filter(self, callback):
"""Add a callback that can *request* message requeuing.
The callback will be activated before the message has been acked and
it can be used to instruct the dispatcher to requeue the message
instead of processing it. The callback, when called, will be provided
two positional parameters; the first being the message data and the
second being the message object. Using these provided parameters the
filter should return a truthy object if the message should be requeued
and a falsey object if it should not.
"""
if not six.callable(callback):
raise ValueError("Requeue filter callback must be callable")
self._requeue_filters.append(callback)
def _collect_requeue_votes(self, data, message):
# Returns how many of the filters asked for the message to be requeued.
requeue_votes = 0
for i, cb in enumerate(self._requeue_filters):
try:
if cb(data, message):
requeue_votes += 1
except Exception:
LOG.exception("Failed calling requeue filter %s '%s' to"
" determine if message %r should be requeued.",
i + 1, cb, message.delivery_tag)
return requeue_votes
def _requeue_log_error(self, message, errors):
# TODO(harlowja): Remove when http://github.com/celery/kombu/pull/372
# is merged and a version is released with this change...
try:
message.requeue()
except errors as exc:
# This was taken from how kombu is formatting its messages
# when its reject_log_error or ack_log_error functions are
# used so that we have a similar error format for requeuing.
LOG.critical("Couldn't requeue %r, reason:%r",
message.delivery_tag, exc, exc_info=True)
else:
LOG.debug("AMQP message %r requeued.", message.delivery_tag)
def _process_message(self, data, message, message_type):
handler = self._handlers.get(message_type)
if handler is None:
message.reject_log_error(logger=LOG,
errors=(kombu_exc.MessageStateError,))
LOG.warning("Unexpected message type: '%s' in message"
" %r", message_type, message.delivery_tag)
else:
if isinstance(handler, (tuple, list)):
handler, validator = handler
try:
validator(data)
except excp.InvalidFormat as e:
message.reject_log_error(
logger=LOG, errors=(kombu_exc.MessageStateError,))
LOG.warn("Message: %r, '%s' was rejected due to it being"
" in an invalid format: %s",
message.delivery_tag, message_type, e)
return
message.ack_log_error(logger=LOG,
errors=(kombu_exc.MessageStateError,))
if message.acknowledged:
LOG.debug("AMQP message %r acknowledged.",
message.delivery_tag)
handler(data, message)
else:
message.reject_log_error(logger=LOG,
errors=(kombu_exc.MessageStateError,))
def on_message(self, data, message):
"""This method is called on incoming messages."""
LOG.debug("Got message: %r", message.delivery_tag)
if self._collect_requeue_votes(data, message):
self._requeue_log_error(message,
errors=(kombu_exc.MessageStateError,))
else:
try:
message_type = message.properties['type']
except KeyError:
message.reject_log_error(
logger=LOG, errors=(kombu_exc.MessageStateError,))
LOG.warning("The 'type' message property is missing"
" in message %r", message.delivery_tag)
else:
self._process_message(data, message, message_type)
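# A minimal usage sketch (hypothetical handler and message objects, following
# the add_requeue_filter contract documented above):
#
#   def on_notify(data, message):
#       LOG.info("notify payload: %s", data)
#
#   dispatcher = TypeDispatcher({'NOTIFY': on_notify})
#   # requeue (rather than ack) any message whose payload asks for a retry
#   dispatcher.add_requeue_filter(lambda data, message: data.get('retry_later'))
#   # a kombu consumer would then be wired with callbacks=[dispatcher.on_message]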
|
from planner import *
from othertools import *
import matplotlib.pyplot as plt
def main():
scores_t = readfile('rq2_TimeLIME.csv')
scores_f = readfile('rq2_LIME.csv')
scores_x = readfile('rq2_XTREE.csv')
scores_alve = readfile('rq2_Alves.csv')
scores_shat = readfile('rq2_Shat.csv')
scores_oliv = readfile('rq2_Oliv.csv')
scores_rw = readfile('rq2_Random.csv')
bcs_t = readfile('rq3_TimeLIME.csv')
bcs_f = readfile('rq3_LIME.csv')
bcs_x = readfile('rq3_XTREE.csv')
bcs_alve = readfile('rq3_Alves.csv')
bcs_shat = readfile('rq3_Shat.csv')
bcs_oliv = readfile('rq3_Oliv.csv')
bcs_rw = readfile('rq3_Random.csv')
list1 = [scores_t,scores_f,scores_x,scores_alve,scores_shat,scores_oliv,scores_rw]
list2 = [bcs_t,bcs_f,bcs_x,bcs_alve,bcs_shat,bcs_oliv,bcs_rw]
names = ['TimeLIME','LIME','XTREE','Alves','Shatnawi','Oliveira','Random']
results=[]
# use a distinct outer index: the inner loops below rebind i and j
for idx in range(len(names)):
scores = list1[idx]
bcs = list2[idx]
dummy = []
N = len(scores)
for i in range(0, len(scores)):
temp = 0
for j in range(0, len(scores[i])):
temp -= (bcs[i][j] * scores[i][j])
total = -np.sum(bcs[i])
dummy.append(np.round(temp / total, 3))
print(names[idx], dummy)
results.append(dummy)
return results
if __name__ == "__main__":
main()
|
import requests
from django.utils.translation import ugettext_lazy as _
from payments import PaymentStatus
from payments import get_payment_model
from django.shortcuts import get_object_or_404
from django.http import JsonResponse
from django.conf import settings
import hashlib
def isset(data, columns):
# returns True if any required column is missing or empty (triggers error -8 below)
for column in columns:
if not data.get(column, None):
return True
return False
def order_load(payment_id):
if int(payment_id) > 1000000000:
return None
payment = get_object_or_404(get_payment_model(), id = int(payment_id))
return payment
def click_secret_key():
PAYMENT_VARIANTS = settings.PAYMENT_VARIANTS
_click = PAYMENT_VARIANTS['click']
secret_key = _click[1]['secret_key']
return secret_key
def click_webhook_errors(request):
click_trans_id = request.POST.get('click_trans_id', None)
service_id = request.POST.get('service_id', None)
click_paydoc_id = request.POST.get('click_paydoc_id', None)
order_id = request.POST.get('merchant_trans_id', None)
amount = request.POST.get('amount', None)
action = request.POST.get('action', None)
error = request.POST.get('error', None)
error_note = request.POST.get('error_note', None)
sign_time = request.POST.get('sign_time', None)
sign_string = request.POST.get('sign_string', None)
merchant_prepare_id = request.POST.get('merchant_prepare_id', None) if action != None and action == '1' else ''
if isset(request.POST, ['click_trans_id', 'service_id', 'click_paydoc_id', 'amount', 'action', 'error', 'error_note', 'sign_time', 'sign_string']) or (
action == '1' and isset(request.POST, ['merchant_prepare_id'])):
return {
'error' : '-8',
'error_note' : _('Error in request from click')
}
signString = '{}{}{}{}{}{}{}{}'.format(
click_trans_id, service_id, click_secret_key(), order_id, merchant_prepare_id, amount, action, sign_time
)
encoder = hashlib.md5(signString.encode('utf-8'))
signString = encoder.hexdigest()
if signString != sign_string:
return {
'error' : '-1',
'error_note' : _('SIGN CHECK FAILED!')
}
if action not in ['0', '1']:
return {
'error' : '-3',
'error_note' : _('Action not found')
}
order = order_load(order_id)
if not order:
return {
'error' : '-5',
'error_note' : _('User does not exist')
}
if abs(float(amount) - float(order.total)) > 0.01:
return {
'error' : '-2',
'error_note' : _('Incorrect parameter amount')
}
if order.status == PaymentStatus.CONFIRMED:
return {
'error' : '-4',
'error_note' : _('Already paid')
}
if action == '1':
if order_id != merchant_prepare_id:
return {
'error' : '-6',
'error_note' : _('Transaction not found')
}
if order.status == PaymentStatus.REJECTED or int(error) < 0:
return {
'error' : '-9',
'error_note' : _('Transaction cancelled')
}
return {
'error' : '0',
'error_note' : 'Success'
}
def prepare(request):
order_id = request.POST.get('merchant_trans_id', None)
result = click_webhook_errors(request)
order = order_load(order_id)
if result['error'] == '0':
order.status = PaymentStatus.WAITING
order.save()
result['click_trans_id'] = request.POST.get('click_trans_id', None)
result['merchant_trans_id'] = request.POST.get('merchant_trans_id', None)
result['merchant_prepare_id'] = request.POST.get('merchant_trans_id', None)
result['merchant_confirm_id'] = request.POST.get('merchant_trans_id', None)
return JsonResponse(result)
def complete(request):
order_id = request.POST.get('merchant_trans_id', None)
order = order_load(order_id)
result = click_webhook_errors(request)
if request.POST.get('error', None) != None and int(request.POST.get('error', None)) < 0:
order.status = PaymentStatus.REJECTED
order.save()
if result['error'] == '0':
order.status = PaymentStatus.CONFIRMED
order.save()
result['click_trans_id'] = request.POST.get('click_trans_id', None)
result['merchant_trans_id'] = request.POST.get('merchant_trans_id', None)
result['merchant_prepare_id'] = request.POST.get('merchant_prepare_id', None)
result['merchant_confirm_id'] = request.POST.get('merchant_prepare_id', None)
return JsonResponse(result) |
from datetime import datetime
from dateutil.parser import parse
from django.db.models import Q
from events.models import Events
class EventsRepository:
def __init__(self):
pass
def find_all_cyclic_events_for_given_root(self, root):
return Events.objects.filter(Q(root=root) | Q(pk=root.id)).all()
def find_events_for_given_with_respect_to_filters(self, request, user):
filters = dict(request.request.GET)
query = Events.objects
date_from = filters.get('date_from', None)
date_to = filters.get('date_to', None)
name_contains = filters.get('name_contains', None)
tags = filters.get('tags', None)
past_events = filters.get('past_events', None)
price = filters.get('price', None)
place = filters.get('place', None)
user_signed_up = filters.get('user_signed_up', None)
user_is_assigned_lecturer = filters.get('user_is_assigned_lecturer', None)
only_not_cyclical_and_roots = filters.get('only_not_cyclical_and_roots', None)
if price is not None:
query = query.filter(price__lte=float(price[0]))
if place:
query = query.filter(place__name__icontains=place[0])
if name_contains:
query = query.filter(name__icontains=name_contains[0])
if user_signed_up is not None:
user_signed_up = user_signed_up[0] == 'true'
if user_signed_up:
query = query.filter(participants=user.id)
if user_is_assigned_lecturer is not None:
user_is_assigned_lecturer = user_is_assigned_lecturer[0] == 'true'
if user_is_assigned_lecturer:
query = query.filter(lecturers=user.id)
if date_from or date_to:
if date_from and date_to:
datetime_start = parse(date_from[0])
datetime_end = parse(date_to[0])
query = query.filter(start__gte=datetime_start, end__lte=datetime_end)
elif date_from:
datetime_start = parse(date_from[0])
date_filter = Q(start__date__gte=datetime_start)
time_filter = Q(start__time__gte=datetime_start)
query = query.filter(date_filter & time_filter)
else:
datetime_end = parse(date_to[0])
date_filter = Q(end__date__lte=datetime_end)
time_filter = Q(end__time__lte=datetime_end)
query = query.filter(date_filter & time_filter)
if tags:
        query = query.filter(tags__in=tags)
    if past_events and past_events[0] == 'true':
now = datetime.now()
date_filter = Q(start__date__lte=now)
time_filter = Q(start__time__lte=now)
query = query.filter(date_filter & time_filter)
if only_not_cyclical_and_roots:
query = query.filter(root__end__isnull=True)
return query.all()
def get_event_by_id(self, event_id):
event = Events.objects.get(id=event_id)
return event
def save(self, event_to_save):
event_to_save.save()
return event_to_save
def save_list_of_events(self, events_to_save):
saved_events = []
for event in events_to_save:
saved_events.append(event.save())
return saved_events
    def get_events_for_given_place_and_time_bracket(self, place_id, start_datetime, end_datetime):
place_filter = Q(place_id=place_id)
        # Two ranges [start1, end1] and [start2, end2] overlap when
        # end1 >= start2 and start1 < end2; here range 1 is the stored event and
        # range 2 is the requested bracket (an event that merely starts exactly
        # at end_datetime does not count)
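        # Example (hypothetical times): an event 10:00-11:00 overlaps the
        # bracket 10:30-12:00, since 11:00 >= 10:30 and 10:00 < 12:00.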
first_time_filter = Q(end__gte=start_datetime)
second_time_filter = Q(start__lt=end_datetime)
# it is not returned right away for debugging purposes
events = Events.objects.filter(place_filter & first_time_filter & second_time_filter)
return events
|
#!/usr/bin/env python
# coding: utf-8
from flask import Flask
from flask import request
from flask import render_template
import numpy as np
from matplotlib import pyplot as plt
import os
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
import en_core_web_sm
from heapq import nlargest
app = Flask(__name__)
@app.route('/')
def my_form():
return render_template("index.html")
@app.route('/', methods=['POST'])
def my_form_post():
data = request.form['message']
print(data)
stopwords = list(STOP_WORDS)
doc1=data
nlp = en_core_web_sm.load()
docx = nlp(doc1)
    # build a word-frequency table over non-stopword tokens
    word_freq = {}
    for word in docx:
        if word.text not in stopwords:
            if word.text not in word_freq:
                word_freq[word.text] = 1
            else:
                word_freq[word.text] += 1
    # normalize frequencies by the most common word
    max_freq = max(word_freq.values())
    for word in word_freq:
        word_freq[word] = word_freq[word] / max_freq
    sentence_list = [sentence for sentence in docx.sents]
sentence_scores={}
for sentence in sentence_list:
for word in sentence:
if word.text in word_freq.keys():
if len(sentence.text.split(' ')) < 30:
if sentence not in sentence_scores.keys():
sentence_scores[sentence] = word_freq[word.text]
else:
sentence_scores[sentence] += word_freq[word.text]
    sum_sentences = nlargest(8, sentence_scores, key=sentence_scores.get)
final_sentences = [ w.text for w in sum_sentences ]
summary = ' '.join(final_sentences)
print(len(doc1))
print(len(summary))
return render_template('output.html', message = summary)
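# Example request once the app runs locally (assuming Flask's default port 5000):
#   curl -X POST -d "message=Some long article text..." http://127.0.0.1:5000/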
if __name__ == '__main__':
app.run()
|
#
# coding:utf-8
#
'''
'''
__author__ = 'JyHu'
from enum import Enum
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
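# Enum() auto-numbers members starting from 1, so Jan => 1 ... Dec => 12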
for name, member in Month.__members__.items():
print('%s => %s , %s' % (name, member, member.value)) |
# Vendor of company mno
class GeomStats:
def print_stats(obj):
print('Area : {0}'.format(obj.area()))
print('Perimeter: {0}'.format(obj.perimeter())) |
import falcon
class AutoresResource:
def on_get(self, req, resp):
"""Handles GET requests"""
autores = {
'autores': (
"Creado Por- Oscar Rubio Garcia"
)
}
resp.media = autores
api = falcon.API()
api.add_route('/', AutoresResource()) |
'''
Created on Dec 25, 2011
@author: ppa
'''
from ultrafinance.model import Type, Action, Order
from ultrafinance.backTest.tickSubscriber.strategies.baseStrategy import BaseStrategy
from ultrafinance.backTest.constant import CONF_STRATEGY_PERIOD, CONF_INIT_CASH
import logging
LOG = logging.getLogger()
class PeriodStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, configDict):
''' constructor '''
super(PeriodStrategy, self).__init__("periodStrategy")
self.configDict = configDict
assert int(configDict[CONF_STRATEGY_PERIOD]) >= 1
self.perAmount = max(1, round(int(configDict[CONF_INIT_CASH]) / 100)) #buy 1/100 per time
self.period = int(configDict[CONF_STRATEGY_PERIOD])
self.symbols = None
self.counter = 0
def increaseAndCheckCounter(self):
''' increase counter by one and check whether a period is end '''
self.counter += 1
self.counter %= self.period
        return self.counter == 0
def tickUpdate(self, tickDict):
''' consume ticks '''
assert self.symbols
assert self.symbols[0] in tickDict.keys()
symbol = self.symbols[0]
tick = tickDict[symbol]
if self.increaseAndCheckCounter():
self.placeOrder(Order(accountId = self.accountId,
action = Action.BUY,
type = Type.MARKET,
symbol = symbol,
price = tick.close,
share = self.perAmount / float(tick.close)))
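# A usage sketch (hypothetical config values): PeriodStrategy({CONF_STRATEGY_PERIOD: 5,
# CONF_INIT_CASH: 10000}) computes perAmount = max(1, round(10000 / 100)) = 100 and
# buys that much cash worth of the first subscribed symbol on every 5th tick.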
|
DYNAMIC_RESOLUTION = 8 # ppp, pp, p, mp, mf, f, ff, fff
MEASURE_RESOLUTION = 5 # show every fifth measure number
GAP = 2
STROKE_WIDTH = 16
TEXT_HEIGHT = STROKE_WIDTH
Y_INDENT = STROKE_WIDTH / 2
SINGLE_DIGIT = '0.1875em' # 3/16 em
DOUBLE_DIGIT = '0.3750em' # 6/16 em
TRIPLE_DIGIT = '0.5625em' # 9/16 em
|
class Solution:
def FirstNotRepeatingChar(self, s):
# write code here
d={}
for i in s:
if i in d:
d[i]=d[i]+1
else:
d[i]=1
        for j in range(len(s)):
            if d[s[j]]==1:
                return j
        return -1
if __name__ == '__main__':
s=Solution()
a=s.FirstNotRepeatingChar("google")
print(a) |
import math
sc = input()
s = sc.split()
#num = [int(x) for x in s]
num = list(map(int,s))
ave = sum(num)/len(num)
# population variance: mean of squared deviations from the mean
avf = sum((i-ave)**2 for i in num)/len(num)
avb = math.sqrt(avf)
num.sort()
if len(num)%2!=0:
    mid = num[len(num)//2]
else:
    mid = (num[len(num)//2] + num[len(num)//2-1])/2
print("mean {} variance {} standard deviation {} median {}".format(ave,avf,avb,mid))
#!/usr/bin/env python
#python2
import nmap
import time
import datetime
import sys
nm = nmap.PortScanner()
print('----------------------------------------------------')
print(' ')
time.sleep(1)
#how_long = int(raw_input('How many minutes do you want to scan for? '))
#repeat = how_long*2 # because 2 sweeps per minute
time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print('Scan started at %s' % time_now)
def sweep():
    # append results to the log file directly instead of redirecting and then
    # permanently closing sys.stdout
    with open("/home/pi/code/logs/nmap_all.txt", "a") as log:
        nm.scan(hosts='10.0.0.16', arguments='-sn')
        hosts_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]
        for host, status in hosts_list:
            log.write('{0}:{1} @ {2}\n'.format(host, status, datetime.datetime.now().strftime("%H:%M:%S")))
        log.write('----------------------------------------------------\n')
for i in range(144):
sweep()
time.sleep(300)
|
import pickle
# Define the Bird class
from PickleIO.Bird import Bird
def main():
birds = []
# Create a list of Bird objects
birds.append(Bird("Huey", 13, "Duck"))
birds.append(Bird("Dewey", 10, "Duck"))
birds.append(Bird("Louie", 12, "Duck"))
birds.append(Bird("Jerry", 15, "Goose"))
birds.append(Bird("Quacky", 7, "Goose"))
print("Writing these objects:")
print(birds)
for each in birds:
print(each)
# Open the output file in binary/write mode
# and close it automatically when done
with open('birds.pickled', 'wb') as out_file:
# Pickle the list and write it to the output file
pickle.dump(birds, out_file)
main()
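# A minimal counterpart sketch for reading the list back (assumes the same
# Bird class is importable when unpickling):
#   with open('birds.pickled', 'rb') as in_file:
#       loaded_birds = pickle.load(in_file)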
|
#!/usr/bin/env python
#This script calculates the actual retention index
#for each character (codon)
#The summary file is for the mean retention index of
#all the codons
#Input:
#1 - input file of codon score table
#2 - output file
#3 - output summary file
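#Retention index per character: RI = (g - s) / (g - m), where g = n_small,
#s = origin + loss + root_loss and m = 1; rows with n_small == 1 are skipped
#to avoid dividing by zero.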
import sys
import numpy as np
infile = open(sys.argv[1],"r")
outfile = open(sys.argv[2], "w")
summaryFile = open(sys.argv[3],"a")
header = infile.readline()
RIs = [] #List of retention indices
for line in infile:
gene,origin,loss,root_loss,n_small,n_total,percent_total,p = line.split("\t")
if float(n_small) == 1:
continue
g = float(n_small)
s = float(origin) + float(loss) + float(root_loss)
m = 1
retention_index = float(g - s) / (g - m)
RIs.append(retention_index)
outfile.write(gene + "\t" + str(retention_index) + "\n")
RIs = np.array(RIs)
mean = np.mean(RIs)
sd = np.std(RIs)
print(mean)
print(sd)
summaryFile.write(sys.argv[1] + "," + str(round(mean,5)) + "," + str(round(sd,5)) + "\n")
infile.close()
outfile.close()
summaryFile.close()
|
from numbers import Number
from typing import Mapping
import numpy as np
import regex
_FLOAT_REGEX = r"[\-+]?\d+(?:\.[\d]+)?"
_LINE_REGEX = regex.compile(
rf"^\s*([^\s]+)(?:\s+({_FLOAT_REGEX})|(?:\s+#(\d+(?:,\d+)*)[:=\s]+({_FLOAT_REGEX}))+)\s*$")
def format_loss_layers(loss_layers):
def _weight_to_string(weight):
if isinstance(weight, Number):
return str(weight)
else:
return " ".join("#" + ",".join(map(str, n)) + f":{w}" for n, w in weight.items())
return "\n".join(layer_name + " " + _weight_to_string(weight) for layer_name, weight in loss_layers.items())
def parse_loss_layers(string):
def _parse_num(num_str):
try:
return int(num_str)
except ValueError:
return float(num_str)
loss_layers = {}
errors = []
for line_no, line in enumerate(string.splitlines()):
match = _LINE_REGEX.match(line)
if match is None:
errors.append(f"Line {line_no + 1}: Invalid syntax")
continue
layer_name = match[1]
if match[2] is not None:
weight = _parse_num(match[2])
loss_layers[layer_name] = weight
else:
neuron_weights = {tuple(int(dim) for dim in n.split(",")): _parse_num(w)
for n, w in zip(match.captures(3), match.captures(4))}
loss_layers[layer_name] = neuron_weights
return loss_layers, errors
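# Round-trip sketch (hypothetical layer names):
#   parse_loss_layers("conv1 0.5\nfc2 #0,1:2 #3,4:0.25")
# yields ({'conv1': 0.5, 'fc2': {(0, 1): 2, (3, 4): 0.25}}, []), and
# format_loss_layers() on that dict reproduces the same text.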
def validate_loss_layers(loss_layers, model):
errors = []
layer_dict = {layer.name: layer for layer in model.keras_model.layers}
for line_no, (layer_name, user_weights) in enumerate(loss_layers.items()):
if layer_name not in layer_dict.keys():
errors.append(f"Line {line_no + 1}: Unknown layer {layer_name}")
elif isinstance(user_weights, Mapping):
layer_shape = np.array([comp for comp in layer_dict[layer_name].output_shape if comp is not None])
for neuron in user_weights.keys():
if len(layer_shape) != len(neuron):
errors.append(f"Line {line_no + 1}: Layer {layer_name} has dimension {len(layer_shape)}, "
f"you provided for {len(neuron)}")
elif any(neuron >= layer_shape):
errors.append(f"Line {line_no + 1}: Maximum neuron is " +
",".join(map(str, layer_shape - 1)) + ", you provided " +
",".join(map(str, neuron)))
return errors
|
N = int(input())
ans = 0
l, m = [], []
for _ in range(N):
s, t = input().split()
t = int(t)
l.append(s)
m.append(t)
X = input()
idx = l.index(X)
for i in range(idx+1, N):
ans += m[i]
print(ans)
|
class Obstacle:
taille=100
def __init__(self, x, y):
self.x=x
self.y=y
def toString(self):
return "["+str(self.x)+" ; "+str(self.y)+"]"
|
# day 22
import copy
# parse a reboot step into [on/off, min x, max x, min y, max y, min z, max z]
def process(s):
inst = [s[0], int(s[2]), int(s[3]), int(s[5]), int(s[6]), int(s[8]), int(s[9])]
return inst
def turn_on_off_cubes2(s):
global cubes, num_lit_cubes
for x in range(s[1], s[2] + 1):
if x > 50 or x < -50:
continue
for y in range(s[3], s[4] + 1):
if y > 50 or y < -50:
continue
for z in range(s[5], s[6] + 1):
if z > 50 or z < -50:
continue
if s[0] == 'on': # turn on
if ((x,y,z) not in cubes) or cubes[(x,y,z)] == 0:
num_lit_cubes += 1
cubes[(x,y,z)] = 1
else: # turn off
if (x,y,z) in cubes and cubes[(x,y,z)] == 1:
num_lit_cubes -= 1
cubes[(x,y,z)] = 0
def turn_on_off_cubes(s):
global cubes, num_lit_cubes
for x in range(s[1], s[2] + 1):
if x > 50 or x < -50:
continue
for y in range(s[3], s[4] + 1):
if y > 50 or y < -50:
continue
for z in range(s[5], s[6] + 1):
if z > 50 or z < -50:
continue
if (x,y,z) in cubes:
continue
if s[0] == 'on': # turn on
num_lit_cubes += 1
cubes[(x,y,z)] = 1
else: # turn off
cubes[(x,y,z)] = 0
# global variables
reboot_steps = []
num_lit_cubes = 0
# read input data
f = open('day22.txt','r')
for x in f:
x = x.strip()
x = x.replace('..', ' ')
x = x.replace('=', ' ')
x = x.replace(',', ' ')
x = x.split()
reboot_steps.append(process(x))
# part 1
"""
cubes = dict()
for step in range(len(reboot_steps) - 1, -1, -1):
turn_on_off_cubes(reboot_steps[step])
print(num_lit_cubes)
"""
# part 2
class region:
def __init__(self, value, xmin, xmax, ymin, ymax, zmin, zmax):
self.value = value
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.zmin = zmin
self.zmax = zmax
def turn_on_off_regions(s):
global regions, num_lit_cubes
# find region/s of new coordinates
new_regions = get_new_regions(s)
if new_regions != None:
# add new regions to global regions dict
for nr in new_regions:
regions.add(nr)
if nr.value == 'on':
lit = (nr.xmax - nr.xmin + 1) * (nr.ymax - nr.ymin + 1) * (nr.zmax - nr.zmin + 1)
num_lit_cubes += lit
def get_new_regions(s):
global regions, num_lit_cubes
new_regions = set()
reg = region(s[0], s[1], s[2], s[3], s[4], s[5], s[6])
new_regions.add(reg)
# update set of new_regions based on each region already claimed
for r in regions:
new_regions = trim_regions(new_regions, r)
#print(len(new_regions))
return new_regions
# nr_set = set of potential new regions
# r = region already claimed (needs to be removed from each item in nr_set)
def trim_regions(nr_set, r):
new_regions = set()
for nr in nr_set:
#check x coordinates
tempx = trim_along_x_axis(nr, r) #returns [set of regions not overlapping, overlapping region]
for item in tempx[0]:
new_regions.add(item)
if tempx[1] != None: # some overlap exists
tempy = trim_along_y_axis(tempx[1], r) #returns [set of regions not overlapping, overlapping region]
for item in tempy[0]:
new_regions.add(item)
if tempy[1] != None: #some overlap exists
tempz = trim_along_z_axis(tempy[1], r) #returns [set of regions not overlapping, overlapping region]
for item in tempz[0]:
new_regions.add(item)
#item tempz[1] overlaps the original 'r' region and should not be counted
return new_regions
# returns [set of regions not overlapping, overlapping region]
def trim_along_x_axis(nr, r):
no_overlap = set()
#nr completely enclosed by r
if (nr.xmax <= r.xmax) and (nr.xmin >= r.xmin):
return [no_overlap, nr]
# no overlap at all (nr completely to right/left of r)
elif (nr.xmax < r.xmin) or (nr.xmin > r.xmax):
no_overlap.add(nr)
return [no_overlap, None]
# partial overlap to the left of r
elif (nr.xmin < r.xmin) and (nr.xmax <= r.xmax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.xmax = r.xmin - 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.xmin = r.xmin
return [no_overlap, nr2]
# partial overlap to the right of r
elif (nr.xmin >= r.xmin) and (nr.xmax > r.xmax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.xmin = r.xmax + 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.xmax = r.xmax
return [no_overlap, nr2]
#nr completely engulfs r
elif (nr.xmin < r.xmin) and (nr.xmax > r.xmax):
nr1 = copy.deepcopy(nr) #left non-overlapping portion
nr1.xmax = r.xmin - 1
no_overlap.add(nr1)
nr2 = copy.deepcopy(nr) #right non-overlapping portion
nr2.xmin = r.xmax + 1
no_overlap.add(nr2)
        nr3 = copy.deepcopy(nr) #overlapping portion
nr3.xmin = r.xmin
nr3.xmax = r.xmax
return [no_overlap, nr3]
# returns [set of regions not overlapping, overlapping region]
def trim_along_y_axis(nr, r):
no_overlap = set()
#nr completely enclosed by r
if (nr.ymax <= r.ymax) and (nr.ymin >= r.ymin):
return [no_overlap, nr]
# no overlap at all (nr completely to right/left of r)
elif (nr.ymax < r.ymin) or (nr.ymin > r.ymax):
no_overlap.add(nr)
return [no_overlap, None]
# partial overlap to the left of r
elif (nr.ymin < r.ymin) and (nr.ymax <= r.ymax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.ymax = r.ymin - 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.ymin = r.ymin
return [no_overlap, nr2]
# partial overlap to the right of r
elif (nr.ymin >= r.ymin) and (nr.ymax > r.ymax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.ymin = r.ymax + 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.ymax = r.ymax
return [no_overlap, nr2]
#nr completely engulfs r
elif (nr.ymin < r.ymin) and (nr.ymax > r.ymax):
nr1 = copy.deepcopy(nr) #left non-overlapping portion
nr1.ymax = r.ymin - 1
no_overlap.add(nr1)
nr2 = copy.deepcopy(nr) #right non-overlapping portion
nr2.ymin = r.ymax + 1
no_overlap.add(nr2)
        nr3 = copy.deepcopy(nr) #overlapping portion
nr3.ymin = r.ymin
nr3.ymax = r.ymax
return [no_overlap, nr3]
# returns [set of regions not overlapping, overlapping region]
def trim_along_z_axis(nr, r):
no_overlap = set()
#nr completely enclosed by r
if (nr.zmax <= r.zmax) and (nr.zmin >= r.zmin):
return [no_overlap, nr]
# no overlap at all (nr completely to right/left of r)
elif (nr.zmax < r.zmin) or (nr.zmin > r.zmax):
no_overlap.add(nr)
return [no_overlap, None]
# partial overlap to the left of r
elif (nr.zmin < r.zmin) and (nr.zmax <= r.zmax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.zmax = r.zmin - 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.zmin = r.zmin
return [no_overlap, nr2]
# partial overlap to the right of r
elif (nr.zmin >= r.zmin) and (nr.zmax > r.zmax):
nr1 = copy.deepcopy(nr) #non-overlapping portion
nr1.zmin = r.zmax + 1
no_overlap.add(nr1)
        nr2 = copy.deepcopy(nr) #overlapping portion
nr2.zmax = r.zmax
return [no_overlap, nr2]
#nr completely engulfs r
elif (nr.zmin < r.zmin) and (nr.zmax > r.zmax):
nr1 = copy.deepcopy(nr) #left non-overlapping portion
nr1.zmax = r.zmin - 1
no_overlap.add(nr1)
nr2 = copy.deepcopy(nr) #right non-overlapping portion
nr2.zmin = r.zmax + 1
no_overlap.add(nr2)
        nr3 = copy.deepcopy(nr) #overlapping portion
nr3.zmin = r.zmin
nr3.zmax = r.zmax
return [no_overlap, nr3]
regions = set()
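# Process the reboot steps from last to first: the most recent step owns each
# cube, so any portion of an earlier step's cuboid that overlaps an already
# claimed region is trimmed away before its 'on' volume is counted.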
for step in range(len(reboot_steps) - 1, -1, -1):
turn_on_off_regions(reboot_steps[step])
print(step)
print(num_lit_cubes)
|
import json
import traceback
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from discussion.models import Discussion
from user.models import User
@csrf_exempt
def create(request):
res = {'code': 0, 'msg': 'success', 'data': {}}
if not {'user_id','content'}.issubset(request.POST.keys()):
return HttpResponse(json.dumps({'code': -1, 'msg': 'error-1|unexpected params!', 'data': []}))
try:
params = request.POST.dict()
dis=Discussion.objects.create(**params)
res['data']['discussion_id']=dis.id
    except Exception:
traceback.print_exc()
return HttpResponse(json.dumps({'code': -2, 'msg': 'error-2', 'data': []}))
return HttpResponse(json.dumps(res))
@csrf_exempt
def update(request):
res = {'code': 0, 'msg': 'success', 'data': []}
if not {'discussion_id','update'}.issubset(request.POST.keys()):
return HttpResponse(json.dumps({'code': -1, 'msg': 'error-1|unexpected params!', 'data': []}))
try:
Discussion.objects.filter(id=request.POST['discussion_id']).update(**json.loads(request.POST['update']))
    except Exception:
traceback.print_exc()
return HttpResponse(json.dumps({'code': -2, 'msg': 'error-2', 'data': []}))
return HttpResponse(json.dumps(res))
@csrf_exempt
def delete(request):
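    # soft delete: mark the row with status=0 instead of removing it;
    # list() below only returns rows with status=1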
res = {'code': 0, 'msg': 'success', 'data': []}
if not {'discussion_id'}.issubset(request.POST.keys()):
return HttpResponse(json.dumps({'code': -1, 'msg': 'error-1|unexpected params!', 'data': []}))
try:
Discussion.objects.filter(id=request.POST['discussion_id']).update(status=0)
    except Exception:
traceback.print_exc()
return HttpResponse(json.dumps({'code': -2, 'msg': 'error-2', 'data': []}))
return HttpResponse(json.dumps(res))
@csrf_exempt
def list(request):
res = {'code': 0, 'msg': 'success', 'data': {}}
try:
params = request.POST.dict()
        page=0
        size=20
        if 'page' in params:
            page=int(params['page'])
            params.pop('page')
        if 'size' in params:
            size=int(params['size'])
            params.pop('size')
params['status']=1
res['data']['count']=Discussion.objects.filter(**params).count()
res['data']['discussion']=[]
qset=Discussion.objects.filter(**params).order_by('-ctime')[page*size:(page+1)*size]
dynamics=json.loads(serializers.serialize("json", qset))
print(dynamics)
for dynamic in dynamics:
data_row=dynamic['fields']
data_row['dynamic_id']=dynamic['pk']
data_row['images']=json.loads(data_row['images'])
data_row['user_info'] = json.loads(serializers.serialize("json", User.objects.filter(id=dynamic['fields']['user_id'])))[0]['fields']
data_row['user_info'].pop('password')
# data_row['user_info'].pop('status')
# data_row['user_info'].pop('ctime')
data_row['user_info'].pop('mtime')
res['data']['discussion'].append(data_row)
    except Exception:
traceback.print_exc()
return HttpResponse(json.dumps({'code': -2, 'msg': 'error-2', 'data': []}))
return HttpResponse(json.dumps(res)) |
'''
Data processor for the CMOS count packets.
CMOS counts are processed into rates. The input packet contains up to 8 slots.
The counts are stored in a class-level (static) array that is shared across the
greenlets this runs in, so concurrent updates can clash; in practice that does
not matter and the computed rates come out correct.
input:
"type": "cmos_counts",
"timestamp": "yyyy-MM-ddTHH:mm:ss.SSSZ" //ORCA
"crate_num": 0, //ORCA
"slot_mask": 0xffff,
"channel_mask": [ 0xffffffff, ..., 0xffffffff ], //all 16 slots
"error_flags": 0x0000,
"counts": [ [ 0*0, ..., 0*31], ..., [ 7*0, ..., 7*31 ] ] //8 slots * 32 channels
output:
'key': 'cmos_rate',
'crate_num': cr,
'slot_num': slot,
'ts': ts,
    'v': {
'channel_mask': 0xffffffff,
'rate': [0,...]
}
'''
import gevent
import datetime
class CmosCountException(Exception):
def __init__(self, reason):
self.reason = reason
class CmosCount(gevent.Greenlet):
dat = [[[{'cnt': 0, 'ts': 0} for ch in range(32)] for sl in range(16)] for cr in range(19)]
    max_time = 10 # do not calculate rates from counts if the time between counts is more than this many seconds
def __init__(self, o, rqueue):
self.o = o
self.rqueue = rqueue
self.d = []
gevent.Greenlet.__init__(self)
def validate(self):
if 'timestamp' not in self.o:
raise CmosCountException('timestamp error')
        if 'crate_num' not in self.o or not 0 <= self.o['crate_num'] < 19:
raise CmosCountException('crate_num error')
if 'slot_mask' not in self.o or self.o['slot_mask'] > 0xffff:
raise CmosCountException('slot mask error')
if 'channel_mask' not in self.o or type(self.o['channel_mask']) is not list or len(self.o['channel_mask']) != 16:
raise CmosCountException('channel mask error')
if 'error_flags' not in self.o:
raise CmosCountException('error_flags missing')
if 'count' not in self.o:
            raise CmosCountException('count missing')
def process(self):
ts = float(datetime.datetime.strptime(self.o['timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ').strftime('%s.%f'))
slots = self.o['slot_mask']
sl_cnt = 0
cr = self.o['crate_num']
for slot in range(16):
if self.o['slot_mask'] >> slot & 0x1:
if not self.o['error_flags'] >> slot & 0x1:
ch_mask = self.o['channel_mask'][slot]
sl_channel_mask = 0
sl_rates = []
for ch in range(32):
#bit 31 in cmos counts means busy
rate = 0
if ch_mask >> ch & 0x1 and not self.o['count'][sl_cnt][ch] >> 31 & 0x1:
                            if 0 < ts - CmosCount.dat[cr][slot][ch]['ts'] < CmosCount.max_time:
                                # mask off the busy bit (bit 31) before differencing
                                rate = (self.o['count'][sl_cnt][ch] & 0x7fffffff) - CmosCount.dat[cr][slot][ch]['cnt']
                                if rate < 0: rate += 0x80000000 # the 31-bit counter wrapped since the last sample
                                rate /= ts - CmosCount.dat[cr][slot][ch]['ts']
sl_channel_mask |= 1 << ch
CmosCount.dat[cr][slot][ch]['cnt'] = self.o['count'][sl_cnt][ch] & 0x7fffffff
CmosCount.dat[cr][slot][ch]['ts'] = ts
sl_rates.append(rate)
if sl_channel_mask:
self.d.append({
'key': 'cmos_rate',
'crate_num': cr,
'slot_num': slot,
'ts': ts,
'v': {
'channel_mask': sl_channel_mask,
'rate': list(sl_rates)
}
})
#increment even if the error flag is set
sl_cnt += 1
def _run(self):
try:
self.validate()
except CmosCountException as e:
            print('CmosCount packet invalid: ' + e.reason)
return
self.process()
self.rqueue(self.d)
|
# If the numbers 1 to 5 are written out in words: one, two, three, four, five,
# then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
# If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words,
# how many letters would be used?
#
#import pdb #(debugger)
D = [0,3,3,5,4,4,3,5,5,4,3,6,6,8,8,7,7,9,8,8]
T = [0,3,6,6,5,5,5,7,6,6]
H = 7
Th = 8
sumOfLetter = 0
for i in range(1, 1000) :
#Access the lists based on i
#singles digit
    digit = i % 10
    #tens digit
    tens = (i % 100) // 10
    #hundreds digit
    hundreds = i // 100
    #pdb.set_trace() #(debugger callout)
    if hundreds != 0:
        sumOfLetter += D[hundreds] + H #D[hundreds] hundred
        if tens != 0 or digit != 0 : sumOfLetter += 3 #add "and"
    if tens == 0 or tens == 1 :
        sumOfLetter += D[(10 * tens) + digit]
    else : sumOfLetter += T[tens] + D[digit]
sumOfLetter += D[1] + Th
print(sumOfLetter)
|
import cloudpickle
import torch
from torchvision.utils import save_image
import numpy as np
import cv2
from GAN import Generator
# epoch of the checkpoint to load
point = 100
# define the model architecture
z_dim = 30
num_class = 49
G = Generator(z_dim = z_dim, num_class = num_class)
# load the checkpoint
checkpoint = torch.load('./checkpoint_cGAN/G_model_{}'.format(point), map_location=torch.device('cpu'))
# load the parameters into the Generator
G.load_state_dict(checkpoint['model_state_dict'])
# switch to evaluation mode
G.eval()
# save the model with pickle
with open('KMNIST_cGAN.pkl', 'wb') as f:
    cloudpickle.dump(G, f)
letter = 'あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわゐゑをんゝ'
strs = input()
with open('KMNIST_cGAN.pkl', 'rb') as f:
    generator = cloudpickle.load(f) # avoid shadowing the imported Generator class
for i in range(len(strs)):
noise = torch.normal(mean = 0.5, std = 0.2, size = (1, 30))
str_index = letter.index(strs[i])
tmp = np.identity(49)[str_index]
tmp = np.array(tmp, dtype = np.float32)
label = [tmp]
    img = generator(noise, torch.Tensor(label))
img = img.reshape((28,28))
img = img.detach().numpy().tolist()
if i == 0:
comp_img = img
else:
comp_img.extend(img)
save_image(torch.tensor(comp_img), './sentence.png', nrow=len(strs))
from django.contrib import admin
from django import forms
from django.db.models import Sum
from django.forms import ModelForm
from django.utils import timezone
#from django.contrib.contenttypes.admin import GenericTabularInline
#from tabbed_admin import TabbedModelAdmin
# Register your models here.
from .models import (
Empresa,
Persona,
Escala,
Cargo,
Departamento,
Rac,
Formulacion,
Empleado,
Variable,
ElementoPago2,
PagoEmpleado2,
Prenomina,
Educacion,
Familia,
)
@admin.register(Empresa)
class EmpresaAdmin(admin.ModelAdmin):
list_display = ('cod_empresa', 'nombre','usuario', 'fecha_creacion', 'fecha_update')
search_fields = ('cod_empresa', 'nombre')
@admin.register(Escala)
class EscalaAdmin(admin.ModelAdmin):
list_display = ('cod_escala', 'escala', 'grado', 'paso', 'sueldo', 'cod_empresa')
ordering = ('escala',)
list_editable = ('sueldo',)
@admin.register(Cargo)
class CargoAdmin(admin.ModelAdmin):
list_display = ('cod_cargo', 'cod_escala', 'des_cargo', 'cod_empresa')
ordering = ('cod_escala',)
list_editable = ('des_cargo','cod_escala')
@admin.register(Departamento)
class DepartamentoAdmin(admin.ModelAdmin):
list_display = ('cod_departamento', 'descripcion', 'cod_empresa')
ordering = ('descripcion',)
list_editable = ('descripcion',)
@admin.register(Rac)
class RacAdmin(admin.ModelAdmin):
list_display = ('cod_rac','cod_departamento','cod_empresa')
@admin.register(Formulacion)
class FormulacionAdmin(admin.ModelAdmin):
list_display = ('cod_formula','formula','cod_empresa',)
@admin.register(Variable)
class VariableAdmin(admin.ModelAdmin):
list_display = ('cod_variable','descripcion','monto')
class PagoEmpleado2Inline(admin.TabularInline):
model = PagoEmpleado2
extra = 1
class EmpleadoAdmin(admin.ModelAdmin):
list_display = ('pk','nombre','apellido','cedula',)
inlines = (PagoEmpleado2Inline,)
class ElementoPago2Admin(admin.ModelAdmin):
inlines = (PagoEmpleado2Inline,)
admin.site.register(Empleado, EmpleadoAdmin)
admin.site.register(ElementoPago2, ElementoPago2Admin)
admin.site.register(PagoEmpleado2)
class FamiliaInline(admin.StackedInline):
model = Familia
extra = 1
fieldsets = (
(None, {
'fields': ('cedula',('nombre_1','nombre_2'),('apellido_1','apellido_2'),'fecha_nacimiento')
}),
)
#(('cedula', 'apellido_1','nombre_1'),('apellido_2','nombre_2'),('fecha_nacimiento',),)
class EducacionInline(admin.StackedInline):
model = Educacion
extra = 1
fieldsets = (
(None, {
'fields': (('titulo', 'finalizado'),('inicio',),('fin',),'aptitudes')
}),
)
@admin.register(Persona)
class PersonaAdmin(admin.ModelAdmin):
list_display = ['cod_solicitud', 'cod_empresa', 'nombre_1', 'cedula', 'apellido_1', 'status','cargo_opt',]
ordering = ['status','cod_solicitud',]
list_filter = ['status',]
search_fields = ['nombre_1', 'cedula','cod_solicitud','cargo_opt']
inlines = [EducacionInline,FamiliaInline]
radio_fields = {'status': admin.HORIZONTAL}
fieldsets = (
(None, {
'classes': ('extrapretty','wide'),
'fields': ('status',)
}), ('Datos Personales', {
'classes': ('extrapretty','wide'),
'fields': ('cedula',('nombre_1','nombre_2'),('apellido_1','apellido_2'),'fecha_nacimiento',('edad','genero'))
}), ('Contacto',{
'classes': ('extrapretty','wide'),
'fields': (('email','telf'),'ocupacion','cargo_opt')
})
)
#'classes': ('collapse',),
# class PersonaAdmin(TabbedModelAdmin):
# model = Persona
# tab_overview = (
# (None, {
# 'fields': ('cedula', 'status', ('nombre_1','apellido_1'),('nombre_2','apellido_2'),'cod_empresa',
# 'fecha_nacimiento','edad','ocupacion','cargo_opt','email','telf','genero')
# } ),
# )
# tab_familia = (
# FamiliaInline,
# )
# tab_educacion = (
# EducacionInline,
# )
# tabs = [
# ('Persona', tab_overview),
# ('Familia', tab_familia),
# ('Educacion', tab_educacion),
# ]
# admin.site.register(Persona, PersonaAdmin)
@admin.register(Prenomina)
class PrenominaAdmin(admin.ModelAdmin):
filter_horizontal=('pagos_empleados',)
#extra = 1
list_display = ('pk','tipo',)
    # date_hierarchy for prenomina or for pagos?
#prepopulate
# class NominaSummaryAdmin(admin.ModelAdmin):
# def changelist_view(self, request, extra_context:None):
# date_hierarchy = 'fecha_inicio'
# response = super().changelist_view(request, extra_context)
# try:
# qs = response.context_data['cl'].queryset
# except (AttributeError, KeyError):
# return response
# metrics = {
# 'total': Sum('pagos_empleados__monto'),
# }
# response.context_data['summary'] = list(
# qs.values('pagos_empleados__monto').annotate(**metrics).order_by('-pagos_empleados')
# )
# response.context_data['summary_total'] = dict (
# qs.aggregate(**metrics)
# )
# return response
# admin.site.register(NominaSummary)
# #
# @admin.register(Prenomina)
# class PrenominaAdmin(admin.ModelAdmin):
# list_display = ('pk','descripcion')
# search_fields = ['descripcion']
# filter_horizontal = ('pagos_empleados',)
# @admin.register(Empleado)
# class EmpleadoAdmin(admin.ModelAdmin):
# list_display = ('cod_empleado','cod_rac')
# @admin.register(PagoEmpleado)
# class PagoEmpleadoAdmin(admin.ModelAdmin):
# search_fields = ['elementopago']
# filter_horizontal = ('empleado',)
#admin.site.register(PagoEmpleado, PagoEmpleadoAdmin)
#@admin.register(ElementoPago)
#class ElementoPagoAdmin(admin.ModelAdmin):
# list_display = ('cod_elemento_pago','descripcion','cod_formula')
# search_fields = ['descripcion']
#filter_horizontal = ('empleado',)
|
# -*- coding: utf-8 -*-
import os
#from plugin_color_widget import color_widget
from plugin_multiselect_widget import hmultiselect_widget
db.define_table('webadmin',
Field('webname',notnull=True,requires=IS_NOT_EMPTY(),label="网站名称",comment='必填'), # site name (required)
Field('weburl',label="网站地址"), # site URL
Field('logo','upload',label="网站LOGO"), # site logo
Field('pic','upload',label="网站大图片"), # site banner image
Field('author',label="作者"), # author
Field('seotitle',label="seo标签"), # SEO title tag
Field('keyword',label="关键字"), # keywords
Field('description','text',label="描述"), # description
Field('copyright','text',label="版权信息"), # copyright notice
Field('hotline',label="客服热线"), # customer service hotline
Field('icp',label="icp备案号"), # ICP record number
format='%(webname)s')
db.define_table('categories',
Field('name',unique=True,notnull=True,requires=(IS_NOT_EMPTY(),IS_NOT_IN_DB(db,'categories.name')),label="标签名",comment='必填'), # tag name (required)
Field('description','string',label='描述'), # tag description
Field('sort_num','integer',default=1,label='排序'), # tag sort order
auth.signature,
format='%(name)s')
db.define_table('news',
Field('title',unique=True,requires=(IS_NOT_EMPTY(),IS_NOT_IN_DB(db,'news.title')),label='标题',comment='必填'), # news title (required)
Field('contents','text',label='新闻内容'),
Field('views','integer',default=+1,writable=False,readable=False), # view count
auth.signature,
format='%(title)s')
db.define_table('colors',
Field('name',requires=(IS_NOT_EMPTY(),IS_NOT_IN_DB(db,'colors.name')),label='颜色名称',comment='必填'), # color name (required)
Field('description','string',label='颜色描述'), # color description
Field('engname',requires=(IS_NOT_EMPTY()),label='颜色英文名',comment='必填'),
#Field('image',widget=color_widget,label='颜色选择'), # color picker image
auth.signature,
format='%(name)s')
#db.colors.image.widget = color_widget
db.define_table('images', # image management table
Field('categories','reference categories',requires=(IS_IN_DB(db,'categories.id','%(name)s')),label='栏目',comment='必填'), # category (required)
#Field('colors','list:integer',label='颜色',requires=(IS_NOT_EMPTY(),IS_IN_SET([(color.id,color.name) for color in db(db.colors).select()], multiple=True)),comment='必填'), # colors (multi-select)
Field('colors','reference colors',requires=(IS_NOT_EMPTY(),IS_IN_DB(db,'colors.id','%(name)s')),label='颜色',comment='必填, 单选'), # color (single choice, required)
Field('title','string',requires=(IS_NOT_EMPTY(),IS_NOT_IN_DB(db, 'images.title')),label='标题',comment='必填'), # title (required)
Field('picattribute',label='属性'), # attributes
Field('picfrom',label='图片来源'), # image source
Field('picauthor',label='作者'), # author
Field('linkurl',label='跳转链接'), # redirect link
Field('pickeywords',label='关键词'), # keywords
Field('picdescription','string',label='摘要',comment='显示在列表左上角'), # summary (shown at the top-left of the list)
Field('piccontent','text',label='详细内容'), # full content
Field('picurl','upload',uploadseparate=False,autodelete=True,requires=[IS_NOT_EMPTY(),IS_IMAGE()],label='图片',comment='必填'), # image (required)
#Field('picresize','upload',readable=False,writable=False,autodelete=True), # saved after resizing the image
#Field('picsmallsize',readable=False,writable=False,autodelete=True), # thumbnail for front-end display
Field('picresolution',readable=False,writable=False,label='图片大小',comment='图片大小 4896x3264 PX (X-LARGE)'), # image resolution, e.g. 4896x3264 PX (X-LARGE)
Field('picsize',readable=False,writable=False,label='图片尺寸',comment='图片尺寸 4 MB'), # file size, e.g. 4 MB
Field('camera',label='相机型号',comment='相机型号 如:FUJIFILM X-E2'), # camera model, e.g. FUJIFILM X-E2
Field('checkinfo','boolean',default=True,label='审核状态'), # review status
Field('hits','integer',default=+1), # hit count
Field('zan','integer',default=+1), # likes
auth.signature,
format='%(title)s')
db.images.colors.widget = hmultiselect_widget
|
import sys
from cx_Freeze import setup, Executable
setup(
name = "Soundset Manager",
version = "1",
executables = [Executable("manager.py")]
)
|
from bs4 import BeautifulSoup
import requests
class Parse:
def __init__(self, url):
self.soup = BeautifulSoup(requests.get(url).content, 'html5lib')
def generate_webpage(self):
list_of_output = self.soup.find_all(["h1", "p", "noscript"])
title = ""
html_script = ""
start_tag, end_tag = self.__template()
html_script += start_tag
for output in list_of_output:
if output.name == "h1" or output.name == "p":
if output.name == "h1" and title == "":
title = str(output.text).strip()
html_script += output.prettify()
elif output.name == "noscript":
html_script += output.find("img").prettify()
html_script += end_tag
return title, html_script
def generate_webpage2(self):
return "abc",self.soup.prettify()
def __template(self):
start_template = """<!DOCTYPE html>
<html>
<head>
<title>Free Medium</title>
<style>
h1 {
margin-right: 150px;
margin-left: 80px;
}
p {
margin-right: 150px;
margin-left: 80px;
}
img {
display: block;
max-height:70%;
max-width:70%;
height:auto;
width:auto;
margin-left: auto;
margin-right: auto;
}
</style>
</head>
<body>"""
end_template = """</body>
</html>"""
return start_template, end_template
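# Usage sketch (hypothetical URL):
#   title, html = Parse('https://example.com/article').generate_webpage()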
|
import pymysql
db = pymysql.connect(host="10.0.251.50", user="root", password="1234qwer", database="cms_ju", charset='utf8')
cursor = db.cursor(pymysql.cursors.DictCursor)
cursor.execute("select * from tj_news_clob order by news_id desc limit %s,%s",(0,10))
rows = cursor.fetchall()
for row in rows:
print(row['news_id'])
db.close()
|
from clients.codeforces import CodeforcesHttpClient
from models import MemberCodeforcesProfile
def fetch_member_codeforces_profile(codeforces_handle):
client = CodeforcesHttpClient()
profile = client.send_request("user.info", {"handles": codeforces_handle})
if profile:
return MemberCodeforcesProfile.load(profile[0])
return None
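# Usage sketch (hypothetical handle): fetch_member_codeforces_profile('some_handle')
# returns a MemberCodeforcesProfile built from the first user.info result, or None.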
|
import os
import math
from Pre_Process import punctuations_regex
from Pre_Process import make_words
from Pre_Process import return_lowered_lines
from Pre_Process import get_all_files_dir
from Pre_Process import merge
from Pre_Process import create_out_file
from Pre_Process import TRAIN_POS_OUT_FILE
from Pre_Process import TRAIN_NEG_OUT_FILE
ROOT = os.getcwd()+"/movie-review-HW2/aclImdb/"
TEST_POS = ROOT+"/test/pos/"
TEST_NEG = ROOT+"/test/neg/"
MOVIE_OUTPUT_PREDICTION = "MOVIE-OUTPUT-PREDICTION.txt"
MOVIE_OUTPUT_FET_PARAM = "movie-review-BOW.NB"
TRAIN_POS_FILE = TRAIN_POS_OUT_FILE
TRAIN_NEG_FILE = TRAIN_NEG_OUT_FILE
POS_IND = "+"
NEG_IND = "-"
TEST_POS_FILES = 60
TEST_NEG_FILES = 60
TOTAL_FILES = TEST_POS_FILES+TEST_NEG_FILES
def read_words_make_map(file_path):
file = open(file_path)
line = file.readline().rstrip()
words = line.split()
word_map = dict()
for word in words:
try:
word_map[word] += 1
except KeyError:
word_map[word] = 1
return word_map
def cal_prob_test_file(class_map, total_words, test_file, total_train_words, IND, FEAT):
words = punctuations_regex(make_words(return_lowered_lines(test_file)))
total_probability = math.log((TEST_POS_FILES/TOTAL_FILES), 2)
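    # add-one (Laplace) smoothing below; note the class prior is hard-coded to
    # TEST_POS_FILES/TOTAL_FILES, which is only correct here because the positive
    # and negative test sets are the same size (60 files each)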
for word in words:
top = class_map.get(word, 0) + 1
bottom = total_words + total_train_words
total_probability += math.log((top/bottom), 2)
FEAT.write("P( "+word+" | "+IND+" ) = " + str(top/bottom) + "\n")
return total_probability
def class_predictor_file(map1, map1_words, map2, map2_words, directory,
test_file, total_train_words, IND1, IND2, OUT, FEAT):
class1_probability = cal_prob_test_file(map1, map1_words, directory+test_file, total_train_words, IND1, FEAT)
class2_probability = cal_prob_test_file(map2, map2_words, directory+test_file, total_train_words, IND2, FEAT)
if class2_probability > class1_probability:
OUT.write(test_file + ", " + IND2 + ", " + IND1 + "\n")
return 1
else:
OUT.write(test_file + ", " + IND1 + ", " + IND2 + "\n")
return 0
def class_predictor_dir(map1, map2, directory, total_train_words, ind1, ind2, out, feat):
files = get_all_files_dir(directory)
map1_words = sum(map1.values())
map2_words = sum(map2.values())
counter = 0
for file in files:
counter += class_predictor_file(map1, map1_words, map2, map2_words,
directory, file, total_train_words, ind1, ind2, out, feat)
return counter
def accuracy(total_mismatch, files):
return 100-(total_mismatch/files)*100
def run_movie_class_predictor():
print("PROGRAM STARTED")
train_pos_map = read_words_make_map(TRAIN_POS_FILE)
train_neg_map = read_words_make_map(TRAIN_NEG_FILE)
merged_map = merge(train_pos_map, train_neg_map)
total_train_keys = len(merged_map)
create_out_file(MOVIE_OUTPUT_PREDICTION)
create_out_file(MOVIE_OUTPUT_FET_PARAM)
print("2 FILES WILL BE CREATED WHERE OUTPUT WILL BE SAVED - \n1. "
""+MOVIE_OUTPUT_PREDICTION+" 2."+MOVIE_OUTPUT_FET_PARAM)
OUT_PRED = open(MOVIE_OUTPUT_PREDICTION, "a")
OUT_FEAT_PARAM = open(MOVIE_OUTPUT_FET_PARAM, "a")
OUT_PRED.write("file, my-prediction, label"+"\n")
pos_neg = class_predictor_dir(train_pos_map, train_neg_map, TEST_POS, total_train_keys, POS_IND, NEG_IND,
OUT_PRED, OUT_FEAT_PARAM)
neg_pos = class_predictor_dir(train_neg_map, train_pos_map, TEST_NEG, total_train_keys, NEG_IND, POS_IND,
OUT_PRED, OUT_FEAT_PARAM)
pos_accuracy = accuracy(pos_neg, TEST_POS_FILES)
neg_accuracy = accuracy(neg_pos, TEST_NEG_FILES)
OUT_PRED.write("Overall accuracy: "+str((pos_accuracy+neg_accuracy)/2))
OUT_PRED.close()
OUT_FEAT_PARAM.close()
print("PROGRAM FINISHED")
run_movie_class_predictor()
|
species(
label = '[CH]=C(C=C)O[C]=C(26507)',
structure = SMILES('[CH]=C(C=C)O[C]=C'),
E0 = (503.326,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,3010,987.5,1337.5,450,1655,3120,650,792.5,1650,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,267.015,268.588,268.955],'cm^-1')),
HinderedRotor(inertia=(0.312507,'amu*angstrom^2'), symmetry=1, barrier=(16.1061,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.313858,'amu*angstrom^2'), symmetry=1, barrier=(16.1133,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.315061,'amu*angstrom^2'), symmetry=1, barrier=(16.1031,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.987755,0.0647213,-6.53991e-05,3.49806e-08,-7.52072e-12,60645.9,27.4045], Tmin=(100,'K'), Tmax=(1123.49,'K')), NASAPolynomial(coeffs=[12.7825,0.0227284,-9.33381e-06,1.71236e-09,-1.17908e-13,57995.6,-30.8719], Tmin=(1123.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(503.326,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(C=CJO)"""),
)
species(
label = 'CH2CO(28)',
structure = SMILES('C=C=O'),
E0 = (-60.8183,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2120,512.5,787.5],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (42.0367,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.12,'J/mol'), sigma=(3.97,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.13241,0.0181319,-1.74093e-05,9.35336e-09,-2.01725e-12,-7148.09,13.3808], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[5.75871,0.00635124,-2.25955e-06,3.62322e-10,-2.15856e-14,-8085.33,-4.9649], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-60.8183,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""CH2CO""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'CH2CHCCH(26391)',
structure = SMILES('C#CC=C'),
E0 = (274.188,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,750,770,3400,2100,3010,987.5,1337.5,450,1655,2175,525],'cm^-1')),
HinderedRotor(inertia=(1.46338,'amu*angstrom^2'), symmetry=1, barrier=(33.6459,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (52.0746,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2968.28,'J/mol'), sigma=(5.18,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.87083,0.0182042,1.06711e-05,-2.72492e-08,1.19478e-11,33023.8,11.2934], Tmin=(100,'K'), Tmax=(955.249,'K')), NASAPolynomial(coeffs=[8.52653,0.0108962,-3.56564e-06,6.31243e-10,-4.51891e-14,31196.2,-19.6435], Tmin=(955.249,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(274.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(178.761,'J/(mol*K)'), label="""CH2CHCCH""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C1C=C1O[C]=C(27390)',
structure = SMILES('[CH2]C1C=C1O[C]=C'),
E0 = (607.807,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.16617,0.0628376,-6.66088e-05,3.97525e-08,-9.68998e-12,73203.9,26.6122], Tmin=(100,'K'), Tmax=(990.899,'K')), NASAPolynomial(coeffs=[10.3882,0.0256104,-1.02549e-05,1.838e-09,-1.24273e-13,71376.3,-17.7947], Tmin=(990.899,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(607.807,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + ring(Cyclopropene) + radical(Isobutyl) + radical(C=CJO)"""),
)
species(
label = '[CH]=C1OC(=C)C1[CH2](27391)',
structure = SMILES('[CH]=C1OC(=C)C1[CH2]'),
E0 = (490.336,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.55253,0.0430631,-2.41741e-06,-2.90529e-08,1.46885e-11,59071.6,23.597], Tmin=(100,'K'), Tmax=(970.853,'K')), NASAPolynomial(coeffs=[13.1729,0.0206809,-7.22622e-06,1.29767e-09,-9.20726e-14,55613.7,-38.3087], Tmin=(970.853,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(490.336,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(303.478,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(Cyclobutane) + radical(Cds_P) + radical(Isobutyl)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH]=C(C=C)OC#C(27392)',
structure = SMILES('[CH]=C(C=C)OC#C'),
E0 = (472.908,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2175,525,750,770,3400,2100,350,440,435,1725,180,180],'cm^-1')),
HinderedRotor(inertia=(1.16464,'amu*angstrom^2'), symmetry=1, barrier=(26.7773,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.16742,'amu*angstrom^2'), symmetry=1, barrier=(26.8414,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.16708,'amu*angstrom^2'), symmetry=1, barrier=(26.8335,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (93.1033,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.721856,0.0692721,-7.70066e-05,4.30651e-08,-9.43563e-12,56998.2,22.6863], Tmin=(100,'K'), Tmax=(1117.69,'K')), NASAPolynomial(coeffs=[15.3098,0.0170651,-6.94245e-06,1.27442e-09,-8.81378e-14,53737.2,-49.3152], Tmin=(1117.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(472.908,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Ct-CtOs) + group(Ct-CtH) + radical(Cds_P)"""),
)
species(
label = '[CH]=[C]C=C(4699)',
structure = SMILES('[CH]=C=C[CH2]'),
E0 = (451.584,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,540,610,2055,3000,3100,440,815,1455,1000,180,1024.85,1025.53,1026.61],'cm^-1')),
HinderedRotor(inertia=(0.00938781,'amu*angstrom^2'), symmetry=1, barrier=(7.01846,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (52.0746,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.76805,0.020302,8.75519e-06,-2.87666e-08,1.37354e-11,54363.7,13.5565], Tmin=(100,'K'), Tmax=(915.031,'K')), NASAPolynomial(coeffs=[9.46747,0.00887314,-1.78262e-06,2.38534e-10,-1.6263e-14,52390.1,-22.2544], Tmin=(915.031,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(451.584,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(C=C=CJ) + radical(Allyl_P)"""),
)
species(
label = 'C2H3(30)',
structure = SMILES('[CH]=C'),
E0 = (286.361,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,677.08,1086.68,3788.01],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (27.0452,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1737.73,'J/mol'), sigma=(4.1,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.36378,0.000265766,2.79621e-05,-3.72987e-08,1.5159e-11,34475,7.9151], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.15027,0.00754021,-2.62998e-06,4.15974e-10,-2.45408e-14,33856.6,1.72812], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(286.361,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""C2H3""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'C#CO[C]=C(5800)',
structure = SMILES('C#CO[C]=C'),
E0 = (418.884,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2175,525,750,770,3400,2100,1685,370,180,180],'cm^-1')),
HinderedRotor(inertia=(1.21883,'amu*angstrom^2'), symmetry=1, barrier=(28.0233,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.21808,'amu*angstrom^2'), symmetry=1, barrier=(28.0061,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (67.066,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.30607,0.0360715,-3.58533e-05,1.8608e-08,-3.87253e-12,50442.2,17.7058], Tmin=(100,'K'), Tmax=(1159.67,'K')), NASAPolynomial(coeffs=[9.19037,0.0123252,-5.13737e-06,9.49688e-10,-6.56848e-14,48845.5,-16.5265], Tmin=(1159.67,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(418.884,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(174.604,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Ct-CtOs) + group(Ct-CtH) + radical(C=CJO)"""),
)
species(
label = '[CH]=COC(=[CH])C=C(27393)',
structure = SMILES('[CH]=COC(=[CH])C=C'),
E0 = (510.678,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,3115,3125,620,680,785,800,1600,1700,2995,3025,975,1000,1300,1375,400,500,1630,1680,2950,3100,1380,975,1025,1650,180,180],'cm^-1')),
HinderedRotor(inertia=(0.989362,'amu*angstrom^2'), symmetry=1, barrier=(22.7474,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.983862,'amu*angstrom^2'), symmetry=1, barrier=(22.6209,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.98497,'amu*angstrom^2'), symmetry=1, barrier=(22.6464,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.247872,0.0718323,-7.37134e-05,3.74182e-08,-7.31398e-12,61564.5,27.3361], Tmin=(100,'K'), Tmax=(1305.64,'K')), NASAPolynomial(coeffs=[18.7288,0.0131768,-3.98657e-06,6.20539e-10,-3.93193e-14,56912.2,-66.0877], Tmin=(1305.64,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(510.678,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_P)"""),
)
species(
label = 'C=[C]OC(=C)[C]=C(27394)',
structure = SMILES('C=[C]OC(=C)[C]=C'),
E0 = (455.225,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.99619,0.0675404,-7.69257e-05,4.90924e-08,-1.27298e-11,54857.8,26.5718], Tmin=(100,'K'), Tmax=(935.7,'K')), NASAPolynomial(coeffs=[10.6536,0.0262562,-1.07437e-05,1.93908e-09,-1.31343e-13,53050.5,-19.3778], Tmin=(935.7,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(455.225,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(C=CJO)"""),
)
species(
label = '[CH]=C([C]=C)OC=C(27395)',
structure = SMILES('[CH]C(=C=C)OC=C'),
E0 = (439.285,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.674273,0.0647168,-4.86573e-05,1.65905e-08,-1.60428e-12,52960.7,26.9527], Tmin=(100,'K'), Tmax=(1122.63,'K')), NASAPolynomial(coeffs=[14.5338,0.0255846,-1.00666e-05,1.80677e-09,-1.23227e-13,49202.9,-44.3914], Tmin=(1122.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(439.285,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]=CC(=C)O[C]=C(27396)',
structure = SMILES('[CH]=CC(=C)O[C]=C'),
E0 = (503.326,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,3010,987.5,1337.5,450,1655,3120,650,792.5,1650,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,267.015,268.588,268.955],'cm^-1')),
HinderedRotor(inertia=(0.312507,'amu*angstrom^2'), symmetry=1, barrier=(16.1061,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.313858,'amu*angstrom^2'), symmetry=1, barrier=(16.1133,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.315061,'amu*angstrom^2'), symmetry=1, barrier=(16.1031,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.987755,0.0647213,-6.53991e-05,3.49806e-08,-7.52072e-12,60645.9,27.4045], Tmin=(100,'K'), Tmax=(1123.49,'K')), NASAPolynomial(coeffs=[12.7825,0.0227284,-9.33381e-06,1.71236e-09,-1.17908e-13,57995.6,-30.8719], Tmin=(1123.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(503.326,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(C=CJO)"""),
)
species(
label = '[CH]=[C]OC(=C)C=C(27397)',
structure = SMILES('C#COC([CH2])=C[CH2]'),
E0 = (411.739,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.684427,0.0678694,-6.75807e-05,3.44492e-08,-6.93969e-12,49644.4,25.1782], Tmin=(100,'K'), Tmax=(1207.92,'K')), NASAPolynomial(coeffs=[15.2599,0.0196032,-7.64363e-06,1.36927e-09,-9.32382e-14,46123.2,-47.8934], Tmin=(1207.92,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(411.739,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Ct-CtOs) + group(Ct-CtH) + radical(C=C(O)CJ) + radical(Allyl_P)"""),
)
species(
label = '[CH]=CC(=[CH])OC=C(27398)',
structure = SMILES('[CH]=CC(=[CH])OC=C'),
E0 = (510.678,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,3115,3125,620,680,785,800,1600,1700,2995,3025,975,1000,1300,1375,400,500,1630,1680,2950,3100,1380,975,1025,1650,180,180],'cm^-1')),
HinderedRotor(inertia=(0.989362,'amu*angstrom^2'), symmetry=1, barrier=(22.7474,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.983862,'amu*angstrom^2'), symmetry=1, barrier=(22.6209,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.98497,'amu*angstrom^2'), symmetry=1, barrier=(22.6464,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.247872,0.0718323,-7.37134e-05,3.74182e-08,-7.31398e-12,61564.5,27.3361], Tmin=(100,'K'), Tmax=(1305.64,'K')), NASAPolynomial(coeffs=[18.7288,0.0131768,-3.98657e-06,6.20539e-10,-3.93193e-14,56912.2,-66.0877], Tmin=(1305.64,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(510.678,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_P)"""),
)
species(
label = 'C=[C][O](173)',
structure = SMILES('[CH2][C]=O'),
E0 = (160.185,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,539.612,539.669],'cm^-1')),
HinderedRotor(inertia=(0.000578908,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (42.0367,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.39563,0.0101365,2.30741e-06,-8.97566e-09,3.68242e-12,19290.3,10.0703], Tmin=(100,'K'), Tmax=(1068.9,'K')), NASAPolynomial(coeffs=[6.35055,0.00638951,-2.69368e-06,5.4221e-10,-4.02476e-14,18240.9,-6.33602], Tmin=(1068.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(160.185,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(CsCJ=O) + radical(CJC=O)"""),
)
species(
label = 'C=[C]OC1[CH]CC=1(27399)',
structure = SMILES('C=[C]OC1[CH]CC=1'),
E0 = (506.012,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.86722,0.0333437,2.28744e-05,-5.35692e-08,2.27134e-11,60948.1,27.7659], Tmin=(100,'K'), Tmax=(978.738,'K')), NASAPolynomial(coeffs=[13.1465,0.0201621,-7.36993e-06,1.39303e-09,-1.02623e-13,57163.7,-34.4617], Tmin=(978.738,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(506.012,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(299.321,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + ring(Cyclobutene) + radical(C=CJO) + radical(CCJCO)"""),
)
species(
label = '[CH]=C1[CH]CC(=C)O1(27400)',
structure = SMILES('[CH]C1=CCC(=C)O1'),
E0 = (324.62,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.23995,0.0177921,8.49274e-05,-1.21181e-07,4.71067e-11,39125.1,23.1597], Tmin=(100,'K'), Tmax=(955.605,'K')), NASAPolynomial(coeffs=[13.3172,0.0242216,-8.03921e-06,1.49209e-09,-1.12021e-13,34597.4,-42.3918], Tmin=(955.605,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(324.62,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(303.478,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclopentane) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=CC1=CC(=C)O1(27401)',
structure = SMILES('C=CC1=CC(=C)O1'),
E0 = (143.925,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (94.1112,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.25403,0.0457197,3.76108e-06,-4.4763e-08,2.23705e-11,17422.4,18.5001], Tmin=(100,'K'), Tmax=(952.16,'K')), NASAPolynomial(coeffs=[17.2535,0.0146712,-4.29894e-06,7.70436e-10,-5.84129e-14,12736.2,-66.5123], Tmin=(952.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(143.925,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(303.478,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(Cyclobutene)"""),
)
species(
label = 'H2CC(41)',
structure = SMILES('[C]=C'),
E0 = (401.202,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (26.0373,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2480.69,'J/mol'), sigma=(4.48499,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=387.48 K, Pc=62.39 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.28155,0.00697643,-2.38528e-06,-1.21078e-09,9.82042e-13,48319.2,5.92036], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.27807,0.00475623,-1.63007e-06,2.54623e-10,-1.4886e-14,48014,0.639979], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(401.202,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""H2CC""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH]=C([O])C=C(5816)',
structure = SMILES('[CH]=C([O])C=C'),
E0 = (264.929,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,180],'cm^-1')),
HinderedRotor(inertia=(0.979636,'amu*angstrom^2'), symmetry=1, barrier=(22.5238,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (68.074,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.41648,0.0452197,-4.57103e-05,2.25525e-08,-4.15485e-12,31966.6,18.4199], Tmin=(100,'K'), Tmax=(1540.92,'K')), NASAPolynomial(coeffs=[13.6856,0.00610864,-5.68369e-07,-3.63413e-11,6.19928e-15,29047.6,-43.2788], Tmin=(1540.92,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(264.929,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cds-Cds(Cds-Cds)O2s) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(C=C(C)OJ) + radical(Cds_P)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (503.326,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (607.807,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (591.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (699.81,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (503.326,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (721.544,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (616.115,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (725.651,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (604.503,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (906.759,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (694.535,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (543.718,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (611.769,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (641.381,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (546.839,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (511.61,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (666.131,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['CH2CO(28)', 'CH2CHCCH(26391)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['[CH2]C1C=C1O[C]=C(27390)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(2.54e+10,'s^-1'), n=0.69, Ea=(104.481,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 14 used for R4_D_D;doublebond_intra_2H_pri;radadd_intra_cdsingleH
Exact match found for rate rule [R4_D_D;doublebond_intra_2H_pri;radadd_intra_cdsingleH]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 104.1 to 104.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['[CH]=C1OC(=C)C1[CH2](27391)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.65009e+08,'s^-1'), n=1.00067, Ea=(87.7294,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS_D;doublebond_intra_2H_pri;radadd_intra] for rate rule [R5_SS_D;doublebond_intra_2H_pri;radadd_intra_cddouble]
Euclidian distance = 1.0
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction4',
reactants = ['H(3)', '[CH]=C(C=C)OC#C(27392)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(4278.27,'m^3/(mol*s)'), n=1.383, Ea=(15.1097,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ct_Ct;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['CH2CO(28)', '[CH]=[C]C=C(4699)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(11.6997,'m^3/(mol*s)'), n=2.021, Ea=(112.56,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Od_R;YJ] for rate rule [Od_Cdd;CJ]
Euclidian distance = 1.41421356237
family: R_Addition_MultipleBond
Ea raised from 108.3 to 112.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction6',
reactants = ['C2H3(30)', 'C#CO[C]=C(5800)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.324566,'m^3/(mol*s)'), n=2.2487, Ea=(16.299,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Ct_Ct;CdsJ-H] for rate rule [Ct-O_Ct;CdsJ-H]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH]=COC(=[CH])C=C(27393)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(1.08e+06,'s^-1'), n=1.99, Ea=(105.437,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 17 used for R2H_D;Cd_rad_out_singleH;Cd_H_out_singleNd
Exact match found for rate rule [R2H_D;Cd_rad_out_singleH;Cd_H_out_singleNd]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['C=[C]OC(=C)[C]=C(27394)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(191.5,'s^-1'), n=3.05, Ea=(222.325,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_DS;Cd_rad_out_singleH;Cd_H_out_doubleC]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['[CH]=C([C]=C)OC=C(27395)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(1.286e+08,'s^-1'), n=1.323, Ea=(101.177,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSR;Cd_rad_out_Cd;XH_out] for rate rule [R4H_SSS;Cd_rad_out_Cd;Cd_H_out_doubleC]
Euclidian distance = 2.82842712475
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH]=CC(=C)O[C]=C(27396)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(2.27529e+12,'s^-1'), n=1.09983, Ea=(403.433,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSD;Cd_rad_out_single;Cd_H_out_singleH] for rate rule [R4H_DSD;Cd_rad_out_singleH;Cd_H_out_singleH]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['[CH]=[C]OC(=C)C=C(27397)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.456e+11,'s^-1'), n=0.86, Ea=(191.209,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;Cd_H_out_singleH] for rate rule [R5HJ_3;Cd_rad_out_singleH;Cd_H_out_singleH]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH]=CC(=[CH])OC=C(27398)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(136000,'s^-1'), n=1.9199, Ea=(33.0402,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H;Cd_rad_out_singleH;XH_out] for rate rule [R5H_DSSS;Cd_rad_out_singleH;Cd_H_out_doubleC]
Euclidian distance = 3.60555127546
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['C=[C][O](173)', '[CH]=[C]C=C(4699)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction14',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['C=[C]OC1[CH]CC=1(27399)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.953e+11,'s^-1'), n=0.387, Ea=(138.055,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_D_D;doublebond_intra_pri;radadd_intra_cdsingleH] for rate rule [R4_D_D;doublebond_intra_pri_2H;radadd_intra_cdsingleH]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction15',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['[CH]=C1[CH]CC(=C)O1(27400)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(9.47e+07,'s^-1'), n=0.85, Ea=(43.5136,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 7 used for R5_SS_D;doublebond_intra_pri_2H;radadd_intra_cddouble
Exact match found for rate rule [R5_SS_D;doublebond_intra_pri_2H;radadd_intra_cddouble]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction16',
reactants = ['[CH]=C(C=C)O[C]=C(26507)'],
products = ['C=CC1=CC(=C)O1(27401)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_out;Ypri_rad_out] for rate rule [R4_SSD;Y_rad_out;CdsinglepriH_rad_out]
Euclidian distance = 2.2360679775
family: Birad_recombination"""),
)
reaction(
label = 'reaction17',
reactants = ['H2CC(41)', '[CH]=C([O])C=C(5816)'],
products = ['[CH]=C(C=C)O[C]=C(26507)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1355.7,'m^3/(mol*s)'), n=1.40819, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [O_sec_rad;Birad] for rate rule [O_rad/OneDe;Birad]
Euclidian distance = 1.0
family: Birad_R_Recombination
Ea raised from -12.0 to 0 kJ/mol."""),
)
network(
label = '4475',
isomers = [
'[CH]=C(C=C)O[C]=C(26507)',
],
reactants = [
('CH2CO(28)', 'CH2CHCCH(26391)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '4475',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
import numpy as np
import cv2
import time
"""
Start of:
Reading input image
"""
# Reading image with OpenCV library
# In this way image is opened already as numpy array
image_BGR = cv2.imread('images/woman-working-in-the-office.jpg')
# Showing Original Image
# Giving name to the window with Original Image
# And specifying that window is resizable
cv2.namedWindow('Original Image', cv2.WINDOW_NORMAL)
# Pay attention! 'cv2.imshow' takes images in BGR format
cv2.imshow('Original Image', image_BGR)
# Waiting for any key being pressed
cv2.waitKey(0)
# Destroying opened window with name 'Original Image'
cv2.destroyWindow('Original Image')
# # Check point
# # Showing image shape
# print('Image shape:', image_BGR.shape) # tuple of (511, 767, 3)
# Getting spatial dimension of input image
h, w = image_BGR.shape[:2] # Slicing from tuple only first two elements
# # Check point
# # Showing height and width of image
# print('Image height={0} and width={1}'.format(h, w)) # 511 767
"""
End of:
Reading input image
"""
"""
Start of:
Getting blob from input image
"""
# Getting blob from input image
# The 'cv2.dnn.blobFromImage' function returns a 4-dimensional blob
# from the input image after mean subtraction, normalizing, and swapping of the R and B channels
# The resulting shape holds number of images, number of channels, height and width
blob = cv2.dnn.blobFromImage(image_BGR, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
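# # Roughly what 'blobFromImage' does here (a sketch for reference only,
# # given scalefactor=1/255, no mean subtraction, swapRB=True, crop=False):
# resized = cv2.resize(image_BGR, (416, 416))
# rgb = resized[:, :, ::-1]  # swapRB: BGR -> RGB
# manual_blob = (rgb / 255.0).transpose(2, 0, 1)[np.newaxis].astype(np.float32)
# # manual_blob.shape == (1, 3, 416, 416), matching 'blob' above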
# # Check point
# print('Image shape:', image_BGR.shape) # (511, 767, 3)
# print('Blob shape:', blob.shape) # (1, 3, 416, 416)
# # Check point
# # Showing blob image in OpenCV window
# # Slicing blob image and transposing to make channels come at the end
# blob_to_show = blob[0, :, :, :].transpose(1, 2, 0)
# print(blob_to_show.shape) # (416, 416, 3)
#
# # Showing Blob Image
# # Giving name to the window with Blob Image
# # And specifying that window is resizable
# cv2.namedWindow('Blob Image', cv2.WINDOW_NORMAL)
# # Pay attention! 'cv2.imshow' takes images in BGR format
# # Consequently, convert image from RGB to BGR firstly
# cv2.imshow('Blob Image', cv2.cvtColor(blob_to_show, cv2.COLOR_RGB2BGR))
# # Waiting for any key being pressed
# cv2.waitKey(0)
# # Destroying opened window with name 'Blob Image'
# cv2.destroyWindow('Blob Image')
"""
End of:
Getting blob from input image
"""
"""
Start of:
Loading YOLO v3 network
"""
# Loading COCO class labels from file
# Opening file
with open('yolo-coco-data/coco.names') as f:
# Getting labels by reading every line
# and putting them into the list
labels = [line.strip() for line in f]
# # Check point
# print('List with labels names:')
# print(labels)
# Loading trained YOLO v3 Objects Detector
# with the help of 'dnn' library from OpenCV
network = cv2.dnn.readNetFromDarknet('yolo-coco-data/yolov3.cfg',
'yolo-coco-data/yolov3.weights')
# Getting list with names of all layers from YOLO v3 network
layers_names_all = network.getLayerNames()
# Getting only output layers' names that we need from YOLO v3 algorithm
# with function that returns indexes of layers with unconnected outputs
# Flattening makes the indexing work both for OpenCV 3.x, where the indexes
# come wrapped as [[i]], and for OpenCV 4.x, where they come as a flat array
layers_names_output = \
[layers_names_all[i - 1] for i in np.array(network.getUnconnectedOutLayers()).flatten()]
# Setting minimum probability to eliminate weak predictions
probability_minimum = 0.5
# Setting threshold for filtering weak bounding boxes
# with non-maximum suppression
threshold = 0.3
# Generating colours for representing every detected object
# with function randint(low, high=None, size=None, dtype='l')
colours = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
# # Check point
# print()
# print(type(colours)) # <class 'numpy.ndarray'>
# print(colours.shape) # (80, 3)
# print(colours[0]) # [172 10 127]
"""
End of:
Loading YOLO v3 network
"""
"""
Start of:
Implementing Forward pass
"""
# Implementing forward pass with our blob and only through output layers
# Calculating at the same time, needed time for forward pass
network.setInput(blob) # setting blob as input to the network
start = time.time()
output_from_network = network.forward(layers_names_output)
end = time.time()
# Showing spent time for forward pass
print('Objects Detection took {:.5f} seconds'.format(end - start))
"""
End of:
Implementing Forward pass
"""
"""
Start of:
Getting bounding boxes
"""
# Preparing lists for detected bounding boxes,
# obtained confidences and class numbers
bounding_boxes = []
confidences = []
class_numbers = []
# Going through all output layers after feed forward pass
for result in output_from_network:
# Going through all detections from current output layer
for detected_objects in result:
# Getting 80 classes' probabilities for current detected object
scores = detected_objects[5:]
# Getting index of the class with the maximum value of probability
class_current = np.argmax(scores)
# Getting value of probability for defined class
confidence_current = scores[class_current]
# Eliminating weak predictions with minimum probability
if confidence_current > probability_minimum:
# Scaling bounding box coordinates to the initial image size
# YOLO data format keeps coordinates for center of bounding box
# and its current width and height
# That is why we can simply multiply them elementwise
# by the width and height of the original image to get the center
# coordinates, width and height of the bounding box at original image scale
box_current = detected_objects[0:4] * np.array([w, h, w, h])
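# # For example, with w=767 and h=511 a normalized detection of
# # (0.5, 0.5, 0.2, 0.4) scales to (383.5, 255.5, 153.4, 204.4)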
# Now, from YOLO data format, we can get top left corner coordinates
# that are x_min and y_min
x_center, y_center, box_width, box_height = box_current
x_min = int(x_center - (box_width / 2))
y_min = int(y_center - (box_height / 2))
# Adding results into prepared lists
bounding_boxes.append([x_min, y_min, int(box_width), int(box_height)])
confidences.append(float(confidence_current))
class_numbers.append(class_current)
"""
End of:
Getting bounding boxes
"""
"""
Start of:
Non-maximum suppression
"""
# Implementing non-maximum suppression of given bounding boxes
# With this technique we exclude some of the bounding boxes if their
# corresponding confidences are low or if there is another
# bounding box for the same region with a higher confidence
# Note that the boxes must have data type 'int'
# and the confidences data type 'float'
results = cv2.dnn.NMSBoxes(bounding_boxes, confidences,
probability_minimum, threshold)
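# # A toy illustration of the behaviour (not part of the detector itself):
# # of two heavily overlapping boxes only the higher-scoring one survives,
# # while a distant box is kept regardless of the others
# toy_boxes = [[10, 10, 100, 100], [12, 12, 100, 100], [300, 300, 50, 50]]
# toy_scores = [0.9, 0.6, 0.8]
# print(cv2.dnn.NMSBoxes(toy_boxes, toy_scores, 0.5, 0.3))  # keeps indexes 0 and 2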
"""
End of:
Non-maximum suppression
"""
"""
Start of:
Drawing bounding boxes and labels
"""
# Defining counter for detected objects
counter = 1
# Checking if there is at least one detected object after non-maximum suppression
if len(results) > 0:
# Going through indexes of results
for i in results.flatten():
# Showing labels of the detected objects
print('Object {0}: {1}'.format(counter, labels[int(class_numbers[i])]))
# Incrementing counter
counter += 1
# Getting current bounding box coordinates,
# its width and height
x_min, y_min = bounding_boxes[i][0], bounding_boxes[i][1]
box_width, box_height = bounding_boxes[i][2], bounding_boxes[i][3]
# Preparing colour for current bounding box
# and converting from numpy array to list
colour_box_current = colours[class_numbers[i]].tolist()
# Drawing bounding box on the original image
cv2.rectangle(image_BGR, (x_min, y_min),
(x_min + box_width, y_min + box_height),
colour_box_current, 2)
# Preparing text with label and confidence for current bounding box
text_box_current = '{}: {:.4f}'.format(labels[int(class_numbers[i])],
confidences[i])
# Putting text with label and confidence on the original image
cv2.putText(image_BGR, text_box_current, (x_min, y_min - 5),
cv2.FONT_HERSHEY_COMPLEX, 0.7, colour_box_current, 2)
# Comparing how many objects were detected before non-maximum suppression
# and how many are left after it
print()
print('Total objects detected:', len(bounding_boxes))
print('Number of objects left after non-maximum suppression:', counter - 1)
"""
End of:
Drawing bounding boxes and labels
"""
# Showing Original Image with Detected Objects
# Giving name to the window with Original Image
# And specifying that window is resizable
cv2.namedWindow('Detections', cv2.WINDOW_NORMAL)
# Pay attention! 'cv2.imshow' takes images in BGR format
cv2.imshow('Detections', image_BGR)
# Waiting for any key being pressed
cv2.waitKey(0)
# Destroying opened window with name 'Detections'
cv2.destroyWindow('Detections')
|
# Reads a sequence of characters and checks whether a given value occurs in it
# Note: list(input(...)) splits the raw input string into single characters
l = list(input("Enter your list: "))
v = input('Enter your value: ')
# membership testing with 'in' already yields a boolean, so print it directly
print(v in l)
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field author_nicks on 'Production'
db.create_table('demoscene_production_author_nicks', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('production', models.ForeignKey(orm['demoscene.production'], null=False)),
('nick', models.ForeignKey(orm['demoscene.nick'], null=False))
))
db.create_unique('demoscene_production_author_nicks', ['production_id', 'nick_id'])
# Adding M2M table for field author_affiliation_nicks on 'Production'
db.create_table('demoscene_production_author_affiliation_nicks', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('production', models.ForeignKey(orm['demoscene.production'], null=False)),
('nick', models.ForeignKey(orm['demoscene.nick'], null=False))
))
db.create_unique('demoscene_production_author_affiliation_nicks', ['production_id', 'nick_id'])
def backwards(self, orm):
# Removing M2M table for field author_nicks on 'Production'
db.delete_table('demoscene_production_author_nicks')
# Removing M2M table for field author_affiliation_nicks on 'Production'
db.delete_table('demoscene_production_author_affiliation_nicks')
models = {
'demoscene.downloadlink': {
'Meta': {'object_name': 'DownloadLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'download_links'", 'to': "orm['demoscene.Production']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2048'})
},
'demoscene.nick': {
'Meta': {'object_name': 'Nick'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'releaser': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nicks'", 'to': "orm['demoscene.Releaser']"})
},
'demoscene.nickvariant': {
'Meta': {'object_name': 'NickVariant'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nick': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variants'", 'to': "orm['demoscene.Nick']"})
},
'demoscene.platform': {
'Meta': {'object_name': 'Platform'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'demoscene.production': {
'Meta': {'object_name': 'Production'},
'author_affiliation_nicks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'member_productions'", 'symmetrical': 'False', 'to': "orm['demoscene.Nick']"}),
'author_nicks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productions'", 'symmetrical': 'False', 'to': "orm['demoscene.Nick']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'platforms': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productions'", 'symmetrical': 'False', 'to': "orm['demoscene.Platform']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productions'", 'symmetrical': 'False', 'to': "orm['demoscene.ProductionType']"})
},
'demoscene.productiontype': {
'Meta': {'object_name': 'ProductionType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'demoscene.releaser': {
'Meta': {'object_name': 'Releaser'},
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'members'", 'symmetrical': 'False', 'to': "orm['demoscene.Releaser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_group': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['demoscene']
|
import string
# Project Euler 22: total of all name scores, where a name's score is the
# sum of its letters' alphabetical values times its 1-based position in the
# sorted list (e.g. COLIN = 3+15+12+9+14 = 53, at position 938 scores 49714)
data = open("names.txt", "r").read()
names = sorted(data.replace("\n", "").replace('"', "").split(","))
# Map 'A' -> 0, 'B' -> 1, ...; the +1 below makes the values 1-based
alphadict = dict((x, i) for (i, x) in enumerate(string.uppercase))
total = 0
for y in range(len(names)):
total = total + sum(alphadict[x] + 1 for x in names[y]) * (y + 1)
print total
|
__version__ = "0.0.1"
__author__ = "Tongtong (Suri) Sun"
from .cbapi import get_org, get_ppl, set_key
__all__ = ['get_org', 'get_ppl', 'set_key']
|
print("helloworld")
name=int(input("enter the name"))
age=int(input("enter the number"))
#myself sree i am 20 years old
print("myself",name,"i am",age,"years old")
|
from django.shortcuts import render, redirect
from phonebook.models import PhoneBook
# Create your views here.
def test(request):
return render(request, 'phonebook/test.html')
def index(request):
alluser = PhoneBook.objects.values('id','이름', '전화번호')
print(alluser)
context = {
"phonebook":alluser
}
return render(request, 'phonebook/index.html', context)
def phoneAdd(request):
if request.method == "POST":
name = request.POST.get("name")
phNum = request.POST.get("phNum")
email = request.POST.get("email")
addr = request.POST.get("addr")
birth = request.POST.get("birth")
author = request.POST.get("author")
phonebook = PhoneBook()
phonebook.이름 = name
phonebook.전화번호 = phNum
phonebook.이메일 = email
phonebook.주소 = addr
phonebook.생년월일 = birth
phonebook.작성자 = author
phonebook.save()
return redirect("PB:I")
else :
print('user : ', request.user)
print('type : ', type(request.user))
if request.user.is_active :
return render(request, 'phonebook/phoneAdd.html')
else :
return redirect('login')
def phoneDelete(request, userId):
if request.method=="POST":
PhoneBook.objects.get(id=userId).delete()
return redirect("PB:I")
else :
userInfo = PhoneBook.objects.values('id', '이름', '전화번호').get(id=userId)
context = {
"userInfo" : userInfo
}
return render(request, 'phonebook/phoneDelete.html', context)
def phoneDetail(request,userId):
userInfo = PhoneBook.objects.values('id','이름','전화번호','주소','이메일','생년월일','작성자').get(id=userId)
context={
'phonebook':userInfo
}
return render(request, 'phonebook/phoneDetail.html',context)
def phoneUpdate(request, userId):
if request.method == "POST":
name = request.POST.get("name")
phNum = request.POST.get("phNum")
email = request.POST.get("email")
addr = request.POST.get("addr")
birth = request.POST.get("birth")
author = request.POST.get("author")
phonebook = PhoneBook()
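# Re-using the existing primary key below makes save() update the stored
# row instead of inserting a new one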
phonebook.id = userId
phonebook.이름 = name
phonebook.전화번호 = phNum
phonebook.이메일 = email
phonebook.주소 = addr
phonebook.생년월일 = birth
phonebook.작성자 = author
phonebook.save()
return redirect("PB:I")
else :
if request.user.is_active:
userInfo = PhoneBook.objects.values('id','이름','전화번호','주소','이메일','생년월일', '작성자').get(id=userId)
if str(request.user) == userInfo["작성자"]:
context = {
"userInfo" : userInfo
}
return render(request, 'phonebook/phoneUpdate.html', context)
else :
return redirect("errorAccess")
else :
return redirect('errorAccess') |
import os
import sys
import scipy.misc
import pprint
import numpy as np
import time
import math
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from glob import glob
from random import shuffle
from dfc_vae import *
from utils import *
from vgg_loss import *
pp = pprint.PrettyPrinter()
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="1"
'''
Tensorlayer implementation of DFC-VAE
'''
flags = tf.app.flags
flags.DEFINE_integer("epoch", 50, "Epoch to train [50]")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate of for adam [0.001]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 32, "The number of batch images [32]")
flags.DEFINE_integer("image_size", 148, "The size of image to use (will be center cropped) [148]")
flags.DEFINE_integer("output_size", 128, "The size of the output images to produce [128]")
flags.DEFINE_integer("sample_size", 128, "The number of sample images [128]")
flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
flags.DEFINE_integer("z_dim", 100, "Dimension of latent representation vector from. [100]")
flags.DEFINE_integer("sample_step", 500, "The interval of generating sample. [500]")
flags.DEFINE_integer("save_step", 500, "The interval of saveing checkpoints. [500]")
flags.DEFINE_string("dataset", "img_align_celeba", "The name of dataset [img_align_celeba]")
flags.DEFINE_string("test_name", "testname", "The number of experiment [testname]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_string("times_dir", "times", "Directory name to save times [times]")
flags.DEFINE_boolean("random_crop", False, "True to perform random cropping (non centered) [False]")
flags.DEFINE_string("load_model","no", "Provide the name of the model to load [no]")
flags.DEFINE_integer("init_blur", 0, "Initial training on blurred images. [0]")
FLAGS = flags.FLAGS
def main(_):
pp.pprint(FLAGS.__flags)
# prepare for the file directory
tl.files.exists_or_mkdir(FLAGS.checkpoint_dir)
tl.files.exists_or_mkdir(FLAGS.sample_dir)
with tf.device("/gpu:0"):
##========================= DEFINE MODEL ===========================##
# the input_imgs are input for both encoder and discriminator
input_imgs = tf.placeholder(tf.float32,[FLAGS.batch_size, FLAGS.output_size,
FLAGS.output_size, FLAGS.c_dim], name='real_images')
# normal distribution for generator
z_p = tf.random_normal(shape=(FLAGS.batch_size, FLAGS.z_dim), mean=0.0, stddev=1.0)
# normal distribution for reparameterization trick
eps = tf.random_normal(shape=(FLAGS.batch_size, FLAGS.z_dim), mean=0.0, stddev=1.0)
lr_vae = tf.placeholder(tf.float32, shape=[])
# ----------------------encoder----------------------
net_out1, net_out2, z_mean, z_log_sigma_sq = encoder(input_imgs, is_train=True, reuse=False)
# ----------------------decoder----------------------
# decode z
# z = z_mean + z_sigma * eps
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps)) # using reparameterization tricks
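# The reparameterization trick keeps sampling differentiable: gradients flow
# through z_mean and z_log_sigma_sq while the randomness is isolated in eps ~ N(0, I)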
gen0, gen0_logits = generator(z, is_train=True, reuse=False) # reconstruction
# ----------------------vgg net--------------------------
vgg1_input = tf.image.resize_images(input_imgs,[224,224])
net_in_real = InputLayer(vgg1_input, name='input1')
conv1,l1_r,l2_r,l3_r,_,_ = conv_layers_simple_api(net_in_real,reuse=False)
vgg1 = fc_layers(conv1,reuse=False)
vgg2_input = tf.image.resize_images(gen0.outputs,[224,224])
net_in_fake = InputLayer(vgg2_input, name='input2')
conv2,l1,l2,l3,_,_ = conv_layers_simple_api(net_in_fake,reuse=True)
vgg2 = fc_layers(conv2,reuse=True)
# ----------------------for samples----------------------
gen2, gen2_logits = generator(z, is_train=False, reuse=True)
gen3, gen3_logits = generator(z_p, is_train=False, reuse=True)
##========================= DEFINE TRAIN OPS =======================##
'''
reconstruction loss:
use the learned similarity measurement in the l-th layer (feature space) of a pretrained VGG-16
'''
SSE_loss = tf.reduce_mean(tf.reduce_sum(tf.square(gen0.outputs - input_imgs),[1,2,3]))
print(SSE_loss.get_shape(),type(SSE_loss))
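# Note: SSE_loss is only computed for logging; the optimized VAE objective
# below uses the VGG feature (perceptual) losses instead of this pixel-wise error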
# perceptual loss in feature space in VGG net
p1_loss = tf.reduce_mean(tf.reduce_sum(tf.square(l1 - l1_r), [1,2,3]))
p2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(l2 - l2_r), [1,2,3]))
p3_loss = tf.reduce_mean(tf.reduce_sum(tf.square(l3 - l3_r), [1,2,3]))
p_loss = p1_loss + p2_loss + p3_loss
'''
KL divergence:
we get z_mean, z_log_sigma_sq from the encoder and sample z from N(z_mean, z_sigma^2),
then compute the KL divergence between that distribution and the standard normal N(0, I)
'''
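# Closed form used below for KL( N(mu, sigma^2) || N(0, I) ):
#   KL = -1/2 * sum_j ( 1 + log(sigma_j^2) - mu_j^2 - sigma_j^2 )
# with z_log_sigma_sq = log(sigma^2)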
# train_vae
KL_loss = tf.reduce_mean(- 0.5 * tf.reduce_sum(1 + z_log_sigma_sq - tf.square(z_mean) - tf.exp(z_log_sigma_sq),1))
print(KL_loss.get_shape(),type(KL_loss))
### important points! ###
style_content_weight = 3e-5 # you may need to tweak this weight for a different dataset
VAE_loss = KL_loss + style_content_weight*p_loss
e_vars = tl.layers.get_variables_with_name('encoder',True,True)
g_vars = tl.layers.get_variables_with_name('generator', True, True)
vae_vars = e_vars + g_vars
print("-------encoder-------")
net_out1.print_params(False)
print("-------generator-------")
gen0.print_params(False)
# optimizers for updating encoder and generator
vae_optim = tf.train.AdamOptimizer(lr_vae, beta1=FLAGS.beta1) \
.minimize(VAE_loss, var_list=vae_vars)
sess = tf.InteractiveSession()
tl.layers.initialize_global_variables(sess)
npz = np.load('vgg16_weights.npz')
params = []
for val in sorted( npz.items() ):
print(" Loading %s" % str(val[1].shape))
params.append(val[1])
tl.files.assign_params(sess, params, vgg1)
tl.files.assign_params(sess, params, vgg2)
# load checkpoint params
if FLAGS.load_model != "no":
print('Loading model: ' + str(FLAGS.load_model))
load_params = tl.files.load_npz(name=FLAGS.load_model+'_e1.npz')
tl.files.assign_params(sess, load_params, net_out1)
load_params = tl.files.load_npz(name=FLAGS.load_model+'_e2.npz')
tl.files.assign_params(sess, load_params, net_out2)
load_params = tl.files.load_npz(name=FLAGS.load_model+'_g.npz')
tl.files.assign_params(sess, load_params, gen0)
# create checkpoint dir
save_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.test_name) #'./checkpoint/vae_0808'
tl.files.exists_or_mkdir(save_dir)
# create samples dir
samples_dir = FLAGS.sample_dir + "/" + FLAGS.test_name
tl.files.exists_or_mkdir(samples_dir)
# create times dir and file
tl.files.exists_or_mkdir(FLAGS.times_dir)
timesFilename = FLAGS.times_dir + "/" + FLAGS.test_name + ".times"
with open(timesFilename, "w") as f:
f.write(""); # clean file
# get the list of absolute paths of all images in dataset
data_files = glob(os.path.join("./data", FLAGS.dataset, "*.jpg"))
data_files = sorted(data_files)
data_files = np.array(data_files) # for tl.iterate.minibatches
##========================= TRAIN MODELS ================================##
iter_counter = 0
training_start_time = time.time()
blurVal = FLAGS.init_blur
# use all images in dataset in every epoch
for epoch in range(FLAGS.epoch):
## shuffle data
print("[*] Dataset shuffled!")
minibatch = tl.iterate.minibatches(inputs=data_files, targets=data_files, batch_size=FLAGS.batch_size, shuffle=True)
idx = 0
batch_idxs = min(len(data_files), FLAGS.train_size) // FLAGS.batch_size
blurVal -= 6
if blurVal < 0:
blurVal = 0
while True:
try:
batch_files,_ = minibatch.next()
batch = [get_image(batch_file, FLAGS.image_size, is_crop=True, resize_w=FLAGS.output_size, is_grayscale = 0, blur=blurVal, is_centered=not FLAGS.random_crop) \
for batch_file in batch_files]
batch_images = np.array(batch).astype(np.float32)
start_time = time.time()
vae_current_lr = FLAGS.learning_rate
# update
p, p1, p2, p3, kl, sse, errE, _ = sess.run([p_loss,p1_loss,p2_loss,p3_loss,KL_loss,SSE_loss,VAE_loss,vae_optim], feed_dict={input_imgs: batch_images, lr_vae:vae_current_lr})
print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, vae_loss:%.2f, kl_loss:%.2f, sse_loss:%.2f, p1_loss:%.2f, p2_loss:%.2f, p3_loss:%.2f, p_loss:%.2f" \
% (epoch, FLAGS.epoch, idx, batch_idxs,
time.time() - start_time, errE, kl, sse, p1, p2, p3, p))
sys.stdout.flush()
iter_counter += 1
# save samples
if np.mod(iter_counter, FLAGS.sample_step) == 0:
# generate and visualize generated images
img1, img2 = sess.run([gen2.outputs, gen3.outputs], feed_dict={input_imgs: batch_images})
save_images(img1, [8, 8],
'./{}/train_{:d}.png'.format(samples_dir, iter_counter))
# img2 = sess.run(gen3.outputs, feed_dict={input_imgs: batch_images})
save_images(img2, [8, 8],
'./{}/train_{:d}_random.png'.format(samples_dir, iter_counter))
# save input image for comparison
save_images(batch_images,[8, 8],'./{}/train_{:d}_input.png'.format(samples_dir, iter_counter))
print("[Sample] sample generated!!!")
sys.stdout.flush()
# write times to file
with open(timesFilename, "a") as file:
file.write("%8d,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f\n" % (iter_counter, errE, kl, sse, p1, p2, p3, p))
# save checkpoint
if np.mod(iter_counter, FLAGS.save_step) == 0:
# save current network parameters
print("[*] Saving checkpoints...")
net_e1_name = os.path.join(save_dir, 'net_%d_e1.npz' % iter_counter)
net_e2_name = os.path.join(save_dir, 'net_%d_e2.npz' % iter_counter)
net_g_name = os.path.join(save_dir, 'net_%d_g.npz' % iter_counter)
tl.files.save_npz(net_out1.all_params, name=net_e1_name, sess=sess)
tl.files.save_npz(net_out2.all_params, name=net_e2_name, sess=sess)
tl.files.save_npz(gen0.all_params, name=net_g_name, sess=sess)
print("[*] Saving checkpoints SUCCESS!")
idx += 1
# print idx
except StopIteration:
print('one epoch finished')
break
training_end_time = time.time()
print("The processing time of program is : {:.2f}mins".format((training_end_time-training_start_time)/60.0))
if __name__ == '__main__':
tf.app.run()
|