seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42448338704 | from alpha_vantage.timeseries import TimeSeries
from bs4 import BeautifulSoup
import json
# Read the Alpha Vantage API key from a local JSON config file.
with open("config.json", "r") as config_file:
    config = json.load(config_file)
api_key = config.get("api_key")
print("apik key: ", api_key)
# Client for the Alpha Vantage time-series endpoints (TimeSeries is imported
# from alpha_vantage.timeseries at the top of the file).
ts1 = TimeSeries(key=api_key)
# Retrieve the monthly time series data for AAPL
# data, meta_data = ts1.get_monthly("AAPL")
# NOTE(review): this call fetches WEEKLY data while the print label below still
# says "Monthly"; presumably get_weekly also returns a (data, meta_data)
# tuple like get_monthly - confirm against the alpha_vantage docs.
data = ts1.get_weekly("AAPL")
# Print the data
print("Monthly Time Series Data for AAPL:")
print(data)
# Optionally, you can print the metadata as well
print("Meta Data:")
# print(meta_data)
| tokyo-lab/alpha | data_using_alpha_vantage_package.py | data_using_alpha_vantage_package.py | py | 554 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "alpha_vantage.timeseries.TimeSeries",
"line_number": 11,
"usage_type": "call"
}
] |
8704975946 | import time
from stable_baselines3 import PPO, A2C
from batkill_gym import BatkillEnv
import os
# Output locations for model checkpoints and TensorBoard logs.
models_dir = "ppo"
logdir = f"logs"
if not os.path.exists(models_dir):
    os.makedirs(models_dir)
if not os.path.exists(logdir):
    os.makedirs(logdir)
# Custom Batkill gym environment; reset() prepares the first observation.
env = BatkillEnv()
env.reset()
TIMESTEPS = 100000
# Train a PPO agent (A2C is imported but unused here) and save a checkpoint
# named after the number of training timesteps.
model = PPO('MlpPolicy', env, verbose=1, tensorboard_log=logdir)
model.learn(total_timesteps=TIMESTEPS)
model.save(f"{models_dir}/{TIMESTEPS}")
# img = model.env.render(mode='rgb_array')
# imageio.mimsave('lander_a2c.gif', [np.array(img) for i, img in enumerate(images) if i%2 == 0], fps=29) | polako/batkill | batkill_ai_train.py | batkill_ai_train.py | py | 595 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numb... |
42123413608 | import datetime
from collections import namedtuple
import isodate
from .. import build_data_path as build_data_path_global
from ..input_definitions.examples import LAGTRAJ_EXAMPLES_PATH_PREFIX
TrajectoryOrigin = namedtuple("TrajectoryOrigin", ["lat", "lon", "datetime"])
TrajectoryDuration = namedtuple("TrajectoryDuration", ["forward", "backward"])
TrajectoryDefinition = namedtuple(
"TrajectoryDefinition",
[
"domain",
"duration",
"origin",
"name",
"type",
"timestep",
"extra_kwargs",
"version",
],
)
def duration_or_none(s):
    """Parse an ISO-8601 duration string, treating a missing value as zero.

    Returns a zero ``datetime.timedelta`` when *s* is None, otherwise the
    duration produced by ``isodate.parse_duration``.
    """
    if s is not None:
        return isodate.parse_duration(s)
    return datetime.timedelta()
INPUT_REQUIRED_FIELDS = {
"trajectory_type": ["linear", "eulerian", "lagrangian"],
# domain should only be given when creating a lagrangian trajectory or if
# we're trying to get the timestep from the domain data. In both cases the
# domain should be a string
"domain": [
dict(requires=dict(trajectory_type="lagrangian"), choices=str),
dict(requires=dict(timestep="domain_data"), choices=str),
None,
],
"lat_origin": float,
"lon_origin": float,
"datetime_origin": isodate.parse_datetime,
"forward_duration|backward_duration": duration_or_none,
# if the domain is given we can use domain data for the timestep, otherwise
# the timestep should be a parsable duration string
"timestep": (
dict(
requires=dict(domain="__is_set__"),
choices=["domain_data"],
),
isodate.parse_duration,
),
# only linear trajectories need to have their velocity prescribed
"u_vel": dict(requires=dict(trajectory_type="linear"), choices=float),
"v_vel": dict(requires=dict(trajectory_type="linear"), choices=float),
# velocity method is only relevant when making lagrangian trajectories
"velocity_method": dict(
requires=dict(trajectory_type="lagrangian"),
choices=[
"single_height_level",
"single_pressure_level",
"lower_troposphere_humidity_weighted",
],
),
"velocity_method_height": dict(
requires=dict(velocity_method="single_height_level"),
choices=float,
),
"velocity_method_pressure": dict(
requires=dict(velocity_method="single_pressure_level"),
choices=float,
),
}
def build_data_path(root_data_path, trajectory_name):
    """Return the local netCDF file path for a named trajectory.

    The ``lagtraj://`` example prefix is stripped first, since example
    trajectories are stored locally under the same directory layout.
    """
    prefix = LAGTRAJ_EXAMPLES_PATH_PREFIX
    if trajectory_name.startswith(prefix):
        trajectory_name = trajectory_name[len(prefix):]
    trajectory_data_root = build_data_path_global(
        root_data_path=root_data_path, data_type="trajectory"
    )
    return trajectory_data_root / "{}.nc".format(trajectory_name)
| EUREC4A-UK/lagtraj | lagtraj/trajectory/__init__.py | __init__.py | py | 2,892 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "d... |
# NOTE(review): pandas/pyplot/numpy are aliased to their own names and, along
# with gpd, appear unused in this file - presumably needed by the helpers.
import pandas as pandas
import matplotlib.pyplot as pyplot
import numpy as numpy
import streamlit as st
import geopandas as gpd
import pydeck as pdk
from helpers.data import load_data, data_preprocessing, load_geo_data, geo_data_preprocessing
from helpers.viz import yearly_pollution, monthly_pollution, ranking_pollution, pollution_map
from helpers.model import pollution_prediction
# Source CSV of US pollution measurements, 2000-2016.
DATA_PATH = 'pollution_us_2000_2016.csv'
st.title("Analysis of US Pollution between 2000 and 2016, focusing on California")
# Read Data
# Only the first 145000 rows are loaded to keep the app responsive.
df = load_data(DATA_PATH,145000)
st.header('Raw data')
st.dataframe(df)
# Clean Data
st.header('Data Preprocessing')
df_cleaned = data_preprocessing(df.copy())
st.subheader('Cleaned data')
st.dataframe(df_cleaned)
# Data Visualization
st.header('Data Visualization')
# Sidebar filters shared by every chart below.
st.sidebar.title('Filters')
pollutant = st.sidebar.selectbox('Pollutant', ["NO2Mean", "SO2Mean", "O3Mean", "COMean"])
cali = st.sidebar.checkbox('Cali Data Only')
values = st.sidebar.checkbox('Show Data Values')
# Yearly plot
st.subheader('Yearly pollution change')
st.markdown(f"__{pollutant} in {'California' if cali else 'the US'} by year between 2000 and 2016__")
yearly_pollution_chart = yearly_pollution(df_cleaned, pollutant, cali, values)
st.pyplot(yearly_pollution_chart)
# Monthly plot
st.subheader('Monthly pollution change')
st.markdown(f"__{pollutant} in {'California' if cali else 'the US'} by month between 2000 and 2016__")
monthly_pollution_chart = monthly_pollution(df_cleaned, pollutant, cali, values)
st.pyplot(monthly_pollution_chart)
# Ranking plot
st.subheader('State rankings')
st.markdown(f"__Top 30 {pollutant} rankings in the US__")
ranking_pollution_chart = ranking_pollution(df_cleaned, pollutant, values)
st.pyplot(ranking_pollution_chart)
# Modeling
st.subheader('Prediction Model')
st.markdown(f"__{pollutant} predictions until 2026 in {'California' if cali else 'the US'}__")
prediction_model = pollution_prediction(df_cleaned, pollutant, cali, values)
st.pyplot(prediction_model)
# Data Mapping
st.header('Data Mapping')
GEO_DATA_PATH = 'geo_data.json'
# Read Data
geo_data = load_geo_data(GEO_DATA_PATH)
st.subheader('Raw Geo Data (sample of 3)')
st.write(geo_data.sample(3))
# Clean and merge data
st.subheader('Geo data Preprocessing: Cleaned and Merged Geo data (sample of 3)')
merged = geo_data_preprocessing(geo_data.copy(), df_cleaned.copy())
st.write(merged)
# Map data
st.subheader('Mapped data')
st.markdown(f"__US {pollutant} Averages from 2000 to 2016__")
# Sequential blue color ramp for the heatmap layer (ColorBrewer GnBu-like).
COLOR_BREWER_BLUE_SCALE = [
    [240, 249, 232],
    [204, 235, 197],
    [168, 221, 181],
    [123, 204, 196],
    [67, 162, 202],
    [8, 104, 172],
]
# Heatmap of mean NO2 at each station coordinate.
NO2Mean = pdk.Layer(
    "HeatmapLayer",
    data=merged,
    opacity=0.9,
    get_position=["long", "lat"],
    aggregation=pdk.types.String("MEAN"),
    color_range=COLOR_BREWER_BLUE_SCALE,
    threshold=1,
    get_weight="NO2Mean",
    pickable=True,
)
# 3D column layer for SO2 (defined but not added to the deck below).
SO2Mean = pdk.Layer(
    "ColumnLayer",
    data=merged,
    get_position=["long", "lat"],
    get_elevation="SO2Mean",
    elevation_scale=100,
    radius=50,
    get_fill_color=[180, 0, 200, 140],
    pickable=True,
    auto_highlight=True,
)
# Deck centered roughly on the continental US; only the NO2 layer is rendered.
st.pydeck_chart(pdk.Deck(
    map_style='mapbox://styles/mapbox/light-v9',
    initial_view_state=pdk.ViewState(
        latitude=37.6000,
        longitude=-95.6650,
        zoom=5,
        pitch=50,
    ),
    layers=[NO2Mean]
))
)) | natalie-cheng/pollution-project | main.py | main.py | py | 3,405 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.title",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "helpers.data.load_data",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.dat... |
20395581562 | """empty message
Revision ID: fbfbb357547c
Revises: 2152db7558b2
Create Date: 2021-05-07 17:56:36.699948
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fbfbb357547c'
down_revision = '2152db7558b2'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add a nullable ``finished`` timestamp to ``task``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task', sa.Column('finished', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``finished`` column from ``task``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('task', 'finished')
    # ### end Alembic commands ###
| metalsalmon/remote_monitoring | migrations/versions/fbfbb357547c_.py | fbfbb357547c_.py | py | 653 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTim... |
22893421369 | # -*- coding: utf-8 -*-
from collective.transmogrifier.interfaces import ISection
from collective.transmogrifier.interfaces import ISectionBlueprint
from collective.transmogrifier.utils import resolvePackageReferenceOrFile
from zope.interface import classProvides
from zope.interface import implements
import os
try:
import json
except ImportError:
import simplejson as json
DATAFIELD = '_datafield_'
class JSONSource(object):
    """Transmogrifier source section yielding items from a tree of JSON files.

    The ``path`` option points at a directory laid out as
    ``<path>/<N>/<M>.json`` where N and M are integers; files are replayed in
    numeric order after any items produced by the previous section.
    """
    classProvides(ISectionBlueprint)
    implements(ISection)
    def __init__(self, transmogrifier, name, options, previous):
        self.transmogrifier = transmogrifier
        self.name = name
        self.options = options
        self.previous = previous
        self.context = transmogrifier.context
        # 'path' may be a package reference or a plain filesystem path.
        self.path = resolvePackageReferenceOrFile(options['path'])
        if self.path is None or not os.path.isdir(self.path):
            raise Exception('Path (' + str(self.path) + ') does not exists.')
        self.datafield_prefix = options.get('datafield-prefix', DATAFIELD)
    def __iter__(self):
        # First pass through everything from the upstream section unchanged.
        for item in self.previous:
            yield item
        # Then walk the numbered subdirectories in numeric (not lexical) order,
        # skipping hidden entries...
        for item3 in sorted([
            int(i) for i in os.listdir(self.path) if not i.startswith('.')
        ]):
            # ...and the numbered *.json files inside each one ([:-5] strips
            # the ".json" suffix so sorting is numeric).
            for item2 in sorted([
                int(j[:-5])
                for j in os.listdir(os.path.join(self.path, str(item3)))
                if j.endswith('.json')
            ]):
                f = open(os.path.join(
                    self.path, str(item3), '%s.json' % item2
                ))
                item = json.loads(f.read())
                f.close()
                yield item
| eikichi18/collective.jsonmigrator | collective/jsonmigrator/blueprints/source_json.py | source_json.py | py | 1,658 | python | en | code | null | github-code | 6 | [
{
"api_name": "zope.interface.classProvides",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collective.transmogrifier.interfaces.ISectionBlueprint",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "zope.interface.implements",
"line_number": 23,
"u... |
70799503229 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import requests
import random
from itertools import count
# Request fails unless we provide a user-agent
api_response = requests.get('https://api.thevirustracker.com/free-api?countryTimeline=US', headers={"User-Agent": "Chrome"})
covid_stats = api_response.json()['timelineitems']
# Break out individual stats
date= []
deaths = []
daily_deaths =[]
total_casesL = []
daily_cases = []
# Each timeline item maps date strings to that day's stats; the non-date
# 'stat' entry is removed before iterating over the date keys.
for i in covid_stats:
    print (i)
    del i['stat']
    for c_date, info in i.items():
        print("\nDate:", c_date)
        date.append(c_date)
        print ('Total Cases:',info['total_cases'])
        total_casesL.append(info['total_cases'])
        print ('New Cases:',info['new_daily_cases'])
        daily_cases.append(info['new_daily_cases'])
        daily_deaths.append(info['new_daily_deaths'])
        deaths.append(info['total_deaths'])
print(total_casesL)
print(daily_cases)
print(daily_deaths)
print (date)
print (plt.style.available)
# Overall case-fatality ratio from the final cumulative totals.
death_rate = deaths[-1]/total_casesL[-1]
c = str(death_rate)
# NOTE(review): c[1:6] assumes the ratio prints as "0.xxxx" and slices off the
# leading zero - this breaks if the ratio is >= 1 or in scientific notation.
print('Death rate: ' + c[1:6])
# X axis is simply the day index (one point per timeline entry).
y = np.arange(len(date))
plt.plot(y,total_casesL, label = 'Cases', marker = '.', linewidth=3 )
plt.plot(y,daily_cases , 'y', label = 'New Cases', linestyle = '--', )
plt.plot(y, daily_deaths, 'k', label = 'New Deaths' )
plt.plot(y, deaths, color = 'r', label = 'Deaths' , )
plt.ylabel('People')
plt.xlabel('Days')
plt.title('Covid 19 Cases (USA)')
plt.tight_layout()
plt.grid(True)
plt.legend()
plt.show()
| it2515/Covid-19 | Covid19.py | Covid19.py | py | 1,590 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.ara... |
43462667811 | import pyshorteners
def shorten(url):
    """Shorten *url* through the TinyURL backend of pyshorteners."""
    return pyshorteners.Shortener().tinyurl.short(url)
if __name__ == "__main__":
url = input("Enter link for sorting:")
print(f"\n {shorten(url)}")
# https://github.com/urmil89
| urmil404/url-Sorter | main.py | main.py | py | 245 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyshorteners.Shortener",
"line_number": 5,
"usage_type": "call"
}
] |
13322067740 | import sys
import getopt
import time
import random
import os
import math
import Checksum
import BasicSender
'''
This is a skeleton sender class. Create a fantastic transport protocol here.
'''
class Sender(BasicSender.BasicSender):
    """Skeleton stop-and-wait style sender built on BasicSender.

    Reads the input file in 500-byte chunks and frames them as
    start/data/.../end packets; reliability handling (loss, corruption,
    duplication, delay) is intentionally left as an exercise below.
    """
    def __init__(self, dest, port, filename, debug=False):
        super(Sender, self).__init__(dest, port, filename, debug)
    def handle_response(self,response_packet):
        # Responses are only trusted when their checksum verifies.
        if Checksum.validate_checksum(response_packet):
            print("recv: %s" % response_packet)
        else:
            print("recv: %s <--- CHECKSUM FAILED" % response_packet)
    # Main sending loop.
    def start(self):
        seqno = 0
        msg = self.infile.read(500).decode()
        msg_type = None
        while not msg_type == 'end':
            # Read one chunk ahead so the final packet can be tagged 'end'.
            next_msg = self.infile.read(500).decode()
            msg_type = 'data'
            if seqno == 0:
                msg_type = 'start'
            elif next_msg == "":
                msg_type = 'end'
            packet = self.make_packet(msg_type,seqno,msg)
            self.send(packet.encode())
            print("sent: %s" % packet)
            ##### your code goes here ... #####
            # your code should be able to handle packet
            # 1. loss
            # 2. corruption
            # 3. duplication
            # 4. delay
            # add new functions as necessary
            # NOTE(review): receive() has no timeout here, so a lost ACK
            # blocks the loop forever until reliability logic is added.
            response = self.receive()
            resp_str = response.decode()
            self.handle_response(resp_str)
            ##### your code ends here ... #####
            msg = next_msg
            seqno += 1
        self.infile.close()
'''
This will be run if you run this script from the command line. You should not
change any of this; the grader may rely on the behavior here to test your
submission.
'''
if __name__ == "__main__":
    def usage():
        """Print command-line usage for the sender."""
        print("BEARDOWN-TP Sender")
        print("-f FILE | --file=FILE The file to transfer; if empty reads from STDIN")
        print("-p PORT | --port=PORT The destination port, defaults to 33122")
        print("-a ADDRESS | --address=ADDRESS The receiver address or hostname, defaults to localhost")
        print("-d | --debug Print debug messages")
        print("-h | --help Print this usage message")
    # Parse CLI flags; any getopt failure prints usage and exits.
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                               "f:p:a:d", ["file=", "port=", "address=", "debug="])
    except:
        usage()
        exit()
    # Defaults, overridable from the command line.
    port = 33122
    dest = "localhost"
    filename = None
    debug = False
    for o,a in opts:
        if o in ("-f", "--file="):
            filename = a
        elif o in ("-p", "--port="):
            port = int(a)
        elif o in ("-a", "--address="):
            dest = a
        elif o in ("-d", "--debug="):
            debug = True
    s = Sender(dest,port,filename,debug)
    try:
        s.start()
    except (KeyboardInterrupt, SystemExit):
        exit()
| weichen-ua/MIS543O_Project2 | Sender.py | Sender.py | py | 2,910 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "BasicSender.BasicSender",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "Checksum.validate_checksum",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "getopt.getopt",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": ... |
71634339387 | from flask import Flask
import requests
URL="https://en.wikipedia.org/w/api.php"
app = Flask(__name__)
#configuring the server name as required
app.config['SERVER_NAME'] = "wiki-search.com:5000"
@app.route("/")
def home():
    """Landing page: tells the user to put the search query in the subdomain."""
    return 'Enter you query as the subdomain.'
@app.route('/', subdomain="<SEARCHPAGE>")
def url_search(SEARCHPAGE):
    """Search Wikipedia for the subdomain query and return matching page URLs.

    The response is a dict with a single "links" key holding the full URL of
    every page the search returned.
    """
    if SEARCHPAGE is None:
        return 'Enter you search query as the subdomain'
    # Full-text search: collect the titles of every matching page.
    search_response = requests.get(URL, params={
        "action": "query",
        "format": "json",
        "list": "search",
        "srsearch": SEARCHPAGE}).json()
    titles = [hit['title'] for hit in search_response['query']['search']]
    # Resolve each title to its canonical page URL via the info prop.
    urls = []
    for page_title in titles:
        info_response = requests.get(URL, params={
            "action": "query",
            "format": "json",
            "titles": page_title,
            "prop": "info",
            "inprop": "url"}).json()
        pages = info_response['query']['pages']
        urls.extend(pages[page_id]['fullurl'] for page_id in pages)
    # Dictionary with the links appended as a list under the "links" key.
    return {"links": urls}
if __name__ == '__main__':
app.run(debug=True,port=5000)
| jubinjacob93/Opensearch-Server | wiksearch.py | wiksearch.py | py | 1,513 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
}
] |
40333923387 | import numpy as np
import wave
import pyaudio
from scipy.io import wavfile
from scipy import interpolate
import math
import matplotlib.pyplot as plt
#MaxVal = 2147483647
MaxVal = 2147483647
#found relavant blog post:
#http://yehar.com/blog/?p=197
def clippingFunction(inSample):
    """Soft-clip one sample so its magnitude approaches MaxVal asymptotically.

    Based on the soft-saturation curve from the blog post linked above; the
    sign of the input is preserved.
    NOTE(review): MaxVal (2147483647) is the signed 32-bit maximum, although
    the inline comment says "maximum 24 bit output" - confirm the intended
    bit depth.
    """
    threshold = MaxVal #maximum 24 bit output
    outSample = threshold - threshold/(abs(inSample) + math.sqrt(threshold) + 1) ** 2
    #return 1
    return outSample * np.sign(inSample) #preserves sign
def softClip(sampleArr):
    """Run both channels of every stereo frame through the soft-clipping curve.

    Returns a new list of [left, right] pairs; the input is not modified.
    """
    return [
        [clippingFunction(frame[0]), clippingFunction(frame[1])]
        for frame in sampleArr
    ]
def main():
    """Load a test WAV, spline-resample it at several step sizes, write outputs, plot."""
    #testAudioIn = 'sinC2'
    testAudioIn = 'flume test'
    fileName = 'TestAudioIn/' + testAudioIn + '.wav'
    sampleRate, sampleArr = wavfile.read(fileName)
    stepSize = 500
    #sampleArrClipped = softClip(sampleArr)
    #wavfile.write("test.wav", sampleRate, np.array(sampleArrClipped)) #need to convert to a numpy array for this function
    # Sweep the spline step size (1, 51, 101, ..., 451); one output file per step.
    # NOTE(review): files get an .mp3 extension but contain WAV-encoded data.
    for i in range(1, 500+1, 50):
        (splineEval, skipNValues, linSpace) = applySpline(sampleArr, i)
        wavfile.write("TestAudioOut/" "" + testAudioIn + "_" + str(int(i/50)) + ".mp3", sampleRate, np.array(splineEval)) #need to convert to a numpy array for this function
    #graphSignal(sampleArr)
    #graphSignal([[i*2 + 1, i*2 + 1] for i in range(10)])
    #graphSignal([sampleArr, sFlat])
    print("File Name:", fileName)
    print("Frame Rate:", sampleRate)
    #print("Sample Array In:", sampleArr[0:100])
    #print("Sample Array Out :", sampleArrClipped[0:100])
    graphSignal(sampleArr, stepSize)
def applySpline(sampleArrs, stepSize):
    """Fit a spline through every stepSize-th sample of channel 0.

    Channel 0 is normalised by MaxVal, subsampled every *stepSize* frames,
    and the spline is then evaluated back over every original frame index.
    Returns (splineEval, skipNValues, linSpace): the dense evaluation, the
    subsampled knot values, and the knot x-positions.
    """
    extractedChannel0 = list(map(lambda x: x[0]/MaxVal, sampleArrs))
    skipNValues = extractedChannel0[::stepSize]
    linSpace = list(range(0, len(extractedChannel0), stepSize))
    interpolationSpace = list(range(0, len(extractedChannel0)))
    # NOTE(review): s="0" passes a *string* smoothing factor to splrep; scipy
    # documents s as a float (s=0 for interpolation) - confirm this behaves
    # as intended.
    splineRep = interpolate.splrep(linSpace, skipNValues, s="0")
    splineEval = interpolate.splev(interpolationSpace, splineRep)
    return (splineEval, skipNValues, linSpace)
def graphSignal(sampleArrs, stepSize):
    """Plot the spline reconstruction together with the knots it was fit through."""
    (splineEval, skipNValues, linSpace) = applySpline(sampleArrs, stepSize)
    plt.plot(splineEval)
    # Mark the subsampled knot points on top of the dense spline curve.
    plt.plot(linSpace, skipNValues, marker = "x", linestyle = 'None')
    plt.axis([0, 10000, -1, 1])
    plt.show()
main()
'''
Cades Clipper
yOut = threshold - frac(threshold)(yIn +1)^power
Sigmoid Clipper
yOut = (2*threshold)/1+e^(power*-yIn) - threshold
Bounce Clipper:
Recursively mirrors yIn over threshold until yOut is inbetween the threshold values.
'''
'''
The following is tests regarding using the wave library
with wave.open('TestAudioIn/silence.wav', 'rb') as inFile:
print ( "Number of channels",inFile.getnchannels())
print ( "Sample width",inFile.getsampwidth())
print ( "Frame rate.",inFile.getframerate())
print ( "Number of frames",inFile.getnframes())
print ( "parameters:",inFile.getparams())
samplerate, data = wavfile.read('TestAudioIn/silence.wav')
frame = inFile.setpos(100)
f1 = inFile.readframes(1)
f1Int = int.from_bytes(f1, "big")
frame = inFile.setpos(50)
f2 = inFile.readframes(1)
f2Int = int.from_bytes(f2, "big")
#print(frames)
#print( f1Int)
#print( f2Int)
'''
| theshieber/Spline-Filter | splinefilterPOC.py | splinefilterPOC.py | py | 3,288 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"li... |
import matplotlib.pyplot as plt
import math
# One scatter series per theta value (0.0, 0.5, 1.0); the data filename
# encodes the simulation parameters (n, theta, iteration count, k range).
for number in range(0,15,5):
    formatString = "%0.1f" % (number/10.0)
    filename = "data/stats_2000n_"+formatString+"00000th_"+str(int(number/10) + 1)+"00000times_0.600000kmin_0.200000kstep_2.000000kmax_10statsize.dat"
    f = open(filename, 'r')
    headers = f.readline().replace('\n','').split('\t')
    x = []
    y = []
    # Tab-separated rows: column 0 = average degree, column 1 = fraction of
    # vertices in the largest component.
    for line in f:
        datum = line.replace('\n','').split('\t')
        x.append(float(datum[0]))
        y.append(float(datum[1]))
    f.close()
    # Distinct color/marker per theta series.
    if number == 0:
        plt.scatter(x,y, c='r', marker='s', label="theta = {0}".format(formatString))
    elif number == 5:
        plt.scatter(x,y, c='g', marker='^', label="theta = {0}".format(formatString))
    else:
        plt.scatter(x,y, c='b', marker='o', label="theta = {0}".format(formatString))
plt.title("Size of Largest Component: theta = 0, 0.5, 1")
plt.xlabel("Average Degree")
plt.ylabel("Fraction of Vertices in Largest Component")
##plt.xlim(0, 2)
##plt.ylim(0,1)
plt.legend(loc=4)
plt.savefig("data/degree_model_lc_2000n.pdf")
| vitchyr/Research-in-Math | degree_model/data_analysis.py | data_analysis.py | py | 1,119 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": ... |
38750349973 | from imagenet_c import *
from torchvision.datasets import ImageNet
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import os
import torch
import gorilla
DATA_ROOT = './data'
CORRUPTION_PATH = './corruption'
corruption_tuple = (gaussian_noise, shot_noise, impulse_noise, defocus_blur,
glass_blur, motion_blur, zoom_blur, snow, frost, fog,
brightness, contrast, elastic_transform, pixelate, jpeg_compression)
corruption_dict = {corr_func.__name__: corr_func for corr_func in corruption_tuple}
class corrupt(object):
    """Callable transform applying one imagenet_c corruption to a PIL image.

    Intended for use inside a torchvision ``transforms.Compose`` pipeline;
    returns the corrupted image as a uint8 numpy array.
    """
    def __init__(self, corruption_name, severity=5):
        # corruption_name must be a key of corruption_dict; severity is the
        # imagenet_c severity level (5 = harshest).
        self.corruption_name = corruption_name
        self.severity = severity
        return
    def __call__(self, x):
        # x: PIL.Image
        x_corrupted = corruption_dict[self.corruption_name](x, self.severity)
        return np.uint8(x_corrupted)
    def __repr__(self):
        return "Corruption(name=" + self.corruption_name + ", severity=" + str(self.severity) + ")"
if os.path.exists(os.path.join(DATA_ROOT, CORRUPTION_PATH)) is False:
os.mkdir(os.path.join(DATA_ROOT, CORRUPTION_PATH))
for corruption in corruption_dict.keys():
if os.path.exists(os.path.join(DATA_ROOT, CORRUPTION_PATH, corruption + '.pth')):
continue
print(corruption)
val_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
corrupt(corruption, 5)
])
target_dataset = ImageNet(DATA_ROOT, 'val', transform=val_transform)
target_dataloader = DataLoader(target_dataset, batch_size=256, shuffle=False, drop_last=False, num_workers=2)
datas = []
for batch in gorilla.track(target_dataloader):
datas.append(batch[0])
datas = torch.concat(datas)
torch.save(datas, os.path.join(DATA_ROOT, CORRUPTION_PATH, corruption + '.pth'))
| Gorilla-Lab-SCUT/TTAC | imagenet/utils/create_corruption_dataset.py | create_corruption_dataset.py | py | 1,892 | python | en | code | 37 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number"... |
30814400750 | __author__ = "https://github.com/kdha0727"
import os
import functools
import contextlib
import torch
import torch.distributed as dist
from torch.cuda import is_available as _cuda_available
RANK = 0
WORLD_SIZE = 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Setup Tools #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def is_initialized():
    """Return True when the c10d (distributed) runtime has been initialized.

    ``dist.is_initialized`` is absent entirely when PyTorch was compiled
    without c10d support, hence the defensive ``getattr`` with a False
    fallback.
    """
    if not dist.is_available():
        return False
    return getattr(dist, "is_initialized", lambda: False)()
def setup_dist(temp_dir, rank, world_size):
    """
    Set up a distributed process group.

    Uses a shared file in *temp_dir* as the rendezvous point: gloo backend on
    Windows, nccl elsewhere. Records the RANK/WORLD_SIZE module globals and
    pins this process to its CUDA device.
    """
    # Already initialized (e.g. by an outer launcher): nothing to do.
    if is_initialized():
        return True
    init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
    if os.name == 'nt':
        # Windows: file:// URLs need forward slashes, and nccl is unavailable.
        init_method = 'file:///' + init_file.replace('\\', '/')
        dist.init_process_group(
            backend='gloo', init_method=init_method, rank=rank, world_size=world_size)
    else:
        init_method = f'file://{init_file}'
        dist.init_process_group(
            backend='nccl', init_method=init_method, rank=rank, world_size=world_size)
    global RANK, WORLD_SIZE
    RANK = rank
    WORLD_SIZE = world_size
    # Bind this process to the device chosen by dev() (indexed by rank).
    torch.cuda.set_device(dev())
    torch.cuda.empty_cache()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# General Tools #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@functools.lru_cache(maxsize=None)
def get_rank(group=None):
    """Return this process's rank in *group*, or the module-level RANK fallback.

    NOTE(review): lru_cache memoizes the first answer per *group*, so a call
    made before setup_dist() keeps returning the stale module-level RANK -
    confirm callers never query before setup.
    """
    if group is not None and is_initialized():
        return dist.get_rank(group=group)
    return RANK
@functools.lru_cache(maxsize=None)
def get_world_size(group=None):
    """Return the world size of *group*, or the module-level WORLD_SIZE fallback.

    NOTE(review): memoized per *group*; answers obtained before setup_dist()
    are cached (same caveat as get_rank).
    """
    if group is not None and is_initialized():
        return dist.get_world_size(group=group)
    return WORLD_SIZE
def barrier(*args, **kwargs):
    """Block until all ranks reach this point; no-op when not distributed."""
    if is_initialized():
        return dist.barrier(*args, **kwargs)
@contextlib.contextmanager
def synchronized_ops():
    """Context manager placing a barrier before and after its body.

    NOTE(review): an exception raised inside the body skips the closing
    barrier.
    """
    barrier()
    yield
    barrier()
    return
@functools.lru_cache(maxsize=None)
def dev(group=None):
    """
    Get the device to use for torch.distributed.

    The CUDA device indexed by this process's rank when CUDA is available,
    otherwise the CPU. Memoized per *group*.
    """
    if _cuda_available():
        return torch.device(get_rank(group))
    return torch.device("cpu")
def load_state_dict(local_or_remote_path, **kwargs):
    """Deserialize a PyTorch checkpoint stored at *local_or_remote_path*.

    Extra keyword arguments (e.g. ``map_location``) are forwarded to
    :func:`torch.load`.
    """
    with open(local_or_remote_path, "rb") as checkpoint_file:
        state = torch.load(checkpoint_file, **kwargs)
    return state
def broadcast(tensor, src=0, group=None, async_op=False):
    """
    Synchronize a Tensor across ranks from {src} rank. (default=0)

    No-op when the distributed runtime is not initialized.
    :param tensor: torch.Tensor.
    :param src: source rank to sync params from. default is 0.
    :param group: process group to broadcast within (None = default group).
    :param async_op: whether to perform the broadcast asynchronously.
    """
    if not is_initialized():
        return
    # no_grad: parameter broadcasts must not be recorded by autograd.
    with torch.no_grad():
        dist.broadcast(tensor, src, group=group, async_op=async_op)
def sync_params(params, src=0, group=None, async_op=False):
    """
    Synchronize a sequence of Tensors across ranks from {src} rank. (default=0)

    No-op when the distributed runtime is not initialized.
    :param params: Sequence of torch.Tensor.
    :param src: source rank to sync params from. default is 0.
    :param group: process group to broadcast within (None = default group).
    :param async_op: whether to perform each broadcast asynchronously.
    """
    if not is_initialized():
        return
    for p in params:
        broadcast(p, src, group=group, async_op=async_op)
| studio-YAIVERSE/studio-YAIVERSE | dist_util.py | dist_util.py | py | 3,621 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "torch.distributed.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.p... |
18490735964 | import boto3
import json
import uuid
print('Loading function')
def lambda_handler(event, context):
    """AWS Lambda entry point for S3 object-created events.

    Extracts the bucket and object key from the first S3 record and runs
    Rekognition labelling + DynamoDB storage on that object.
    """
    bucketName = event['Records'][0]['s3']['bucket']['name']
    fileName = event['Records'][0]['s3']['object']['key']
    return detect_labels_and_put_dynamoDB(fileName, bucketName)
def detect_labels_and_put_dynamoDB(photo, bucket):
    """Detect up to 10 labels for an S3 image and store them in DynamoDB.

    Each detected label becomes one item in the RekognitionDetails table,
    keyed by a fresh UUID. Returns None.
    """
    rekognitionClient=boto3.client('rekognition', 'us-east-2')
    dynamoClient = boto3.client('dynamodb')
    response = rekognitionClient.detect_labels(Image={'S3Object':{'Bucket':bucket,'Name':photo}},
        MaxLabels=10)
    print('Detected labels for ' + photo)
    # One DynamoDB item per label, using the low-level typed attribute format.
    for label in response['Labels']:
        dynamoClient.put_item(
            TableName='RekognitionDetails',
            Item= {
                'ID' : {
                    'S': str(uuid.uuid4())
                },
                'Filename': {
                    'S': photo
                },
                'Category': {
                    'S' : label['Name']
                },
                'Confidence': {
                    'N': str(label['Confidence'])
                }
            })
def main():
    """Manual entry point: run label detection on a hard-coded photo/bucket.

    Fix: the original called ``detect_labels``, which is not defined anywhere
    in this module (NameError at runtime); the function defined above is
    ``detect_labels_and_put_dynamoDB``.
    """
    photo = ''
    bucket = ''
    label_count = detect_labels_and_put_dynamoDB(photo, bucket)
    print("Labels detected: " + str(label_count))
| Samir42/RekognitionService | RekognitionLambda.py | RekognitionLambda.py | py | 1,249 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 25,
"usage_type": "call"
}
] |
25144655510 | # pylint: disable=W0611, E0401
"""
Main goal of this module is to scrape and parse data from "visityerevan.am" website
"""
import logging
import sys
from dataclasses import dataclass
from urllib.parse import urljoin
from httpx import Client
from selectolax.parser import HTMLParser, Node
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARNING)
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d:%(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/109.0.0.0 Safari/537.36"}
@dataclass
class Event:
    """ Class contains all info about event """
    # Event title as shown on the site.
    title: str
    # Short description text of the event.
    description: str
    # Absolute link back to the event page on visityerevan.am.
    url_to_original: str
    # Human-readable date/time string, e.g. "May 1 19:00".
    time: str
    # Price amount in AMD, or "no info" when not listed.
    price: str
    # Absolute image URL.
    img: str
@dataclass
class Response:
    """ Class contains html of page and info about existing of the next page """
    # Parsed DOM of the fetched page.
    body_html: HTMLParser
    # HTTP status code of the response.
    status_code: int
def serialize_event(event):
    """Map an Event onto the shared parsed-event dictionary schema."""
    location = {
        "country": "Armenia",
        "city": "Erevan",
    }
    price = {
        "amount": event.price,
        "currency": "AMD"
    }
    timezone_info = {
        "timezoneName": "AMT",
        "timezoneOffset": "UTC +4",
    }
    return {
        "id": "work in progress...",
        "type": "parsed_v1",
        "parserName": "visityerevan",
        "title": event.title,
        "description": event.description,
        "date": event.time,
        "durationInSeconds": 0,
        "location": location,
        "image": event.img,
        "price": price,
        "timezone": timezone_info,
        "url": event.url_to_original,
    }
def get_page(client: Client, url: str) -> Response:
    """Fetch *url* and return its parsed DOM together with the HTTP status code."""
    resp = client.get(url, headers=HEADERS)
    html = HTMLParser(resp.text)
    return Response(body_html=html, status_code=resp.status_code)
def get_pages_amount(client: Client, url: str) -> int:
    """ func to get number of pages with events """
    resp = client.get(url, headers=HEADERS)
    html = HTMLParser(resp.text)
    # The last <li class="page-item"> in the pagination bar holds the highest
    # page number.
    pages_amount = html.css("ul[class='pagination justify-content-center'] >" +
                            "li[class='page-item']")[-1:][0].text()
    return int(pages_amount)
def is_valid(data):
""" Helps us to catch website's structure changes """
if data is None:
logger.warning(
"Seems that website changed structure. Please recheck code and website")
return False
else:
return True
def parse_detail(blocks: list) -> list:
""" Clean and prepare all data that we need """
result = []
# In this loop we will extract all
# Info that we can from each event's div
for block in blocks:
# Extract and prepare "time"
month_day = block.css_first(
"div[class='col-12 mt-n1'] > div")
# Need validate data each parsing attempt
if is_valid(month_day):
month_day = month_day.text().replace('\n', '').strip()
time = block.css_first(
"div[class='text-grey text-md mb-2']")
if is_valid(time):
time = time.text().replace('\n', '').strip().split(' ')
cleaned_time = f"{month_day} {time[-1:][0]}"
else:
cleaned_time = None
# Extract and prepare "description"
description = block.css_first("p")
if is_valid(description):
description = description.text().strip()
# Clean and prepare "url"
url = block.css_first("a").attrs["href"]
if is_valid(url):
url = "https://www.visityerevan.am" + url
# Extract price
price = ''
cards = block.css("p.card-text > span")
if len(cards) == 0:
logger.warning(
"Seems that website changed structure. Please recheck code and website")
else:
for card in cards:
card = card.text()
if "AMD" in card:
price = card.replace("AMD", "").strip()
else:
price = "no info"
# Extract img link
img = block.css_first("img").attrs["src"]
if is_valid(img):
img = "https://www.visityerevan.am" + img
# There is not need in cleaning "title"
# With data we have create a new event object
event = Event(
title=block.css_first("h5").text(),
description=description,
url_to_original=url,
time=cleaned_time,
price=price,
img=img
)
result.append(serialize_event(event))
return result
def scrape_blocks(html: HTMLParser) -> list:
""" Getting all divs with information from page """
blocks = html.css("div[class='row px-lg-7']" +
" > div")
return blocks
def pagination_loop(client: Client) -> list:
""" Loop through all pages """
url = "https://www.visityerevan.am/browse/things-to-do-events/ru/"
# How many pages we will scrape
pages_amount = get_pages_amount(client, url)
# Blocks contains all divs that we need
blocks = []
# Iterating through all pages
for page_number in range(1, pages_amount + 1):
# Mutating a url to get page with current page number
url = urljoin(url, f"?sel_filters=¤t_page={page_number}")
# Get object with scraped html markup from current page
page = get_page(client, url)
# Grad all divs with events data and append to list
blocks += scrape_blocks(page.body_html)
# Scraping is done, time to close session
client.close()
return blocks
async def scrape_website() -> list:
""" Main function which contains all logic """
# Start a new session
client = Client()
# Create list with all divs which contain info about events
all_blocks = pagination_loop(client)
# Parsing data from divs
parsed_data = parse_detail(all_blocks)
return parsed_data
| EPguitars/events-parsing-archive | standalone/scraper_visityerevan.py | scraper_visityerevan.py | py | 6,391 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.St... |
72279625789 | import firebase_admin
import googleapiclient
from firebase_admin import credentials
from firebase_admin import db
import os
from os.path import join, dirname
from dotenv import load_dotenv
from XmlParser import XmlParser
class FirebaseService:
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
DATABASE_URL = os.environ.get("DATABASE_URL")
if not firebase_admin._apps:
print('初期化')
cred = credentials.Certificate('hologram-test-firebase-adminsdk.json')
firebase_admin.initialize_app(cred, {
'databaseURL': DATABASE_URL,
})
ref_db = db.reference('/video')
def __init__(self, video_item):
self.video_item = video_item
# FirestoreのドキュメントIDを取得
def get_db_id(self):
print("FirebaseService", 'get_db_id')
id_list = []
key_val = self.ref_db.get()
# DB上に書き込まれたアイテムのvideoIdを取得
for key, val in key_val.items():
id_list.append(key)
return id_list
#
def write_video_item(self):
print("FirebaseService", "write_video_item")
self.ref_db.update(self.video_item)
def delete_video_item(self, update_db_items, xml_video_ids, error_channel_ids):
print("FirebaseService", 'update_db_items', len(update_db_items), update_db_items)
print("FirebaseService", 'xml_video_ids', len(xml_video_ids), xml_video_ids)
print("FirebaseService", 'error_channel_ids', len(error_channel_ids), error_channel_ids)
# 一週間以上まえのアイテムのみ抽出
# (更新後のDB上のアイテム)-(XMLで取得したアイテム)
last_week_ids = set(update_db_items).difference(set(xml_video_ids))
print('last_week_ids', last_week_ids)
for single_id in last_week_ids:
db_channelId = self.ref_db.child(f'{single_id}').child('channelId').get()
print('db_channel_id', db_channelId)
# dbから取得したアイテムのチャンネルIDがエラーが発生したチャンネルIDリストの中に含まれていなければ削除
# xml_parseで取得できなかったチャンネルの動画情報が削除されてしまうため
if db_channelId not in set(error_channel_ids):
print('delete', f'{single_id}')
self.ref_db.child(f'{single_id}').delete()
| CIDRA4023/Hologram-backend | FirebaseService.py | FirebaseService.py | py | 2,457 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
... |
71861349309 | import os
import sys
# 修改工作目录为程序所在目录,这样通过注册表实现开机自动启动时也能获取到正确的工作目录
# PS: 放到这个地方,是确保在所有其他初始化代码之前先修改掉工作目录
dirpath = os.path.dirname(os.path.realpath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(dirpath)
import argparse
import datetime
import time
from multiprocessing import freeze_support
import psutil
import ga
from check_first_run import check_first_run_async
from config import config, load_config
from db_def import try_migrate_db
from first_run import is_weekly_first_run
from log import color, log_directory, logger
from main_def import (
auto_send_cards,
check_all_skey_and_pskey,
check_djc_role_binding,
check_proxy,
check_update,
get_user_buy_info,
print_update_message_on_first_run_new_version,
run,
sas,
show_ask_message_box,
show_buy_info,
show_extra_infos,
show_lottery_status,
show_multiprocessing_info,
show_notices,
show_pay_info,
show_recommend_reward_tips,
try_auto_update,
try_auto_update_ignore_permission_on_special_case,
try_join_xinyue_team,
try_load_old_version_configs_from_user_data_dir,
try_report_usage_info,
try_save_configs_to_user_data_dir,
try_take_dnf_helper_chronicle_task_awards_again_after_all_accounts_run_once,
try_take_xinyue_team_award,
)
from pool import close_pool, init_pool
from qq_login import QQLogin
from show_usage import show_usage
from update import notify_manual_check_update_on_release_too_long
from usage_count import increase_counter
from util import (
MiB,
async_call,
async_message_box,
change_console_window_mode_async,
change_title,
clean_dir_to_size,
disable_pause_after_run,
disable_quick_edit_mode,
is_run_in_github_action,
kill_other_instance_on_start,
pause,
remove_old_version_portable_chrome_files,
show_head_line,
show_unexpected_exception_message,
)
from version import author, now_version, ver_time
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--no_max_console", default=False, action="store_true", help="是否不将窗口调整为最大化")
parser.add_argument(
"--wait_for_pid_exit",
default=0,
type=int,
help="启动后是否等待对应pid的进程结束后再启动,主要用于使用配置工具启动小助手的情况,只有配置工具退出运行,自动更新才能正常进行",
)
parser.add_argument("--max_wait_time", default=5, type=int, help="最大等待时间")
args = parser.parse_args()
return args
def prepare_env():
args = parse_args()
# 最大化窗口
if not args.no_max_console:
logger.info("尝试调整窗口显示模式,打包exe可能会运行的比较慢")
change_console_window_mode_async()
if args.wait_for_pid_exit != 0:
# 通过配置工具打开
increase_counter(ga_category="open_by", name="config_tool", ga_misc_params={"dr": "config_tool"})
logger.info(f"等待pid为{args.wait_for_pid_exit}的配置工具退出运行,从而确保可能有的自动更新能够正常进行,最大将等待{args.max_wait_time}秒")
wait_time = 0.0
retry_time = 0.1
while wait_time <= args.max_wait_time:
if not psutil.pid_exists(args.wait_for_pid_exit):
logger.info("配置工具已成功退出,将开始运行小助手~")
break
time.sleep(retry_time)
wait_time += retry_time
else:
# 直接打开
increase_counter(ga_category="open_by", name="directly", ga_misc_params={"dr": "directly"})
def main():
try_migrate_db()
increase_counter(name="run/begin", ga_type=ga.GA_REPORT_TYPE_PAGE_VIEW)
prepare_env()
# 启动时检查是否需要同步本机数据目录备份的旧版本配置
try_load_old_version_configs_from_user_data_dir()
change_title()
print_update_message_on_first_run_new_version()
logger.warning(f"开始运行DNF蚊子腿小助手,ver={now_version} {ver_time},powered by {author}")
logger.warning(color("fg_bold_cyan") + "如果觉得我的小工具对你有所帮助,想要支持一下我的话,可以帮忙宣传一下或打开付费指引/支持一下.png,扫码打赏哦~")
# 读取配置信息
load_config("config.toml", "config.toml.local")
cfg = config()
if len(cfg.account_configs) == 0:
raise Exception("未找到有效的账号配置,请检查是否正确配置。ps:多账号版本配置与旧版本不匹配,请重新配置")
try_auto_update_ignore_permission_on_special_case(cfg)
notify_manual_check_update_on_release_too_long(cfg.common)
check_proxy(cfg)
try_report_usage_info(cfg)
if cfg.common.disable_cmd_quick_edit:
disable_quick_edit_mode()
show_notices()
if cfg.common.allow_only_one_instance:
logger.info("当前仅允许单个实例运行,将尝试干掉其他实例~")
async_call(kill_other_instance_on_start)
else:
logger.info("当前允许多个实例同时运行~")
pool_size = cfg.get_pool_size()
init_pool(pool_size)
change_title(multiprocessing_pool_size=pool_size, enable_super_fast_mode=cfg.common.enable_super_fast_mode)
show_multiprocessing_info(cfg)
account_names = []
for account_cfg in cfg.account_configs:
account_names.append(account_cfg.name)
logger.info(f"当前共配置{len(account_names)}个账号,具体如下:{account_names}")
clean_dir_to_size(log_directory, cfg.common.max_logs_size * MiB, cfg.common.keep_logs_size * MiB)
clean_dir_to_size(f"utils/{log_directory}", cfg.common.max_logs_size * MiB, cfg.common.keep_logs_size * MiB)
current_chrome_version = QQLogin(cfg.common).get_chrome_major_version()
remove_old_version_portable_chrome_files(current_chrome_version)
show_ask_message_box(cfg)
# 检查是否有更新,用于提示未购买自动更新的朋友去手动更新~
if cfg.common.check_update_on_start:
check_update(cfg)
check_all_skey_and_pskey(cfg)
check_djc_role_binding()
# 确保道聚城绑定OK后在活动运行同时进行异步的弹窗提示
check_first_run_async(cfg)
# 挪到所有账号都登陆后再尝试自动更新,从而能够判定是否已购买DLC
try_auto_update(cfg)
# 查询付费信息供后面使用
show_head_line("查询付费信息")
logger.warning("开始查询付费信息,请稍候~")
user_buy_info = get_user_buy_info(cfg.get_qq_accounts())
show_buy_info(user_buy_info, cfg, need_show_message_box=False)
sas(cfg, "启动时展示账号概览", user_buy_info)
# 预先尝试创建和加入固定队伍,从而每周第一次操作的心悦任务也能加到队伍积分中
try_join_xinyue_team(cfg, user_buy_info)
# 正式进行流程
run(cfg, user_buy_info)
try_take_dnf_helper_chronicle_task_awards_again_after_all_accounts_run_once(cfg, user_buy_info)
# 尝试领取心悦组队奖励
try_take_xinyue_team_award(cfg, user_buy_info)
# # 尝试派赛利亚出去打工
# try_xinyue_sailiyam_start_work(cfg)
# 活动开启关闭时调这个开关即可
enable_card_lottery = True
if enable_card_lottery:
auto_send_cards(cfg)
show_extra_infos(cfg)
sas(cfg, "运行完毕展示账号概览", user_buy_info)
if enable_card_lottery:
show_lottery_status("卡片赠送完毕后展示各账号抽卡卡片以及各礼包剩余可领取信息", cfg, need_show_tips=True)
show_pay_info(cfg)
show_recommend_reward_tips(user_buy_info)
# 显示小助手的使用概览
if cfg.common._show_usage:
show_usage()
# 运行结束展示下多进程信息
show_multiprocessing_info(cfg)
# 检查是否有更新,用于提示未购买自动更新的朋友去手动更新~
if cfg.common.check_update_on_end:
check_update(cfg)
# 运行完毕备份配置到本机数据目录
try_save_configs_to_user_data_dir()
increase_counter(name="run/end", ga_type=ga.GA_REPORT_TYPE_PAGE_VIEW)
show_head_line("运行完毕")
def main_wrapper():
freeze_support()
logger.info(color("bold_green") + f"已将工作目录设置为小助手所在目录:{dirpath},之前为:{old_path}")
try:
run_start_time = datetime.datetime.now()
main()
total_used_time = datetime.datetime.now() - run_start_time
logger.warning(color("fg_bold_yellow") + f"运行完成,共用时{total_used_time}")
# 如果总用时太高的情况时,尝试提示开启多进程和超快速模式
cfg = config()
if total_used_time > datetime.timedelta(minutes=10) and (
not cfg.common.enable_multiprocessing or not cfg.common.enable_super_fast_mode
):
msg = (
f"当前累计用时似乎很久({total_used_time}),是否要尝试多进程和超快速模式?\n"
"多进程模式下,将开启多个进程并行运行不同账号的领取流程\n"
"额外开启超快速模式,会进一步将不同账号的不同活动都异步领取,进一步加快领取速度\n"
"\n"
"如果需要开启,请打开配置工具,在【公共配置】tab中勾选【是否启用多进程功能】和【是否启用超快速模式(并行活动)】"
)
logger.warning(color("bold_cyan") + msg)
if is_weekly_first_run("用时过久提示"):
async_message_box(msg, "用时过久", print_log=False)
# 按照分钟级别来统计使用时长
total_minutes = int(total_used_time.total_seconds()) // 60
increase_counter(ga_category="run_used_time_minutes", name=total_minutes)
except Exception as e:
show_unexpected_exception_message(e)
# 如果在github action,则继续抛出异常
if is_run_in_github_action():
raise e
finally:
# 暂停一下,方便看结果
if not disable_pause_after_run() and not is_run_in_github_action():
async_call_close_pool_after_some_time()
pause()
close_pool()
def async_call_close_pool_after_some_time():
def _close():
wait_time = 10 * 60
logger.info(f"{wait_time} 秒后将自动关闭进程池,方便有足够时间查看进程池中触发的弹窗信息")
time.sleep(wait_time)
close_pool()
async_call(_close)
if __name__ == "__main__":
main_wrapper()
| fzls/djc_helper | main.py | main.py | py | 10,763 | python | zh | code | 319 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numbe... |
21897871134 | import got3
import pymongo
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# connect to mongo deamon
connection = pymongo.MongoClient("mongodb://localhost")
# connect to the collection called uber_tweets in the kubrick db
db = connection.kubrick.uberban_tweets
count = 0
try:
while True:
tweetCriteria = got3.manager.TweetCriteria().setSince("2017-09-22").setQuerySearch("uberban")
#tweetCriteria = got3.manager.TweetCriteria().setQuerySearch("uberban")
tweet = got3.manager.TweetManager.getTweets(tweetCriteria)[count]
sent = SentimentIntensityAnalyzer().polarity_scores(tweet.text)['compound']
print(tweet.text)
print(sent)
print(tweet.date)
db.insert_many([{"tweet": tweet.text, "sentiment": sent}])
count += 1
except:
print("tweet scrape ended with {no_tweets} tweets".format(no_tweets = count))
| JackJoeKul/cities-in-need | Old UberBan Tweets Scrape + Sentiment Analysis/old_tweets.py | old_tweets.py | py | 906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "got3.manager.TweetCriteria",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "got3.manager",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "got3.... |
35988325228 | import joblib
model = None
def init_model(
db,
model_themes_path='./flaskr/model/log_reg_themes',
model_cats_path='./flaskr/model/log_reg_cats'
):
global model
cur = db.cursor()
query = """
select id from theme order by id;
"""
cur.execute(query)
theme_ids = [id[0] for id in cur.fetchall()]
cur.close()
cur = db.cursor()
query = """
select id from category order by id;
"""
cur.execute(query)
cats_ids = [id[0] for id in cur.fetchall()]
cur.close()
model = Model(theme_ids, cats_ids, model_themes_path, model_cats_path)
def get_model():
return model
class Model:
def __init__(self, theme_ids, cats_id, model_themes_path, model_cats_path):
self.model_themes = joblib.load(model_themes_path)
self.model_cats = joblib.load(model_cats_path)
self.theme_ids = theme_ids
self.cats_ids = cats_id
def analyse_theme(self, text: str, detailed_text:str = None, probs_count=3):
probs = self.model_themes.predict_proba([text])[0]
if detailed_text:
d_probs = self.model_themes.predict_proba([detailed_text])[0]
probs = probs * d_probs
most_likely_probs = None
if probs_count <= 0:
most_likely_probs = [self.theme_ids[id]
for id in probs.argsort().tolist()[:][::-1]]
else:
most_likely_probs = [self.theme_ids[id]
for id in probs.argsort().tolist()[-probs_count:][::-1]]
return most_likely_probs
def analyse_cat(self, text: str, detailed_text:str = None):
probs = self.model_cats.predict_proba([text])[0]
if detailed_text:
d_probs = self.model_cats.predict_proba([detailed_text])[0]
probs = probs * d_probs
most_likely_probs = [self.cats_ids[id]
for id in probs.argsort().tolist()[:][::-1]]
return most_likely_probs
| dimayasha7123/kursach3 | flaskr/model/model.py | model.py | py | 1,979 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "joblib.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 39,
"usage_type": "call"
}
] |
3167027289 | #!/usr/bin/env python3
import random
from typing import Tuple
from functions.aes import AESCipher, pkcs7_pad, get_blocks, gen_random_bytes
def _encryption_oracle(bytes_: bytes) -> Tuple[bytes, str]:
key = gen_random_bytes(16)
iv = gen_random_bytes(16)
prefix = gen_random_bytes(random.randint(5, 10))
suffix = gen_random_bytes(random.randint(5, 10))
pt = prefix + bytes_ + suffix
cbc_mode = random.choice([True, False])
if cbc_mode:
cbc = AESCipher(AESCipher.MODE_CBC, key, iv=iv)
ct = cbc.encrypt(pkcs7_pad(pt))
answer = "cbc"
else:
ecb = AESCipher(AESCipher.MODE_ECB, key)
ct = ecb.encrypt(pkcs7_pad(pt))
answer = "ecb"
return ct, answer
def challenge11() -> bool:
pt = bytes(gen_random_bytes(1) * random.randint(100, 200))
ct, answer = _encryption_oracle(pt)
blocks = get_blocks(ct)
unique_blocks = len(set(blocks))
guess = "cbc" if len(blocks) == unique_blocks else "ecb"
return True if guess == answer else False
if __name__ == "__main__":
for _ in range(100):
assert challenge11(), "The result does not match the expected value"
print("Ok")
| svkirillov/cryptopals-python3 | cryptopals/set2/challenge11.py | challenge11.py | py | 1,185 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 12,
"usage_type": "call"
... |
24506033331 | from nose.tools import eq_
from mock import patch, Mock, sentinel
from noderunner.process import open_process
@patch("subprocess.Popen", return_value=sentinel.proc)
def test_open_process(p):
ret = open_process(sentinel.fd,
sentinel.secret,
nodepath=sentinel.node_path)
eq_(ret, sentinel.proc)
p.assert_called_once()
| williamhogman/noderunner | tests/test_process.py | test_process.py | py | 379 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "noderunner.process.open_process",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.fd",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "mock... |
14025841679 | """
@author: Yuhao Cheng
@contact: yuhao.cheng[at]outlook.com
"""
#!!!!! ignore the warning messages
import warnings
warnings.filterwarnings('ignore')
import os
import pickle
import math
import torch
import time
import numpy as np
from PIL import Image
from collections import OrderedDict
import torchvision.transforms as T
import torchvision.transforms.functional as tf
from torch.utils.data import DataLoader
from pyanomaly.core.utils import AverageMeter, flow_batch_estimate, tensorboard_vis_images, make_info_message, ParamSet
from pyanomaly.datatools.evaluate.utils import psnr_error
from ..abstract.base_engine import BaseTrainer, BaseInference
from ..engine_registry import ENGINE_REGISTRY
__all__ = ['MEMAETrainer', 'MEMAEInference']
@ENGINE_REGISTRY.register()
class MEMAETrainer(BaseTrainer):
NAME = ["MEMAE.TRAIN"]
def custom_setup(self):
# basic meter
self.loss_meter_MemAE = AverageMeter(name='loss_memae')
def train(self,current_step):
# Pytorch [N, C, D, H, W]
# initialize
start = time.time()
self.set_requires_grad(self.MemAE, True)
self.MemAE.train()
writer = self.kwargs['writer_dict']['writer']
global_steps = self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])]
# get the data
data, anno, meta = next(self._train_loader_iter) # the core for dataloader
self.data_time.update(time.time() - start)
input_data = data.cuda()
# True Process =================Start===================
output_rec, att = self.MemAE(input_data)
loss_rec = self.rec_loss(output_rec, input_data)
loss_mem = self.mem_loss(att)
loss_memae_all = self.loss_lamada['rec_loss'] * loss_rec + self.loss_lamada['mem_loss'] * loss_mem
# loss_memae_all = self.loss_lamada['rec_loss'] * loss_rec
self.optim_MemAE.zero_grad()
# with torch.autograd.set_detect_anomaly(True):
loss_memae_all.backward()
self.optim_MemAE.step()
self.loss_meter_MemAE.update(loss_memae_all.detach())
if self.config.TRAIN.adversarial.scheduler.use:
self.lr_memae.step()
# ======================End==================
self.batch_time.update(time.time() - start)
if (current_step % self.steps.param['log'] == 0):
msg = make_info_message(current_step, self.steps.param['max'], self.kwargs['model_type'], self.batch_time,
self.config.TRAIN.batch_size, self.data_time, [self.loss_meter_MemAE])
self.logger.info(msg)
writer.add_scalar('Train_loss_MemAE', self.loss_meter_MemAE.val, global_steps)
if (current_step % self.steps.param['vis'] == 0):
vis_objects = OrderedDict({
'train_output_rec_memeae': output_rec.detach(),
'train_input': input_data.detach()
})
tensorboard_vis_images(vis_objects, writer, global_steps, self.normalize.param['train'])
global_steps += 1
# reset start
start = time.time()
# self.saved_model = {'MemAE':self.MemAE}
self.saved_model['MemAE'] = self.MemAE
# self.saved_optimizer = {'optim_MemAE': self.optim_MemAE}
self.saved_optimizer['optimizer_MemAE']= self.optim_MemAE
# self.saved_loss = {'loss_MemAE':self.loss_meter_MemAE.val}
self.saved_loss['loss_MemAE'] = self.loss_meter_MemAE.val
self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])] = global_steps
@ENGINE_REGISTRY.register()
class MEMAEInference(BaseInference):
NAME = ["MEMAE.INFERENCE"]
def inference(self):
for h in self._hooks:
h.inference() | YuhaoCheng/PyAnomaly | pyanomaly/core/engine/functions/memae.py | memae.py | py | 3,913 | python | en | code | 107 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "abstract.base_engine.BaseTrainer",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pyanomaly.core.utils.AverageMeter",
"line_number": 33,
"usage_type": "call"
},
... |
30168009886 | import requests
import pandas as pd
import arrow
import warnings
import io
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import logging
warnings.filterwarnings('ignore', 'Unverified HTTPS request')
url = "https://protect.cylance.com/Reports/ThreatDataReportV1/memoryprotection/"
token = "Token"
fullurl = (url + token)
path = 'Filepath'
logfile = 'FilePath'
nv = arrow.now()
date = nv.shift(days=-1).format('M/D/YYYY')
date2 = nv.shift(days=-1).format('YYYYMD')
def email_send(email_data):
from_addr = "EmailFrom"
to_addr = "EmailTo"
to_list = ["To_List"]
msg = MIMEMultipart()
msg['From'] = from_addr
msg['To'] = to_addr
msg['Subject'] = "Cylance Exploit Attempts for %s" %(date)
part2 = MIMEText(email_data, 'html')
msg.attach(part2)
server = smtplib.SMTP("smtpRelay", 25)
server.sendmail(from_addr,to_list,msg.as_string())
server.quit()
if __name__ == '__main__':
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
logging.info('Requesting MEM TDR')
urlData = requests.get(fullurl).content
rawData = pd.read_csv(io.StringIO(urlData.decode('utf-8')))
logging.info('Creating dataframe')
df2 = pd.DataFrame(rawData)
logging.info('Dropping Serial Column')
df3 = df2.drop(["Serial Number",], axis = 1)
logging.info('Filtering Data by date')
test3 = (df3[df3['ADDED'].str.contains(date)])
logging.info('Selecting Column Headers')
output = (test3[["Device Name","ADDED",'PROCESS NAME','ACTION','TYPE','USER NAME']])
print(output)
if output.empty:
logging.info('No Memory Exploit for %s' % (date))
else:
logging.info('Creating CSV')
output.to_csv(path + date2 + "mem.csv", index=False)
logging.info('CSV Created')
logging.info('Converting Data to HTML')
email_data = output.to_html(index = False)
logging.info('Preparing Email')
email_send(email_data)
logging.info('Email Sent')
| cmoxley1/Cylance | MemTDREmail.py | MemTDREmail.py | py | 2,128 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "arrow.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
27867118318 |
import tensorflow as tf
from utils.nn import linear
from .tdnn import TDNN
def embed_characters(input, vocab_size, embed_dim=40, scope=None, reuse=None,
use_batch_norm=True, use_highway=True, highway_layers=2):
""" Character-level embedding """
with tf.variable_scope(scope or 'Embedder') as scope:
if reuse: scope.reuse_variables()
input = tf.unpack(tf.transpose(input, [1, 0, 2])) # L * [N, W]
embedding = tf.get_variable('embedding', [vocab_size, embed_dim])
embedded = []
for word in input:
embed = tf.nn.embedding_lookup(embedding, word) # [N, W, d]
conved = TDNN(embed, embed_dim)
if use_batch_norm:
conved = batch_norm(conved)
if use_highway:
conved = highway(conved, conved.get_shape()[1], highway_layers, 0)
embedded.append(conved)
scope.reuse_variables()
return embedded
def batch_norm(x, epsilon=1e-5):
shape = x.get_shape().as_list()
with tf.variable_scope('BatchNorm'):
gamma = tf.get_variable("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
beta = tf.get_variable("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
mean, variance = tf.nn.moments(x, [0, 1])
return tf.nn.batch_norm_with_global_normalization(
x, mean, variance, beta, gamma, epsilon,
scale_after_normalization=True)
def highway(input_, size, layer_size=1, bias=-2, f=tf.nn.relu):
"""Highway Network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
"""
with tf.variable_scope('Highway'):
output = input_
for idx in range(layer_size):
output = f(linear(output, size, 0, scope='output_lin_%d' % idx, init='he'))
transform_gate = tf.sigmoid(linear(input_, size, 0, scope='transform_lin_%d' % idx) + bias)
carry_gate = 1. - transform_gate
output = transform_gate * output + carry_gate * input_
return output | therne/logue | models/embedding.py | embedding.py | py | 2,203 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.variable_scope",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.unpack",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.transpose",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tens... |
6827552389 | """ https://adventofcode.com/2020/day/10 """
from typing import Dict, List
from collections import defaultdict
Adapters = List[int]
def part1(adapters: Adapters) -> int:
""" O(nLogn) solution """
jolts = 0
diffs: Dict[int, int] = defaultdict(int)
for adapter in sorted(adapters):
diffs[adapter - jolts] += 1
jolts = adapter
return diffs[1] * (diffs[3] + 1)
def part2(adapters: Adapters) -> int:
""" O(nLogn) solution """
adapters = sorted(adapters)
adapters = [0] + adapters + [max(adapters)+3]
paths = {adapters[0]: 1}
for x in adapters[1:]:
paths[x] = sum(paths[x - y] for y in range(1, 4) if x - y in paths)
return paths[adapters[-1]]
if __name__ == "__main__":
TEST1 = [int(line.strip()) for line in open("tests/d10.txt", "r")]
TEST2 = [int(line.strip()) for line in open("tests/d10_2.txt", "r")]
PUZZLE = [int(line.strip()) for line in open("puzzles/d10.txt", "r")]
assert part1(TEST1) == 35
assert part1(TEST2) == 220
assert part2(TEST1) == 8
assert part2(TEST2) == 19208
print(f"Part 1: {part1(PUZZLE)}")
print(f"Part 2: {part2(PUZZLE)}")
| pozhega/AoC | 2020/d10.py | d10.py | py | 1,164 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
}
] |
41236533985 | import rest_framework.authentication
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
from user.auth.auth import JwtQueryParamsAuthentication
schema_view = get_schema_view(
openapi.Info(
title="接口文档",
default_version="1.0",
terms_of_service='',
contact=openapi.Contact(name="Andy Z Wright", email="andyzwright021@gmail.com"),
license=openapi.License(name="MIT LICENCE"),
),
public=True,
permission_classes=(permissions.AllowAny,),
# authentication_classes=(JwtQueryParamsAuthentication,)
authentication_classes=(),
)
| beishangongzi/porcelain-backend | swagger_doc/views.py | views.py | py | 653 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dr... |
20157578435 | import numpy as np
import pandas as pd
import time
from metric import SampleScore,EventScore, AdjustedMutualInfoScore
from joblib import Parallel, delayed
class Experiment:
def __init__(self,algorithms:list, configurations:list, thresholds = np.linspace(0,1,101),njobs=1,verbose = True) -> None:
"""Initialization
Args:
algorithms (list): list of algorithm classes
configurations (list): list of list of configurations as dictionnaries for each algorithms classes
thresholds (np.ndarray, optional): numpy array of thresholds to consider for the event based metric. Defaults to numpy.linspace(0,1,101).
"""
self.algorithms = algorithms
self.configurations = configurations
self.thresholds = thresholds
self.njobs = njobs
self.verbose = verbose
def compute_scores(self,label,prediction):
single_pred = np.clip(np.sum(prediction,axis=0),0,1).reshape(1,-1)
single_label = np.clip(np.sum(label,axis=0),0,1).reshape(1,-1)
scores = []
#single sample score
p,r,f = SampleScore().score(single_label,single_pred)
scores.append(["sss-precision",p])
scores.append(["sss-recall",r])
scores.append(["sss-fscore",f])
#sample score
p,r,f = SampleScore().score(label,prediction)
scores.append(["ss-precision",p])
scores.append(["ss-recall",r])
scores.append(["ss-fscore",f])
# weigthed sample score
p,r,f = SampleScore(averaging="weighted").score(label,prediction)
scores.append(["w-ss-precision",p])
scores.append(["w-ss-recall",r])
scores.append(["w-ss-fscore",f])
#single event score
lp,lr,lf = EventScore().score(single_label,single_pred,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"ses-precision_{np.round(t,2)}",p])
scores.append([f"ses-recall_{np.round(t,2)}",r])
scores.append([f"ses-fscore_{np.round(t,2)}",f])
scores.append(["ses-auc-precision",np.mean(lp)])
scores.append(["ses-auc-recall",np.mean(lr)])
scores.append(["ses-auc-fscore",np.mean(lf)])
#event score
lp,lr,lf = EventScore().score(label,prediction,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"es-precision_{np.round(t,2)}",p])
scores.append([f"es-recall_{np.round(t,2)}",r])
scores.append([f"es-fscore_{np.round(t,2)}",f])
scores.append(["es-auc-precision",np.mean(lp)])
scores.append(["es-auc-recall",np.mean(lr)])
scores.append(["es-auc-fscore",np.mean(lf)])
# weighted event score
lp,lr,lf = EventScore(averaging="weighted").score(label,prediction,self.thresholds)
for t,p,r,f in zip(self.thresholds,lp,lr,lf):
scores.append([f"w-es-precision_{np.round(t,2)}",p])
scores.append([f"w-es-recall_{np.round(t,2)}",r])
scores.append([f"w-es-fscore_{np.round(t,2)}",f])
scores.append(["w-es-auc-precision",np.mean(lp)])
scores.append(["w-es-auc-recall",np.mean(lr)])
scores.append(["w-es-auc-fscore",np.mean(lf)])
#ajusted mutual information
scores.append(["amis",AdjustedMutualInfoScore().score(label,prediction)])
return scores
def signal_algo_class_experiement(self,signal_idx,signal,label,algo_class,config,config_idx):
"Return a DF"
#keep only labels row that are activated by the signal
label = label[label.sum(axis=1)>0]
#update the number of patterns to predict if required
t_config = config.copy()
if ("n_patterns" in t_config.keys()):
if (isinstance(t_config["n_patterns"],int)):
t_config["n_patterns"] = label.shape[0]
else:
t_config["n_patterns"] = None
try:
#get predictions
algo = algo_class(**t_config)
start = time.time()
algo.fit(signal)
end = time.time()
#compute scores
scores = self.compute_scores(label,algo.prediction_mask_)
tdf = pd.DataFrame(scores,columns=["metric","score"])
tdf["algorithm"] = algo_class.__name__
tdf["config_idx"] = config_idx
tdf["execution_time"] = end - start
tdf["signal_idx"] = signal_idx
tdf["n_patterns"] = label.shape[0]
tdf["predicted_n_patterns"] = algo.prediction_mask_.shape[0]
if self.verbose:
s1 = np.round(tdf[tdf["metric"] == "es-auc-fscore"].score.values[0],2)
s2 = np.round(tdf[tdf["metric"] == "amis"].score.values[0],2)
print(f"signal_id: {signal_idx}, algo: {algo_class.__name__}, config_id: {config_idx}, f-auc: {s1}, ami: {s2}")
return tdf
except:
s= f"signal_id: {signal_idx}, algo: {algo_class.__name__}, config_id: {config_idx} failed to fit."
if self.verbose:
print(s)
if self.logs_path_ is not None:
with open(self.logs_path_,"a") as f:
f.write(s +"\n")
    def run_experiment(self,dataset:np.ndarray,labels:np.ndarray,backup_path = None,batch_size=10,logs_path = None,verbose = True)->np.ndarray:
        """Run every (signal, algorithm, configuration) combination and collect scores.

        Args:
            dataset (np.ndarray): array of signals, signal shape (L,), variable length allowed
            labels (np.ndarray): array of labels, label shape (L,), variable length allowed
            backup_path (str, optional): Path to store df in case of big experiment. If None no saving. Defaults to None.
            batch_size (int, optional): number of signals per parallel batch (only used when backup_path is set).
            logs_path (str, optional): file that failed fits are appended to. Defaults to None.
            verbose (bool, optional): verbose. Defaults to True.
        Returns:
            pd.DataFrame: scores_df with one row per (metric, signal, algorithm, config)
        """
        self.logs_path_ = logs_path
        n_signals = len(dataset)
        n_configs = np.sum([len(conf) for conf in self.configurations])
        total = n_signals*n_configs
        # With a backup path, process signals in batches so partial results can
        # be saved after each batch; otherwise run everything as one batch.
        if backup_path != None:
            n_batches = n_signals//batch_size
            if n_batches >0:
                batches =[zip(dataset[i*batch_size:(i+1)*batch_size],labels[i*batch_size:(i+1)*batch_size]) for i in range(n_batches)]
            else:
                batches = []
            if n_signals % batch_size !=0:
                batches.append(zip(dataset[n_batches*batch_size:],labels[n_batches*batch_size:]))
        else:
            batches = [zip(dataset,labels)]
        self.df_ = pd.DataFrame()
        counts = 0
        for batch in batches:
            # NOTE(review): signal_algo_class_experiement returns None on a failed
            # fit, and pd.concat cannot handle None entries -- confirm failures
            # cannot occur here, or filter results before concatenating.
            results = Parallel(n_jobs=self.njobs)(
                delayed(self.signal_algo_class_experiement)(counts+id_s,signal,label,algo,config,id_c)
                for id_s,(signal,label) in enumerate(batch)
                for id_a,algo in enumerate(self.algorithms)
                for id_c,config in enumerate(self.configurations[id_a])
            )
            counts = min(counts+batch_size,n_signals)
            self.df_= pd.concat((self.df_,*results)).reset_index(drop = True)
            self.df_ = self.df_.astype({'metric':str, "score":float, "algorithm":str,'config_idx':int,"signal_idx":int, "n_patterns":int, "predicted_n_patterns":int})
            if backup_path != None:
                self.df_.to_csv(backup_path)
            if verbose:
                print(f"Achieved [{counts*n_configs}/{total}]")
        return self.df_ | thibaut-germain/lt-normalized | src/experiment.py | experiment.py | py | 7,754 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 3... |
21998584846 | from typing import List
class Solution:
    def findPeakElement(self, nums: List[int]) -> int:
        """Return the index of a peak element (strictly greater than both
        neighbours) using binary search, treating out-of-range positions as
        negative infinity. Runs in O(log n)."""
        size = len(nums)

        def value_at(idx: int):
            # positions outside the array act as -inf sentinels
            return nums[idx] if 0 <= idx < size else float('-inf')

        lo, hi = 0, size - 1
        while lo <= hi:
            pivot = lo + (hi - lo) // 2
            if value_at(pivot - 1) < value_at(pivot) > value_at(pivot + 1):
                return pivot
            if value_at(pivot) < value_at(pivot + 1):
                # ascending slope: a peak must exist to the right
                lo = pivot + 1
            else:
                # descending slope: a peak must exist to the left
                hi = pivot - 1
        return -1
| hangwudy/leetcode | 100-199/162. 寻找峰值.py | 162. 寻找峰值.py | py | 620 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
36153730574 | import boto3
import traceback
import datetime
import os
from botocore.exceptions import ClientError
from ..models.bucket import Bucket
from ..util.preprocessor import preprocess
"""
S3 functions
"""
def get_active_bucket_or_create_new(username):
    """
    Returns the user's current active bucket. If there are no buckets,
    or all the users buckets are full, a new one will be created first.
    returns: Bucket object
    """
    try:
        # scan the user's buckets and reuse the first one with free space
        starts_with_bucket = Bucket.sort.startswith("BUCKET_")
        candidates = Bucket.query(hash_key=username, range_key_condition=starts_with_bucket)
        for candidate in candidates:
            if not candidate.full:
                return candidate
        # no bucket found, or every existing bucket is full: make a fresh one
        return create_bucket(username)
    except Exception as e:
        traceback.print_exc()
        response_object = {
            "status": "failed",
            "message": "Could not query buckets in DB. {}".format(e),
        }
        return response_object, 500
def create_bucket(username, region="eu-central-1"):
    """
    Creates an S3 bucket in S3.
    Naming format: 'flasktextapi-{ENV}-{USERNAME}-BUCKET{BUCKET_ID}'
    IMPORTANT: underscores in usernames are converted to dashes.
    returns: bucket
    """
    try:
        new_id = _generate_bucket_id(username)
        # S3 bucket names may not contain underscores
        safe_username = username.replace("_", "-")
        name = "flasktextapi-{env}-{username}-bucket{id}".format(
            env=os.getenv("BOILERPLATE_ENV"), username=safe_username, id=new_id
        )
        s3_bucket = boto3.resource("s3").Bucket(name)
        creation_response = s3_bucket.create(
            CreateBucketConfiguration={"LocationConstraint": region}
        )
        if creation_response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            return {"status": "failed", "message": "could not create bucket"}, 500
        # S3 bucket exists; now record it in the database
        try:
            return _create_db_bucket(username, new_id, name)
        except Exception:
            traceback.print_exc()
            response_object = {
                "status": "failed",
                "message": "Bucket created successfully but bucket details could not be stored to database.",
            }
            return response_object, 500
    except Exception as e:
        traceback.print_exc()
        response_object = {
            "status": "failed",
            "message": "Could not create bucket. {}".format(e),
        }
        return response_object, 500
def add_file(username, input, bucket_name, id, region="eu-central-1"):
    """
    Adds a text to an S3 bucket.
    Naming format of file: 'unprocessed_{id}_{username}'
    @return: Name of the file as String.
    """
    # only plain strings can be uploaded
    if not isinstance(input, str):
        raise ValueError("Text needs to be a String.")

    object_key = "unprocessed_{id}_{username}".format(id=id, username=username)
    target_bucket = boto3.resource("s3").Bucket(bucket_name)
    target_bucket.put_object(Body=bytes(input, "utf-8"), Key=object_key)
    return object_key
def add_preprocessed_file(username, input, bucket_name, id, region="eu-central-1"):
    """
    Preprocesses a text and adds the result to an S3 bucket.
    Naming format of file: 'preprocessed_{id}_{username}'
    @return: Name of the file (S3 key) as String.
    """
    # check input type
    if not isinstance(input, str):
        raise ValueError("Text needs to be a String.")

    # preprocess input
    prepr_input = preprocess(input)

    bucket = boto3.resource("s3").Bucket(bucket_name)
    key = "preprocessed_{id}_{username}".format(id=id, username=username)
    bucket.put_object(Body=bytes(prepr_input, "utf-8"), Key=key)
    return key
def get_object(bucket_name, key):
    """
    Fetches an object from S3 and decodes its body as UTF-8.
    returns: String
    """
    s3_object = boto3.resource("s3").Object(bucket_name, key)
    body = s3_object.get()["Body"]
    return body.read().decode("utf-8")
def delete_objects(username, bucket_id, objects):
    """
    Deletes objects from the user's s3 bucket identified by bucket_id.
    Returns: List of deleted object keys (empty on failure)
    """
    db_bucket = Bucket.get(hash_key=username, range_key="BUCKET_{}".format(bucket_id))
    s3_bucket = boto3.resource("s3").Bucket(db_bucket.bucket_name)
    response = s3_bucket.delete_objects(
        Delete={"Objects": [{"Key": name} for name in objects]}
    )
    if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
        return []
    return [entry["Key"] for entry in response["Deleted"]]
"""
DB Functions related to S3
"""
def _create_db_bucket(username, id, bucket_name):
    # persist the new bucket's metadata in the database and return the record
    record = Bucket(
        username=username,
        sort="BUCKET_{}".format(id),
        bucket_name=bucket_name,
        created_date=datetime.datetime.utcnow(),
        public_id=id,
        full=False,
    )
    record.save()
    return record
"""
Helper functions
"""
def _generate_bucket_id(username):
    """Return the next free numeric bucket id for *username*.

    The id is one greater than the highest existing ``public_id``
    (0 when the user has no buckets yet).
    """
    buckets = Bucket.query(
        hash_key=username, range_key_condition=Bucket.sort.startswith("BUCKET_")
    )
    new_id = 0
    for buck in buckets:
        # BUG FIX: the old strict `>` comparison could return an id equal to an
        # existing bucket's public_id (e.g. a single bucket with public_id 0
        # yielded 0 again), causing name collisions. Use max(existing) + 1.
        if buck.public_id >= new_id:
            new_id = buck.public_id + 1
    return new_id
def _bucket_full(bucket_name):
    # sum the sizes of every object currently stored in the bucket
    bucket = boto3.resource("s3").Bucket(bucket_name)
    total_size = sum(obj.size for obj in bucket.objects.all())
    # threshold just below ~5 TB -- presumably the intended per-bucket quota
    return total_size > 4990000000000
| jkausti/flask-textsapi | app/textsapi/service/s3buckets.py | s3buckets.py | py | 5,725 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "models.bucket.Bucket.sort.startswith",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.bucket.Bucket.sort",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.bucket.Bucket",
"line_number": 25,
"usage_type": "name"
},
... |
6854075721 | from flask import Flask,request,render_template,redirect, url_for
from flask import jsonify
import requests
from cassandra.cluster import Cluster
from collections import OrderedDict
# Flask application serving the Cassandra-backed lab queries below.
app = Flask(__name__)
# Cassandra keyspace that holds the tweet tables.
KEYSPACE = "twitterkeyspace"
@app.route('/', methods=['GET'])
def home():
    # landing page: render the query form with no results (flag=0)
    if request.method == 'GET':
        return render_template('lab.html',flag=0)
# Lab Query 1
@app.route('/labquery1', methods=['GET','POST'])
def labquery1():
    """Rank tweet authors by number of tweets on the submitted date.

    GET renders the empty form; POST reads `date1`, counts tweets per author
    in a scratch counter table, and renders the ranked rows (flag=1).
    """
    if request.method == 'GET':
        return render_template('lab.html', flag=0)

    date = request.form['date1']
    # Connecting cassandra session
    cluster = Cluster(['127.0.0.1'])
    try:
        session = cluster.connect()
        session.default_timeout = 60
        session.set_keyspace(KEYSPACE)

        # SECURITY FIX: bind the user-supplied date instead of interpolating
        # it into the CQL string (CQL injection).
        rows = session.execute(
            "SELECT author_id FROM tweet_table1 WHERE date = %s", (date,)
        )

        # NOTE(review): dropping/recreating a shared counter table per request
        # is racy if two requests overlap -- confirm single-user usage.
        session.execute("DROP TABLE IF EXISTS tweet_table_author_frequency")
        session.execute("""
            CREATE TABLE tweet_table_author_frequency (
                author_id text,
                frequency counter,
                PRIMARY KEY (author_id)
            )
            """)

        increment = (
            "UPDATE tweet_table_author_frequency "
            "SET frequency = frequency + 1 WHERE author_id = %s"
        )
        for row in rows:
            if row.author_id:
                session.execute(increment, (row.author_id,))

        rows = session.execute("SELECT * FROM tweet_table_author_frequency")
        pop = {row.author_id: row.frequency for row in rows}
    finally:
        # FIX: the old code leaked one Cluster (and its connections) per request
        cluster.shutdown()

    # rows sorted by descending frequency: [date, author_id, frequency]
    result = [[date, author, pop[author]]
              for author in sorted(pop, key=pop.get, reverse=True)]
    return render_template('lab.html', result=result, count=len(result), flag=1)
# Lab Query 2
@app.route('/labquery2', methods=['GET','POST'])
def labquery2():
    """Rank (hashtag, location) pairs by frequency on the submitted date.

    GET renders the empty form; POST reads `date2`, counts occurrences per
    (hashtag, location) in a scratch counter table, and renders the ranked
    rows (flag=2).
    """
    if request.method == 'GET':
        return render_template('lab.html', flag=0)

    date = request.form['date2']
    # Connecting cassandra session
    cluster = Cluster(['127.0.0.1'])
    try:
        session = cluster.connect()
        session.default_timeout = 60
        session.set_keyspace(KEYSPACE)

        # SECURITY FIX: bind the user-supplied date instead of interpolating
        # it into the CQL string (CQL injection).
        rows = session.execute(
            "SELECT hashtag, location FROM tweet_table2 WHERE date = %s", (date,)
        )

        # NOTE(review): dropping/recreating a shared counter table per request
        # is racy if two requests overlap -- confirm single-user usage.
        session.execute("DROP TABLE IF EXISTS tweet_table_hashtag_location")
        session.execute("""
            CREATE TABLE tweet_table_hashtag_location (
                hashtag text,
                location text,
                frequency counter,
                PRIMARY KEY ((hashtag,location))
            )
            """)

        # BUG FIX: the old concatenated statement was missing a space before
        # "AND" and interpolated user data; bind parameters instead.
        increment = (
            "UPDATE tweet_table_hashtag_location "
            "SET frequency = frequency + 1 WHERE hashtag = %s AND location = %s"
        )
        for row in rows:
            if row.hashtag and row.location:
                session.execute(increment, (row.hashtag, row.location))

        rows = session.execute("SELECT * FROM tweet_table_hashtag_location")
        pop = {(row.hashtag, row.location): row.frequency for row in rows}
    finally:
        # FIX: the old code leaked one Cluster (and its connections) per request
        cluster.shutdown()

    # rows sorted by descending frequency: [date, hashtag, location, frequency]
    result = [[date, hashtag, location, pop[(hashtag, location)]]
              for hashtag, location in sorted(pop, key=pop.get, reverse=True)]
    return render_template('lab.html', result=result, count=len(result), flag=2)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader -- do not use in production.
    app.run(host='127.0.0.1',port=5000,debug=True) | piyush-jain1/Databases | Cassandra/Assignment2/app.py | app.py | py | 3,427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.render_templ... |
70078444988 | import logging
import math
import threading
import time
import torch
#import support.kernels as kernel_factory
from ...support.kernels import factory
from ...core import default
from ...core.model_tools.deformations.exponential import Exponential
from ...core.models.abstract_statistical_model import AbstractStatisticalModel
from ...core.models.model_functions import initialize_control_points, initialize_momenta
from ...core.observations.deformable_objects.deformable_multi_object import DeformableMultiObject
from ...in_out.array_readers_and_writers import *
from ...in_out.dataset_functions import create_template_metadata
#import support.utilities as utilities
from ...support.utilities import move_data, get_best_device
from .abstract_statistical_model import process_initial_data
import torch.nn.functional as F
logger = logging.getLogger(__name__)
# def _subject_attachment_and_regularity(arg):
# """
# Auxiliary function for multithreading (cannot be a class method).
# """
# from .abstract_statistical_model import process_initial_data
# if process_initial_data is None:
# raise RuntimeError('process_initial_data is not set !')
#
# # Read arguments.
# freeze_sparse_matrix = False
# (deformable_objects, multi_object_attachment, objects_noise_variance,
# freeze_template, freeze_control_points, freeze_momenta,
# exponential, sobolev_kernel, use_sobolev_gradient, tensor_scalar_type, gpu_mode) = process_initial_data
# (i, template, template_data, control_points, momenta, with_grad, momenta_t, sparse_matrix, alpha) = arg
#
# # start = time.perf_counter()
# device, device_id = get_best_device(gpu_mode=gpu_mode)
# # device, device_id = ('cpu', -1)
# if device_id >= 0:
# torch.cuda.set_device(device_id)
#
# # convert np.ndarrays to torch tensors. This is faster than transferring torch tensors to process.
# template_data = {key: move_data(value, device=device, dtype=tensor_scalar_type,
# requires_grad=with_grad and not freeze_template)
# for key, value in template_data.items()}
# template_points = {key: move_data(value, device=device, dtype=tensor_scalar_type,
# requires_grad=with_grad and not freeze_template)
# for key, value in template.get_points().items()}
# control_points = move_data(control_points, device=device, dtype=tensor_scalar_type,
# requires_grad=with_grad and not freeze_control_points)
# momenta = move_data(momenta, device=device, dtype=tensor_scalar_type,
# requires_grad=with_grad and not freeze_momenta)
#
# assert torch.device(
# device) == control_points.device == momenta.device, 'control_points and momenta tensors must be on the same device. ' \
# 'device=' + device + \
# ', control_points.device=' + str(control_points.device) + \
# ', momenta.device=' + str(momenta.device)
#
# attachment, regularity = DeterministicAtlasHypertemplate._deform_and_compute_attachment_and_regularity(
# exponential, template_points, control_points, momenta,
# template, template_data, multi_object_attachment,
# deformable_objects[i], objects_noise_variance, alpha,
# device)
#
# res = DeterministicAtlasHypertemplate._compute_gradients(
# attachment, regularity,
# freeze_template, momenta_t,
# freeze_control_points, control_points,
# freeze_momenta, momenta, freeze_sparse_matrix, sparse_matrix,
# with_grad)
# # elapsed = time.perf_counter() - start
# # logger.info('pid=' + str(os.getpid()) + ', ' + torch.multiprocessing.current_process().name +
# # ', device=' + device + ', elapsed=' + str(elapsed))
# return i, res
class DeterministicAtlasWithModule(AbstractStatisticalModel):
"""
Deterministic atlas object class.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
    def __init__(self, template_specifications, number_of_subjects,
                 dimension=default.dimension,
                 tensor_scalar_type=default.tensor_scalar_type,
                 tensor_integer_type=default.tensor_integer_type,
                 dense_mode=default.dense_mode,
                 number_of_processes=default.number_of_processes,
                 deformation_kernel_type=default.deformation_kernel_type,
                 deformation_kernel_width=default.deformation_kernel_width,
                 deformation_kernel_device=default.deformation_kernel_device,
                 shoot_kernel_type=default.shoot_kernel_type,
                 number_of_time_points=default.number_of_time_points,
                 use_rk2_for_shoot=default.use_rk2_for_shoot, use_rk2_for_flow=default.use_rk2_for_flow,
                 freeze_template=default.freeze_template,
                 use_sobolev_gradient=default.use_sobolev_gradient,
                 smoothing_kernel_width=default.smoothing_kernel_width,
                 initial_control_points=default.initial_control_points,
                 freeze_control_points=default.freeze_control_points,
                 initial_cp_spacing=default.initial_cp_spacing,
                 initial_momenta=default.initial_momenta,
                 freeze_momenta=default.freeze_momenta,
                 gpu_mode=default.gpu_mode,
                 process_per_gpu=default.process_per_gpu,
                 **kwargs):
        """Build the atlas model state.

        Declares the fixed effects: template / hyper-template data, control
        points, per-subject momenta, hyper-template momenta ('momenta_t'), and
        the per-subject Gaussian "module" fields (positions, variances,
        intensities) that are added to the deformed template intensities.
        """
        AbstractStatisticalModel.__init__(self, name='DeterministicAtlas', number_of_processes=number_of_processes,
                                          gpu_mode=gpu_mode)

        # Global-like attributes.
        self.dimension = dimension
        self.tensor_scalar_type = tensor_scalar_type
        self.tensor_integer_type = tensor_integer_type
        self.dense_mode = dense_mode

        # Declare model structure.
        self.fixed_effects['template_data'] = None
        self.fixed_effects['hypertemplate_data'] = None
        self.fixed_effects['control_points'] = None
        self.fixed_effects['momenta'] = None
        self.fixed_effects['momenta_t'] = None
        self.fixed_effects['module_intensities'] = None
        self.fixed_effects['module_positions'] = None
        self.fixed_effects['module_variances'] = None

        self.freeze_template = freeze_template
        self.freeze_control_points = freeze_control_points
        self.freeze_momenta = freeze_momenta
        self.freeze_sparse_matrix = False
        self.alpha = 1

        # Deformation: one exponential for subject deformations, one for the
        # hyper-template -> template deformation.
        self.exponential = Exponential(
            dense_mode=dense_mode,
            kernel=factory(deformation_kernel_type,
                           gpu_mode=gpu_mode,
                           kernel_width=deformation_kernel_width),
            shoot_kernel_type=shoot_kernel_type,
            number_of_time_points=number_of_time_points,
            use_rk2_for_shoot=use_rk2_for_shoot, use_rk2_for_flow=use_rk2_for_flow)

        self.exponential_t = Exponential(
            dense_mode=dense_mode,
            kernel=factory(deformation_kernel_type,
                           gpu_mode=gpu_mode,
                           kernel_width=deformation_kernel_width),
            shoot_kernel_type=shoot_kernel_type,
            number_of_time_points=number_of_time_points,
            use_rk2_for_shoot=use_rk2_for_shoot, use_rk2_for_flow=use_rk2_for_flow)

        # Template.
        (object_list, self.objects_name, self.objects_name_extension,
         self.objects_noise_variance, self.multi_object_attachment) = create_template_metadata(template_specifications,
                                                                                               self.dimension)

        self.template = DeformableMultiObject(object_list)
        self.hypertemplate = DeformableMultiObject(object_list)
        # self.template.update()

        self.number_of_objects = len(self.template.object_list)

        self.use_sobolev_gradient = use_sobolev_gradient
        self.smoothing_kernel_width = smoothing_kernel_width
        if self.use_sobolev_gradient:
            self.sobolev_kernel = factory(deformation_kernel_type,
                                          gpu_mode=gpu_mode,
                                          kernel_width=smoothing_kernel_width)

        # Template data.
        self.fixed_effects['template_data'] = self.template.get_data()
        self.fixed_effects['hypertemplate_data'] = self.hypertemplate.get_data()

        # Control points.
        self.fixed_effects['control_points'] = initialize_control_points(
            initial_control_points, self.template, initial_cp_spacing, deformation_kernel_width,
            self.dimension, self.dense_mode)
        self.number_of_control_points = len(self.fixed_effects['control_points'])

        # Momenta.
        # NOTE(review): the initialized momenta are immediately overwritten
        # with a small constant field -- confirm this is intended.
        self.fixed_effects['momenta'] = initialize_momenta(
            initial_momenta, self.number_of_control_points, self.dimension, number_of_subjects)
        self.fixed_effects['momenta'] = 0.0001*np.ones(self.fixed_effects['momenta'].shape)
        self.number_of_subjects = number_of_subjects

        self.fixed_effects['momenta_t'] = initialize_momenta(
            None, self.number_of_control_points, self.dimension, 1)

        # initial_cp = initialize_control_points(None, self.template, 40, None, self.dimension, False)
        # self.nb_modules = initial_cp.shape[0]
        # self.fixed_effects['module_positions'] = np.array([initial_cp,]*self.number_of_subjects)

        #self.fixed_effects['module_positions'] = np.array([[np.array(self.template.get_points()['image_points'].shape[:-1])/2]*initial_cp.shape[0]]*self.number_of_subjects)

        # Shrink the first object's bounding box to a centered box of width 10,
        # then seed the module positions on a grid inside it (spacing argument
        # of initialize_control_points = 3 -- TODO confirm its semantics).
        for i in range(object_list[0].bounding_box.shape[0]):
            object_list[0].bounding_box[i,0] = object_list[0].bounding_box[i,1]/2 - 10/2
            object_list[0].bounding_box[i, 1] = object_list[0].bounding_box[i, 1] / 2 + 10 / 2
        t = DeformableMultiObject(object_list)
        initial_cp = initialize_control_points(None, t, 3, None, self.dimension, False)
        self.nb_modules = initial_cp.shape[0]
        self.fixed_effects['module_positions'] = np.array([initial_cp, ] * self.number_of_subjects)

        # k = 0
        # j = 0
        # add = 1
        # while k < self.nb_modules - 1:
        #     self.fixed_effects['module_positions'][:,k,j] += add
        #     self.fixed_effects['module_positions'][:, k+1, j] -= add
        #     k += 2
        #     if j == self.fixed_effects['module_positions'].shape[2] - 1:
        #         j = 0
        #         add += 1
        #     else:
        #         j+=1

        # modules start with zero intensity and isotropic variance 5
        self.fixed_effects['module_intensities'] = 0*np.ones([self.number_of_subjects, self.nb_modules])
        self.fixed_effects['module_variances'] = 5*np.ones([self.number_of_subjects, self.nb_modules, self.dimension])

        self.process_per_gpu = process_per_gpu

        # regularization variances along / orthogonal to the momenta direction
        # (used by the commented-out regularizer below -- TODO confirm)
        self.regu_var_m = 5
        self.regu_var_m_ortho = 10
def initialize_noise_variance(self, dataset, device='cpu'):
if np.min(self.objects_noise_variance) < 0:
hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, momenta_t \
= self._fixed_effects_to_torch_tensors(False, device=device)
targets = dataset.deformable_objects
targets = [target[0] for target in targets]
residuals_torch = []
self.exponential.set_initial_template_points(template_points)
self.exponential.set_initial_control_points(control_points)
for i, target in enumerate(targets):
self.exponential.set_initial_momenta(momenta[i])
self.exponential.update()
deformed_points = self.exponential.get_template_points()
deformed_data = self.template.get_deformed_data(deformed_points, template_data)
residuals_torch.append(self.multi_object_attachment.compute_distances(
deformed_data, self.template, target))
residuals = np.zeros((self.number_of_objects,))
for i in range(len(residuals_torch)):
residuals += residuals_torch[i].detach().cpu().numpy()
# Initialize the noise variance hyper-parameter as a 1/100th of the initial residual.
for k, obj in enumerate(self.objects_name):
if self.objects_noise_variance[k] < 0:
nv = 0.01 * residuals[k] / float(self.number_of_subjects)
self.objects_noise_variance[k] = nv
logger.info('>> Automatically chosen noise std: %.4f [ %s ]' % (math.sqrt(nv), obj))
####################################################################################################################
### Encapsulation methods:
####################################################################################################################
# Template data ----------------------------------------------------------------------------------------------------
    def get_template_data(self):
        # current template data (dict keyed by data type, e.g. 'image_intensities')
        return self.fixed_effects['template_data']
    def set_template_data(self, td):
        # keep the fixed-effects dict and the template object in sync
        self.fixed_effects['template_data'] = td
        self.template.set_data(td)
    def get_hypertemplate_data(self):
        # hyper-template data (never deformed in place; see set_fixed_effects)
        return self.fixed_effects['hypertemplate_data']
# Control points ---------------------------------------------------------------------------------------------------
    def get_control_points(self):
        # control-point positions shared by all subject deformations
        return self.fixed_effects['control_points']
    def set_control_points(self, cp):
        self.fixed_effects['control_points'] = cp
        # self.number_of_control_points = len(cp)
# Momenta ----------------------------------------------------------------------------------------------------------
    def get_momenta(self):
        # per-subject momenta attached to the control points
        return self.fixed_effects['momenta']
    def set_momenta(self, mom):
        self.fixed_effects['momenta'] = mom
    def get_momenta_t(self):
        # momenta of the hyper-template -> template deformation
        return self.fixed_effects['momenta_t']
    def set_momenta_t(self, mom):
        self.fixed_effects['momenta_t'] = mom
    def set_module_intensities(self, w):
        # per-subject, per-module intensity weights
        self.fixed_effects['module_intensities'] = w
    def get_module_intensities(self):
        return self.fixed_effects['module_intensities']
    def set_module_positions(self, c):
        # per-subject, per-module center positions
        self.fixed_effects['module_positions'] = c
    def get_module_positions(self):
        return self.fixed_effects['module_positions']
    def set_module_variances(self, sigma):
        # per-subject, per-module, per-dimension variances
        self.fixed_effects['module_variances'] = sigma
    def get_module_variances(self):
        return self.fixed_effects['module_variances']
# Full fixed effects -----------------------------------------------------------------------------------------------
def get_fixed_effects(self):
out = {}
if not self.freeze_template:
out['momenta_t'] = self.fixed_effects['momenta_t']
if not self.freeze_control_points:
out['control_points'] = self.fixed_effects['control_points']
if not self.freeze_momenta:
out['momenta'] = self.fixed_effects['momenta']
if not self.freeze_sparse_matrix:
out['module_positions'] = self.fixed_effects['module_positions']
out['module_intensities'] = self.fixed_effects['module_intensities']
out['module_variances'] = self.fixed_effects['module_variances']
return out
    def set_fixed_effects(self, fixed_effects):
        """Write back optimized fixed effects; when the template is free, also
        rebuild the template by shooting the hyper-template with momenta_t."""
        if not self.freeze_template:
            device, _ = get_best_device(self.gpu_mode)
            # NOTE(review): the tensors below are built from the *currently
            # stored* momenta_t, before the new fixed_effects['momenta_t'] is
            # assigned -- so the template lags one update behind the momenta.
            # Verify this one-step lag is intended.
            hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, momenta_t, module_intensities, module_positions, module_variances \
                = self._fixed_effects_to_torch_tensors(False, device=device)
            self.exponential_t.set_initial_template_points(hypertemplate_points)
            self.exponential_t.set_initial_control_points(control_points)
            self.exponential_t.set_initial_momenta(momenta_t[0])
            self.exponential_t.move_data_to_(device=device)
            self.exponential_t.update()
            template_points = self.exponential_t.get_template_points()
            template_data = self.hypertemplate.get_deformed_data(template_points, hypertemplate_data)
            # back to numpy before storing in the fixed-effects dict
            template_data = {key: value.detach().cpu().numpy() for key, value in template_data.items()}
            self.set_momenta_t(fixed_effects['momenta_t'])
            self.set_template_data(template_data)
        if not self.freeze_control_points:
            self.set_control_points(fixed_effects['control_points'])
        if not self.freeze_momenta:
            self.set_momenta(fixed_effects['momenta'])
        if not self.freeze_sparse_matrix:
            self.set_module_positions(fixed_effects['module_positions'])
            self.set_module_variances((fixed_effects['module_variances']))
            self.set_module_intensities(fixed_effects['module_intensities'])
####################################################################################################################
### Public methods:
####################################################################################################################
    def setup_multiprocess_pool(self, dataset):
        """Start the worker pool, passing the per-process immutable state.

        NOTE(review): self.sobolev_kernel only exists when
        use_sobolev_gradient is True (see __init__) -- presumably this is
        only called in that configuration; confirm.
        """
        self._setup_multiprocess_pool(initargs=([target[0] for target in dataset.deformable_objects],
                                                self.multi_object_attachment,
                                                self.objects_noise_variance,
                                                self.freeze_template, self.freeze_control_points, self.freeze_momenta,
                                                self.exponential, self.sobolev_kernel, self.use_sobolev_gradient,
                                                self.tensor_scalar_type, self.gpu_mode))
# Compute the functional. Numpy input/outputs.
    def compute_log_likelihood(self, dataset, population_RER, individual_RER, mode='complete', with_grad=False):
        """
        Compute the log-likelihood of the dataset, given parameters fixed_effects and random effects realizations
        population_RER and indRER.

        :param dataset: dataset whose deformable_objects are matched against the deformed template.
        :param population_RER: Dictionary of population random effects realizations (unused here).
        :param individual_RER: Dictionary of individual random effects realizations (unused here).
        :param mode: Indicates which log_likelihood should be computed, between 'complete', 'model', and 'class2'.
        :param with_grad: Flag that indicates wether the gradient should be returned as well.
        :return: (attachment, regularity) or (attachment, regularity, gradient) when with_grad.
        """
        if self.number_of_processes > 1:
            # NOTE(review): _subject_attachment_and_regularity is commented out
            # at module level in this file, so this multiprocess branch would
            # raise NameError -- confirm before enabling number_of_processes > 1.
            targets = [target[0] for target in dataset.deformable_objects]
            (deformable_objects, multi_object_attachment, objects_noise_variance,
             freeze_template, freeze_control_points, freeze_momenta,
             exponential, sobolev_kernel, use_sobolev_gradient, tensor_scalar_type, gpu_mode) = process_initial_data
            device, device_id = get_best_device(gpu_mode=gpu_mode)

            # shoot the hyper-template once to obtain the current template data
            self.exponential_t.set_initial_template_points(self.hypertemplate.get_points())
            self.exponential_t.set_initial_control_points(self.fixed_effects['control_points'])
            self.exponential_t.set_initial_momenta(self.fixed_effects['momenta_t'])
            self.exponential_t.move_data_to_(device=device)
            self.exponential_t.update()
            template_points = self.exponential_t.get_template_points()
            template_data = self.hypertemplate.get_deformed_data(template_points, self.fixed_effects['hypertemplate_data'])

            # one work item per subject
            args = [(i, self.template,
                     template_data,
                     self.fixed_effects['control_points'],
                     self.fixed_effects['momenta'][i],
                     with_grad) for i in range(len(targets))]

            start = time.perf_counter()
            results = self.pool.map(_subject_attachment_and_regularity, args, chunksize=1)  # TODO: optimized chunk size
            # results = self.pool.imap_unordered(_subject_attachment_and_regularity, args, chunksize=1)
            # results = self.pool.imap(_subject_attachment_and_regularity, args, chunksize=int(len(args)/self.number_of_processes))
            logger.debug('time taken for deformations : ' + str(time.perf_counter() - start))

            # Sum and return.
            if with_grad:
                attachment = 0.0
                regularity = 0.0

                gradient = {}
                if not self.freeze_template:
                    gradient['momenta_t'] = np.zeros(self.fixed_effects['momenta_t'].shape)
                if not self.freeze_control_points:
                    gradient['control_points'] = np.zeros(self.fixed_effects['control_points'].shape)
                if not self.freeze_momenta:
                    gradient['momenta'] = np.zeros(self.fixed_effects['momenta'].shape)

                for result in results:
                    i, (attachment_i, regularity_i, gradient_i) = result
                    attachment += attachment_i
                    regularity += regularity_i
                    for key, value in gradient_i.items():
                        # momenta gradients are per-subject; everything else accumulates
                        if key == 'momenta':
                            gradient[key][i] = value
                        else:
                            gradient[key] += value

                return attachment, regularity, gradient
            else:
                attachment = 0.0
                regularity = 0.0
                for result in results:
                    i, (attachment_i, regularity_i) = result
                    attachment += attachment_i
                    regularity += regularity_i
                return attachment, regularity

        else:
            # single-process path: everything stays in torch on one device
            device, device_id = get_best_device(gpu_mode=self.gpu_mode)
            hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, \
            momenta_t, module_intensities, module_positions, module_variances = self._fixed_effects_to_torch_tensors(with_grad,device=device)
            sparse_matrix = self.construct_sparse_matrix(template_points['image_points'], module_positions, module_variances, module_intensities)
            return self._compute_attachment_and_regularity(dataset, hypertemplate_data, hypertemplate_points, control_points,
                                                           momenta, momenta_t, sparse_matrix, module_intensities, module_positions, module_variances, with_grad, device=device)
####################################################################################################################
### Private methods:
####################################################################################################################
@staticmethod
def _deform_and_compute_attachment_and_regularity(exponential, template_points, control_points, momenta, module_positions, module_variances, module_intensities, sparse_matrix,
                                                  template, template_data,
                                                  multi_object_attachment, deformable_objects,
                                                  objects_noise_variance, regu_var_m, regu_var_m_ortho,
                                                  device='cpu'):
    """Shoot the template towards one subject and score the match.

    Returns (attachment, regularity) as torch scalars on `device`:
    attachment is the negative weighted data distance between the deformed
    template (plus the subject's additive module image `sparse_matrix`) and
    the subject's data; regularity is the negative deformation norm plus two
    penalty terms coupling the module positions to the deformation and to
    each other.
    """
    # Deform: flow the template points with the given control points / momenta.
    exponential.set_initial_template_points(template_points)
    exponential.set_initial_control_points(control_points)
    exponential.set_initial_momenta(momenta)
    exponential.move_data_to_(device=device)
    exponential.update()
    # Compute attachment and regularity.
    deformed_points = exponential.get_template_points()
    deformed_data = template.get_deformed_data(deformed_points, template_data)
    # The subject-specific module image is added on top of the deformed template.
    deformed_data['image_intensities'] += sparse_matrix
    attachment = -multi_object_attachment.compute_weighted_distance(deformed_data, template, deformable_objects,
                                                                    objects_noise_variance)
    regularity = - exponential.get_norm_squared()
    # Penalty coupling modules to the deformation: for each final control point,
    # an anisotropic Gaussian "tube" elongated along its (normalized) momentum
    # (variance regu_var_m along the momentum, regu_var_m_ortho across it)
    # penalizes module centers lying inside it.
    # NOTE(review): the orthogonal direction `e` is redrawn at random on every
    # call, so this term (and its gradient) is stochastic — confirm intended.
    # NOTE(review): the 2-D branch is weighted 10000x while the 3-D branch is
    # unweighted — confirm this asymmetry is deliberate.
    # NOTE(review): momenta[k].detach().numpy() requires a CPU tensor — confirm
    # device handling when device != 'cpu'.
    final_cp = exponential.control_points_t[-1]
    final_momenta = exponential.momenta_t[-1]
    for k in range(final_cp.shape[0]):
        if momenta[k].detach().numpy().any():
            m = final_momenta[k] / torch.norm(momenta[k])
            e = torch.randn(m.shape[0], dtype=torch.float64)
            e = e - torch.dot(e, m) * m
            e = e / torch.norm(e)
            for l in range(module_positions.shape[0]):
                if m.size()[0] == 2:
                    y = module_positions[l] - final_cp[k]
                    regularity += - 10000*torch.sum(torch.exp(- torch.mm(y.view(-1,2), m.view(2,1)) ** 2 / (2 * regu_var_m) - torch.mm(y.view(-1,2), e.view(2,1)) ** 2 / (2 * regu_var_m_ortho)).view(y.shape[:-1]))
                else:
                    e2 = torch.cross(m, e)
                    y = module_positions[l] - final_cp[k]
                    regularity += - torch.sum(torch.exp(
                        -torch.dot(y, m) ** 2 / (2 * regu_var_m) - torch.dot(y, e) ** 2 / (2 * regu_var_m_ortho) - torch.dot(y, e2) ** 2 / (2 * regu_var_m_ortho)).view(y.shape[:-1]))
    # Pairwise term over module positions (squared-distance Gaussian kernel).
    # NOTE(review): `regularity -= -torch.sum(...)` adds the sum, i.e. it
    # *rewards* modules for clustering; if repulsion was intended the sign is
    # inverted — confirm.
    x_norm = (module_positions ** 2).sum(1).view(-1, 1)
    dist = x_norm + x_norm.view(1, -1) - 2.0 * torch.mm(module_positions, torch.transpose(module_positions, 0, 1))
    regularity -= -torch.sum(torch.exp(-dist))
    assert torch.device(
        device) == attachment.device == regularity.device, 'attachment and regularity tensors must be on the same device. ' \
                                                           'device=' + device + \
                                                           ', attachment.device=' + str(attachment.device) + \
                                                           ', regularity.device=' + str(regularity.device)
    return attachment, regularity
@staticmethod
def _compute_gradients(attachment, regularity,
                       freeze_template, momenta_t,
                       freeze_control_points, control_points,
                       freeze_momenta, momenta, freeze_sparse_matrix, module_intensities, module_positions, module_variances,
                       with_grad=False):
    """Detach (attachment, regularity) to numpy, optionally with gradients.

    When `with_grad` is True, back-propagates attachment + regularity and
    collects the gradient of every non-frozen fixed effect into a dict keyed
    by fixed-effect name, returning (attachment, regularity, gradient).
    Otherwise returns (attachment, regularity) only.
    """
    if with_grad:
        total_for_subject = attachment + regularity
        total_for_subject.backward()
        gradient = {}
        if not freeze_template:
            assert momenta_t.grad is not None, 'Gradients have not been computed'
            gradient['momenta_t'] = momenta_t.grad.detach().cpu().numpy()
        if not freeze_control_points:
            assert control_points.grad is not None, 'Gradients have not been computed'
            gradient['control_points'] = control_points.grad.detach().cpu().numpy()
        if not freeze_momenta:
            assert momenta.grad is not None, 'Gradients have not been computed'
            gradient['momenta'] = momenta.grad.detach().cpu().numpy()
        if not freeze_sparse_matrix:
            # Consistent with the other fixed effects: fail with a clear
            # message instead of an AttributeError on a None .grad.
            assert module_intensities.grad is not None, 'Gradients have not been computed'
            assert module_positions.grad is not None, 'Gradients have not been computed'
            assert module_variances.grad is not None, 'Gradients have not been computed'
            gradient['module_intensities'] = module_intensities.grad.detach().cpu().numpy()
            gradient['module_positions'] = module_positions.grad.detach().cpu().numpy()
            gradient['module_variances'] = module_variances.grad.detach().cpu().numpy()
        res = attachment.detach().cpu().numpy(), regularity.detach().cpu().numpy(), gradient
    else:
        res = attachment.detach().cpu().numpy(), regularity.detach().cpu().numpy()
    return res
def _compute_attachment_and_regularity(self, dataset, hypertemplate_data, hypertemplate_points, control_points, momenta, momenta_t, sparse_matrix, module_intensities, module_positions, module_variances,
                                       with_grad=False, device='cpu'):
    """
    Core part of the ComputeLogLikelihood methods. Torch input, numpy output.
    Single-thread version.

    Pipeline: (1) shoot the hypertemplate with momenta_t[0] to obtain the
    current template (its squared norm is subtracted from the regularity),
    (2) deform that template towards every subject, summing the per-subject
    attachments and regularities, (3) convert the totals — with gradients if
    with_grad — to numpy via _compute_gradients.
    Side effect: caches the current template data on self via
    set_template_data.
    """
    # Initialize.
    targets = [target[0] for target in dataset.deformable_objects]
    attachment = 0.
    regularity = 0.
    # Hypertemplate -> template geodesic shooting.
    self.exponential_t.set_initial_template_points(hypertemplate_points)
    self.exponential_t.set_initial_control_points(control_points)
    self.exponential_t.set_initial_momenta(momenta_t[0])
    self.exponential_t.move_data_to_(device=device)
    self.exponential_t.update()
    template_points = self.exponential_t.get_template_points()
    template_data = self.hypertemplate.get_deformed_data(template_points, hypertemplate_data)
    self.set_template_data({key: value.detach().cpu().numpy() for key, value in template_data.items()})
    regularity -= self.exponential_t.get_norm_squared()
    # loop for every deformable object
    # deform and update attachment and regularity
    for i, target in enumerate(targets):
        new_attachment, new_regularity = DeterministicAtlasWithModule._deform_and_compute_attachment_and_regularity(
            self.exponential, template_points, control_points, momenta[i], module_positions[i], module_variances[i], module_intensities[i], sparse_matrix[i],
            self.template, template_data, self.multi_object_attachment,
            target, self.objects_noise_variance, self.regu_var_m, self.regu_var_m_ortho,
            device=device)
        attachment += new_attachment
        regularity += new_regularity
    # Compute gradient.
    return self._compute_gradients(attachment, regularity,
                                   self.freeze_template, momenta_t,
                                   self.freeze_control_points, control_points,
                                   self.freeze_momenta, momenta, self.freeze_sparse_matrix, module_intensities, module_positions, module_variances,
                                   with_grad)
####################################################################################################################
### Private utility methods:
####################################################################################################################
def construct_sparse_matrix(self, points, module_centers, module_variances, module_intensities):
    """Render the per-subject additive module images.

    For each subject i and module k, adds a Gaussian-like bump
    exp(-d2(x)) * module_intensities[i, k] over the template grid, where
    d2(x) is the squared per-axis-scaled distance from grid point x to
    module_centers[i, k].

    :param points: template image grid coordinates; last axis = spatial dims.
    :param module_centers: (n_subjects, n_modules, dim) bump centers.
    :param module_variances: (n_subjects, n_modules, dim) per-axis scales.
    :param module_intensities: (n_subjects, n_modules) bump amplitudes.
    :return: double tensor of shape (n_subjects,) + template image shape.
    """
    dim = (self.number_of_subjects,) + self.fixed_effects['template_data']['image_intensities'].shape
    sparse_matrix = torch.zeros(dim).double()
    for i in range(dim[0]):
        for k in range(self.nb_modules):
            # Squared scaled distance expanded as |x|^2 + |c|^2 - 2<x, c>,
            # each term divided component-wise by the module variances.
            x_norm = torch.mul(points ** 2, 1/module_variances[i,k]**2).sum(-1).view(-1, 1)
            y_norm = torch.mul(module_centers[i,k] ** 2, 1/module_variances[i,k]**2).sum().view(-1, 1)
            points_divided = torch.mul(points, 1/module_variances[i,k]**2)
            dist = (x_norm + y_norm - 2.0 * torch.mul(points_divided, module_centers[i,k]).sum(-1).view(-1,1)).reshape(dim[1:])
            sparse_matrix[i] += torch.exp(-dist)*module_intensities[i,k]
    return sparse_matrix
def _fixed_effects_to_torch_tensors(self, with_grad, device='cpu'):
    """
    Convert the fixed_effects into torch tensors.

    requires_grad is only enabled on the effects that are not frozen (and
    only when with_grad is True), so autograd tracks exactly the parameters
    being optimized. Returns the tuple
    (hypertemplate_data, hypertemplate_points, template_data,
     template_points, control_points, momenta, momenta_t,
     module_intensities, module_positions, module_variances).
    """
    # Template data.
    template_data = self.fixed_effects['template_data']
    template_data = {key: move_data(value, device=device, dtype=self.tensor_scalar_type,
                                    requires_grad=False)
                     for key, value in template_data.items()}
    # Template points.
    template_points = self.template.get_points()
    template_points = {key: move_data(value, device=device, dtype=self.tensor_scalar_type,
                                      requires_grad=False)
                       for key, value in template_points.items()}
    # Hypertemplate data.
    hypertemplate_data = self.fixed_effects['hypertemplate_data']
    hypertemplate_data = {key: move_data(value, device=device, dtype=self.tensor_scalar_type,
                                         requires_grad=False)
                          for key, value in hypertemplate_data.items()}
    # Hypertemplate points.
    hypertemplate_points = self.hypertemplate.get_points()
    hypertemplate_points = {key: move_data(value, device=device, dtype=self.tensor_scalar_type,
                                           requires_grad=False)
                            for key, value in hypertemplate_points.items()}
    # Template-shooting momenta: differentiable unless the template is frozen.
    momenta_t = self.fixed_effects['momenta_t']
    momenta_t = move_data(momenta_t, device=device, dtype=self.tensor_scalar_type,
                          requires_grad=(not self.freeze_template and with_grad))
    # Control points.
    if self.dense_mode:
        # In dense mode the landmark points themselves act as control points.
        assert (('landmark_points' in self.template.get_points().keys()) and
                ('image_points' not in self.template.get_points().keys())), \
            'In dense mode, only landmark objects are allowed. One at least is needed.'
        control_points = template_points['landmark_points']
    else:
        control_points = self.fixed_effects['control_points']
        control_points = move_data(control_points, device=device, dtype=self.tensor_scalar_type,
                                   requires_grad=(not self.freeze_control_points and with_grad))
        # control_points = Variable(torch.from_numpy(control_points).type(self.tensor_scalar_type),
        #                           requires_grad=(not self.freeze_control_points and with_grad))
    # Momenta.
    momenta = self.fixed_effects['momenta']
    momenta = move_data(momenta, device=device, dtype=self.tensor_scalar_type,
                        requires_grad=(not self.freeze_momenta and with_grad))
    # Module parameters: all three share the freeze_sparse_matrix flag.
    module_intensities = self.fixed_effects['module_intensities']
    module_intensities = move_data(module_intensities, device=device, dtype=self.tensor_scalar_type,
                                   requires_grad=(not self.freeze_sparse_matrix and with_grad))
    module_positions = self.fixed_effects['module_positions']
    module_positions = move_data(module_positions, device=device, dtype=self.tensor_scalar_type,
                                 requires_grad=(not self.freeze_sparse_matrix and with_grad))
    module_variances = self.fixed_effects['module_variances']
    module_variances = move_data(module_variances, device=device, dtype=self.tensor_scalar_type,
                                 requires_grad=(not self.freeze_sparse_matrix and with_grad))
    return hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, momenta_t, module_intensities, module_positions, module_variances
####################################################################################################################
### Writing methods:
####################################################################################################################
def write(self, dataset, population_RER, individual_RER, output_dir, write_residuals=True):
    """Write model predictions, residuals (optional) and estimated parameters
    to output_dir."""
    residuals = self._write_model_predictions(dataset, individual_RER, output_dir,
                                              compute_residuals=write_residuals)
    if write_residuals:
        # Convert the per-subject torch residuals to numpy before dumping.
        as_numpy = []
        for per_subject in residuals:
            as_numpy.append([r.data.cpu().numpy() for r in per_subject])
        write_2D_list(as_numpy, output_dir, self.name + "__EstimatedParameters__Residuals.txt")
    self._write_model_parameters(output_dir)
def _write_model_predictions(self, dataset, individual_RER, output_dir, compute_residuals=True):
    """Write per-subject reconstructions (deformed template + module image)
    and module-only images; optionally return per-subject residuals."""
    device, _ = get_best_device(self.gpu_mode)
    # Initialize.
    hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, momenta_t, \
    module_intensities, module_positions, module_variances = self._fixed_effects_to_torch_tensors(False, device=device)
    sparse_matrix = self.construct_sparse_matrix(template_points['image_points'], module_positions, module_variances, module_intensities)
    # Deform, write reconstructions and compute residuals.
    self.exponential.set_initial_template_points(template_points)
    self.exponential.set_initial_control_points(control_points)
    residuals = []  # List of torch 1D tensors. Individuals, objects.
    for i, subject_id in enumerate(dataset.subject_ids):
        self.exponential.set_initial_momenta(momenta[i])
        self.exponential.update()
        # Writing the whole flow.
        names = []
        for k, object_name in enumerate(self.objects_name):
            name = self.name + '__flow__' + object_name + '__subject_' + subject_id
            names.append(name)
        #self.exponential.write_flow(names, self.objects_name_extension, self.template, template_data, output_dir)
        deformed_points = self.exponential.get_template_points()
        deformed_data = self.template.get_deformed_data(deformed_points, template_data)
        deformed_data['image_intensities'] += sparse_matrix[i]
        # Mark each module center with a bright voxel (2x the current max
        # intensity), clamping the integer coordinates into the image bounds.
        # NOTE(review): the upper bound 99 assumes a 100-voxel-wide grid —
        # confirm for other image sizes. This clamp mutates module_positions
        # in place.
        m = torch.max(deformed_data['image_intensities'])
        for k in range(module_positions.shape[1]):
            for j in range(module_positions[i,k].shape[0]):
                module_positions[i,k,j] = min(99, module_positions[i,k,j])
                module_positions[i, k, j] = max(0, module_positions[i, k, j])
            deformed_data['image_intensities'][tuple(module_positions[i,k].int())] = 2* m
        if compute_residuals:
            residuals.append(self.multi_object_attachment.compute_distances(
                deformed_data, self.template, dataset.deformable_objects[i][0]))
        # Reconstruction = deformed template + module image.
        names = []
        for k, (object_name, object_extension) \
                in enumerate(zip(self.objects_name, self.objects_name_extension)):
            name = self.name + '__Reconstruction__' + object_name + '__subject_' + subject_id + object_extension
            names.append(name)
        self.template.write(output_dir, names,
                            {key: value.detach().cpu().numpy() for key, value in deformed_data.items()})
        # Also write the module image alone (without the deformed template).
        deformed_data['image_intensities'] = sparse_matrix[i]
        names = []
        for k, (object_name, object_extension) \
                in enumerate(zip(self.objects_name, self.objects_name_extension)):
            name = self.name + '__Reconstruction__' + object_name + '__subject_' + subject_id + '_sparsematrix' + object_extension
            names.append(name)
        self.template.write(output_dir, names,
                            {key: value.detach().cpu().numpy() for key, value in deformed_data.items()})
    return residuals
def _write_model_parameters(self, output_dir):
    """Write the estimated template image, control points, momenta and module
    parameters to files in output_dir."""
    # Template.
    device, _ = get_best_device(self.gpu_mode)
    template_names = []
    hypertemplate_data, hypertemplate_points, template_data, template_points, control_points, momenta, momenta_t, module_intensities, module_positions, module_variances \
        = self._fixed_effects_to_torch_tensors(False, device=device)
    # Re-shoot the hypertemplate with momenta_t to obtain the current template
    # before writing it (also refreshes the cached template data on self).
    self.exponential_t.set_initial_template_points(hypertemplate_points)
    self.exponential_t.set_initial_control_points(control_points)
    self.exponential_t.set_initial_momenta(momenta_t[0])
    self.exponential_t.move_data_to_(device=device)
    self.exponential_t.update()
    template_points = self.exponential_t.get_template_points()
    template_data = self.hypertemplate.get_deformed_data(template_points, hypertemplate_data)
    self.set_template_data({key: value.detach().cpu().numpy() for key, value in template_data.items()})
    for i in range(len(self.objects_name)):
        aux = self.name + "__EstimatedParameters__Template_" + self.objects_name[i] + self.objects_name_extension[i]
        template_names.append(aux)
    self.template.write(output_dir, template_names)
    # Control points.
    write_2D_array(self.get_control_points(), output_dir, self.name + "__EstimatedParameters__ControlPoints.txt")
    # Momenta.
    write_3D_array(self.get_momenta(), output_dir, self.name + "__EstimatedParameters__Momenta.txt")
    write_2D_array(self.get_momenta_t()[0], output_dir, self.name + "__EstimatedParameters__Momenta_t.txt")
    # Module parameters.
    write_3D_array(self.get_module_positions(), output_dir, self.name + "__EstimatedParameters__ModulePositions.txt")
    write_3D_array(self.get_module_intensities(), output_dir, self.name + "__EstimatedParameters__ModuleIntensities.txt")
    write_3D_array(self.get_module_variances(), output_dir, self.name + "__EstimatedParameters__ModuleVariances.txt")
| lepennec/Deformetrica_coarse_to_fine | core/models/deterministic_atlas_withmodule.py | deterministic_atlas_withmodule.py | py | 45,901 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "core.models.abstract_statistical_model.AbstractStatisticalModel",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "core.default.dimension",
"line_number": 94,
"usage_type... |
1004762180 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 20:06:31 2019
@author: saksake
"""
import numpy as np
from sklearn.datasets import load_iris
def datasets():
    """Load the iris dataset into a flat {feature_name: column} dict.

    Keys are derived from the sklearn feature names by joining the first two
    words with '_' (e.g. "sepal length (cm)" -> "sepal_length"); the class
    labels are stored under 'species'.
    """
    # Load the iris dataset (the original code mislabeled it as Boston
    # housing and named the variable `boston`).
    iris = load_iris()
    all_features = {}
    for i in range(len(iris.feature_names)):
        words = str(iris.feature_names[i]).split()
        key = words[0] + '_' + words[1]
        all_features[key] = iris.data[:, i]
    # Add the target column.
    all_features['species'] = iris.target
    return all_features
def splitdict(feature_dict, train_portion, label_key):
    """Randomly split a feature dict into train/test feature and label dicts.

    :param feature_dict: {column_name: sequence of values}, all same length.
    :param train_portion: fraction (0..1) of rows to place in the train split.
    :param label_key: the key holding the labels; it goes into the label
        dicts, every other key goes into the feature dicts.
    :return: (train_feature, train_label, test_feature, test_label).
    """
    def _select(idx):
        # Build (features, labels) dicts restricted to the given row indices.
        features, labels = {}, {}
        for key, values in feature_dict.items():
            arr = np.array(values)[idx]
            if key == label_key:
                labels[key] = arr
            else:
                features[key] = arr
        return features, labels

    keys = list(feature_dict.keys())
    ndata = len(feature_dict[keys[0]])
    train_n = int(ndata * train_portion)
    # Shuffle the row indices once, then split into a train and a test part.
    idxs = np.arange(ndata)
    np.random.shuffle(idxs)
    train_feature, train_label = _select(idxs[:train_n])
    test_feature, test_label = _select(idxs[train_n:])
    return train_feature, train_label, test_feature, test_label
# Feature subset used for this run: the four measurements plus the label.
use_feature_name = ['sepal_length',
                    'sepal_width',
                    'petal_length',
                    'petal_width',
                    'species']
# Column groupings consumed by the feature-column construction below
# (only numeric columns are used for iris; the other groups stay empty).
name_columns_category = []
name_columns_bucket = []
name_columns_numeric = ['sepal_length',
                        'sepal_width',
                        'petal_length',
                        'petal_width']
label_key ='species'
train_portion = 0.6
all_features = datasets()
# Echo the available column names.
for key in all_features:
    print("'{:}',".format(key))
# CHOOSE INTEREST FEATURES FROM ALL FEATURES
used_features = {}
for key in all_features:
    if key in use_feature_name :
        used_features[key] = all_features[key]
# Random 60/40 train/test split of the selected columns.
inp_train_feature, inp_train_label, inp_test_feature, inp_test_label = splitdict(feature_dict = used_features,
                                                                                 train_portion = train_portion,
                                                                                 label_key = label_key)
import tensorflow as tf
# MAKE INPUT FUNCTION
# NOTE(review): tf.estimator.inputs.numpy_input_fn is the TF1.x API (removed
# in TF2) — this script assumes TensorFlow 1.x.
# TRAIN DATA
input_fn_train = tf.estimator.inputs.numpy_input_fn(
    x = inp_train_feature,
    y = inp_train_label[label_key],
    shuffle=True,
    batch_size=128,
    num_epochs=None  # loop indefinitely; training length is set by `steps`
    )
# TEST DATA
input_fn_test = tf.estimator.inputs.numpy_input_fn(
    x = inp_test_feature,
    y = inp_test_label[label_key],
    shuffle=False,
    batch_size=128,
    num_epochs=1  # a single deterministic pass for evaluation/prediction
    )
# Define feature columns.
feature_columns_numeric, feature_columns_category, feature_columns_bucket = [], [], []
for key in inp_train_feature :
    # Define numeric feature columns.
    if key in name_columns_numeric :
        feature_columns_numeric.append(tf.feature_column.numeric_column(key))
    # Define categorical feature columns (embedded with one dimension per
    # distinct value).
    elif key in name_columns_category :
        uniq = (np.unique(inp_train_feature[key])).tolist()
        cat_column = tf.feature_column.categorical_column_with_vocabulary_list(key = key,
                                                                               vocabulary_list = uniq)
        embed_column = tf.feature_column.embedding_column(
            categorical_column=cat_column,
            dimension=len(uniq)
            )
        feature_columns_category.append(embed_column)
    # Define bucket feature columns.
    elif key in name_columns_bucket :
        numeric_column = tf.feature_column.numeric_column(key)
        # Make bucket boundaries at evenly spaced quantiles of the value range.
        arr = np.linspace(min(inp_train_feature[key]), max(inp_train_feature[key]), 1000)
        n_bucket = 3
        q = 1./(n_bucket+1.)
        boundaries = []
        for i in range(n_bucket):
            boundaries.append(int(np.quantile(arr, q*(i+1))))
        # Bucketize the numeric column at the quantile boundaries computed above.
        bucketized_feature_column = tf.feature_column.bucketized_column(
            source_column = numeric_column,
            boundaries = boundaries)
        feature_columns_bucket.append(bucketized_feature_column)
feature_columns = feature_columns_numeric + feature_columns_category + feature_columns_bucket
# DEFINE ESTIMATOR
# NOTE(review): TF1.x canned estimator; checkpoints go under model_dir
# ('Iris'), so reruns resume from the saved state.
estimator= tf.estimator.DNNClassifier(
    feature_columns = feature_columns,
    # Two hidden layers
    hidden_units=[512, 256],
    optimizer='Adagrad', #'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'
    activation_fn=tf.nn.relu, # relu. tanh, sigmoid
    n_classes = len(np.unique(inp_train_label[label_key])),
    # Model directory
    model_dir = 'Iris')
# TRAIN MODEL
estimator.train(input_fn=input_fn_train, steps=5000)
# EVALUATE MODEL
print('-------------------------------------')
evaluate = estimator.evaluate(input_fn = input_fn_test)
print('-------------------------------------')
# PREDICT
pred = list(estimator.predict(input_fn = input_fn_test))
# COMPARE PREDICTED AND TEST LABELS
y_prob = [x['probabilities'] for x in pred]
# argmax over the class-probability vector gives the predicted class index.
y_pred = np.asarray([np.argmax(x) for x in y_prob])
y_real = inp_test_label[label_key]
# Count matching predictions and report plain accuracy.
ntrue = len(np.where(y_pred == y_real)[0])
acc = ntrue/float(len(y_real))
print('Accuracy = {:}'.format(acc))
| rofiqq/Machine-Learning | High_API/classifier/iris/iris.py | iris.py | py | 5,883 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.ran... |
12704422220 | #!/usr/bin/env python3
import asyncio
import discord
import os
# Discord client plus configuration read from the environment.
client= discord.Client()
TOKEN = os.getenv('USER_TOKEN')
# NOTE(review): int() raises if CHANNEL_ID is unset or non-numeric — confirm
# the deployment environment always provides it.
CHANNEL_ID = int(os.getenv('CHANNEL_ID'))
MESSAGE = os.getenv('MESSAGE')
def lambda_handler(event, context):
    """AWS Lambda entry point: start the Discord client and block until
    on_ready has sent the message and closed the connection."""
    print("lambda start")
    # NOTE(review): bot=False logs in with a user token (self-bot) — confirm
    # this is intentional; user automation is disallowed by Discord's ToS.
    client.run(TOKEN, bot=False)
@client.event
async def on_ready():
    """Once connected, post MESSAGE to CHANNEL_ID (if found) and disconnect."""
    print('%s has connected to Discord!' % client.user)
    target = client.get_channel(CHANNEL_ID)
    if not target:
        print("channel not found")
    else:
        await target.send(MESSAGE)
        print("message sent")
    # Closing the client lets client.run() in lambda_handler return.
    await client.close()
| mgla/lambda-discord-messager | lambda_function.py | lambda_function.py | py | 580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
... |
30489464890 | from .master import Master
import numpy as np
import poselib
import time
import math
import test_module.linecloud as lineCloudTest
import test_module.recontest as recontest
import utils.pose.pose_estimation as pe
import utils.pose.vector as vector
from utils.pose import dataset
from utils.pose import line
from utils.l2precon import calculate
from static import variable
# Fix the RNG so the shuffles/random line directions below are reproducible.
np.random.seed(variable.RANDOM_SEED)
class OLC(Master):
    """Obfuscated line cloud map.

    Replaces every 3-D point of the reconstruction by a 3-D line with a
    random direction through it (privacy-preserving map), then supports P6L
    pose estimation against the line cloud and SPF-based point recovery.
    """
    def __init__(self, dataset_path, output_path):
        # id <-> line bookkeeping: point id -> line, and line index -> point id.
        self.pts_to_line = dict()
        self.line_to_pts = dict()
        self.line_3d = None
        self.pts_2d_query = None # Images.txt
        self.pts_3d_query = None # Points3D.txt
        self.camera_dict_gt = None # cameras.txt
        self.queryIds = None
        self.queryNames = None
        self.image_dict_gt = None
        self.resultPose = list()
        self.resultRecon = list()
        self.map_type = "OLC"
        self.points_3D_recon = list()
        self.lines_3D_recon = list()
        # Master.__init__ is expected to populate the query structures above.
        super().__init__(dataset_path, output_path)
        # Shuffle the ids once so maskSparsity can take a random prefix.
        self.pts_3d_ids = list(self.pts_3d_query.keys())
        np.random.shuffle(self.pts_3d_ids)
    def makeLineCloud(self):
        """Draw one random-direction 3-D line through every 3-D point."""
        print("OLC: Random distribution line cloud")
        _pts_3d = np.array([v.xyz for v in self.pts_3d_query.values()])
        _pts_ids = np.array([k for k in self.pts_3d_query.keys()])
        self.points_3D, self.line_3d, self.ind_to_id, self.id_to_ind = line.drawlines_olc(_pts_3d,_pts_ids)
        for i, k in enumerate(self.pts_3d_query.keys()):
            self.pts_to_line[k] = self.line_3d[i]
            self.line_to_pts[i] = k
    def maskSparsity(self, sparisty_level):
        """Keep a random fraction (sparisty_level) of the 3-D point ids."""
        new_shape = int(len(self.pts_3d_ids) * sparisty_level)
        self.sparse_line_3d_ids = set(self.pts_3d_ids[:new_shape])
    def matchCorrespondences(self, query_id):
        """Collect 2D point / 3D line correspondences for one query image,
        restricted to the ids kept by maskSparsity; stores them in
        self._x1 (2D points), self._p2 (3D offsets), self._x2 (3D lines)."""
        connected_pts3d_idx = np.where(self.pts_2d_query[query_id].point3D_ids != -1)[0]
        connected_pts3d_ids = self.pts_2d_query[query_id].point3D_ids[connected_pts3d_idx]
        p2 = np.array([self.pts_3d_query[k].xyz for k in connected_pts3d_ids],dtype=np.float64)
        x1 = np.array(self.pts_2d_query[query_id].xys[connected_pts3d_idx],dtype=np.float64)
        # Sanity-checked map from point id to its row in p2/x1.
        pts_to_ind = {}
        for _i, k in enumerate(connected_pts3d_ids):
            pts_to_ind[k] = _i
            if self.pts_3d_query[k].xyz[0] != p2[_i][0]:
                raise Exception("Point to Index Match Error", k)
        self.valid_pts_3d_ids = self.sparse_line_3d_ids.intersection(set(connected_pts3d_ids))
        newIndex = []
        _x2 = []
        for _pid in self.valid_pts_3d_ids:
            newIndex.append(pts_to_ind[_pid])
            _x2.append(self.pts_to_line[_pid])
        if newIndex:
            newIndex = np.array(newIndex)
            # p1: 2D Point
            # x1: 2D Line
            # p2: 3D Offset
            # x2: 3D Line
            self._x1 = x1[newIndex]
            self._p2 = p2[newIndex]
            self._x2 = np.array(_x2)
        else:
            # No usable correspondence for this query.
            self._x1 = np.array([])
            self._p2 = np.array([])
            self._x2 = np.array([])
        print("Found correspondences: ", self._x1.shape[0])
    def addNoise(self, noise_level):
        # Delegates to the shared noise model in Master.
        super().addNoise(noise_level)
    def estimatePose(self, query_id):
        """Estimate the query camera pose with the P6L solver; silently skips
        queries with fewer than 6 correspondences."""
        if self._x1.shape[0] >= 6:
            gt_img = pe.get_GT_image(query_id, self.pts_2d_query, self.image_dict_gt)
            cam_id = gt_img.camera_id
            cam_p6l = [pe.convert_cam(self.camera_dict_gt[cam_id])]
            res = poselib.estimate_p6l_relative_pose(self._x1, self._p2, self._x2, cam_p6l, cam_p6l, variable.RANSAC_OPTIONS, variable.BUNDLE_OPTIONS, variable.REFINE_OPTION)
            super().savePoseAccuracy(res, gt_img, cam_p6l[0])
    def savePose(self, sparisty_level, noise_level):
        super().savePose(sparisty_level, noise_level)
    def saveAllPoseCSV(self):
        super().saveAllPoseCSV()
    def recoverPts(self, estimator, sparsity_level, noise_level):
        """Recover 3-D points from the sparse line cloud (SPF estimator only)."""
        print("OLC recover image", "\n")
        self.sparse_pts_3d_ids = []
        self.id_to_ind_recon = {}
        self.ind_to_id_recon = {}
        self.points_3D_recon = []
        self.lines_3D_recon = []
        # NOTE(review): this walks line indices 0..N-1 rather than the id set
        # chosen by maskSparsity; confirm the two selections are meant to
        # coincide (they agree in size but not necessarily in membership).
        for i in range(len(self.sparse_line_3d_ids)):
            _pts_3d_id = self.line_to_pts[i]
            self.sparse_pts_3d_ids.append(_pts_3d_id)
            self.points_3D_recon.append(self.pts_3d_query[_pts_3d_id].xyz)
            self.lines_3D_recon.append(self.pts_to_line[_pts_3d_id])
            self.id_to_ind_recon[_pts_3d_id] = i
            self.ind_to_id_recon[i] = _pts_3d_id
        self.points_3D_recon = np.array(self.points_3D_recon)
        self.lines_3D_recon = np.array(self.lines_3D_recon)
        ref_iter = variable.REFINE_ITER
        if estimator=='SPF':
            # No swap
            ests = calculate.coarse_est_spf(self.points_3D_recon, self.lines_3D_recon)
            ests_pts = calculate.refine_est_spf(self.points_3D_recon, self.lines_3D_recon, ests, ref_iter)
            info = [sparsity_level, noise_level, 0, estimator]
            super().saveReconpoints(ests_pts, info)
        if estimator=='TPF':
            # TPF needs point pairs that the OLC representation does not keep.
            print("OLC should't be estimated with TPF")
            pass
    def reconTest(self,estimator):
        # Consistency check: recovered points against the ground-truth map.
        recontest.recontest_pt_idx([self.points_3D_recon],[self.ind_to_id_recon],self.pts_3d_query)
    def test(self,recover,esttype):
        # recon test
        print("Consistency test for",self.map_type)
        if recover:
            self.reconTest(esttype)
| Fusroda-h/ppl | domain/olc.py | olc.py | py | 5,682 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "static.variable.RANDOM_SEED",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "... |
74128409788 | from collections import deque
def find_correct(string):
    """Return 1 if `string` is a balanced sequence of []{}() brackets, else 0.

    Any character that cannot continue a valid sequence (wrong closer, closer
    on an empty stack, or a non-bracket character) immediately yields 0; the
    empty string counts as balanced.
    """
    # Map each closer to its opener; one table instead of three duplicated
    # branches, and no O(n) `in stack` membership scans per character.
    pairs = {"]": "[", "}": "{", ")": "("}
    stack = []
    for c in string:
        if c in "[{(":
            stack.append(c)
        elif stack and stack[-1] == pairs.get(c):
            stack.pop()
        else:
            # Mismatched closer, empty stack, or unknown character.
            return 0
    return 1 if not stack else 0
def solution(s):
    """Count how many rotations of s form a balanced bracket sequence."""
    total = 0
    # Left-rotating by `shift` is the slice s[shift:] + s[:shift]; shifts
    # 1..len(s) cover the same rotations as the original deque approach.
    for shift in range(1, len(s) + 1):
        rotated = s[shift:] + s[:shift]
        total += find_correct(rotated)
    return total
| Dayeon1351/TIL | programmers/level2/괄호회전하기/solution.py | solution.py | py | 781 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 23,
"usage_type": "call"
}
] |
9007439548 | """
Week 2 - Data mining
By Christopher Diaz Montoya
"""
# Problem 1!!
# Numbers in [1000, 2000) that are multiples of 11 but not of 3.
store = [str(a) for a in range(1000, 2000) if a % 11 == 0 and a % 3 != 0]
# Unpacking the list prints the values comma-separated, without brackets.
# Learnt the print technique from the website below
# https://www.kite.com/python/answers/how-to-print-a-list-without-brackets-in-python#:~:text=Use%20*%20to%20print%20a%20list,set%20sep%20to%20%22%2C%20%22%20.
print(*store, sep=", ")
# Problem 2!!
# Count upper- and lower-case letters in a user-supplied sentence; any other
# character (spaces, digits, punctuation) is simply not counted.
print("Please input a sentance: ") # Prompt shown to the user
sentance = input()
# Above line assigns the input to the variable called sentance
# Below 2 lines are the counters for upper and lower case letters
UpperCase = 0
LowerCase = 0
# Check each character of the string by index
for char in range(len(sentance)):
    # 'A'..'Z' and 'a'..'z' comparisons classify the character; anything
    # outside both ranges falls through uncounted.
    if(sentance[char]>='A' and sentance[char]<='Z'):
        UpperCase += 1
    elif(sentance[char]>='a' and sentance[char]<='z'):
        LowerCase += 1 # Add 1 to counter
print('Upper case = ', UpperCase)
print('Lower case = ', LowerCase)
# The comma in print outputs the label string and the integer counter
# together; since only the alphabets are matched, spaces are never counted.
# Problem 3!!
# A small helper that converts a value to a string and shows the result.
def NumToWord(a):
    """Cast *a* to its string form, then print the value and its type."""
    text = str(a)
    print(text)
    print(type(text))
# int() ensures the input value is an integer before it is converted back;
# a non-numeric input raises ValueError here.
num = int(input("Please enter a number: "))
NumToWord(num) # Calls the function and passes the input "num" into it.
# Problem 4!!
import itertools # Cartesian products without hand-written nested loops
# Word pools, kept in (subject, verb, object) order.
subject = ["I", "You"]
verb = ["Read", "Borrow"]
ob = ["Shakerpeare's plays", "Shakespeare's poems"]
# itertools.product yields every ordered (subject, verb, object) combination;
# materialising it as a list prints all tuples on one line.
sentence_combinations = list(itertools.product(subject, verb, ob))
print(sentence_combinations)
# https://www.codegrepper.com/code-examples/python/how+to+find+all+combinations+of+a+list+python
# Problem 5!! Part 1
import matplotlib.pyplot as plt # imported and given a shorter name
x, y = [1, 2, 3], [2, 4, 1] # Assigning values to varibles x and y
plt.xlabel("X axis", fontsize = 15) # Prints x label and size
plt.ylabel("Y axis", fontsize = 15) # Prints y label and size
plt.title("My first graph", fontsize = 20) # Prints title
# Learnt how to change size and label names from
# https://stackoverflow.com/questions/12444716/how-do-i-set-the-figure-title-and-axes-labels-font-size-in-matplotlib
# Some of above learnt from lectures and exra study help from uni.
# This plots the points on the graph
plt.plot(x, y)
plt.show() # This shows the graph
# Part 2
X = [] # Created empty lists to store values read from document
Y = []
a = open("test.txt", "r") # a is a variable which are the contents
for row in a: # Loops all rows in the txt file
row = row.split(" ") # splits numbers in file when it reads a space
X.append(row[0]) # First nunber is added to X
Y.append(int(row[1])) # Second number is added to Y
plt.xlabel("X axis", fontsize = 15) # Prints x label
plt.ylabel("Y axis", fontsize = 15) # Prints y label
plt.title("My second graph", fontsize = 20) # Prints title
plt.plot(X, Y) # This plots the points on the graph
plt.show() # This shows the graph
#https://www.geeksforgeeks.org/python-create-graph-from-text-file/
# Problem 6!!
# below importing relevant libraries
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv ("train.csv")
# Above imports and reads the data set
df.info() # Did this to see how many columns there are along with what data
# types are in the data set which are 3, along with being able to see which
# columns have missing dat
df["Loan_Status"].value_counts(normalize=True).plot.bar()
# Used to see the column which shows how many people got approved in a barchart
catColumns = ["Gender", "Married", "Dependents", "Education", "Self_\
Employed", "Property_Area", "Credit_History"]
for x in catColumns: # Loops over all data in each column
# Crosstab checks against another group of data I want to analyse against,
# in this case Loan_Status https://pbpython.com/pandas-crosstab.html against
# all the columns in Columns
y = pd.crosstab(df["Loan_Status"], df[x], normalize = "columns")
# https://www.journaldev.com/45109/normalize-data-in-python taught me how
# to normalize data and https://machinelearningmastery.com/rescaling-data-for-machine-learning-in-python-with-scikit-learn/#:~:text=Normalization%20refers%20to%20rescaling%20real,preparation%20of%20coefficients%20in%20regression.
# taught me what it does, makes all values inbetween 0 and 1.
print(y) # Prints output
y.plot(kind = "bar") # Plots bar chart for each column
df.boxplot(column = "ApplicantIncome", by = "Education") # Wanted to see the
# correlation between graduate income and non graduate income
numColumns = ["ApplicantIncome", "CoapplicantIncome", "LoanAmount", "Loan_Amount_Term"]
# I did above as I wanted to check if graduates earned more than non graduates
# Learnt this in lectue slides
for z in numColumns: # For loop to make a graph for each column
# for each loop until every column in numColumns has a graph
# shows column in numColumns against Loan_status
result = df.boxplot(column = z, by = "Loan_Status") # Plots graph
plt.show(result) # Shows graphs
# The graphs used in the abov loop were learnt from the lecture slides
| diaz080800/Python-programming | Week 2/Week2.py | Week2.py | py | 6,360 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.product",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotl... |
15802748190 | from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
# URL patterns mounted under the /api/ prefix (see urlpatterns below).
api_patterns = [
    # Browsable API documentation.
    url(r'^docs/', include_docs_urls(title='Documentation')),
    # Catch-all: delegate everything else to the youtube_download app.
    url(r'^', include(('my_website.apps.youtube_download.urls', 'youtube_download'), namespace='youtube_download')),
]
# Top-level routing table: auth endpoints, allauth, the API, and the admin.
urlpatterns = [
    url(r'^api/auth/', include('django.contrib.auth.urls')),
    url(r'^api/rest-auth/', include(('rest_auth.urls', 'youtube_download'), namespace='rest_auth')),
    url(r'^api/rest-auth/registration/', include(('rest_auth.registration.urls', 'youtube_download'), namespace='rest_auth_registration')),
    # Allauth
    url(r'^api/accounts/', include('allauth.urls')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api/', include((api_patterns, 'youtube_download'), namespace='api')),
    url(r'^api/admin/', admin.site.urls),
]
| zsoman/my-website | my_website/urls.py | urls.py | py | 902 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework.documentation.include_docs_urls",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
... |
23915032189 | from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from apps.data.models import Entry
from apps.data.forms import DataForm
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseNotFound
@login_required
def add(request):
    """Create a new Entry from a submitted form and render the create page.

    BUG FIX: the original referenced an undefined ``AddBookForm`` (NameError
    on every request) and read non-existent ``request.title`` /
    ``request.text`` attributes.  It now uses the imported ``DataForm`` and
    lets ``form.save()`` persist the validated fields itself.
    """
    if request.method == 'POST':
        form = DataForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
    else:
        form = DataForm()
    ctx = {
        'form': form
    }
    return render_to_response('data/create.html', ctx, context_instance=RequestContext(request))
@login_required
def entry(request, id):
    """Render the detail page for the Entry with the given primary key."""
    record = Entry.objects.get(id=id)
    return render_to_response(
        'data/entry.html',
        {'entrie': record},
        context_instance=RequestContext(request),
    )
@login_required
def create(request):
    """Show the Entry creation form and save it when a valid POST arrives.

    BUG FIX: removed the unused ``a = Entry()`` — it instantiated a model
    object that was never saved or referenced.
    """
    form = DataForm(request.POST or None)
    if form.is_valid():
        form.save()
    ctx = {
        'form': form
    }
    return render_to_response('data/create.html', ctx, context_instance=RequestContext(request))
# Create your views here.
| msakhnik/just-read | apps/data/views.py | views.py | py | 1,497 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 11,
"us... |
33272837414 | import concurrent.futures
import timeit
import matplotlib.pyplot as plt
import numpy
from controller import Controller
def mainUtil():
    """Run 50 independent gradient-descent trainings and collect their test errors."""
    errors = []
    for _ in range(50):
        ctrl = Controller(300)
        ctrl.GradientDescendAlgorithm(0.000006, 1000)
        errors.append(ctrl.testWhatYouHaveDone())
    return errors
if __name__ == '__main__':
    start = timeit.default_timer()
    # Fan out 5 batches of 50 runs each (250 total).
    # NOTE(review): these are threads, so the batches only truly overlap if
    # Controller releases the GIL (e.g. via numpy) — confirm before relying
    # on the speed-up.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        print('This takes anywhere from 5 to 12 minutes to run (depending on how powerful your machine is).\n\tGo grab some popcorn')
        Q1 = executor.submit(mainUtil)
        Q2 = executor.submit(mainUtil)
        Q3 = executor.submit(mainUtil)
        Q4 = executor.submit(mainUtil)
        Q5 = executor.submit(mainUtil)
        # Concatenate the 5 result lists into one list of 250 errors.
        errorList = Q1.result() + Q2.result() + Q3.result() + Q4.result() + Q5.result()
        print('In 250 runs you have achieved:')
        print('\tMaximum Error: ', max(errorList))
        print('\tMinimum Error: ', min(errorList))
        print('\tAverage Error: ', numpy.average(errorList))
        plt.plot(errorList, 'ro')
        plt.show()
    end = timeit.default_timer()
    print('Time: ', (end - start) / 60)
{
"api_name": "controller.Controller",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures.ThreadPoolExecutor",
"line_number": 21,
"usage_type": "call"
},
... |
27614632298 | # coding:utf-8
from appium import webdriver
class Werdriver:
    """Factory for an Appium Android WebDriver session.

    NOTE(review): the class name looks like a typo for "Webdriver" but is
    kept unchanged for backward compatibility with existing callers.
    """

    def get_driver(self):
        """Create and return a remote Appium driver for the configured device."""
        desired_caps = {
            "platformName": "Android",
            "deviceName": "PBV0216922007470",
            "app": "/Users/luyunpeng/Downloads/ci_v1.5.0_2019-07-18_16-35_qa.apk",
            "noReset": "true",
        }
        return webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
if __name__ == '__main__':
test = Werdriver()
test.get_driver()
| lyp0129/study_appium | get_driver/test_driver.py | test_driver.py | py | 497 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "appium.webdriver.Remote",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 17,
"usage_type": "name"
}
] |
45364250336 | import pygame
import solveModuleNotFoundError
from Game import *
from Game.Scenes import *
from Game.Shared import *
class Breakout:
    """Top-level game object: owns the pygame window, level, pad, balls,
    scenes and sounds, and runs the main loop via :meth:`start`."""
    def __init__(self):
        # Gameplay state.
        self.__lives = 5
        self.__score = 0
        self.__level = Level(self)
        self.__level.load(0)
        # Pad starts centred horizontally at the bottom of the screen.
        self.__pad = Pad((GameConstant.SCREEN_SIZE[0]/2,GameConstant.SCREEN_SIZE[1] - GameConstant.PAD_SIZE[1]),pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_PAD) , GameConstant.PAD_SIZE))
        self.__balls = [
            Ball((400,400) , pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_BALL) , GameConstant.BALL_SIZE) ,self)
        ]
        # pygame subsystem setup (display, mixer, hidden mouse cursor).
        pygame.init()
        pygame.mixer.init()
        pygame.display.set_caption("Brick Breaker")
        pygame.mouse.set_visible(0)
        self.__clock = pygame.time.Clock()
        self.screen = pygame.display.set_mode(GameConstant.SCREEN_SIZE , pygame.DOUBLEBUF, 32)
        self.bg = pygame.transform.scale(pygame.image.load(GameConstant.BG).convert_alpha() , GameConstant.SCREEN_SIZE)
        # Scene tuple is indexed by self.__currentScene; 3 == menu scene.
        self.__scenes = (
            PlayingGameScene(self),
            GameOverScene(self),
            HighscoreScene(self),
            MenuScene(self)
        )
        self.__currentScene = 3
        # Sound effects, indexed by the soundClip argument of playSound().
        self.__sounds = (
            pygame.mixer.Sound(GameConstant.SOUND_FILE_GAMEOVER),
            pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK),
            pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK_LIFE),
            pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_BRICK_SPEED),
            pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_WALL),
            pygame.mixer.Sound(GameConstant.SOUND_FILE_HIT_PAD)
        )
    def start(self):
        """Run the main loop forever: tick at 60 FPS, draw background,
        delegate events/rendering to the current scene."""
        while 1:
            self.__clock.tick(60)
            self.screen.fill((0,0,0))
            self.screen.blit( self.bg, (0,0))
            currentScene = self.__scenes[self.__currentScene]
            currentScene.handleEvents(pygame.event.get())
            currentScene.render()
            pygame.display.update()
    def changeScene(self , scene):
        # Switch the active scene by index into self.__scenes.
        self.__currentScene = scene
    def getLevel(self):
        return self.__level
    def getLives(self):
        return self.__lives
    def getScore(self):
        return self.__score
    def getBalls(self):
        return self.__balls
    def getPad(self):
        return self.__pad
    def playSound(self, soundClip):
        """Play the effect at index *soundClip*, restarting it if running."""
        sound = self.__sounds[soundClip]
        sound.stop()
        sound.play()
    def increaseScore(self , score):
        self.__score += score
    def increaseLives(self):
        self.__lives += 1
    def reduceLives(self):
        self.__lives -= 1
    def reset(self):
        """Restore score/lives and reload the first level for a new game."""
        self.__score = 0
        self.__lives = 5
        self.__level.reset()
        self.__level.load(0)
Breakout().start()
| grapeJUICE1/Grape-Bricks | Game/Breakout.py | Breakout.py | py | 2,928 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "pygame.transform.scale",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame... |
74227616828 | from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_news_sources,get_allArticles,get_headlines
from ..models import Sources, Articles
#views
@main.route('/')
def index():
    """Render the landing page with news sources grouped by category."""
    categories = ['business', 'sports', 'technology', 'entertainment']
    sources = {name: get_news_sources(name) for name in categories}
    return render_template(
        'index.html',
        title="Breaking News",
        business_sources=sources['business'],
        sports_sources=sources['sports'],
        technology_sources=sources['technology'],
        entertainment_sources=sources['entertainment'],
    )
# @main.route('/articles')
# def articles():
# '''
# view article page
# '''
# articles = get_allArticles(id)
# return render_template("articles.html", id = id, articles = articles)
@main.route('/articles/<int:id>')
def articles(id):
    """Display the articles published by the source with the given id."""
    source_articles = get_allArticles(id)
    return render_template("articles.html", id=id, articles=source_articles)
@main.route('/headlines')
def headlines():
    """Render the top-headlines page."""
    headline_id = get_headlines('id')
    headline_name = get_headlines('name')
    return render_template(
        'headlines.html',
        title='Top Headlines',
        headline_id=headline_id,
        headline_name=headline_name,
    )
| chanaiagwata/News_API | app/main/views.py | views.py | py | 1,612 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "request.get_news_sources",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "request.get_news_sources",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "request.get_news_sources",
"line_number": 15,
"usage_type": "call"
},
{
"api_na... |
25508679765 | #!/usr/bin/env python3
import shutil
import psutil
import socket
import report_email
import time
import os
def check_disk_usage(disk):
    """Return True when more than 20% of *disk*'s capacity is free."""
    usage = shutil.disk_usage(disk)
    percent_free = (usage.free / usage.total) * 100
    return percent_free > 20
def check_cpu_usage():
    """Return True when CPU utilisation, sampled over one second, is below 80%."""
    return psutil.cpu_percent(1) < 80
def check_memory():
    """Return True when more than 500 MB of virtual memory is available.

    Index 1 of psutil.virtual_memory() is the 'available' field, in bytes.
    """
    available_mb = psutil.virtual_memory()[1] / 10**6
    return available_mb > 500
def check_localhost():
    """Return True when 'localhost' resolves to the loopback address 127.0.0.1.

    Simplified from an if/else returning True/False literals (plus a leftover
    debug print) to a single boolean expression — behaviour is identical.
    """
    return socket.gethostbyname('localhost') == '127.0.0.1'
def alert(error):
    """Email *error* as the subject of a warning message to the current user."""
    sender = "automation@example.com"
    receiver = "{}@example.com".format(os.environ.get('USER'))
    body = "Please check your system and resolve the issue as soon as possible."
    # Empty last argument is the attachment path (none for alerts).
    message = report_email.generate(sender, receiver, error, body, '')
    report_email.send(message)
def main():
    """Poll system health every 60 seconds, emailing one alert per failed check.

    Runs forever until the process is killed.
    """
    while True:
        if not check_disk_usage('/'):
            alert('Error - Available disk space is less than 20%')
        if not check_cpu_usage():
            alert('Error - CPU usage is over 80%')
        if not check_memory():
            alert('Error - Available memory is less than 500MB')
        if not check_localhost():
            alert('Error - localhost cannot be resolved to 127.0.0.1')
        time.sleep(60)
if __name__ == "__main__":
main()
| paesgus/AutomationTI_finalproject | health_check.py | health_check.py | py | 1,330 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "shutil.disk_usage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "psutil.virtual_memory",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "socket.get... |
37379213866 | #影像命名:县(0表示西秀,1表示剑河县)_序号(在points列表中的序号,从0开始)_同一位置的序号(同一位置可能有多张,标个序号,从0开始)_年份(2021之类的)_img
#施工标签命名:县(0表示西秀,1表示剑河县)_序号(在points列表中的序号,从0开始)_年份(2021之类的)_conslabel
#分类标签命名:县(0表示西秀,1表示剑河县)_序号(在points列表中的序号,从0开始)_2021_classlabel
from osgeo import gdal,osr
import pickle
import os
import numpy
def getSRSPair(dataset):
    """Return (projected SRS, geographic SRS) for a GDAL dataset."""
    projected = osr.SpatialReference()
    projected.ImportFromWkt(dataset.GetProjection())
    return projected, projected.CloneGeogCS()
def lonlat2geo(dataset, lon, lat):
    """Project a geographic (lon, lat) coordinate into the dataset's projected CRS,
    returning the first two components (x, y) of the transformed point."""
    prosrs, geosrs = getSRSPair(dataset)
    transform = osr.CoordinateTransformation(geosrs, prosrs)
    return transform.TransformPoint(lon, lat)[:2]
def geo2imagexy(dataset, x, y):
    """Convert projected coordinates (x, y) to integer (col, row) pixel indices
    by inverting the dataset's affine geotransform."""
    gt = dataset.GetGeoTransform()
    coeffs = numpy.array([[gt[1], gt[2]], [gt[4], gt[5]]])
    offsets = numpy.array([x - gt[0], y - gt[3]])
    col, row = numpy.linalg.solve(coeffs, offsets)
    return int(col), int(row)
def getImageBound(dataset):
    """Return the dataset's extent in projected units as [minx, maxx, miny, maxy]."""
    gt = dataset.GetGeoTransform()
    x_edge = gt[0] + gt[1] * dataset.RasterXSize
    y_edge = gt[3] + gt[5] * dataset.RasterYSize
    return [min(gt[0], x_edge), max(gt[0], x_edge),
            min(gt[3], y_edge), max(gt[3], y_edge)]
def getAllImage_tif(path):
    """List the file names in *path* whose (case-sensitive) extension is 'tif'."""
    return [name for name in os.listdir(path) if name.split('.')[-1] == 'tif']
def contain(bound, point):
    """Return True when *point* (x, y) lies strictly inside *bound*
    ([minx, maxx, miny, maxy]); boundary points count as outside."""
    x, y = point
    return (bound[0] < x < bound[1]) and (bound[2] < y < bound[3])
def clip_label_cons(county,year,path_pkl,path_src,path_dst,size=2048):
    """Clip size x size construction-label tiles around pickled lon/lat points.

    county: 0 -> 'xixiu' points, otherwise 'jianhe' (per the pickle's keys).
    Output files are named <county>_<index>_<year>_conslabel.tif in path_dst.
    NOTE(review): near-identical to clip_label_class below except for the
    output suffix — consider factoring into one helper.
    """
    dataset=gdal.Open(path_src)
    bound=getImageBound(dataset)
    with open(path_pkl,'rb') as f:
        points_dict=pickle.load(f)
    if county==0:
        points=points_dict['xixiu']
    else:
        points=points_dict['jianhe']
    for j,point in enumerate(points,0):
        # Project the lon/lat point into the raster's CRS.
        x,y=lonlat2geo(dataset,point[0],point[1])
        if contain(bound,(x,y)):
            p=geo2imagexy(dataset,x,y)
            # Skip points whose tile would extend past the raster edge.
            if p[0]+size>dataset.RasterXSize or p[1]+size>dataset.RasterYSize:
                continue
            clip_image=dataset.ReadAsArray(p[0],p[1],size,size)
            clip_image_path=path_dst+'\\'+str(county)+'_'+str(j)+'_'+str(year)+'_conslabel.tif'
            clip_image_driver=gdal.GetDriverByName('GTiff')
            clip_image_dataset=clip_image_driver.Create(clip_image_path,size,size,1,gdal.GDT_Float32)
            # New tile keeps the source pixel size, anchored at (x, y).
            clip_image_dataset.SetGeoTransform((x,dataset.GetGeoTransform()[1],0,y,0,dataset.GetGeoTransform()[5]))
            clip_image_dataset.SetProjection(dataset.GetProjection())
            clip_image_dataset.GetRasterBand(1).WriteArray(clip_image)
            clip_image_dataset.FlushCache()
            clip_image_dataset=None
def clip_label_class(county,year,path_pkl,path_src,path_dst,size=2048):
    """Clip size x size classification-label tiles around pickled lon/lat points.

    Identical to clip_label_cons except the output suffix is '_classlabel.tif'.
    """
    dataset=gdal.Open(path_src)
    bound=getImageBound(dataset)
    with open(path_pkl,'rb') as f:
        points_dict=pickle.load(f)
    if county==0:
        points=points_dict['xixiu']
    else:
        points=points_dict['jianhe']
    for j,point in enumerate(points,0):
        # Project the lon/lat point into the raster's CRS.
        x,y=lonlat2geo(dataset,point[0],point[1])
        if contain(bound,(x,y)):
            p=geo2imagexy(dataset,x,y)
            # Skip points whose tile would extend past the raster edge.
            if p[0]+size>dataset.RasterXSize or p[1]+size>dataset.RasterYSize:
                continue
            clip_image=dataset.ReadAsArray(p[0],p[1],size,size)
            clip_image_path=path_dst+'\\'+str(county)+'_'+str(j)+'_'+str(year)+'_classlabel.tif'
            clip_image_driver=gdal.GetDriverByName('GTiff')
            clip_image_dataset=clip_image_driver.Create(clip_image_path,size,size,1,gdal.GDT_Float32)
            # New tile keeps the source pixel size, anchored at (x, y).
            clip_image_dataset.SetGeoTransform((x,dataset.GetGeoTransform()[1],0,y,0,dataset.GetGeoTransform()[5]))
            clip_image_dataset.SetProjection(dataset.GetProjection())
            clip_image_dataset.GetRasterBand(1).WriteArray(clip_image)
            clip_image_dataset.FlushCache()
            clip_image_dataset=None
if __name__=='__main__':
from matplotlib import pyplot
id_county=1
year=2021
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2021标签\label_2021.tif'
path_dst=r'H:\剑河县\2021标签裁剪\分类'
size=2048
clip_label_class(id_county,year,path_pkl,path_src,path_dst,size)
id_county=1
year=2021
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2021标签\label_2021_cons.tif'
path_dst=r'H:\剑河县\2021标签裁剪\施工'
size=2048
clip_label_cons(id_county,year,path_pkl,path_src,path_dst,size)
id_county=1
year=2020
path_pkl=r'K:\points.pkl'
path_src=r'H:\剑河县\2020标签\label_2020.tif'
path_dst=r'H:\剑河县\2020标签裁剪\施工'
size=2048
clip_label_cons(id_county,year,path_pkl,path_src,path_dst,size)
| faye0078/RS-ImgShp2Dataset | lee/clip_label.py | clip_label.py | py | 5,260 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "osgeo.osr.SpatialReference",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "osgeo.osr",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "osgeo.osr.CoordinateTransformation",
"line_number": 18,
"usage_type": "call"
},
{
"api_name"... |
1530038484 | import requests
import json
from bs4 import BeautifulSoup
def songwhip_it(url):
    """Scrape songwhip.com for the platform links of a streaming-service URL.

    NOTE(review): fragile scraping — assumes the JSON payload lives in the
    third <script> tag and that exactly one trailing character follows the
    JSON object; both will break if Songwhip changes its page. Verify.
    """
    html = requests.get('https://songwhip.com/'+url).content
    soup = BeautifulSoup(html, 'html.parser')
    links_text = list(soup.findAll('script'))[2].get_text()
    # Slice from the first '{' to the last-but-one char, then pull 'links'.
    links_json = json.loads(links_text[links_text.index('{'):-1])['links']
    return links_json
songwhip_it("https://open.spotify.com/track/4Aep3WGBQlpbKXkW7kfqcU")
| kartikye/q | linker.py | linker.py | py | 415 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
}
] |
28493662472 | ###### Librerias ######
import tkinter as tk
import Widgets as Wd
import Ecuaciones as Ec
import time as tm
import threading as hilos
import numpy as np
###### Modulos De Librerias ######
import tkinter.ttk as ttk
import tkinter.messagebox as MsB
import serial
import serial.tools.list_ports
import matplotlib.pyplot as plt
###### SubModulos De Librerias ######
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
board =serial.Serial(port='COM1', baudrate=9600)
tm.sleep(1)
board2 =serial.Serial(port='COM4', baudrate=9600)
tm.sleep(1)
def Show_Sliders(event): # Show the trajectory-planning sliders
    """Refit the x slider limits, restore the saved start point, and reveal
    the trajectory widgets (sliders plus start/end point labels)."""
    Alter_Sliders('T', Pl_x.get())
    Datos_Temp(0,0,0,1)
    # Wd.Aparecer(widgets, relative-x list, relative-y list) places each widget.
    Wd.Aparecer([Pl_x, Pl_y, Pl_z, P_xi, P_yi, P_zi, P_x, P_y, P_z, P_inicial, P_final],
        [1/16+0.025, 1/16+0.025, 1/16+0.025, 0, 0, 0, 1/16+0.01, 1/16+0.01, 1/16+0.01, 0, 2/16],
        [1/6, 3/6-0.07, 0.693, 2/6-0.02, 3/6+0.075, 0.836, 2/6-0.02, 3/6+0.075, 0.836, 1/6-0.02, 0])
def Show_Codo(Iden, Valor): # Show the elbow-configuration widgets
    """Reveal the elbow label and its dropdown selector."""
    Wd.Aparecer([T_Codo, Despl_Codo], [4/16+0.02, 4/16+0.01], [3/7+0.02, 4/6])
def Show_Perfiles(event): # Show the velocity-profile selection buttons
    """Reveal the quadratic and the two trapezoidal profile widgets."""
    Wd.Aparecer([Cuadratico, TrapezoidalI, TrapezoidalII], [6/16+0.04, 9/16+0.04, 12/16+0.04], [0, 0, 0])
def Show_Datos(): # Show the input-data sliders for the chosen profile type
    """Show/hide the velocity or acceleration inputs depending on Tipo:
    1 = quadratic (no extra inputs), 2 = trapezoidal with cruise velocity,
    3 = trapezoidal with cruise acceleration. Always shows tfinal/N inputs."""
    if Tipo.get()==1:
        Wd.Ocultar([Vj_1, Vj_2, Vj_3, Aj_1, Aj_2, Aj_3, TAc_1, TAc_2, TAc_3, TVc_1, TVc_2, TVc_3])
    if Tipo.get()==2:
        Wd.Aparecer([Vj_1, Vj_2, Vj_3, TVc_1, TVc_2, TVc_3],
            [9/16+0.04, 9/16+0.04, 9/16+0.04, 9/16+0.02, 9/16+0.02, 9/16+0.02],
            [1/8+0.01, 3/8+0.04, 5/8+0.08, 1/7+0.12, 3/7+0.12, 5/7+0.12])
        Wd.Ocultar([Aj_1, Aj_2, Aj_3, TAc_1, TAc_2, TAc_3])
        #Calculos.Perf_Trape(T_f.get(),N_p.get(),0,0,0,1)
    if Tipo.get()==3:
        Wd.Aparecer([Aj_1, Aj_2, Aj_3, TAc_1, TAc_2, TAc_3],
            [12/16+0.04, 12/16+0.04, 12/16+0.04, 12/16+0.02, 12/16+0.02, 12/16+0.02],
            [1/8+0.01, 3/8+0.04, 5/8+0.08, 1/7+0.12, 3/7+0.12, 5/7+0.12])
        Wd.Ocultar([Vj_1, Vj_2, Vj_3, TVc_1, TVc_2, TVc_3])
        #Calculos.Perf_Trape(T_f.get(),N_p.get(),0,0,0,2)
    # Common inputs: final time, number of points, their labels, Calculate button.
    Wd.Aparecer([T_f, N_p, TT_f, TN_p, Calcular_PT],
        [6/16+0.04, 6/16+0.04, 6/16+0.02, 6/16+0.02, 6/16+0.04],
        [1/8+0.01, 3/8+0.04, 1/7+0.012, 3/7+0.012, 6/8])
def Show_Graficas(Iden, Valor):
    # Placeholder callback: currently just echoes the widget value to stdout.
    print(Valor)
bands=0
bandr=0
def Datos_Temp(xtemp, ytemp, ztemp, RW): # Save/restore the start point for trajectories
    """Save (RW==0) or restore (RW==1) the last end-effector position of the
    currently selected manipulator; falls back to each robot's home position
    when nothing was saved yet (bands/bandr flags).

    BUG FIX: the saved temp_* coordinates were plain locals, so they were
    lost on return and reading them back in the RW==1 branch raised
    NameError — they must be module-level globals like bands/bandr.
    """
    global bands
    global bandr
    global temp_xs, temp_ys, temp_zs
    global temp_xr, temp_yr, temp_zr
    if RW==0:
        selection = Despl_Mani.get()
        if selection == "Scara (PRR)":
            temp_xs=xtemp
            temp_ys=ytemp
            temp_zs=ztemp
            bands=1
        else:
            temp_xr=xtemp
            temp_yr=ytemp
            temp_zr=ztemp
            bandr=1
    else:
        selection = Despl_Mani.get()
        if selection == "Scara (PRR)":
            if bands==1:
                P_xi.config(text=temp_xs)
                P_yi.config(text=temp_ys)
                P_zi.config(text=temp_zs)
            else:
                # SCARA home position (no point saved yet).
                P_xi.config(text=345.2)
                P_yi.config(text=0)
                P_zi.config(text=0)
        else:
            if bandr==1:
                P_xi.config(text=temp_xr)
                P_yi.config(text=temp_yr)
                P_zi.config(text=temp_zr)
            else:
                # Anthropomorphic home position (no point saved yet).
                P_xi.config(text=197)
                P_yi.config(text=0)
                P_zi.config(text=95.5)
def Alter_Sliders(Ident, Valor): # Switch slider limits between SCARA and anthropomorphic
    """Set the Pl_x (and Pl_z for SCARA) slider ranges for the selected
    manipulator and, when Ident requests it, recompute the dependent axis
    limits via Red_Slider."""
    if Despl_Mani.get() == "Scara (PRR)":
        # SCARA workspace limits.
        Pl_x['from_']=-131.5
        Pl_x['to']=375.5
        Pl_z['from_']=0
        Pl_z['to']=19
        if Ident == 'A1':
            Red_Slider(['S', 'T', Pl_y, Check_S_PL, 1/4-0.025, 1/3+0.22], Valor)
    else:
        # Anthropomorphic workspace limits.
        Pl_x['from_']=-197
        Pl_x['to']=197
        if Ident == 'A1':
            Red_Slider(['A1', 'T', Pl_y, Check_A_PL, 1/4-0.025, 2/3+0.15], Valor)
        elif Ident == 'A2':
            Red_Slider(['A2', 'T', Pl_z, Check_A_PL, 1/4-0.025, 2/3+0.15], Valor)
def Mensajes(Cual): # Select which pop-up instructions message to show
    """Show the instructions dialog for direct ('DK') or inverse ('IK')
    kinematics. The message text is user-facing Spanish and kept as-is."""
    if Cual=="DK":
        MsB.showinfo(
            "Instrucciones Cinemática Directa",
            """
            Sliders: Desplazar los slider para mover las
            articulaciones del brazo robótico en tiempo real
            para obtener las matrices individuales y total de
            la cinemática directa. \n
            Cuadro de Texto: Digitar el valor que se encuentre
            en el rango de funcionamiento del robot para mover
            las articulaciones del brazo robótico.
            Luego presionar el botón de envió y obtener las
            matrices individuales y total de la cinemática directa
            en tiempo real.
            """)
    elif Cual=="IK":
        MsB.showinfo(
            "Instrucciones Cinemática Inversa",
            """
            Deslizar cada slider para establecer la posición
            del efector final, dar click en el botón "Calcular"
            y finalizar seleccionando la configuración del codo
            a utilizar para mover el manipulador. \n
            Se debe tener en cuenta que las opciones de los codos
            únicamente están disponibles sí los valores calculados
            de las articulaciones no superan los límites mecánicos
            """)
def Color(Bandera, Boton, Identi):
    """Toggle a gripper button's colour and report the new state over serial.

    Bandera True -> closed (red, ',1'); False -> open (green, ',0').
    """
    if Bandera:
        colour, payload = "red4", b',1\n'
    else:
        colour, payload = "lime green", b',0\n'
    Boton["bg"] = colour
    board.write(Identi.encode() + payload)
def Gripper(Identi): # Open or close the selected gripper
    """Flip the stored open/closed state of gripper 'E' (SCARA), 'A'
    (anthropomorphic) or any other id ('R'), then update its button colour
    and notify the board via Color()."""
    global Estado_S
    global Estado_A
    global Estado_R
    if Identi=='E':
        Estado_S=not Estado_S
        Color(Estado_S,Gp_S,'E')
    elif Identi=='A':
        Estado_A=not Estado_A
        Color(Estado_A,Gp_A,'A')
    else:
        Estado_R=not Estado_R
        Color(Estado_R,Gp_R,'R')
def Red_Slider(Vec, Valor): # Redefine the inverse-kinematics slider limits
    """Recompute a coordinate slider's from_/to range from the workspace
    limits returned by the Ec module, showing/hiding the associated
    checkbox when an alternative range exists.

    Vec = [Ident, Pes, Slider, Check, PosX, PosY]:
      Ident : 'S' (SCARA y), 'A1' (anthropomorphic y), 'A2' (anthropomorphic z)
      Pes   : 'I' = inverse-kinematics tab, 'T' = trajectory tab
      Slider/Check : the tk widgets to reconfigure; PosX/PosY place the Check.
    Valor : the current value of the driving axis (x), passed to Ec.
    """
    Ident=Vec[0]
    if (Ident == 'S') or (Ident == 'A2') or (Ident =='A1'):
        Pes=Vec[1]
        Slider=Vec[2]
        Check=Vec[3]
        PosX=Vec[4]
        PosY=Vec[5]
    if Ident =='S': # Redefine the SCARA "Py_S" slider
        if Pes == 'I':
            Variable=Check_S_Valor
        elif Pes == 'T':
            Variable=Check_ST_Valor
            Check_A_PL.place_forget()
        LimitY_S=Ec.Limites_Y_S(Valor)
        Slider['from_']=str(LimitY_S[0])
        Slider['to']=str(LimitY_S[1])
        # LimitY_S[2] == 1 means a mirrored (negative) range is available.
        if LimitY_S[2] == 1 :
            Check.place(relx=PosX, rely=PosY)
        else:
            Check.place_forget()
        if Variable.get(): # Checkbox "-" selected: use the negated range
            Slider['from_']=str(float(-1)*LimitY_S[1])
            Slider['to']=str(float(-1)*LimitY_S[0])
    if Ident =='A1': # Redefine the anthropomorphic "Py_A" slider
        LimitY_A=Ec.Limites_Y_A(Valor)
        Slider['from_']=str(LimitY_A[1])
        Slider['to']=str(LimitY_A[0])
    if Ident =='A2': # Redefine the anthropomorphic "Pz_A" slider
        if Pes == 'I':
            LimitZ=Ec.Limites_Z_A(Px_A.get(), Py_A.get())
            Variable=Check_A_Valor
        elif Pes == 'T':
            LimitZ=Ec.Limites_Z_A(Pl_x.get(), Pl_y.get())
            Variable=Check_AT_Valor
            Check_S_PL.place_forget()
        # LimitZ[2] == 1 means upper and lower sub-ranges both exist.
        if LimitZ[2] == 1 :
            Check.place(relx=PosX, rely=PosY)
            if Variable.get(): # Checkbox "inf" selected: use the lower sub-range
                Slider['from_']=str(LimitZ[1][1])
                Slider['to']=str(LimitZ[1][0])
            else:
                Slider['from_']=str(LimitZ[0][1])
                Slider['to']=str(LimitZ[0][0])
        else:
            Check.place_forget()
            Slider['from_']=str(LimitZ[1])
            Slider['to']=str(LimitZ[0])
def Cambio(Ident): # React to a checkbox state change
    """Dispatch a checkbox change to Red_Slider with the widget set for that
    axis: 'S'/'A2' belong to the inverse-kinematics tab, 'ST'/'AT' to the
    trajectory tab."""
    if Ident == 'S':
        Red_Slider(['S', 'I', Py_S, Check_S, 3/16, 1/2+0.01], Px_S.get())
    elif Ident == 'A2':
        Red_Slider(['A2', 'I', Pz_A, Check_A, 3/16, 2/3+0.18], None)
    elif Ident == 'ST':
        Red_Slider(['S', 'T', Pl_y, Check_S_PL, 1/4-0.025, 1/3+0.22], Pl_x.get())
    elif Ident == 'AT':
        Red_Slider(['A2', 'T', Pl_z, Check_A_PL, 1/4-0.025, 2/3+0.15], None)
def Cine_Directa(Vector, Valor): # Compute and send direct kinematics from the sliders
    """Recompute the DH matrices for the robot addressed by Vector[0]
    (prefix 'E' = SCARA, 'A' = anthropomorphic, otherwise the 6-DOF arm),
    fill the matrix widgets, start the progress bar, and stream the joint
    command over both serial boards.

    Note: ``bool(Identi.find('X')) == False`` is true when 'X' occurs at
    index 0, i.e. it is a prefix test.
    """
    Identi=Vector[0]
    if (bool(Identi.find('E')))==False:
        Matriz=Ec.Parametros(1, Qs1_S.get(), Qs2_S.get(), Qs3_S.get(), None, None, None)
        Wd.Llenado(Matriz, 1, 4)
    elif (bool(Identi.find('A')))==False:
        Matriz=Ec.Parametros(2, Qs1_A.get(), Qs2_A.get(), Qs3_A.get(), None, None, None)
        Wd.Llenado(Matriz, 5, 8)
    else:
        Matriz=Ec.Parametros(3, Qs1_R.get(), Qs2_R.get(), Qs3_R.get(), Qs4_R.get(), Qs5_R.get(), Qs6_R.get())
        Wd.Llenado(Matriz, 9, 15)
    hilos.Thread(target=Wd.Barra.Carga, args=(Vector[1],)).start()
    board.write(Identi.encode()+b','+ Valor.encode()+b'\n')
    # NOTE(review): debug print of the serial payload — consider removing.
    print(Identi.encode()+b','+ Valor.encode()+b'\n')
    board2.write(Identi.encode()+b','+ Valor.encode()+b'\r\n')
def Cajas_DK(Vector): # "Send" button: direct kinematics from the text boxes
    """Send the typed joint values over serial (via Enviar) and refresh the
    direct-kinematics matrix widgets for the addressed robot.

    Vector = [identifier list, entry-widget list, progress-bar widget];
    the prefix of the first identifier selects the robot (see Cine_Directa).
    """
    Identi=Vector[0]
    Valor=Vector[1]
    Enviar(Vector)
    if (bool(Identi[0].find('E')))==False:
        Matriz=Ec.Parametros(1, float(Valor[0].get()), float(Valor[1].get()), float(Valor[2].get()), None, None, None)
        Wd.Llenado(Matriz, 1, 4)
    elif (bool(Identi[0].find('A')))==False:
        Matriz=Ec.Parametros(2, float(Valor[0].get()), float(Valor[1].get()), float(Valor[2].get()), None, None, None)
        Wd.Llenado(Matriz, 5, 8)
    else:
        Matriz=Ec.Parametros(3, float(Valor[0].get()), float(Valor[1].get()), float(Valor[2].get()), float(Valor[3].get()), float(Valor[4].get()), float(Valor[5].get()))
        Wd.Llenado(Matriz, 9, 15)
def Cine_Inversa(Vector): # Compute inverse kinematics for the selected arm
    """Solve the inverse kinematics for the SCARA ('S') or anthropomorphic
    ('A') arm from the position sliders, fill the joint-value widgets for
    both elbow-down and elbow-up solutions, and hide the elbow buttons
    whose solution exceeds the mechanical limits (Vec_IK[5]/[6] flags).

    Vector = [Identi, Codos] where Codos is the pair of elbow buttons.
    """
    Identi=Vector[0]
    Codos=Vector[1]
    if Identi=='S':
        Vec_IK=Ec.Calculo_Inversa(1, float(Px_S.get()), float(Py_S.get()), float(Pz_S.get()))
        Codos[0].Ubicacion(1/2,1/2,tk.N)
        Codos[1].Ubicacion(2/3, 1/2, tk.N)
        # Insert the joint values into the UI (elbow down and elbow up).
        q1_S.set(str(int(Vec_IK[0]/10)))
        q2_S_D.set(str(int(Vec_IK[1])))
        q3_S_D.set(str(int(Vec_IK[2])))
        q2_S_U.set(str(int(Vec_IK[3])))
        q3_S_U.set(str(int(Vec_IK[4])))
    elif Identi=='A':
        Vec_IK=Ec.Calculo_Inversa(2, float(Px_A.get()), float(Py_A.get()), float(Pz_A.get()))
        Codos[0].Ubicacion(1/2, 1/2, tk.N)
        Codos[1].Ubicacion(2/3, 1/2, tk.N)
        # Normalise the base angle into [0, 360).
        if Vec_IK[0]<(-1):
            Vec_IK[0]=360+Vec_IK[0]
        # Insert the joint values into the UI (elbow down and elbow up).
        q1_A.set(str(int(Vec_IK[0])))
        q2_A_D.set(str(int(Vec_IK[1])))
        q3_A_D.set(str(int(Vec_IK[2])))
        q2_A_U.set(str(int(Vec_IK[3])))
        q3_A_U.set(str(int(Vec_IK[4])))
    # Disable the send buttons for out-of-limit solutions.
    if Vec_IK[5] or Vec_IK[6]: # elbow-up / elbow-down out-of-limit flags
        if Vec_IK[6] == 1:# elbow-down flag
            Codos[0].place_forget()
        if Vec_IK[5] == 1:# elbow-up flag
            Codos[1].place_forget()
        MsB.showwarning("Advertencia Selección Codo","""
        Una o ambas soluciones supera los limites mecanicos.
        Varie el valor del punto
        """)
def Enviar(Vector): # Send the data over both serial boards
    """Write each identifier+value pair to both boards, starting the
    progress bar for every item and pausing 3 s between sends.

    Vector = [identifier list, entry-widget list, progress-bar widget].
    """
    Identi=Vector[0]
    Valor=Vector[1]
    for i in range (0,len(Identi)):
        hilos.Thread(target=Wd.Barra.Carga, args=(Vector[2],)).start()
        board.write(Identi[i].encode()+Valor[i].get().encode()+b'\n')
        board2.write(Identi[i].encode()+Valor[i].get().encode()+b'\r\n')
        tm.sleep(3)
def Jacobians(Barra): # Display the Jacobian matrices
    """Compute the Jacobian pair for the SCARA and anthropomorphic arms at
    the current slider joint values, start the progress bar *Barra*, and
    fill the four Jacobian widgets."""
    j_S=Ec.Jacobianos(1, Qs1_S.get(), Qs2_S.get(), Qs3_S.get())
    j_A=Ec.Jacobianos(2, Qs1_A.get(), Qs2_A.get(), Qs3_A.get())
    Matriz=[j_S[0], j_S[1], j_A[0], j_A[1]]
    hilos.Thread(target=Wd.Barra.Carga, args=(Barra,)).start()
    Wd.Llenado_Jaco(Matriz, 1, 4)
def elec_manipulador():
    """Return 1 when the SCARA (PRR) arm is selected, 2 for the other arm."""
    return 1 if Despl_Mani.get() == "Scara (PRR)" else 2
def elec_codo():
    """Return 1 for "Codo Abajo" (elbow down), 2 for any other selection."""
    return 1 if Despl_Codo.get() == "Codo Abajo" else 2
def plot_3d(pos_final_x, pos_final_y, pos_final_z):
    """Open a Tk window with an embedded matplotlib 3D plot of the
    end-effector path given by the three coordinate sequences.
    Blocks in tk.mainloop() until the window is closed."""
    root_3d = tk.Tk()
    root_3d.wm_title("Plot 3D Efector Final")
    fig = Figure(figsize=(5, 5), dpi=100)
    canvas = FigureCanvasTkAgg(fig, master=root_3d)
    canvas.draw()
    ax = fig.add_subplot(111, projection="3d")
    ax.plot(pos_final_x, pos_final_y, pos_final_z)
    # Standard matplotlib navigation toolbar below the plot.
    toolbar = NavigationToolbar2Tk(canvas, root_3d)
    toolbar.update()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    tk.mainloop()
def Envio_Pl(Vectores_1, Vectores_2, Vectores_3):
paso=(T_f.get()/(N_p.get()))
if Despl_Mani.get() == "Scara (PRR)":
board.write(b'Eb,'+"{:.4f}".format(int(Vectores_1[-1])).encode()+b'\n')
board2.write(b'Eb,'+str(int(Vectores_1[-1])).encode()+b'\r\n')
Time_Prisma=int(Vectores_1[-1])*(1.8)
tm.sleep (Time_Prisma)
Restante=T_f.get()-Time_Prisma
paso=(Restante/N_p.get())
for i in range(0,int(N_p.get())):
board.write(b'Ebr,'+"{:.4f}".format(int(Vectores_2[i])).encode()+b'\n')
board2.write(b'Ebr,'+str(int(Vectores_2[i])).encode()+b'\r\n')
tm.sleep(paso/2)
board.write(b'Eab,'+"{:.4f}".format(int(Vectores_3[i])).encode()+b'\n')
board2.write(b'Eab,'+str(int(Vectores_3[i])).encode()+b'\r\n')
tm.sleep(paso/2)
else:
for i in range(0,int(N_p.get())):
board.write(b'Ab,'+"{:.4f}".format(int(Vectores_1[i])).encode()+b'\n')
board2.write(b'Ab,'+str(int(Vectores_1[i])).encode()+b'\r\n')
tm.sleep(paso/3)
board.write(b'Abr,'+"{:.4f}".format(int(Vectores_2[i])).encode()+b'\n')
board2.write(b'Abr,'+str(int(Vectores_2[i])).encode()+b'\r\n')
tm.sleep(paso/3)
board.write(b'Aab,'+"{:.4f}".format(int(Vectores_3[i])).encode()+b'\n')
board2.write(b'Aab,'+str(int(Vectores_3[i])).encode()+b'\r\n')
tm.sleep(paso/3)
def But_Perfiles(Ident):#Funcion Para Calcular La Generación de Trayectorias
mani=elec_manipulador()
codo=elec_codo()
xini=float(P_xi.cget("text"))
yini=float(P_yi.cget("text"))
zini=float(P_zi.cget("text"))
xfin=float(Pl_x.get())
yfin=float(Pl_y.get())
zfin=float(Pl_z.get())
tip=Tipo.get()
tfin=T_f.get()
resolucion=N_p.get()
if tip == 2:
variable=[Vj_1.get(), Vj_2.get(), Vj_3.get()]
elif tip == 3:
variable=[Aj_1.get(), Aj_2.get(), Aj_3.get()]
else:
variable=[0, 0, 0]
Vectores=Wd.Perfil(tip, mani, codo, tfin, xini, yini, zini, xfin, yfin, zfin, resolucion, variable)
if Vectores[0] == 1: # vel
MsB.showwarning(title="error", message="La magnitud de la velocidad supera la condición. \n Varie el los valores de la velocidad crucero ")
for i in range (0, 3):
if Vectores[2]==0:
Vj_1["from_"]=(Vectores[1])+0.1
Vj_1["to"]=(Vectores[1]*2)-0.1
if Vectores[2]==1:
Vj_2["from_"]=(Vectores[1])+0.1
Vj_2["to"]=(Vectores[1]*2)-0.1
if Vectores[2]==2:
Vj_3["from_"]=(Vectores[1])+0.1
Vj_3["to"]=(Vectores[1]*2)-0.1
Vectores=Wd.Perfil(Tipo.get(), elec_manipulador(), elec_codo(), T_f.get(), float(P_xi.cget("text")), float(P_yi.cget("text")), float(P_zi.cget("text")), float(Pl_x.get()), float(Pl_y.get()), float(Pl_z.get()), N_p.get(), [Vj_1.get(), Vj_2.get(), Vj_3.get()])
elif Vectores[0] == 2: #acel
MsB.showwarning(title="error", message="La magnitud de la aceleración supera la condición. \n Varie el los valores de la aceleración crucero")
for i in range (0, 3):
if Vectores[2]==0:
Aj_1["from_"]=(Vectores[1])+0.1
Aj_1["to"]=(Vectores[1]*4)-0.1
if Vectores[2]==1:
Aj_2["from_"]=(Vectores[1])+0.1
Aj_2["to"]=(Vectores[1]*4)-0.1
if Vectores[2]==2:
Aj_3["from_"]=(Vectores[1])+0.1
Aj_3["to"]=(Vectores[1]*4)-0.1
Vectores=Wd.Perfil(Tipo.get(), elec_manipulador(), elec_codo(), T_f.get(), float(P_xi.cget("text")), float(P_yi.cget("text")), float(P_zi.cget("text")), float(Pl_x.get()), float(Pl_y.get()), float(Pl_z.get()), N_p.get(), [Aj_1.get(), Aj_2.get(), Aj_3.get()])
else:
posx=np.empty(resolucion)
posy=np.empty(resolucion)
posz=np.empty(resolucion)
for n in range(0, resolucion):
if mani == 1:
mat=Ec.Parametros(1, Vectores[1][n], Vectores[2][n], Vectores[3][n], None, None, None)
vect_pos=Ec.Vec('C', 3, None, mat[0])
posx[n]=vect_pos[0]
posy[n]=vect_pos[1]
posz[n]=vect_pos[2]
else:
mat=Ec.Parametros(2, Vectores[1][n], Vectores[2][n], Vectores[3][n], None, None, None)
vect_pos=Ec.Vec('C', 3, None, mat[0])
posx[n]=vect_pos[0]
posy[n]=vect_pos[1]
posz[n]=vect_pos[2]
#Thread(target=envio_graf1(Vectores[1],Vectores[2],Vectores[3])).start()
Gr1=Wd.Grafica(Fr_Graf, r'Posición $q_1$', "q[°]", 0, 0)
Gr2=Wd.Grafica(Fr_Graf, r'Posición $q_2$', "q[°]", 1/3, 0)
Gr3=Wd.Grafica(Fr_Graf, r'Posición $q_3$', "q[°]", 2/3, 0)
Gr4=Wd.Grafica(Fr_Graf, r'Velocidad $w_1$', r'w$[rad/s]$', 0, 1/2)
Gr5=Wd.Grafica(Fr_Graf, r'Velocidad $w_2$', r'w$[rad/s]$', 1/3, 1/2)
Gr6=Wd.Grafica(Fr_Graf, r'Velocidad $w_3$', r'w$[rad/s]$', 2/3, 1/2)
Gr1.Linea(resolucion, int(Vectores[1][0]), int(Vectores[1][-1]), int(T_f.get()), Vectores[1])
Gr2.Linea(resolucion, int(Vectores[2][0]), int(Vectores[2][-1]), int(T_f.get()), Vectores[2])
Gr3.Linea(resolucion, int(Vectores[3][0]), int(Vectores[3][-1]), int(T_f.get()), Vectores[3])
Gr4.Linea(resolucion, 0, Vectores[4][int(resolucion/2)], int(T_f.get()), Vectores[4])
Gr5.Linea(resolucion, 0, Vectores[5][int(resolucion/2)], int(T_f.get()), Vectores[5])
Gr6.Linea(resolucion, 0, Vectores[6][int(resolucion/2)], int(T_f.get()), Vectores[6])
P_xi.config(text=Pl_x.get())
P_yi.config(text=Pl_y.get())
P_zi.config(text=Pl_z.get())
Datos_Temp(P_xi.cget("text"), P_yi.cget("text"), P_zi.cget("text"), 0)
Envio_Pl(Vectores[1], Vectores[2], Vectores[3])
plot_3d(posx, posy, posz)
#Objetos Principales
Ventana = tk.Tk()
Ventana.title('Controles de Manipuladores Roboticos')
width=Ventana.winfo_screenwidth()
height= Ventana.winfo_screenheight()
Ventana.geometry("%dx%d" % (width, height))
Panel_Pestañas = ttk.Notebook(Ventana)
Panel_Pestañas.pack(fill='both',expand='yes')
#Variables
Nombres= tk.StringVar() #Variable String Para Nombres
Nombres.set("""
Dario Delgado - 1802992 \n
Brayan Ulloa - 1802861 \n
Fernando Llanes - 1802878 \n
Karla Baron - 1803648 \n
Sebastian Niño - 1803558
""")
Reposo= tk.StringVar() #Variable String Para Mensaje Reposo
Reposo.set("Parte de reposo \r termina en reposo: \r Ti=0; Vi=0; Vf=0")
Wd.Variables_Matrices(15, 4, 4, "DK") #Variables Matrices DK
Wd.Variables_Matrices(4, 6, 3, "Jaco") #Variables Matrices Jacobianos Scara-Antropomórfico
Wd.Variables_Matrices(2, 6, 6, "JacoR") #Variables Matrices Jacobianos R
Estado_S=False
Estado_A=False
Estado_R=False
Check_S_Valor=tk.BooleanVar()
Check_A_Valor=tk.BooleanVar()
Check_ST_Valor=tk.BooleanVar()
Check_AT_Valor=tk.BooleanVar()
#Pestañas
Pestaña_Info=Wd.Pestañas(Panel_Pestañas, 'Portada')
Pestaña_Scara=Wd.Pestañas(Panel_Pestañas, 'Robot Scara (P2R)')
Pestaña_Antro3R=Wd.Pestañas(Panel_Pestañas, 'Robot Antropomórfico (3R)')
Pestaña_Antro6R=Wd.Pestañas(Panel_Pestañas, 'Robot Antropomórfico (6R)')
Pestaña_Trayectorias_Jacobiano=Wd.Pestañas(Panel_Pestañas, 'Trayectorias Por Jacobiano Inverso')
Pestaña_Jacobianos=Wd.Pestañas(Panel_Pestañas, 'Jacobiano')
Pestaña_Trayectorias=Wd.Pestañas(Panel_Pestañas, 'Planeación De Trayectorias')
#Fuentes
Fuente_12 = Wd.Fuentes("Lucida Grande", 12)
Fuente_15 = Wd.Fuentes("Lucida Grande", 15)
Fuente_25 = Wd.Fuentes("Lucida Grande", 25)
Fuente_Num = Wd.Fuentes("Palatino Linotype", 18)
Fuente_Num2 = Wd.Fuentes("Palatino Linotype", 12)
Fuente_Slider= Wd.Fuentes("Bookman Old Style", 12)
##################################Pestaña 1########################################
Fi=Wd.Frame(Pestaña_Info, 'GUI Para Controlar Manipuladores Robóticos', Fuente_12, 1, 1, 0, 0, None) #Frame
Wd.Labels(Fi, Nombres, None, None, None, None, Fuente_25, None).Ubicacion(1/2, 1/2, tk.CENTER)#Label-Nombres
#Com=Wd.Boton(Fi, 20, 5, 'COM Close', None).Ubicacion(1/2, 7/8, tk.CENTER)
#Imagenes
Logo= Wd.Imagenes('./Imagenes/LOGOUMNG.png').zoom(2) #Logo UMNG
tk.Label(Fi, image=Logo).place(relx=1/4, rely=1/2, anchor=tk.CENTER)
Icono= Wd.Imagenes('./Imagenes/icon.png').zoom(2) #Icono Robot
tk.Label(Fi, image=Icono).place(relx=3/4, rely=1/2, anchor=tk.CENTER)
##################################Pestaña 2########################################
Fr_DK_S=Wd.Frame(Pestaña_Scara, 'Cinemática Directa', Fuente_12, 1, 5/8, 0, 0, None) #Frame Cinematica Directa
Fr_IK_S=Wd.Frame(Pestaña_Scara, 'Cinemática Inversa', Fuente_12, 1, 3/8, 0, 5/8, None) #Frame Cinematica Inversa
######Cinematica Directa######
#Barra De Progreso
Ba_S=Wd.Barra(Fr_IK_S, 300, 1/6, 0.98, 0.25, tk.E)
#Sliders
Qs1_S=Wd.Slider(Fr_DK_S, 1, 19, 1, 250, 34, 'Desplazamiento Base', Fuente_Slider, Cine_Directa, ['Eb',Ba_S])
Qs1_S.Ubicacion(0,0)
Qs2_S=Wd.Slider(Fr_DK_S, -90, 90, 10, 250, 34, 'Rotación Antebrazo', Fuente_Slider, Cine_Directa, ['Ebr',Ba_S])
Qs2_S.Ubicacion(0, 1/3)
Qs3_S=Wd.Slider(Fr_DK_S, -90, 90, 10, 250, 34, 'Rotación Brazo', Fuente_Slider, Cine_Directa, ['Eab',Ba_S])
Qs3_S.Ubicacion(0, 2/3)
Qt1_S=Wd.Editables(Fr_DK_S, Fuente_Num, 3/16, 0.11)
Qt2_S=Wd.Editables(Fr_DK_S, Fuente_Num, 3/16, 1/3+0.11)
Qt3_S=Wd.Editables(Fr_DK_S, Fuente_Num, 3/16, 2/3+0.11)
Qt_S=[Qt1_S, Qt2_S, Qt3_S]
#Matrices
Wd.Matrices(Fr_DK_S, "DK", 1, 4, 4, "Link 1", 1/2, 0, Fuente_12)
Wd.Matrices(Fr_DK_S, "DK", 2, 4, 4, "Link 2", 5/6, 0, Fuente_12)
Wd.Matrices(Fr_DK_S, "DK", 3, 4, 4, "Link 3", 1/2, 1/2, Fuente_12)
Wd.Matrices(Fr_DK_S, "DK", 4, 4, 4, "Total", 5/6, 1/2, Fuente_12)
#Botones
Wd.Boton(Fr_DK_S, None, None, "Instrucciones", "LightYellow2", Mensajes, 'DK').Ubicacion(1, 1, tk.SE)
Gp_S=Wd.Boton(Fr_DK_S, 15, 3, "Griper", "lime green", Gripper, 'E')
Gp_S.Ubicacion(4/6, 0.9, tk.CENTER)
Wd.Boton(Fr_DK_S, 12, 2, "Enviar", "ivory3", Cajas_DK, [['Eb,','Ebr,','Eab,'], Qt_S, Ba_S]).Ubicacion(1/4+0.02, 0.9, tk.W)
######Cinematica Inversa######
#Sliders
Py_S=Wd.Slider(Fr_IK_S, -90, 90, 0.5, 250, 20, 'Py', Fuente_Slider, Red_Slider, ['N','N'])
Py_S.Ubicacion(0, 1/3)
Pz_S=Wd.Slider(Fr_IK_S, 0, 190, 10, 250, 20, 'Pz', Fuente_Slider, Red_Slider, ['N','N'])
Pz_S.Ubicacion(0, 2/3)
Check_S=Wd.Check(Fr_IK_S, '-', 3/16, 1/3+0.18, Cambio, 'S', Check_S_Valor)
Px_S=Wd.Slider(Fr_IK_S, -101.5, 345, 0.5, 250, 20, 'Px', Fuente_Slider, Red_Slider, ['S', 'I', Py_S, Check_S, 3/16, 1/2+0.01])
Px_S.Ubicacion(0, 0)
#Codo Abajo
Co_D_S=Wd.Frame(Fr_IK_S, "Codo Abajo", Fuente_12, 1/10, 1/2, 1/2, 0, tk.N)
q1_S=tk.StringVar()
q2_S_D=tk.StringVar()
q3_S_D=tk.StringVar()
qs_S_D=[q1_S, q2_S_D, q3_S_D]
Wd.Labels(Co_D_S, None, "d₁", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 0, tk.NW)
Wd.Labels(Co_D_S, q1_S, None, None, None, None, Fuente_15, "white").Ubicacion(1, 0, tk.NE)
Wd.Labels(Co_D_S, None, "θ₂", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 1/3, tk.NW)
Wd.Labels(Co_D_S, q2_S_D, None, None, None, None, Fuente_15, "white").Ubicacion(1, 1/3, tk.NE)
Wd.Labels(Co_D_S, None, "θ₃", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 2/3, tk.NW)
Wd.Labels(Co_D_S, q3_S_D, None, None, None, None, Fuente_15, "white").Ubicacion(1, 2/3, tk.NE)
#Codo Arriba
Co_U_S=Wd.Frame(Fr_IK_S, "Codo Arriba", Fuente_12, 1/10, 1/2, 2/3, 0, tk.N)
q2_S_U=tk.StringVar()
q3_S_U=tk.StringVar()
qs_S_U=[q1_S, q2_S_U, q3_S_U]
Wd.Labels(Co_U_S, None, "d₁", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 0, tk.NW)
Wd.Labels(Co_U_S, q1_S, None, None, None, None, Fuente_15, "white").Ubicacion(1, 0, tk.NE)
Wd.Labels(Co_U_S, None, "θ₂", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 1/3, tk.NW)
Wd.Labels(Co_U_S, q2_S_U, None, None, None, None, Fuente_15, "white").Ubicacion(1, 1/3, tk.NE)
Wd.Labels(Co_U_S, None, "θ₃", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 2/3, tk.NW)
Wd.Labels(Co_U_S, q3_S_U, None, None, None, None, Fuente_15, "white").Ubicacion(1, 2/3, tk.NE)
#Botones
Wd.Boton(Fr_IK_S, None, None, "Instrucciones", "LightYellow2", Mensajes, 'IK').Ubicacion(1, 1, tk.SE)
CodoD_S=Wd.Boton(Fr_IK_S, 12, 2, "Codo Abajo", "ivory3", Enviar, [['Eb,','Ebr,','Eab,'], qs_S_D, Ba_S])
CodoU_S=Wd.Boton(Fr_IK_S, 12, 2, "Codo Arriba", "ivory3", Enviar, [['Eb,','Ebr,','Eab,'], qs_S_U, Ba_S])
Wd.Boton(Fr_IK_S, 12, 8, "Calcular", "dim gray", Cine_Inversa, ['S', [CodoD_S, CodoU_S]]).Ubicacion(1/4+0.02, 1/2, tk.W)
##################################Pestaña 3########################################
Fr_DK_A=Wd.Frame(Pestaña_Antro3R, 'Cinemática Directa', Fuente_12, 1, 5/8, 0, 0, None) #Frame Cinematica Directa
Fr_IK_A=Wd.Frame(Pestaña_Antro3R, 'Cinemática Inversa', Fuente_12, 1, 3/8, 0, 5/8, None) #Frame Cinematica Inversa
######Cinematica Directa######
#Barra De Progreso
Ba_A=Wd.Barra(Fr_IK_A, 300, 1/6, 0.98, 0.25, tk.E)
#Sliders
Qs1_A=Wd.Slider(Fr_DK_A, 0, 360, 10, 250, 34, 'Rotación Base', Fuente_Slider, Cine_Directa, ['Ab',Ba_A])
Qs1_A.Ubicacion(0, 0)
Qs2_A=Wd.Slider(Fr_DK_A, -90, 90, 10, 250, 34, 'Rotación Brazo', Fuente_Slider, Cine_Directa, ['Aab',Ba_A])
Qs2_A.Ubicacion(0, 2/3)
Qs3_A=Wd.Slider(Fr_DK_A, 0, 180, 10, 250, 34, 'Rotación Antebrazo', Fuente_Slider, Cine_Directa, ['Abr',Ba_A])
Qs3_A.Ubicacion(0, 1/3)
Qt1_A=Wd.Editables(Fr_DK_A,Fuente_Num, 3/16, 0.11)
Qt2_A=Wd.Editables(Fr_DK_A,Fuente_Num, 3/16, 1/3+0.11)
Qt3_A=Wd.Editables(Fr_DK_A,Fuente_Num, 3/16, 2/3+0.11)
Qt_A=[Qt1_A, Qt2_A, Qt3_A]
#Matrices
Wd.Matrices(Fr_DK_A, "DK", 5, 4, 4, "Link 1", 1/2, 0, Fuente_12)
Wd.Matrices(Fr_DK_A, "DK", 6, 4, 4, "Link 2", 5/6, 0, Fuente_12)
Wd.Matrices(Fr_DK_A, "DK", 7, 4, 4, "Link 3", 1/2, 1/2, Fuente_12)
Wd.Matrices(Fr_DK_A, "DK", 8, 4, 4, "Total", 5/6, 1/2, Fuente_12)
#Botones
Wd.Boton(Fr_DK_A, None, None, "Instrucciones", "LightYellow2", Mensajes, 'DK').Ubicacion(1, 1, tk.SE)
Gp_A=Wd.Boton(Fr_DK_A, 15, 3, "Griper", "lime green", Gripper, 'A')
Gp_A.Ubicacion(4/6, 0.9, tk.CENTER)
Wd.Boton(Fr_DK_A, 12, 2, "Enviar", "ivory3", Cajas_DK, [['Ab,','Abr,','Aab,'], Qt_A, Ba_A]).Ubicacion(1/4+0.02, 0.9, tk.W)
######Cinematica Inversa######
#Sliders
Pz_A=Wd.Slider(Fr_IK_A, None, None, 0.5, 250, 20, 'Pz', Fuente_Slider, Red_Slider, ['N','N'])
Pz_A.Ubicacion(0, 2/3)
Check_A=Wd.Check(Fr_IK_A, 'Inf', 3/16, 2/3+0.18, Cambio, 'A2', Check_A_Valor)
Py_A=Wd.Slider(Fr_IK_A, None, None, 0.5, 250, 20, 'Py', Fuente_Slider, Red_Slider, ['A2', 'I', Pz_A, Check_A, 3/16, 2/3+0.18])
Py_A.Ubicacion(0, 1/3)
Px_A=Wd.Slider(Fr_IK_A, -197, 197, 0.5, 250, 20, 'Px', Fuente_Slider, Red_Slider, ['A1', 'I', Py_A, None, None, None])
Px_A.Ubicacion(0, 0)
#Codo Abajo
Co_D_A=Wd.Frame(Fr_IK_A, "Codo Abajo", Fuente_12, 1/10, 1/2, 1/2, 0, tk.N)
q1_A=tk.StringVar()
q2_A_D=tk.StringVar()
q3_A_D=tk.StringVar()
qs_A_D=[q1_A, q2_A_D, q3_A_D]
Wd.Labels(Co_D_A, None, "θ₁", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 0, tk.NW)
Wd.Labels(Co_D_A, q1_A, None, None, None, None, Fuente_15, "white").Ubicacion(1, 0, tk.NE)
Wd.Labels(Co_D_A, None, "θ₂", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 1/3, tk.NW)
Wd.Labels(Co_D_A, q2_A_D, None, None, None, None, Fuente_15, "white").Ubicacion(1, 1/3, tk.NE)
Wd.Labels(Co_D_A, None, "θ₃", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 2/3, tk.NW)
Wd.Labels(Co_D_A, q3_A_D, None, None, None, None, Fuente_15, "white").Ubicacion(1, 2/3, tk.NE)
#Codo Arriba
Co_U_A=Wd.Frame(Fr_IK_A, "Codo Arriba", Fuente_12, 1/10, 1/2, 2/3, 0, tk.N)
q2_A_U=tk.StringVar()
q3_A_U=tk.StringVar()
qs_A_U=[q1_A, q2_A_U, q3_A_U]
Wd.Labels(Co_U_A, None, "θ₁", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 0, tk.NW)
Wd.Labels(Co_U_A, q1_A, None, None, None, None, Fuente_15, "white").Ubicacion(1, 0, tk.NE)
Wd.Labels(Co_U_A, None, "θ₂", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 1/3, tk.NW)
Wd.Labels(Co_U_A, q2_A_U, None, None, None, None, Fuente_15, "white").Ubicacion(1, 1/3, tk.NE)
Wd.Labels(Co_U_A, None, "θ₃", None, None, None, Fuente_15, "sandy brown").Ubicacion(0, 2/3, tk.NW)
Wd.Labels(Co_U_A, q3_A_U, None, None, None, None, Fuente_15, "white").Ubicacion(1, 2/3, tk.NE)
#Botones
Wd.Boton(Fr_IK_A, None, None, "Instrucciones", "LightYellow2", Mensajes, 'IK').Ubicacion(1, 1, tk.SE)
CodoD_A=Wd.Boton(Fr_IK_A, 12, 2, "Codo Abajo", "ivory3", Enviar, [['Ab,','Abr,','Aab,'], qs_A_D, Ba_A])
CodoU_A=Wd.Boton(Fr_IK_A, 12, 2, "Codo Arriba", "ivory3", Enviar, [['Ab,','Abr,','Aab,'], qs_A_U, Ba_A])
Wd.Boton(Fr_IK_A, 12, 8, "Calcular", "dim gray", Cine_Inversa, ['A', [CodoD_A, CodoU_A]]).Ubicacion(1/4+0.02, 1/2, tk.W)
##################################Pestaña 4########################################
#Desplegable
Despl_R=Wd.Desplegable(Pestaña_Antro6R, ["Cinemática Directa", "Cinemática Inversa"])
Despl_R.Ubicacion(0, 0)
Despl_R.bind("<<ComboboxSelected>>",Despl_R.Cambio)
Fr_DK_R=Despl_R.Frame_DK
Fr_IK_R=Despl_R.Frame_IK
#####Cinematica Directa######
#Barra De Progreso
Ba_R=Wd.Barra(Fr_DK_R, 200, 1/15, 0.98, 3/4, tk.NE)
#Sliders
Qs1_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Primera Base', Fuente_Slider, Cine_Directa, ['Rb1',Ba_R])
Qs1_R.Ubicacion(0, 0)
Qs2_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Primer Brazo', Fuente_Slider, Cine_Directa, ['Rbr1',Ba_R])
Qs2_R.Ubicacion(0, 1/6)
Qs3_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Segundo Brazo', Fuente_Slider, Cine_Directa, ['Rbr2',Ba_R])
Qs3_R.Ubicacion(0, 2/6)
Qs4_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Segunda Base', Fuente_Slider, Cine_Directa, ['Rb2',Ba_R])
Qs4_R.Ubicacion(0, 3/6)
Qs5_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Antebrazo', Fuente_Slider, Cine_Directa, ['Rab',Ba_R])
Qs5_R.Ubicacion(0, 4/6)
Qs6_R=Wd.Slider(Fr_DK_R,0, 360, 0.5, 250, 34, 'Rotación Muñeca', Fuente_Slider, Cine_Directa, ['Rm',Ba_R])
Qs6_R.Ubicacion(0, 5/6)
Qt1_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 1/18+0.014)
Qt2_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 4/18+0.014)
Qt3_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 7/18+0.014)
Qt4_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 10/18+0.014)
Qt5_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 13/18+0.014)
Qt6_R=Wd.Editables(Fr_DK_R, Fuente_Num, 3/16, 16/18+0.014)
Qt_R=[Qt1_R, Qt2_R, Qt3_R, Qt4_R, Qt5_R, Qt6_R]
#Matrices
Wd.Matrices(Fr_DK_R, "DK", 9, 4, 4, "Link 1", 1/2, 0, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 10, 4, 4, "Link 2", 5/6, 0, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 11, 4, 4, "Link 3", 1/2, 1/4, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 12, 4, 4, "Link 4", 5/6, 1/4, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 13, 4, 4, "Link 5", 1/2, 2/4, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 14, 4, 4, "Link 6", 5/6, 2/4, Fuente_12)
Wd.Matrices(Fr_DK_R, "DK", 15, 4, 4, "Total", 2/3, 3/4, Fuente_12)
#Botones
Wd.Boton(Fr_DK_R, None, None, "Instrucciones", "LightYellow2", Mensajes, 'DK').Ubicacion(1, 1, tk.SE)
Gp_R=Wd.Boton(Fr_DK_R, 15, 3, "Griper", "lime green", Gripper, 'R')
Gp_R.Ubicacion(7/16, 3/4+0.1, tk.N)
Wd.Boton(Fr_DK_R, 12, 2, "Enviar", "ivory3", Cajas_DK, [['Rb1,','Rbr1,','Rbr2,','Rb2,','Rab,','Rm,'], Qt_R, Ba_R]).Ubicacion(7/16, 3/4, tk.N)
######Cinematica Inversa######
#Sliders
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Px', Fuente_Slider, None, None).Ubicacion(0, 0)
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Py', Fuente_Slider, None, None).Ubicacion(0, 1/6)
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Pz', Fuente_Slider, None, None).Ubicacion(0, 2/6)
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Alfa', Fuente_Slider, None, None).Ubicacion(0, 3/6)
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Beta', Fuente_Slider, None, None).Ubicacion(0, 4/6)
# Wd.Slider(Fr_IK_R, -200, 200, 0.5, 250, 34, 'Gamma', Fuente_Slider, None, None).Ubicacion(0, 5/6)
#Botones
Wd.Boton(Fr_IK_R, None, None, "Instrucciones", "LightYellow2", Mensajes, 'IK').Ubicacion(1, 1, tk.SE)
##################################Pestaña 5########################################
Fr_T_J=Wd.Frame(Pestaña_Trayectorias_Jacobiano, 'Planificación de Trayectorias Por Jacobiano Inverso', Fuente_12, 1, 1, 0, 0, None) #Frame Jacobiano
##################################Pestaña 6########################################
Fr_J=Wd.Frame(Pestaña_Jacobianos, 'Jacobianos', Fuente_12, 1, 1, 0, 0, None) #Frame Jacobiano
#Barra De Progreso
Ba_J=Wd.Barra(Fr_J, 300, 1/15, 1/2, 1/3, tk.N)
#Matrices
Wd.Matrices(Fr_J, "Jaco", 1, 6, 3, "Jacobiano Scara Geométrico", 1/4, 0, Fuente_12)
Wd.Matrices(Fr_J, "Jaco", 2, 6, 3, "Jacobiano Scara Analítico", 3/4, 0, Fuente_12)
Wd.Matrices(Fr_J, "Jaco", 3, 6, 3, "Jacobiano Antropomórfico Geométrico", 1/4, 1/3, Fuente_12)
Wd.Matrices(Fr_J, "Jaco", 4, 6, 3, "Jacobiano Antropomórfico Analítico", 3/4, 1/3, Fuente_12)
Wd.Matrices(Fr_J, "JacoR", 1, 6, 6, "Jacobiano Antropomórfico 6R Geométrico", 1/4, 2/3, Fuente_12)
Wd.Matrices(Fr_J, "JacoR", 2, 6, 6, "Jacobiano Antropomórfico 6R Analítico", 3/4, 2/3, Fuente_12)
#Botones
#Wd.Boton(Fr_J, None, None, "Instrucciones", "LightYellow2").Ubicacion(1, 1, tk.SE)
Wd.Boton(Fr_J, 15, 3, "Mostrar", "dim gray", Jacobians, Ba_J).Ubicacion(1/2, 1/2, tk.N)
##################################Pestaña 7########################################
Fr_T=Wd.Frame(Pestaña_Trayectorias, 'Datos de Entrada', Fuente_12, 1, 1/4, 0, 0, None) #Frame Datos Trayectorias
#Desplegables
Despl_Mani=Wd.Desplegable(Fr_T, ["Scara (PRR)", "Antropomórfico (RRR)"])
Despl_Mani.Ubicacion(0, 0)
Despl_Mani.bind("<<ComboboxSelected>>",Show_Sliders)
Despl_Codo=Wd.Desplegable(Fr_T, ["Codo Arriba", "Codo Abajo"])
Despl_Codo.bind("<<ComboboxSelected>>",Show_Perfiles)
#Label Información Importante (Parte de Reposo)
Wd.Labels(Fr_T, Reposo, None, 1, "solid", None, Fuente_15, None).Ubicacion(4/16, 0, None)
#Puntos Iniciales-Finales
#Labels
P_xi=Wd.Labels(Fr_T, None, "0", 1, "solid", 12, None, None)
P_yi=Wd.Labels(Fr_T, None, "0", 1, "solid", 12, None, None)
P_zi=Wd.Labels(Fr_T, None, "0", 1, "solid", 12, None, None)
P_x= Wd.Labels(Fr_T, None, "Px", None, None, None, None, None)
P_y= Wd.Labels(Fr_T, None, "Py", None, None, None, None, None)
P_z= Wd.Labels(Fr_T, None, "Pz", None, None, None, None, None)
#Buttons
Tipo=tk.IntVar()
Cuadratico=Wd.Radio(Fr_T, "Perfil Cuadrático", Fuente_12, 1, Tipo, 15, Show_Datos)
TrapezoidalI=Wd.Radio(Fr_T, "Perfil Trapezoidal I", Fuente_12, 2, Tipo, 15, Show_Datos)
TrapezoidalII=Wd.Radio(Fr_T, "Perfil Trapezoidal II", Fuente_12, 3, Tipo, 15, Show_Datos)
Calcular_PT=Wd.Boton(Fr_T, 12, None, "Calcular", "dim gray", But_Perfiles, None)
#Wd.Boton(Fr_T, None, None, "Instrucciones", "LightYellow2").Ubicacion(1, 1, tk.SE)
#Barra De Progreso
Br_Pl=Wd.Barra(Fr_T, 150, 1/8, 5/16, 1, tk.S)
#Sliders
Check_S_PL=Wd.Check(Fr_T, '-', 1/4-0.025, 1/3+0.22, Cambio, 'ST', Check_ST_Valor)
Check_A_PL=Wd.Check(Fr_T, 'Inf', 1/4-0.025, 2/3+0.15, Cambio, 'AT', Check_AT_Valor)
Pl_x=Wd.Slider(Fr_T, None, None, 0.5, 180, 20, None, None, Alter_Sliders, 'A1')
Pl_y=Wd.Slider(Fr_T, None, None, 0.5, 180, 20, None, None, Alter_Sliders, 'A2')
Pl_z=Wd.Slider(Fr_T, None, None, 0.5, 180, 20, None, None, Show_Codo, None)
T_f=Wd.Slider(Fr_T, 15, 40, 1, 180, 20, None, None, Show_Graficas, None)
N_p=Wd.Slider(Fr_T, 10, 100, 10, 180, 20, None, None, Show_Graficas, None)
Vj_1=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
Vj_2=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
Vj_3=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
Aj_1=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
Aj_2=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
Aj_3=Wd.Slider(Fr_T, None, None, 0.2, 180, 20, None, None, Show_Graficas, None)
# #Titulos
P_inicial=Wd.Labels(Fr_T, None, "Puntos Iniciales", None, None, 12, Fuente_Num2, None)
P_final=Wd.Labels(Fr_T, None, "Puntos Finales", None, None, 12, Fuente_Num2, None)
T_Codo=Wd.Labels(Fr_T, None, "Elección Codo", None, None, 12, Fuente_Num2, None)
TT_f=Wd.Labels(Fr_T, None, "Tf", None, None, None, Fuente_Num2, None)
TN_p=Wd.Labels(Fr_T, None, "Np", None, None, None, Fuente_Num2, None)
TVc_1=Wd.Labels(Fr_T, None, "Vc1", None, None, None, Fuente_Num2, None)
TVc_2=Wd.Labels(Fr_T, None, "Vc2", None, None, None, Fuente_Num2, None)
TVc_3=Wd.Labels(Fr_T, None, "Vc3", None, None, None, Fuente_Num2, None)
TAc_1=Wd.Labels(Fr_T, None, "Ac1", None, None, None, Fuente_Num2, None)
TAc_2=Wd.Labels(Fr_T, None, "Ac2", None, None, None, Fuente_Num2, None)
TAc_3=Wd.Labels(Fr_T, None, "Ac3", None, None, None, Fuente_Num2, None)
Fr_Graf=Wd.Frame(Pestaña_Trayectorias, 'Gráficas', Fuente_12, 1, 3/4, 0, 1/4, None) #Frame Graficas
#Ventana.attributes('-fullscreen',True)
Ventana.mainloop() | daridel99/UMNG-robotica | Interfaz.py | Interfaz.py | py | 39,199 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number"... |
5449785498 | from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.utils import np_utils
(xtrain,ytrain),(xtest,ytest) = cifar10.load_data()
print('xtrain.shape',xtrain.shape)
print('ytrain.shape',ytrain.shape)
print('ytest.shape',ytest.shape)
print('xtest.shape',xtest.shape)
batchsize=200
cats = 10
nepoch = 100
xtrain = xtrain.reshape(50000,3072)
xtest = xtest.reshape(10000,3072)
xtrain = xtrain/255
xtest = xtest/255
ytrain = np_utils.to_categorical(ytrain,cats)
ytest = np_utils.to_categorical(ytest,cats)
model = Sequential()
model.add(Dense(units=10,input_shape=(3072,),activation='softmax',kernel_initializer='normal'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.05), metrics=['accuracy'])
model.summary()
history = model.fit(xtrain, ytrain, nb_epoch=nepoch, batch_size=batchsize, verbose=1)
# Evaluate
evaluation = model.evaluate(xtest, ytest, verbose=1)
print('Summary: Loss over the test dataset: %.2f, Accuracy: %.2f' % (evaluation[0], evaluation[1])) | daftengineer/kerasSagemaker | test.py | test.py | py | 1,101 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.datasets.cifar10.load_data",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "keras.datasets.cifar10",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "keras.utils.np_utils.to_categorical",
"line_number": 20,
"usage_type": "call"
},
... |
20908637143 | from python_app_configs import config
from python_generic_modules import se_os
from python_generic_modules import se_docker
import re
import os
import glob
import time
import jinja2
template1 = jinja2.Template("{% for i in range(0,last_num)%}zookeepernode{{ i }}.{{ domain }}:2181{% if not loop.last %},{% endif %}{% endfor %}")
zookeeper_nodes = template1.render(last_num=config.zookeeper_nodes,domain=config.domain_name)
def setup_kafka_dirs():
print('Setting up kafka bind_mount directories...\n')
os.chdir(config.dest_dir)
dir_glob = 'kafka' + '*'
dir_lst = glob.glob(dir_glob)
for i in dir_lst:
se_os.del_dir(str(i))
src_dir_path = os.path.join(config.data_dir,'kafka_conf')
for i in range(0,config.kafka_nodes):
dest_path_new = os.path.join(config.dest_dir,'kafkanode'+str(i))
se_os.copy_dir(src_dir_path,dest_path_new)
print('bind_mount directories setup compelete\n')
def config_kafka(i):
src_file_path = os.path.join(config.data_dir,'kafka_conf','server.properties')
dest_file_path = os.path.join(config.dest_dir,'kafkanode'+str(i),'server.properties')
param1 = re.compile(r'(.*)(broker.id)(.*)')
param2 = re.compile(r'(.*)(num.partitions)(.*)')
param3 = re.compile(r'(.*)(zookeeper.connect=)(.*)')
with open(src_file_path,mode='r') as file1:
with open(dest_file_path,mode='w') as file2:
for line in file1:
if param1.search(line):
line = param1.sub(r'\1\2{}'.format('='+str(i)), line)
file2.write(line)
continue
elif param2.search(line):
line = param2.sub(r'\1\2{}'.format('='+str(str(config.kafka_default_partitions))), line)
file2.write(line)
continue
elif param3.search(line):
line = param3.sub(r'\1\2{}'.format(zookeeper_nodes), line)
file2.write(line)
continue
else:
file2.write(line)
def launch_kafka():
print('\n====Running kafka_setup module====\n')
setup_kafka_dirs()
time.sleep(3)
print('\n====Running kafka_config module====\n')
for i in range(0,config.kafka_nodes):
print("Updating configs for node 'kafkanode{}'\n".format(i))
config_kafka(i)
time.sleep(3)
print('\n====Creating SE_Platform Network if not already created====\n')
hadoop_net = config.hadoop_network_range + '/24'
lst = config.hadoop_network_range.split('.')
lst[3]='1'
hadoop_gateway = '.'.join(lst)
se_docker.create_network('hadoopnet',hadoop_net,hadoop_gateway)
print('\n====Launching containers and attaching bind mounts====\n')
for i in range(0,config.kafka_nodes):
se_docker.launch_containers('kmahesh2611/kafka','/kafka_2.11-2.1.0/bin/kafka-server-start.sh /kafka_2.11-2.1.0/config/server.properties','kafkanode' + str(i) + '.' + config.domain_name,'kafkanode' +str(i) + '.' + config.domain_name,{os.path.join(config.dest_dir,'kafkanode'+str(i)):{'bind':'/kafka_2.11-2.1.0/config','mode':'rw'}},'hadoopnet',True,True)
print('Wait for 10 seconds....')
time.sleep(10)
print('\n====Verify if containers are running====\n')
num = 0
for i in se_docker.get_all_containers():
if 'kafkanode' in i.name:
num = num + 1
if 'running' in i.status:
print('{} : {}'.format(i.name,i.status))
else:
print('Error: Container "{}" is in status "{}"\n'.format(i.name,i.status))
print('Exiting script\n')
sys.exit(1)
if num == 0:
print('No container found starting with name "kafkanode"')
print('Exiting script\n')
sys.exit(1)
### Creating Kafka topics ###
print('\n====Creating Kafka Topics====\n')
for i in config.kafka_topics:
print(se_docker.exec_command('kafkanode0' + '.' + config.domain_name,"/kafka_2.11-2.1.0/bin/kafka-topics.sh --create --zookeeper {} --replication-factor {} --partitions {} --topic {}".format(zookeeper_nodes,str(config.kafka_nodes),str(config.kafka_default_partitions),i)))
print("Created topics: {}\n".format([topics for topics in config.kafka_topics]))
def del_kafka_containers():
print('\n====Stopping and deleting Containers for kafka====\n')
for i in se_docker.get_all_containers():
if 'kafkanode' in i.name:
print('Stopping and deleting Container: {}\n'.format(i.name))
i.remove(force=True)
| karthikmahesh2611/docker_hadoop | python_hadoop_modules/kafka.py | kafka.py | py | 4,120 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jinja2.Template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "python_app_configs.config.zookeeper_nodes",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "python_app_configs.config",
"line_number": 11,
"usage_type": "name"
},
... |
74987096188 | from deep_rl_for_swarms.common import explained_variance, zipsame, dataset
from deep_rl_for_swarms.common import logger
import deep_rl_for_swarms.common.tf_util as U
import tensorflow as tf, numpy as np
import time
import os
from deep_rl_for_swarms.common import colorize
from mpi4py import MPI
from collections import deque
from deep_rl_for_swarms.common.mpi_adam import MpiAdam
from deep_rl_for_swarms.common.cg import cg
from contextlib import contextmanager
from deep_rl_for_swarms.common.act_wrapper import ActWrapper
import sys
from gym import spaces
import matplotlib.pyplot as plt
def traj_segment_generator(pi, env, horizon, stochastic):
    """Roll out policy `pi` in `env` forever, yielding one batch every `horizon` steps.

    Each yield produces a pair ``(segments, info_total)`` where ``segments`` is a
    list with one dict per agent (obs, rewards, value predictions, done flags,
    actions, previous actions, bootstrap value, episode stats) and ``info_total``
    averages the per-episode ``info`` dicts collected since the previous yield.
    """
    # Initialize state variables
    t = 0
    n_agents = env.nr_agents
    new = True
    ob = env.reset()
    cur_ep_ret = 0
    cur_ep_len = 0
    ep_rets = []
    ep_lens = []
    time_steps = []
    # Initialize history arrays
    obs = np.array([ob for _ in range(horizon)])
    rews = np.zeros([horizon, n_agents], 'float32')
    vpreds = np.zeros([horizon, n_agents], 'float32')
    news = np.zeros([horizon, n_agents], 'int32')
    if isinstance(env.action_space, spaces.Box):
        ac = np.vstack([env.action_space.sample() for _ in range(n_agents)])  # Used only to initialize vectors!!
        acs = np.array([ac for _ in range(horizon)])
    elif isinstance(env.action_space, spaces.Discrete):
        ac = np.array([env.action_space.sample() for _ in range(n_agents)])
        acs = np.zeros([horizon, n_agents], 'int32')  # For discrete actions
    else:
        raise NotImplementedError
    prevacs = acs.copy()
    # NOTE: this local array shadows the imported `time` module inside this function.
    time = np.zeros(horizon, 'float32')  # To store the time of acting
    # Info to be saved in the logger; extra keys depend on the attack mode.
    keys_to_save = ['attackers_caught', 'attackers_not_caught', 'mean_total_rwd', 'total_rwd']
    if env.attack_mode == 'phy':
        keys_to_save.extend(['phy_fc_error_rate'])
    if env.attack_mode == 'mac':
        keys_to_save.extend(['total_mac_tx', 'total_mac_col', 'total_bits_tx', 'prop_t_tx', 'mean_prop_bits_tx_at',
                             'mean_prop_bits_tx_no'])
    info_indiv = []
    while True:
        prevac = ac
        ac, vpred = pi.act(stochastic, np.vstack(ob))
        if isinstance(env.action_space, spaces.Box):
            ac = np.clip(ac, env.action_space.low, env.action_space.high)  # To ensure actions are in the right limit!
        # Slight weirdness here because we need value function at time T
        # before returning segment [0, T-1] so we get the correct
        # terminal value
        if t > 0 and t % horizon == 0:
            # Average each tracked info key over the episodes finished this batch.
            info_total = {}
            for key in keys_to_save:
                aux = 0
                for i in range(len(info_indiv)):
                    aux += info_indiv[i][key]
                info_total[key] = aux / len(info_indiv)
            if isinstance(env.action_space, spaces.Box):
                yield [
                    dict(
                        ob=np.array(obs[:, na, :]),
                        rew=np.array(rews[:, na]),
                        vpred=np.array(vpreds[:, na]),
                        new=np.array(news[:, na]),
                        ac=np.array(acs[:, na, :]),
                        prevac=np.array(prevacs[:, na, :]),
                        # Bootstrap value for the step after the horizon; zeroed when the
                        # last step ended an episode (new is truthy).
                        nextvpred=vpred[na] * (1 - new),
                        ep_rets=[epr[na] for epr in ep_rets],
                        ep_lens=ep_lens,
                        time_steps=np.array(time_steps),
                        time=time,
                    ) for na in range(n_agents)
                ], info_total
            elif isinstance(env.action_space, spaces.Discrete):
                yield [
                    dict(
                        ob=np.array(obs[:, na, :]),
                        rew=np.array(rews[:, na]),
                        vpred=np.array(vpreds[:, na]),
                        new=np.array(news[:, na]),
                        ac=np.array(acs[:, na]),
                        prevac=np.array(prevacs[:, na]),
                        nextvpred=vpred[na] * (1 - new),
                        ep_rets=[epr[na] for epr in ep_rets],
                        ep_lens=ep_lens,
                        time_steps=np.array(time_steps),
                        time=time
                    ) for na in range(n_agents)
                ], info_total
            else:
                raise NotImplementedError
            _, vpred = pi.act(stochastic, ob)
            # Be careful!!! if you change the downstream algorithm to aggregate
            # several of these batches, then be sure to do a deepcopy
            ep_rets = []
            ep_lens = []
            time_steps = []
            info_indiv = []
        i = t % horizon
        time_steps.append(t)
        obs[i] = ob
        vpreds[i] = vpred
        news[i] = new
        acs[i] = ac
        prevacs[i] = prevac
        # Record the environment's notion of elapsed time for this step.
        if env.attack_mode == 'mac':
            time[i] = sum(env.t_counter)
        elif env.attack_mode == 'phy':
            time[i] = env.timestep
        else:
            raise RuntimeError('Environment not recognized')
        ob, rew, new, info = env.step(ac)
        rews[i] = rew
        #mask_undetected[i] = np.logical_not(env.banned[0:env.nr_agents])
        cur_ep_ret += rew
        cur_ep_len += 1
        if new:
            # Episode finished: stash its stats and reset the environment.
            info_indiv.append(info)
            ep_rets.append(cur_ep_ret)
            ep_lens.append(cur_ep_len)
            cur_ep_ret = 0
            cur_ep_len = 0
            ob = env.reset()
        sys.stdout.write('\r Current horizon length = ' + str((t + 1) % horizon) + '/' + str(horizon))
        sys.stdout.flush()
        t += 1
def add_vtarg_and_adv(seg, gamma, lam):
    """Compute GAE(lambda) advantages ("adv") and lambda-returns ("tdlamret") in place.

    `seg` is a list of per-agent segment dicts as produced by
    traj_segment_generator; each dict gains "adv" and "tdlamret" arrays.
    """
    for agent in seg:
        rewards = agent["rew"]
        steps = len(rewards)
        # Pad with the flag/value for the step just after the horizon so that
        # index t + 1 is always valid inside the backward recursion.
        dones = np.append(agent["new"], 0)
        values = np.append(agent["vpred"], agent["nextvpred"])
        advantages = np.empty(steps, 'float32')
        running = 0
        for step in range(steps - 1, -1, -1):
            live = 1 - dones[step + 1]  # 0 when the next step starts a new episode
            td_error = rewards[step] + gamma * values[step + 1] * live - values[step]
            running = td_error + gamma * lam * live * running
            advantages[step] = running
        agent["adv"] = advantages
        agent["tdlamret"] = advantages + agent["vpred"]
'''
def add_vtarg_and_adv(seg, gamma, lam): # Modified version to include time!
new = [np.append(p["new"], 0) for p in seg] # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = [np.append(p["vpred"], p["nextvpred"]) for p in seg]
for i, p in enumerate(seg):
T = len(p["rew"])
p["adv"] = gaelam = np.empty(T, 'float32')
rew = p["rew"]
lastgaelam = 0
time = np.append(p['time'], p['time'][-1] + 1) # Increase the final time by 1 to obtain the differential time
difft = time[1:] - time[0: -1]
for t in reversed(range(T)):
nonterminal = 1 - new[i][t + 1]
gt = gamma ** difft[t] # Note that when difft is negative, nonterminal is 0!!
lt = lam ** difft[t]
delta = rew[t] + gt * vpred[i][t + 1] * nonterminal - vpred[i][t]
gaelam[t] = lastgaelam = delta + gt * lt * nonterminal * lastgaelam
p["tdlamret"] = p["adv"] + p["vpred"]
'''
def learn(env, policy_fn, *,
        timesteps_per_batch, # what to train on
        max_kl, cg_iters,
        gamma, lam, # advantage estimation
        entcoeff=0.0,
        cg_damping=1e-2,
        vf_stepsize=3e-4,
        vf_iters =3,
        max_timesteps=0, max_episodes=0, max_iters=0,  # time constraint
        callback=None,
        save_dir=None,
        save_flag=False,
        plot_flag=False
        ):
    """TRPO training loop (MPI-parallel, TensorFlow 1.x graph API).

    Builds the surrogate-loss/KL graph for a fresh "pi" and snapshot "oldpi"
    policy, then repeats until exactly one of max_timesteps / max_episodes /
    max_iters is exhausted: sample `timesteps_per_batch` steps, compute GAE
    advantages, take a natural-gradient policy step (conjugate gradient +
    backtracking line search under a KL trust region of `max_kl`), and run
    `vf_iters` Adam epochs on the value function. Per-iteration stats are
    written through `logger`; models are pickled under `save_dir` when
    `save_flag` is set. `plot_flag` is currently unused in this body.
    """
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_fn("pi", ob_space, ac_space)
    oldpi = policy_fn("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = entcoeff * meanent
    vferr = tf.reduce_mean(tf.square(pi.vpred - ret))
    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac))  # advantage * pnew / pold (advantage--> Next line)
    surrgain = tf.reduce_mean(ratio * atarg)
    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
    dist = meankl
    # NOTE(review): variable selection assumes the policy scopes its variables
    # under "pol"/"me"/"vf" sub-scopes -- confirm against policy_fn.
    all_var_list = pi.get_trainable_variables()
    var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]  # Policy variables
    var_list.extend([v for v in all_var_list if v.name.split("/")[1].startswith("me")])  # Mean embedding variables
    vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]  # Value function variables
    vfadam = MpiAdam(vf_var_list)
    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    # Fisher-vector product graph: directional second derivative of the KL.
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([tf.reduce_sum(g * tangent) for (g, tangent) in zipsame(klgrads, tangents)])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)
    assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)
                                                    for (oldv, newv) in
                                                    zipsame(oldpi.get_variables(), pi.get_variables())])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        # Wall-clock timing printed only on the MPI root rank.
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
        else:
            yield

    def allmean(x):
        # Average a numpy array across all MPI workers.
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    act_params = {
        'name': "pi",
        'ob_space': ob_space,
        'ac_space': ac_space,
    }
    pi = ActWrapper(pi, act_params)
    U.initialize()
    # Broadcast root's parameters so all workers start identical.
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    vfadam.sync()
    print("Init param sum", th_init.sum(), flush=True)
    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    # Exactly one stopping criterion must be active.
    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break
        if max_timesteps:
            print(colorize(str(100 * timesteps_so_far / max_timesteps) + ' % of timesteps', color='magenta'))
        elif max_episodes:
            print(colorize(str(100 * episodes_so_far / max_episodes) + ' % of episodes', color='magenta'))
        elif max_iters:
            print(colorize(str(100 * iters_so_far / max_iters) + ' % of iters', color='magenta'))
        logger.log("********** Iteration %i ************" % iters_so_far)
        with timed("sampling"):
            seg, info = seg_gen.__next__()
        add_vtarg_and_adv(seg, gamma, lam)
        # Concatenate per-agent segments into flat training batches.
        ob = np.concatenate([s['ob'] for s in seg], axis=0)
        ac = np.concatenate([s['ac'] for s in seg], axis=0)
        atarg = np.concatenate([s['adv'] for s in seg], axis=0)
        tdlamret = np.concatenate([s['tdlamret'] for s in seg], axis=0)
        vpredbefore = np.concatenate([s["vpred"] for s in seg], axis=0)  # predicted value function before update
        atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate
        # if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
        # if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
        args = ob, ac, atarg
        # Subsample every 5th row for the (expensive) Fisher-vector products.
        fvpargs = [arr[::5] for arr in args]

        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        assign_old_eq_new()  # set old parameter values to new parameter values
        with timed("computegrad"):
            *lossbefore, g = compute_lossandgrad(*args)
        lossbefore = allmean(np.array(lossbefore))
        g = allmean(g)
        if np.allclose(g, 0):
            logger.log("Got zero gradient. not updating")
        else:
            with timed("cg"):
                stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank == 0)
            assert np.isfinite(stepdir).all()
            # Scale the CG direction so the step saturates the KL trust region.
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            lm = np.sqrt(shs / max_kl)
            # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
            fullstep = stepdir / lm
            expectedimprove = g.dot(fullstep)
            surrbefore = lossbefore[0]
            stepsize = 1.0
            thbefore = get_flat()
            # Backtracking line search: halve the step until the surrogate
            # improves without violating the KL constraint.
            for _ in range(10):
                thnew = thbefore + fullstep * stepsize
                set_from_flat(thnew)
                meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
                improve = surr - surrbefore
                logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
                if not np.isfinite(meanlosses).all():
                    logger.log("Got non-finite value of losses -- bad!")
                elif kl > max_kl * 1.5:
                    logger.log("violated KL constraint. shrinking step.")
                elif improve < 0:
                    logger.log("surrogate didn't improve. shrinking step.")
                else:
                    logger.log("Stepsize OK!")
                    break
                stepsize *= .5
            else:
                logger.log("couldn't compute a good step")
                set_from_flat(thbefore)
            if nworkers > 1 and iters_so_far % 20 == 0:
                # Sanity check: parameters must stay in sync across workers.
                paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum()))  # list of tuples
                assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        with timed("vf"):
            # Value-function regression on the lambda-returns.
            for _ in range(vf_iters):
                for (mbob, mbret) in dataset.iterbatches((ob, tdlamret),
                                                         include_final_partial_batch=False, batch_size=64):
                    g = allmean(compute_vflossandgrad(mbob, mbret))
                    vfadam.update(g, vf_stepsize)
        lrlocal = (seg[0]["ep_lens"], seg[0]["ep_rets"])  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews = map(flatten_lists, zip(*listoflrpairs))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1
        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)
        # Add info values
        logger.record_tabular("AttC", info['attackers_caught'])
        logger.record_tabular("AttNC", info['attackers_not_caught'])
        logger.record_tabular("MtR", info['mean_total_rwd'])  # Mean total reward
        logger.record_tabular("TtR", info['total_rwd'])  # Total reward
        if env.attack_mode == 'phy':
            logger.record_tabular("Fce", info['phy_fc_error_rate'])
        if env.attack_mode == 'mac':
            logger.record_tabular("Tmt", info['total_mac_tx'])
            logger.record_tabular("Tmc", info['total_mac_col'])
            logger.record_tabular("Tbt", info['total_bits_tx'])
            logger.record_tabular("Ptt", info['prop_t_tx'])
            logger.record_tabular("MpbtA", info['mean_prop_bits_tx_at'])
            logger.record_tabular("MpbtN", info['mean_prop_bits_tx_no'])
        if rank == 0:
            logger.dump_tabular()
        if save_flag:
            pi.save(os.path.normpath(save_dir + '/models_trpo/model_{}.pkl'.format(iters_so_far)))
def flatten_lists(listoflists):
    """Concatenate the sub-lists of *listoflists* into one flat list."""
    flat = []
    for sublist in listoflists:
        flat.extend(sublist)
    return flat
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
34142194304 | import numpy as np
import scipy.constants
from pathlib import Path
class ElectricAcceleration:
bodies = []
def __init__(self, bodies):
"""This will allow the list of particles from the Accelerator module to be inserted letting the ElectricAcceleration class calculate their acceleration"""
self.bodies = bodies
def acceleration(self, mag1, mag2, mag3, sinelec1, sinelec2, sinelec3):
"""The values of the electic and magnetic field set in the Accelerator file are imported here"""
constantelectric=np.array([sinelec1, sinelec2, sinelec3])
magnetic=np.array([mag1, mag2, mag3])
magnitudeMagnetic=np.linalg.norm(magnetic)
for particle1 in self.bodies:
"""The electric field due to particle-particle interactions and the acceleration are set to be arrays"""
electricSection=np.array([0., 0., 0.])
acceleration = np.array([0., 0., 0.])
"""The charge mass and velocity of the particles are set to be their values as calculated in the particle class"""
c1 = particle1.charge
m1 = particle1.mass
v1 = particle1.velocity
#kineticE = 0
for particle2 in self.bodies:
if particle1 != particle2:
"""This allows the calculation of the acceleration due to the electric and magnetic fields for each body in the system"""
m2 = particle2.mass
c2 = particle2.charge
v2 = particle2.velocity
"""This calculates the distance between the accelerating body and the body causing the acceleration, this will only apply when 2 or more charged particles are present"""
r = np.array(particle1.position) - np.array(particle2.position)
magnitudeR = np.linalg.norm(r)
const=1/(4*scipy.constants.pi*scipy.constants.epsilon_0)
#electricSection=np.array([1,1,0])
"""This calculates the electric field acting on a charged particle due to each other charged particle in the system"""
electric=np.array([const*(c2/magnitudeR**2)])
electricSection += ((electric/magnitudeR)*r)
#kineticE=np.linalg.norm(0.5*m1*v1**2)
#update magnetic with input functions
"""This combines the effects of the constant sinusoidal electric field and the effect due to other charged particles"""
totalelectric=electricSection+constantelectric
"""The value for the total electric field is then used in loretz equation to calculate the acceleration due to both the electric and magnetic fields"""
qvb=np.cross((c1*v1), magnetic)
acceleration=(((c1*totalelectric)+(qvb))/m1)+acceleration
#particle1.kineticE=kineticE
"""This sets the acceleration of the particle to be the previously calculated value for the current time step"""
particle1.acceleration = acceleration
#print(acceleration)
#for particle1 in self.bodies:
# """This allows the calculation of the angular and linear momentum of the system"""
# angularMomentum = 0
# momentum = 0
#for particle2 in self.bodies:
# if particle1 != particle2:
# m1 = particle1.mass
# r = np.array(particle1.position) - np.array(particle2.position)
# momentum = m1*np.linalg.norm(particle1.velocity)
# angularMomentum = momentum * np.linalg.norm(r)
#particle1.momentum = momentum
#particle1.angularMomentum = angularMomentum | Lancaster-Physics-Phys389-2020/phys389-2020-project-twgrainger | LidlFieldV1.py | LidlFieldV1.py | py | 3,815 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_nu... |
16398002041 | import os
import pygame
from Engine import MainMenu
from Entities.Maps.SimpleCheck import SimpleCheck, ConditionsType
class BlockChecks(SimpleCheck):
    """A map pin that aggregates several SimpleChecks into one clickable block.

    The pin's color reflects the aggregate state of its child checks
    (done / all-in-logic / some-in-logic / none-in-logic) and it renders a
    counter with the number of currently-in-logic checks.
    """

    def __init__(self, ident, name, positions, linked_map):
        SimpleCheck.__init__(self, ident, name, positions, linked_map, True)
        self.position_logic_indicator = None   # screen position of the counter text
        self.surface_logic_indicator = None    # rendered counter surface
        self.list_checks = []                  # child SimpleCheck instances
        self.show_checks = False
        self.all_logic = False                 # True when every visible check is in logic
        self.logic_cpt = 0                     # number of visible, unchecked, in-logic checks
        self.focused = False

    def add_check(self, check):
        """Attach a child check and refresh the aggregate state."""
        self.list_checks.append(check)
        self.update()

    def get_checks(self):
        return self.list_checks

    def update(self):
        """Recompute aggregate flags from the children and rebuild pin visuals."""
        self.logic_cpt = 0
        self.all_logic = True
        self.checked = True
        self.focused = False
        for check in self.list_checks:
            check.update()
            # Only visible, unchecked children influence the aggregate state.
            if not check.hide and not check.checked:
                if not check.checked:
                    self.checked = False
                if not self.focused:
                    self.focused = check.focused
                if check.state == ConditionsType.LOGIC:
                    self.logic_cpt += 1
                else:
                    self.all_logic = False
        font = self.map.tracker.core_service.get_font("mapFont")
        map_font_path = os.path.join(self.map.tracker.core_service.get_tracker_temp_path(), font["Name"])
        font_number = self.map.tracker.core_service.get_font("mapFontChecksNumber")
        font_path = os.path.join(self.map.tracker.core_service.get_tracker_temp_path(), font_number["Name"])
        groups_datas = self.map.tracker.tracker_json_data[4]["SizeGroupChecks"]
        zoom = self.map.tracker.core_service.zoom
        index_positions = self.map.index_positions
        # Pin rectangle in screen coordinates, scaled by the current zoom.
        self.pin_rect = pygame.Rect(
            (index_positions[0] * zoom) + (self.positions["x"] * zoom),
            (index_positions[1] * zoom) + (self.positions["y"] * zoom),
            groups_datas["w"] * zoom,
            groups_datas["h"] * zoom
        )
        # Color key by aggregate state: Done > Logic > HaveLogic > NotLogic.
        color = "Done" if self.checked else (
            "Logic" if self.all_logic else ("HaveLogic" if self.logic_cpt > 0 else "NotLogic"))
        self.pin_color = self.map.tracker.core_service.get_color_from_font(font, color)
        temp_surface = pygame.Surface((0, 0), pygame.SRCALPHA, 32).convert_alpha()
        self.surface_logic_indicator, self.position_logic_indicator = MainMenu.MainMenu.draw_text(
            text=f"{self.logic_cpt}",
            font_name=font_path,
            color=self.map.tracker.core_service.get_color_from_font(font_number, "Normal"),
            font_size=font_number["Size"] * zoom,
            surface=temp_surface,
            position=(self.pin_rect.x, self.pin_rect.y),
            outline=0.5 * zoom
        )
        # Center the counter text inside the pin rectangle (with small tweaks).
        rect = self.surface_logic_indicator.get_rect()
        x_number = self.pin_rect.x + (self.pin_rect.w / 2) - (rect.w / 2) + (0.5 * zoom)
        y_number = self.pin_rect.y + (self.pin_rect.h / 2) - (rect.h / 2) + (1.5 * zoom)
        self.position_logic_indicator = (x_number, y_number)

    def draw(self, screen):
        """Draw the pin (and its counter when > 0) unless all children are hidden."""
        if not self.all_check_hidden():
            font = self.map.tracker.core_service.get_font("mapFont")
            border_color = self.map.tracker.core_service.get_color_from_font(font, "Focused") if self.focused else (0, 0, 0)
            self.draw_rect(screen, self.pin_color, border_color, self.pin_rect, 2 * self.map.tracker.core_service.zoom)
            if self.logic_cpt > 0:
                screen.blit(self.surface_logic_indicator, self.position_logic_indicator)

    def left_click(self, mouse_position):
        """Open this block's check list on the map (if none is already open)."""
        if not self.map.current_block_checks:
            pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
            self.update()
            self.map.current_block_checks = self
            self.map.update()

    def right_click(self, mouse_position):
        """Toggle checked state of all children to the opposite of the aggregate."""
        tracker = None
        for check in self.list_checks:
            check.checked = not self.checked
            check.update()
            tracker = check.tracker
        self.update()
        if tracker:
            tracker.current_map.update()

    def wheel_click(self, mouse_position):
        """Toggle the focused flag of every child check."""
        tracker = None
        for check in self.list_checks:
            check.focused = not check.focused
            check.update()
            tracker = check.tracker
        self.update()
        if tracker:
            tracker.current_map.update()

    def get_rect(self):
        return self.pin_rect

    @staticmethod
    def draw_rect(surface, fill_color, outline_color, rect, border=1):
        # Outline first, then an inset fill: produces a `border`-pixel frame.
        surface.fill(outline_color, rect)
        surface.fill(fill_color, rect.inflate(-border * 2, -border * 2))

    def get_data(self):
        """Serialize this block (id, name, per-child data) for save files."""
        checks_datas = []
        for check in self.list_checks:
            checks_datas.append(check.get_data())
        data = {
            "id": self.id,
            "name": self.name,
            "checks_datas": checks_datas
        }
        return data

    def set_data(self, datas):
        """Restore child check state from data produced by get_data()."""
        i = 0
        for data in datas["checks_datas"]:
            for check in self.list_checks:
                # Match children by (id, name) since order is not guaranteed.
                if (check.id == data["id"]) and (check.name == data["name"]):
                    i = i + 1
                    check.set_data(data)
                    break

    def all_check_hidden(self):
        """Return True when every child check is hidden."""
        hidden = 0
        for check in self.list_checks:
            if check.hide:
                hidden = hidden + 1
        return hidden == len(self.list_checks)
{
"api_name": "Entities.Maps.SimpleCheck.SimpleCheck",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Entities.Maps.SimpleCheck.SimpleCheck.__init__",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Entities.Maps.SimpleCheck.SimpleCheck",
"line_number": 11... |
23186461899 | """FECo3: Python bindings to a .fec file parser written in Rust."""
from __future__ import annotations
import os
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, NamedTuple
from . import _feco3, _version
if TYPE_CHECKING:
import pyarrow as pa
__version__ = _version.get_version()
"""Version string for this package."""
class Header(NamedTuple):
    """The header of a [FecFile][feco3.FecFile].

    Attributes:
        fec_version: The version of the FEC file format.
        software_name: The name of the software that generated the file.
        software_version: The version of the software that generated the file.
            This isn't present in some older FEC files.
        report_id: If this .fec file is an amendment to a previous filing,
            the filing number of the original.
        report_number: If this .fec file is an amendment to a previous filing,
            which number amendment this is (1, 2, 3 etc)
    """

    fec_version: str
    software_name: str
    software_version: str | None
    report_id: str | None
    report_number: str | None
class Cover(NamedTuple):
    """The Cover Line of an [FecFile][feco3.FecFile].

    Attributes:
        form_type: The form type of the filing, eg. "F3"
        filer_committee_id: The FEC-assigned ID of the committee that filed the report,
            eg "C00618371"
    """

    form_type: str
    filer_committee_id: str
class FecFile:
    """An FEC file.

    Reading and parsing are lazy: nothing is fetched or parsed until one of
    the properties (``header``/``cover``) or output methods is used.
    """

    def __init__(self, src: str | os.PathLike) -> None:
        """Create a new FecFile.

        This doesn't do any reading or parsing until you access one of the members.

        Args:
            src: A path or a URL to an FEC file.
                If a string that starts with "http://" or "https://", it will be
                treated as a URL. Otherwise, it will be treated as a path.
        """
        # str.startswith accepts a tuple of prefixes: one call covers both schemes.
        if isinstance(src, str) and src.startswith(("http://", "https://")):
            self._src = src
            self._wrapped = _feco3.FecFile.from_https(self._src)
        else:
            self._src = Path(src)
            self._wrapped = _feco3.FecFile.from_path(self._src)

    @cached_property
    def header(self) -> Header:
        """The [Header][feco3.Header] of the FEC file.

        The first time this is accessed, the FEC file will be read and parsed as
        far as needed. Subsequent accesses will return the same object.
        """
        h = self._wrapped.header
        return Header(
            fec_version=h.fec_version,
            software_name=h.software_name,
            software_version=h.software_version,
            report_id=h.report_id,
            report_number=h.report_number,
        )

    @cached_property
    def cover(self) -> Cover:
        """The [Cover][feco3.Cover] of the FEC file.

        The first time this is accessed, the FEC file will be read and parsed as
        far as needed. Subsequent accesses will return the same object.
        """
        c = self._wrapped.cover
        return Cover(
            form_type=c.form_type,
            filer_committee_id=c.filer_committee_id,
        )

    def to_parquets(self, out_dir: str | os.PathLike) -> None:
        """Write all itemizations in this FEC file to parquet files.

        There will be one parquet file for each record type, eg. ``sa11.parquet``.
        """
        parser = _feco3.ParquetProcessor(out_dir)
        parser.process(self._wrapped)

    def to_csvs(self, out_dir: str | os.PathLike) -> None:
        """Write all itemizations in this FEC file to CSV files.

        There will be one CSV file for each record type, eg. ``sa11.csv``.
        """
        parser = _feco3.CsvProcessor(out_dir)
        parser.process(self._wrapped)

    def __repr__(self) -> str:
        src_str = f"src={self._src!r}"
        return f"{self.__class__.__name__}({src_str})"
# Default rows per yielded RecordBatch in PyarrowBatcher.
# This is what rust parquet uses as a batch size
# https://docs.rs/parquet/40.0.0/src/parquet/file/properties.rs.html#83
DEFAULT_PYARROW_RECORD_BATCH_MAX_SIZE = 1024 * 1024
class ItemizationBatch(NamedTuple):
    """A batch of itemizations.

    Attributes:
        code: The code of the itemization type, eg. "SA11AI"
        records: A [pyarrow.RecordBatch][pyarrow.RecordBatch] of itemizations.
    """

    code: str
    records: pa.RecordBatch
class PyarrowBatcher:
    """
    Iterates an [FecFile](feco3.FecFile) and yields [ItemizationBatch](feco3.ItemizationBatch)s of itemizations.
    """  # noqa: E501

    def __init__(self, fec_file: FecFile, max_batch_size: int | None = None) -> None:
        """Create a new PyarrowBatcher.

        Args:
            fec_file: The [FecFile][feco3.FecFile] to iterate.
            max_batch_size: The max rows per [pyarrow.RecordBatch][pyarrow.RecordBatch].
                Defaults to 1024 * 1024, which is what rust parquet uses.
        """
        self._fec_file = fec_file
        if max_batch_size is None:
            max_batch_size = DEFAULT_PYARROW_RECORD_BATCH_MAX_SIZE
        self._wrapped = _feco3.PyarrowBatcher(max_batch_size)

    def __iter__(self) -> PyarrowBatcher:
        return self

    def __next__(self) -> ItemizationBatch:
        """Get the next batch of itemizations from the FEC file."""
        # The wrapped Rust batcher returns None when the file is exhausted.
        pair = self._wrapped.next_batch(self._fec_file._wrapped)
        if pair is None:
            raise StopIteration
        code, batch = pair
        return ItemizationBatch(code, batch)
| NickCrews/feco3 | python/src/feco3/__init__.py | __init__.py | py | 5,512 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "os.PathLike"... |
73549678908 | __doc__ = """
Script for collection of training data for deep learning image recognition.
Saving standardised pictures of detected faces from webcam stream to given folder.
Ver 1.1 -- collect_faces.py
Author: Aslak Einbu February 2020.
"""
import os
import cv2
import datetime
import imutils
import time
import numpy as np
# Loading neural net model for face detection
# Loading neural net model for face detection
net = cv2.dnn.readNetFromCaffe("model/face_detect/deploy.prototxt.txt",
                               "model/face_detect/res10_300x300_ssd_iter_140000.caffemodel")

# Setting folder name for saving of detected images.
person = input("Hvem er personen?")
bildepath = f'/tmp/dnn/{person}'
# exist_ok avoids the race between the existence check and directory creation,
# and replaces the redundant re-assignment of bildepath that followed it.
os.makedirs(bildepath, exist_ok=True)
def main():
    """
    Analysing webcam video stream and displaying detected faces.
    Applies deep neural net model for detection of faces in an image.
    Saves images of detected faces to given folder (stops saving after 1000 pictures).
    """
    antall = 0                       # number of face images saved so far
    sistetid = time.time()           # timestamp of the last saved image
    stdtxt = "Ingen fjes observert!"
    dcttxt = "Fjes observert!"
    noen = False                     # True once at least one face has been seen
    camera = cv2.VideoCapture(0)
    print("Analyserer webcam bildestrøm...")
    print(f'Lagrer alle passfoto i {bildepath}.')
    while True:
        (grabbed, frame) = camera.read()
        if not grabbed:
            break
        detekt_txt = stdtxt
        frame = imutils.resize(frame, width=500)
        lager = frame.copy()
        # Detecting faces:
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        net.setInput(blob)
        detections = net.forward()
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            # Skip weak detections below the 70 % confidence threshold.
            if confidence < 0.7:
                continue
            detekt_txt = dcttxt
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            text = "{:.1f}%".format(confidence * 100)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          (0, 0, 255), 2)
            cv2.putText(frame, text, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
            try:
                fjes = lager[startY:endY, startX:endX]
                fjes = cv2.resize(fjes, (100, 120))
                # Saving image of face (at most one every 0.5 s, max 1000 total).
                if (time.time() - sistetid) > 0.5:
                    sistetid = time.time()
                    if antall < 1000:
                        cv2.imwrite(f'{bildepath}/{str(time.time())}.jpg', fjes)
                        antall = antall + 1
                        print(f'\rAntall bilder lagra: {antall}', sep='', end='', flush=True)
            except Exception:
                # Cropping/resizing can fail when the box falls partly outside the
                # frame; skip that detection. (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                pass
            noen = True
        if (noen):
            try:
                # Show the most recently captured face in the corner of the frame.
                frame[255:375, 0:100] = fjes
                cv2.putText(frame, "Siste person", (10, 270), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1)
            except Exception:
                pass
        cv2.putText(frame, detekt_txt, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.putText(frame, datetime.datetime.now().strftime(" %d %B %Y %I:%M:%S%p"), (4, 40), cv2.FONT_HERSHEY_SIMPLEX,
                    0.4, (0, 0, 0), 1)
        cv2.putText(frame, f'Bilder lagra:{antall}', (10, 250), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1)
        cv2.imshow("Fjes", frame)
        cv2.moveWindow("Fjes", 1450, 100)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
main()
| aslake/family_deep_learning | collect_faces.py | collect_faces.py | py | 3,826 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
19092857489 | from collections import namedtuple
import csv
import gzip
import logging
import sys
import urllib.parse
# Allow very large CSV fields -- a whole reading session can exceed the default limit.
csv.field_size_limit(sys.maxsize)
logging.basicConfig(level=logging.INFO)

# One cross-language switch event -- presumably (source lang, target lang, reader
# country, Wikidata QID, title, time, user type, count); TODO confirm field
# semantics against downstream consumers.
Switch = namedtuple("Switch", ['srclang', 'targetlang', 'country', 'qid', 'title', 'datetime', 'usertype', 'title_country_src_count'])
# One user's reading session: hashed user id, IP country, ordered pageviews,
# and whether the user attempted an edit ('reader' vs 'editor').
Session = namedtuple('Session', ['usrhash', 'country', 'pageviews', 'usertype'])
# A single pageview: datetime, wiki project, page title, Wikidata item, referer class.
Pageview = namedtuple('Pageview', ['dt', 'proj', 'title', 'wd', 'referer'])
# Sentinel page title marking an edit attempt in the pageview log (see tsv_to_sessions).
EDIT_STR = "EDITATTEMPT"
usertypes = ['reader', 'editor']
def tsv_to_sessions(tsv, trim=False):
    """Convert a gzipped TSV file of pageviews into reader sessions.

    Each line corresponds to a pageview and the file is sorted by user and then time.
    Field order is: hashed user ID, wikipedia project, page title, page ID, datetime,
    IP country, referer, Wikidata ID. For example:
        00000a5795ba512... enwiki Columbidae 63355 2019-02-16T11:31:53 Norway https://www.google.com/ Q10856
        00000a5795ba512... enwiki Anarchism 12 2019-02-16T11:32:05 Norway https://en.wikipedia.org/ Q6199
    yields one Session per user:
        session.usrhash = '00000a5795ba512...'
        session.country = 'Norway'
        session.pageviews = [Pageview(dt='2019-02-16T11:31:53', proj='enwiki', title='Columbidae', wd='Q10856', referer='google'),
                             Pageview(dt='2019-02-16T11:32:05', proj='enwiki', title='Anarchism', wd='Q6199', referer='enwiki')]

    Parameters:
        tsv: path to the gzipped TSV file; its first line must match the expected header.
        trim: if True, drop duplicate (project, title) views within each session.

    Yields:
        Session namedtuples. usertype is 'editor' when any of the user's rows carries
        the EDITATTEMPT sentinel title, 'reader' otherwise.
    """
    expected_header = ['user', 'project', 'page_title', 'page_id', 'dt', 'country', 'referer', 'item_id']
    usr_idx = expected_header.index('user')
    proj_idx = expected_header.index('project')
    title_idx = expected_header.index('page_title')
    dt_idx = expected_header.index('dt')
    country_idx = expected_header.index("country")
    referer_idx = expected_header.index('referer')
    wd_idx = expected_header.index("item_id")
    malformed_lines = 0
    num_lines = 0
    with gzip.open(tsv, 'rt') as fin:
        assert next(fin).strip().split("\t") == expected_header
        curr_usr = None
        country = None
        usertype = 'reader'
        session = []
        # enumerate from 1 so num_lines ends as the true count of data lines
        # (previously the 0-based index was printed, under-reporting by one)
        for num_lines, line in enumerate(fin, start=1):
            line = line.strip().split("\t")
            try:
                usr = line[usr_idx]
                proj = line[proj_idx]
                title = line[title_idx]
                dt = line[dt_idx]
                ref = ref_class(line[referer_idx])
            except IndexError:
                malformed_lines += 1
                continue
            try:
                wd_item = line[wd_idx]
            except IndexError:
                # Wikidata ID column is optional; treat a short row as "no item"
                wd_item = None
            pv = Pageview(dt, proj, title, wd_item, ref)
            if usr == curr_usr:
                if title == EDIT_STR:
                    # sentinel row: flags the user as an editor, is not a real pageview
                    usertype = 'editor'
                else:
                    session.append(pv)
            else:
                # new user encountered: flush the previous user's session first
                if curr_usr:
                    if trim:
                        trim_session(session)
                    yield Session(curr_usr, country, session, usertype=usertype)
                curr_usr = usr
                country = line[country_idx]
                if title == EDIT_STR:
                    usertype = 'editor'
                    session = []
                else:
                    usertype = 'reader'
                    session = [pv]
        # flush the final user's session
        if curr_usr:
            if trim:
                trim_session(session)
            yield Session(curr_usr, country, session, usertype=usertype)
    print("{0} total lines. {1} malformed.".format(num_lines, malformed_lines))
def ref_class(referer):
    """Map a raw referer URL to a coarse referer class.

    Wikipedia hosts become a wiki-db style name ('en.wikipedia.org' -> 'enwiki',
    'pt-br.wikipedia.org' -> 'pt_brwiki'), any Google host becomes 'google',
    and anything else is the bare domain with a leading 'www.' stripped.
    """
    host = urllib.parse.urlparse(referer).netloc
    if 'wikipedia' in host:
        lang = host.split('.')[0].replace('-', '_')
        return lang + 'wiki'
    if 'google' in host:
        return 'google'
    return host[4:] if host.startswith('www.') else host
def trim_session(pvs):
    """Remove duplicate page views (matching project and title).

    Only the first view of each (project, title) pair is kept; later repeats
    are dropped. The list is modified in place and nothing is returned.

    Parameters:
        pvs: list of page view objects for a given reader's session
    """
    seen = set()
    kept = []
    for pv in pvs:
        pv_id = '{0}-{1}'.format(pv.proj, pv.title)
        if pv_id not in seen:
            seen.add(pv_id)
            kept.append(pv)
    # in-place slice assignment so callers holding a reference see the change
    pvs[:] = kept
def get_lang_switch(pvs, wikidbs=(), ref_match=False):
    """Get pairs of page views that are language switches.
    Parameters:
        pvs: list of page view objects for a given reader's session
        wikidbs: if empty, all language switches return. Otherwise, only language switches that involve languages
                 included in wikidbs will be retained.
        ref_match: if True, only count a pair when the second view's referer equals the first view's
                   project -- i.e. the user demonstrably arrived from that wiki.
    Returns:
        switches: list of tuples, where each tuple corresponds to two page views of a single Wikidata item
                  across two different projects.
    If a session is:
      [(dt='2019-02-16T11:31:53', proj='enwiki', title='Columbidae', wd='Q10856'),
       (dt='2019-02-16T11:32:05', proj='enwiki', title='Anarchism', wd='Q6199'),
       (dt='2019-02-16T11:32:13', proj='eswiki', title='Columbidae', wd='Q10856')]
    Then the switches would be of the form [(0, 2)]
    """
    switches = []
    # at least two different projects viewed in the session
    if len(set([p.proj for p in pvs])) > 1:
        # find all wikidata items viewed in multiple languages
        # preserve which one was viewed first
        for i in range(0, len(pvs) - 1):
            for j in range(i+1, len(pvs)):
                diff_proj = pvs[i].proj != pvs[j].proj
                same_item = pvs[i].wd and pvs[i].wd == pvs[j].wd
                if diff_proj and same_item:
                    if not wikidbs or pvs[i].proj in wikidbs or pvs[j].proj in wikidbs:
                        if ref_match:
                            if pvs[i].proj == pvs[j].referer:
                                switches.append((i, j))
                        else:
                            switches.append((i, j))
                        # only the first qualifying later view of this item is paired
                        # with i; note the break sits inside the wikidbs check, so a
                        # pair rejected by the wikidbs filter keeps scanning j
                        break
    return switches
def get_nonlang_switch(pvs, wikidb, switches=(), direction="from"):
    """Get page views in a language that are not switches of the specified direction.
    Finds pages in a language that the user did not switch to/from (depending on direction parameter).
    User must have at least one language switch with specified wikidb and direction in their session though
    to indicate that they might have switched.
    Parameters:
        pvs: list of page view objects for a given reader's session
        wikidb: Only language non-switches that involve this language will be retained.
        switches: if precalculated, this speeds up processing
        direction: "from" indicates the language switch must have had wikidb as the origin project.
                   "to" indicates the language switch must have had wikidb as the destination project.
    Returns:
        no_switches: list of page view indices.
    For this session and wikidb = "enwiki" and direction = "from":
      [(dt=2019-02-16T11:31:53, proj=enwiki, title='Columbidae', wd='Q10856'),
       (dt=2019-02-16T11:32:05, proj=enwiki, title='Anarchism', wd='Q6199'),
       (dt=2019-02-16T11:32:13, proj=eswiki, title='Columbidae', wd='Q10856')]
    Then the no_switches would be of the form: [1]
    If direction was "to" or wikidb was "eswiki" then no page views would be returned.
    """
    no_switches = []
    # at least two different projects viewed in the session
    if len(set([p.proj for p in pvs])) > 1:
        if switches:
            all_switches = switches
        else:
            all_switches = get_lang_switch(pvs, [wikidb])
        # did user have any switches of form:
        # direction == "from": wikidb -> other language
        # direction == "to": other language -> wikidb
        dir_switches_in_lang = set()
        for f,t in all_switches:
            # switched from wikidb -> other project
            if direction == "from" and pvs[f].proj == wikidb:
                dir_switches_in_lang.add(f)
            # switched from other project -> wikidb
            elif direction == "to" and pvs[t].proj == wikidb:
                dir_switches_in_lang.add(t)
        if dir_switches_in_lang:
            # the user did switch at least once in this direction, so every other
            # wikidb pageview of theirs counts as a "could have switched but didn't"
            for i in range(0, len(pvs)):
                if pvs[i].proj == wikidb and i not in dir_switches_in_lang:
                    no_switches.append(i)
    return no_switches | geohci/language-switching | session_utils.py | session_utils.py | py | 8,958 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "csv.field_size_limit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",... |
43346750218 | if __name__ == '__main__':
    # Entry point: download the MySQL DB backup from the OVH "master" instance,
    # then rsync each worker instance's experiment directory to the local tree.
    from ovh import *
    import argparse
    import logging
    logger = logging.getLogger("ovh/download_db")
    parser = argparse.ArgumentParser(description='Creates N workers on the OVH cloud.')
    parser.add_argument('--db-name', default='Contrastive_DPG_v2', help='name for MySQL DB')
    parser.add_argument('--db-path', default='../databases/', help='Path to database backup files')
    args = parser.parse_args()
    # get_nova_client / get_master_instance / get_ssh_client / download_db /
    # rsync_experiments all come from the `ovh` star-import above.
    novac = get_nova_client()
    master = get_master_instance(novac)
    ssh_master = get_ssh_client(master)
    download_db(ssh_master, args.db_name, args.db_path)
    for instance in novac.servers.list():
        logger.info(f"Downloading experiments from {instance.name}")
        # "Ext-Net" is the instance's external network; use its first address
        rsync_experiments(
            instance.addresses["Ext-Net"][0]["addr"],
            local_experiments_path=f'../experiments/remote/{instance.name}'
        )
| charleswilmot/Contrastive_DPG | src/ovh_download_db.py | ovh_download_db.py | py | 899 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
}
] |
40128549954 | # included from libs/mincostflow.py
"""
Min Cost Flow
"""
# derived: https://atcoder.jp/contests/practice2/submissions/16726003
from heapq import heappush, heappop
class MinCostFlow():
    """Successive-shortest-paths min-cost flow with Johnson potentials.

    graph[v] holds mutable edge records [to, rev_index, remaining_cap, cost];
    pos remembers where each user-added edge lives so get_edge() can find it.
    Dijkstra is used for the shortest-path step, so edge costs are assumed
    non-negative (true for the reduced costs once potentials are maintained).
    """
    def __init__(self, n):
        # n: number of vertices (0..n-1)
        self.n = n
        self.graph = [[] for _ in range(n)]
        self.pos = []
    def add_edge(self, fr, to, cap, cost):
        """Add directed edge fr->to with capacity cap and unit cost; also adds
        the zero-capacity reverse edge. Returns the edge id for get_edge()."""
        #assert 0 <= fr < self.n
        #assert 0 <= to < self.n
        m = len(self.pos)
        self.pos.append((fr, len(self.graph[fr])))
        self.graph[fr].append([to, len(self.graph[to]), cap, cost])
        self.graph[to].append([fr, len(self.graph[fr]) - 1, 0, -cost])
        return m
    def get_edge(self, idx):
        """Return (fr, to, total_cap, flow, cost) for edge idx; the flow pushed
        so far equals the reverse edge's accumulated capacity."""
        #assert 0 <= idx < len(self.pos)
        to, rev, cap, cost = self.graph[self.pos[idx][0]][self.pos[idx][1]]
        _rev_to, _rev_rev, rev_cap, _rev_cost = self.graph[to][rev]
        return self.pos[idx][0], to, cap + rev_cap, rev_cap, cost
    def edges(self):
        """Iterate all user-added edges as (fr, to, cap, flow, cost)."""
        for i in range(len(self.pos)):
            yield self.get_edge(i)
    def dual_ref(self, s, t):
        # One Dijkstra pass on reduced costs: updates the potentials self.dual
        # and records the shortest-path tree in self.pv (parent vertex) and
        # self.pe (parent edge index). Returns False when t is unreachable.
        dist = [2**63 - 1] * self.n
        dist[s] = 0
        vis = [0] * self.n
        self.pv = [-1] * self.n
        self.pe = [-1] * self.n
        queue = []
        heappush(queue, (0, s))
        while queue:
            k, v = heappop(queue)
            if vis[v]:
                continue
            vis[v] = True
            if v == t:
                break
            for i in range(len(self.graph[v])):
                to, _rev, cap, cost = self.graph[v][i]
                if vis[to] or cap == 0:
                    continue
                # reduced cost; non-negative thanks to the maintained potentials
                cost += self.dual[v] - self.dual[to]
                if dist[to] - dist[v] > cost:
                    dist[to] = dist[v] + cost
                    self.pv[to] = v
                    self.pe[to] = i
                    heappush(queue, (dist[to], to))
        if not vis[t]:
            return False
        # fold the new shortest distances into the potentials
        for v in range(self.n):
            if not vis[v]:
                continue
            self.dual[v] -= dist[t] - dist[v]
        return True
    def flow(self, s, t):
        """Push as much flow as possible; return the final (flow, cost) pair."""
        return self.flow_with_limit(s, t, 2**63 - 1)
    def flow_with_limit(self, s, t, limit):
        """Like flow() but stop once *limit* units have been pushed."""
        return self.slope_with_limit(s, t, limit)[-1]
    def slope(self, s, t):
        """Return the flow/cost frontier as a list of (flow, cost) points."""
        return self.slope_with_limit(s, t, 2**63 - 1)
    def slope_with_limit(self, s, t, limit):
        #assert 0 <= s < self.n
        #assert 0 <= t < self.n
        #assert s != t
        flow = 0
        cost = 0
        prev_cost = -1
        res = [(flow, cost)]
        self.dual = [0] * self.n
        while flow < limit:
            if not self.dual_ref(s, t):
                break
            # bottleneck capacity along the recorded shortest path
            c = limit - flow
            v = t
            while v != s:
                c = min(c, self.graph[self.pv[v]][self.pe[v]][2])
                v = self.pv[v]
            # push c units along the path, updating residual capacities
            v = t
            while v != s:
                _to, rev, _cap, _ = self.graph[self.pv[v]][self.pe[v]]
                self.graph[self.pv[v]][self.pe[v]][2] -= c
                self.graph[v][rev][2] += c
                v = self.pv[v]
            # -dual[s] is the cost per unit along the current shortest path
            d = -self.dual[s]
            flow += c
            cost += c * d
            # NOTE(review): prev_cost is assigned the cumulative cost but compared
            # against the marginal cost d below; this mirrors the derived source —
            # verify against the ACL reference if slope() breakpoints matter.
            if prev_cost == d:
                res.pop()
            res.append((flow, cost))
            prev_cost = cost
        return res
# end of libs/mincostflow.py
# included from snippets/main.py
def debug(*values, msg=""):
    """Print *values* to stderr (prefixed with *msg*) so stdout stays clean."""
    from sys import stderr
    print(msg, *values, file=stderr)
def solve(N, M, AS, BS, RS):
    """Solve the assignment via min-cost flow on a small bipartite network.

    Network layout (node ids): members are 0..N-1, start = N, goal = N+1 and
    three per-round nodes at N+2..N+4. Each round node gets capacity M from
    the source; round i -> member j costs INF - (A_j * B_j^(i+1) mod R_i), and
    member j -> goal carries three unit edges priced at the marginal values
    cs[k] - cs[k-1] so repeated use of a member is charged incrementally.
    The INF offset keeps the Dijkstra-based solver's costs non-negative and is
    subtracted back out of the final answer.
    """
    global mcf
    INF = 10 ** 5
    mcf = MinCostFlow(N + 5)
    start = N
    goal = N + 1
    round = N + 2  # NOTE: shadows the builtin round() inside this function
    for i in range(3):
        mcf.add_edge(start, round + i, M, 0)
    for i in range(3):
        for j in range(N):
            r = AS[j] * (BS[j] ** (i + 1)) % RS[i]
            mcf.add_edge(round + i, j, 1, INF - r)
    for j in range(N):
        cs = [AS[j] * (BS[j] ** (k + 1)) for k in range(3)]
        cs.append(0)
        for k in range(3):
            # marginal cost of the k-th use of member j (cs[-1] == 0 via cs[k-1])
            c = cs[k] - cs[k-1]
            mcf.add_edge(j, goal, 1, c)
    return INF * (3 * M) - mcf.flow(start, goal)[-1]
def main():
    """Read the problem input (N, M and the A/B/R arrays) and print the answer.

    NOTE: ``input`` here is the module-level name, rebound to
    sys.stdin.buffer.readline in the __main__ block (or by as_input() in
    tests), so each call returns bytes; map(int, ...) handles bytes fine.
    """
    # parse input
    N, M = map(int, input().split())
    AS = list(map(int, input().split()))
    BS = list(map(int, input().split()))
    RS = list(map(int, input().split()))
    print(solve(N, M, AS, BS, RS))
# tests
T1 = """
2 1
3 2
3 3
100000 100000 100000
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
81
"""
T2 = """
4 2
2 4 3 3
4 2 3 3
100000 100000 100000
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
210
"""
T3 = """
20 19
3 2 3 4 3 3 2 3 2 2 3 3 4 3 2 4 4 3 3 4
2 3 4 2 4 3 3 2 4 2 4 3 3 2 3 4 4 4 2 2
3 4 5
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
-1417
"""
def _test():
    """Run module-level doctests, then every TEST_* docstring found in globals."""
    import doctest
    doctest.testmod()
    g = globals()
    for k in sorted(g):
        if k.startswith("TEST_"):
            # each TEST_* constant holds a doctest script (usually paired with
            # an as_input() call to feed the corresponding T* sample input)
            print(k)
            doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
    """Use in tests: rebind input()/read() so they consume the given string.

    The lambdas return bytes to match the sys.stdin.buffer bindings set up in
    the __main__ block, so main() behaves identically under test.
    """
    import io
    f = io.StringIO(s.strip())
    g = globals()
    g["input"] = lambda: bytes(f.readline(), "ascii")
    g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
    import sys
    # fast binary stdin; main()/solve() rely on these module-level bindings
    input = sys.stdin.buffer.readline
    read = sys.stdin.buffer.read
    sys.setrecursionlimit(10 ** 6)
    if sys.argv[-1] == "-t":
        # `script.py -t` runs the embedded doctests instead of solving
        print("testing")
        _test()
        sys.exit()
    main()
    sys.exit()
# end of snippets/main.py
| nishio/atcoder | PAST3/o.py | o.py | py | 5,401 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "heapq.heappush",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_nu... |
74179568189 | '''
To run test: move into same directory as spotify_api.py file
'''
import unittest
import spotify_api
import spotipy
import pandas as pd
from spotipy.oauth2 import SpotifyClientCredentials
# SECURITY NOTE(review): real-looking Spotify API credentials are committed in
# plain text below; they should be revoked and loaded from environment
# variables or an untracked config file instead.
client_id = 'ea776b5b86c54bd188d71ec087b194d3'
client_secret = '1e0fcbac137c4d3eb2d4cc190693792a' # keep this hidden
redirect_uri = 'http://localhost:' # will be changed; unused by the client-credentials flow below
class TestSpotify(unittest.TestCase):
    """Integration tests for spotify_api — these call the live Spotify Web API.

    NOTE(review): the expected counts asserted below (5, 58, 13, ...) are
    snapshots of the artist's catalogue at the time of writing and will drift
    as new releases appear.
    """
    # Shared spotipy client, created once for the whole class in setUpClass.
    client_credentials_manager = None
    sp = None
    @classmethod
    def setUpClass(cls):
        # Authenticate once with the client-credentials flow and reuse the
        # client across all test methods.
        cls.client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
        cls.sp = spotipy.Spotify(client_credentials_manager=cls.client_credentials_manager)
    def testGetArtistInfoReturns(self):
        """get_artist_info should return something non-None for a known artist."""
        an_dict = dict()
        au_dict = dict()
        artist = "Bad Suns"
        info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
        self.assertIsNotNone(info)
    def testGetArtistInfo(self):
        """get_artist_info should return one entry per album."""
        an_dict = dict()
        au_dict = dict()
        artist = "Bad Suns"
        info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
        self.assertEqual(5, len(info)) # snapshot: number of albums recorded
    def testArtistToDF(self):
        """artist_to_csv should yield one row per track."""
        an_dict = dict()
        au_dict = dict()
        artist = "Bad Suns"
        info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
        df = spotify_api.artist_to_csv("Bad Suns", info)
        self.assertEqual(58, len(df)) # snapshot: number of tracks recorded
    def testDFToDict(self):
        """artist_df_to_dict should produce the expected number of entries."""
        an_dict = dict()
        au_dict = dict()
        artist = "Bad Suns"
        info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
        df = spotify_api.artist_to_csv("Bad Suns", info)
        d = spotify_api.artist_df_to_dict(df, "Bad Suns")
        self.assertEqual(13, len(d)) # snapshot: number of entries recorded
    def testDFToSongs(self):
        """artist_df_to_songs should produce the expected number of songs."""
        an_dict = dict()
        au_dict = dict()
        artist = "Bad Suns"
        info = spotify_api.get_artist_info(self.sp, artist, an_dict, au_dict)
        df = spotify_api.artist_to_csv("Bad Suns", info)
        songs = spotify_api.artist_df_to_songs(df, "Bad Suns")
        self.assertEqual(13, len(songs)) # snapshot: number of songs recorded
unittest.main()
| dylanmccoy/songtrackr | tests/spotify_unittest.py | spotify_unittest.py | py | 2,431 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "spotipy.oauth2.SpotifyClientCredentials",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "spotipy.Spotify",
"line_number": 23,
"usage_type": "call"
},
{
"ap... |
70327957627 | #!/usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from math import sqrt, atan2, exp, atan, cos, sin, acos, pi, asin, atan2, floor
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from time import sleep
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
global x_n, y_n, theta_n
class Node():
    """A single grid cell for the A* search.

    value marks occupancy (1 is treated as an obstacle by the planner),
    gCost/hCost are the usual A* path and heuristic costs, and parent links
    back along the best-known path.
    """

    def __init__(self, value, x, y):
        # Grid occupancy and (row, col) coordinates.
        self.value = value
        self.x = x
        self.y = y
        # A* bookkeeping, filled in during the search.
        self.gCost = 0
        self.hCost = 0
        self.parent = None
        self.path = None

    def fCost(self):
        """Total estimated cost through this cell (g + h)."""
        return self.gCost + self.hCost
class Astar():
    """Grid-based A* path planner over a 2D occupancy grid of Node cells.

    The grid uses (row, col) indexing; world coordinates are converted with
    cell_size, with world y negated relative to grid rows.
    """

    def __init__(self, map, goal, start, cell_size):
        """map: 2D list of Node; goal: world (x, y); start: grid (row, col);
        cell_size: metres per grid cell."""
        self.map = map
        self.goal = goal
        self.start = start
        self.cell_size = cell_size
        self.goal2d = self.goal_to_node2d()
        self.rows = len(self.map)
        self.cols = len(self.map[0])
        self.targetNode = self.map[self.goal2d[0]][self.goal2d[1]]

    def goal_to_node2d(self):
        """Convert the world-frame goal (x, y) into grid indices.

        NOTE: the int dtype of np.array([0, 0]) truncates the divisions toward
        zero, matching the original behaviour.
        """
        goal2d = np.array([0, 0])
        goal2d[0] = self.goal[0] / self.cell_size
        goal2d[1] = -self.goal[1] / self.cell_size
        return goal2d

    def node2d_to_goal(self, cell):
        """Convert a grid cell back to the world-frame centre of that cell.

        Fixed to use self.cell_size: the original read a module-level
        ``cell_size`` global, which only worked when the calling script
        happened to define one with the same value.
        """
        x = cell.x * self.cell_size + self.cell_size / 2
        y = -cell.y * self.cell_size - self.cell_size / 2
        return (x, y)

    def isGoalValid(self):
        """Return True when the goal cell is free space (value == 0).

        Obstacle cells (value == 1) and any unexpected value return False;
        the original fell through returning None for unexpected values, which
        callers already treated as falsy.
        """
        return self.map[self.goal2d[0]][self.goal2d[1]].value == 0

    def getNeighbors(self, node):
        """Return the up-to-8 in-bounds neighbours of *node*."""
        neighbors = []
        for dx in range(-1, 2):
            for dy in range(-1, 2):
                if dx == 0 and dy == 0:
                    continue  # skip the node itself
                checkX = node.x + dx
                checkY = node.y + dy
                if 0 <= checkX < self.rows and 0 <= checkY < self.cols:
                    neighbors.append(self.map[checkX][checkY])
        return neighbors

    def getDistance(self, nodeA, nodeB):
        """Octile distance: 14 per diagonal step, 10 per straight step."""
        distX = abs(nodeA.x - nodeB.x)
        distY = abs(nodeA.y - nodeB.y)
        if distX > distY:
            return 14 * distY + 10 * (distX - distY)
        return 14 * distX + 10 * (distY - distX)

    def tracePath(self, startNode, endNode):
        """Walk parent links back from endNode; return the start->end path
        (excluding startNode itself)."""
        path = []
        currentNode = endNode
        while currentNode is not startNode:
            path.append(currentNode)
            currentNode = currentNode.parent
        path.reverse()
        return path

    def findPath(self):
        """Run A* from self.start to the goal, storing the result in self.path.

        Leaves self.path unset when the goal is unreachable.
        """
        openSet = []
        closeSet = []
        print(vars(self.map[self.start[0]][self.start[1]]))
        startNode = self.map[self.start[0]][self.start[1]]
        openSet.append(startNode)
        while len(openSet) > 0:
            # pick the open node with the lowest fCost (ties broken by hCost)
            currentNode = openSet[0]
            for i in range(1, len(openSet)):
                if (openSet[i].fCost() < currentNode.fCost()
                        or (openSet[i].fCost() == currentNode.fCost()
                            and openSet[i].hCost < currentNode.hCost)):
                    currentNode = openSet[i]
            openSet.remove(currentNode)
            closeSet.append(currentNode)
            if currentNode.x == self.goal2d[0] and currentNode.y == self.goal2d[1]:
                print('search done')
                self.path = self.tracePath(startNode, self.targetNode)
                return
            neighbors = self.getNeighbors(currentNode)
            for neighbor in neighbors:
                # skip obstacles and already-expanded cells
                if neighbor.value == 1 or (neighbor in closeSet):
                    print('continue')
                    continue
                newMovementCostToNeighbor = currentNode.gCost + self.getDistance(currentNode, neighbor)
                if newMovementCostToNeighbor < neighbor.gCost or not (neighbor in openSet):
                    neighbor.gCost = newMovementCostToNeighbor
                    neighbor.hCost = self.getDistance(neighbor, self.targetNode)
                    neighbor.parent = currentNode
                    if neighbor not in openSet:
                        openSet.append(neighbor)
            print('next')

    def plotGrid(self):
        """Debug dump of the grid: prints one row of gCost values per line.

        NOTE: despite the names, only the gCost rows are printed; the
        occupancy row is built but its print stays commented out.
        """
        for i in range(len(self.map)):
            line = []
            linefCost = []
            for j in range(len(self.map[0])):
                line.append(self.map[i][j].value)
                linefCost.append(self.map[i][j].gCost)
            #print(line)
            print(linefCost)
def refference_trajectory(x_goal, y_goal):
    """Build a constant reference trajectory: hold position at the goal.

    Returns (x_ref, y_ref, Vx_ref, Vy_ref); the feed-forward velocity is zero
    because the target is a fixed point.
    """
    return (x_goal, y_goal, 0, 0)
# Routine that generates the control input
def trajectory_controller(x_ref, y_ref, Vx_ref, Vy_ref, Kp, Usat):
    """Proportional trajectory-tracking controller with command saturation.

    Adds a P term on the position error (against the robot pose globals
    x_n, y_n) to the feed-forward velocity, then rescales the command so its
    magnitude never exceeds Usat.
    """
    global x_n, y_n, theta_n
    Ux = Vx_ref + Kp * (x_ref - x_n)
    Uy = Vy_ref + Kp * (y_ref - y_n)
    magnitude = sqrt(Ux ** 2 + Uy ** 2)
    if magnitude > Usat:
        Ux = Usat * Ux / magnitude
        Uy = Usat * Uy / magnitude
    return (Ux, Uy)
# Feedback linearization routine
def feedback_linearization(Ux, Uy, d):
    """Map a world-frame velocity command (Ux, Uy) to unicycle commands.

    Uses the feedback-linearization point at distance d ahead of the robot
    and the current heading global theta_n; returns (forward velocity,
    angular velocity).
    """
    global x_n, y_n, theta_n
    c = cos(theta_n)
    s = sin(theta_n)
    VX = c * Ux + s * Uy
    WZ = (-s / d) * Ux + (c / d) * Uy
    return (VX, WZ)
# Callback routine that obtains the robot pose
def callback_pose(data):
    """Odometry callback: cache the robot pose in the globals x_n, y_n, theta_n.

    data: nav_msgs/Odometry message; the orientation quaternion is converted
    to Euler angles and only the yaw is kept.
    """
    global x_n, y_n, theta_n
    x_n = data.pose.pose.position.x  # robot 'x' position in the world
    y_n = data.pose.pose.position.y  # robot 'y' position in the world
    x_q = data.pose.pose.orientation.x
    y_q = data.pose.pose.orientation.y
    z_q = data.pose.pose.orientation.z
    w_q = data.pose.pose.orientation.w
    euler = euler_from_quaternion([x_q, y_q, z_q, w_q])
    theta_n = euler[2]  # robot yaw ('theta') in the world
    return
def calcDistance(x_n, y_n, x_d, y_d):
    """Euclidean distance between the points (x_n, y_n) and (x_d, y_d)."""
    dx = x_d - x_n
    dy = y_d - y_n
    return sqrt(dx ** 2 + dy ** 2)
def readImage(cell_size):
    """Load the occupancy map image and build a grid of Node cells.

    cell_size: grid cell edge length in metres. Returns a rows x cols list of
    Node objects whose value is 1 (cell overlaps an obstacle) or 0 (free).
    """
    # NOTE(review): this figure is never used or shown; it looks like a
    # leftover from plotting/debugging.
    fig = plt.figure(figsize=(8,8), dpi=100)
    # Invert so obstacles (dark pixels) become high values.
    img = 1 - mpimg.imread('../worlds/map_1.png')
    # Binarise: make sure the image only contains these two values.
    threshold = 0.5
    img[img > threshold] = 1
    img[img<= threshold] = 0
    map_dims = np.array([60, 60]) # Cave world size in metres
    # Pixels-per-metre scale
    print(img.shape)
    sy, sx = img.shape[0:2] / map_dims
    # Number of grid cells derived from the cell size (in metres)
    rows, cols = (map_dims / cell_size).astype(int)
    #grid = np.zeros((rows, cols))
    grid = [[Node(0,0,0) for x in range(cols)] for y in range(rows)]
    # Fill the grid: mark a cell occupied when any pixel in its patch is set.
    for r in range(rows):
        for c in range(cols):
            xi = int(c*cell_size*sx)
            xf = int(xi + cell_size*sx)
            yi = int(r*cell_size*sy)
            yf = int(yi + cell_size*sy)
            value = np.sum(img[yi:yf,xi:xf])
            if(value > threshold):
                value = 1
            else:
                value = 0
            node = Node(value, r, c)
            grid[r][c] = node
    return grid
def control(poses):
    """Drive the robot through each waypoint in *poses* ((x, y) tuples).

    For every waypoint, loop the P-controller + feedback linearization and
    publish velocity commands until the robot is within 0.5 m, then move on.
    Relies on the globals x_n/y_n updated by the odometry callback and the
    module-level publisher pub_stage.
    """
    # Controller constants for the Stage simulation
    global x_n, y_n
    freq = 100   # control loop rate [Hz]
    Usat = 5     # velocity saturation
    d = 0.8      # feedback-linearization point offset
    Kp = 1       # proportional gain
    # Rate object that keeps this node running at the desired frequency
    rate = rospy.Rate(freq)
    vel = Twist()
    sleep(0.2)
    # The node's program is the code inside this loop
    for pose in poses:
        print(pose)
        x_goal = pose[0]
        y_goal = pose[1]
        dist = calcDistance(x_n,y_n,x_goal,y_goal)
        while(dist > 0.5):
            [x_ref, y_ref, Vx_ref, Vy_ref] = refference_trajectory(x_goal, y_goal)
            [Ux, Uy] = trajectory_controller(x_ref, y_ref, Vx_ref, Vy_ref, Kp, Usat)
            [V_forward, w_z] = feedback_linearization(Ux, Uy, d)
            vel.linear.x = V_forward
            vel.angular.z = w_z
            pub_stage.publish(vel)
            dist = calcDistance(x_n, y_n, x_goal, y_goal)
            # Sleep long enough to hold the desired loop frequency
            rate.sleep()
if __name__ == '__main__':
    try:
        rospy.init_node("Astar_node") # initialise this ROS node
        pub_stage = rospy.Publisher("/cmd_vel", Twist, queue_size=1) # velocity command topic
        rospy.Subscriber("/base_pose_ground_truth", Odometry, callback_pose) # robot state topic
        cell_size = 2
        x_goal, y_goal = input('(x_goal, y_goal)').split()
        x_goal, y_goal = [float(i) for i in [x_goal, y_goal]]
        grid = readImage(cell_size)
        # NOTE(review): x_n/y_n are only assigned inside the odometry callback;
        # if no message has arrived yet this raises NameError — consider
        # waiting for the first odometry message before planning.
        start_x = floor(x_n/cell_size)
        start_y = floor(-y_n/cell_size)
        print('pose: ', start_x, start_y)
        # NOTE(review): this rebinds the name Astar from the class to the
        # instance, so a second Astar(...) call afterwards would fail.
        Astar = Astar(grid, np.array([x_goal, y_goal]), np.array([start_x,start_y]), cell_size)
        if(not Astar.isGoalValid()):
            print('Posicao de alvo invalida')
            exit()
        Astar.findPath()
        path = Astar.path
        planConverted = []
        # Convert the grid-cell path back into world-frame waypoints
        for node in path:
            pose = Astar.node2d_to_goal(node)
            planConverted.append(pose)
            print(pose)
        Astar.plotGrid()
        control(planConverted)
    except rospy.ROSInterruptException:
        pass
| lucca-leao/path-planning | scripts/Astar.py | Astar.py | py | 9,706 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 167,
... |
36407185173 | import pytest
from logging import getLogger
from barbucket.domain_model.types import *
_logger = getLogger(__name__)
_logger.debug(f"--------- ---------- Testing Types")
def test_api_correct() -> None:
    """Accessing an existing member of the Api enum must not raise."""
    _logger.debug(f"---------- Test: test_api_correct")
    try:
        test_api = Api.IB
    except AttributeError as e:
        assert False, e
def test_api_incorrect() -> None:
_logger.debug(f"---------- Test: test_api_incorrect")
with pytest.raises(AttributeError):
test_api = Api.NON_EXIST # type: ignore
def test_exchange_correct() -> None:
_logger.debug(f"---------- Test: test_exchange_correct")
try:
test_exchange = Exchange.XETRA
except AttributeError as e:
assert False, e
def test_exchange_incorrect() -> None:
_logger.debug(f"---------- Test: test_exchange_incorrect")
with pytest.raises(AttributeError):
test_exchange = Exchange.NON_EXIST # type: ignore
def test_stock_type_correct() -> None:
_logger.debug(f"---------- Test: test_stock_type_correct")
try:
test_contract_type = StockType.ETF
except AttributeError as e:
assert False, e
def test_stock_type_incorrect() -> None:
_logger.debug(f"---------- Test: test_stock_type_incorrect")
with pytest.raises(AttributeError):
test_contract_type = StockType.NON_EXIST # type: ignore
def test_get_api_notation_for_exchange() -> None:
    """XETRA should translate to Interactive Brokers' exchange code 'IBIS'."""
    _logger.debug(f"---------- Test: test_get_api_notation_for_exchange")
    trans = ApiNotationTranslator()
    expected = "IBIS"
    actual = trans.get_api_notation_for_exchange(
        exchange=Exchange.XETRA,
        api=Api.IB)
    assert actual == expected
def test_get_exchange_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_exchange_from_api_notation")
trans = ApiNotationTranslator()
expected = Exchange.XETRA
actual = trans.get_exchange_from_api_notation(
name="IBIS",
api=Api.IB)
assert actual == expected
def test_get_api_notation_for_contract_type() -> None:
_logger.debug(f"---------- Test: test_get_api_notation_for_contract_type")
trans = ApiNotationTranslator()
expected = "COMMON"
actual = trans.get_api_notation_for_stock_type(
stock_type=StockType.COMMON_STOCK,
api=Api.IB)
assert actual == expected
def test_get_contract_type_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_contract_type_from_api_notation")
trans = ApiNotationTranslator()
expected = StockType.COMMON_STOCK
actual = trans.get_stock_type_from_api_notation(
name="COMMON",
api=Api.IB)
assert actual == expected
def test_get_api_notation_for_ticker_symbol() -> None:
_logger.debug(f"---------- Test: test_get_api_notation_for_ticker_symbol")
trans = ApiNotationTranslator()
expected = "AB CD"
actual = trans.get_api_notation_for_ticker_symbol(
ticker_symbol=TickerSymbol(name="AB_CD"),
api=Api.IB)
assert actual == expected
def test_get_ticker_symbol_from_api_notation() -> None:
_logger.debug(f"---------- Test: test_get_ticker_symbol_from_api_notation")
trans = ApiNotationTranslator()
ticker_symbol = trans.get_ticker_symbol_from_api_notation(
name="AB CD",
api=Api.IB)
assert type(ticker_symbol) == TickerSymbol
assert ticker_symbol.name == "AB_CD"
| mcreutz/barbucket | tests/domain_model/test_types.py | test_types.py | py | 3,398 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"lin... |
38200290752 | import speech_recognition as sr
import pyttsx3
import screen_brightness_control as sbc
import geocoder
from geopy.geocoders import Nominatim
r = sr.Recognizer()
def SpeakText(command):
    """Speak *command* aloud through a freshly initialised pyttsx3 engine."""
    tts_engine = pyttsx3.init()
    tts_engine.say(command)
    tts_engine.runAndWait()
# Main loop: listen on the microphone, transcribe with Google's recognizer,
# then echo the recognised text back through text-to-speech.
while(1):
    try:
        with sr.Microphone() as source2:
            # Briefly calibrate for background noise, then capture a phrase.
            r.adjust_for_ambient_noise(source2, duration=0.2)
            audio2 = r.listen(source2)
            MyText = r.recognize_google(audio2)
            MyText = MyText.lower()
            print(MyText)
            SpeakText(MyText)
    except sr.RequestError as e:
        # Network/API failure while contacting the recognition service.
        print("Could not request results; {0}".format(e))
    except sr.UnknownValueError:
        # Speech was captured but could not be understood.
        print("unknown error occured") | Priyanshu360-cpu/Machine-Learning | repeat_audio.py | repeat_audio.py | py | 746 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyttsx3.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 15,
"usage_type": "call"
},
{
"api_name":... |
41682684008 | import torch
#Linear regression for f(x) = 4x+3
X= torch.tensor([1,2,3,4,5,6,7,8,9,10], dtype=torch.float32)
Y=torch.tensor([7,11,15,19,23,27,31,35,39,43], dtype= torch.float32)
w= torch.tensor(0.0,dtype=torch.float32,requires_grad=True)
def forward(x):
    """Model prediction for input x: w * x + 3 (intercept fixed at 3)."""
    prediction = w * x
    return prediction + 3
def loss(y, y_exp):
    """Mean squared error between targets *y* and predictions *y_exp*."""
    squared_error = (y_exp - y) ** 2
    return squared_error.mean()
# Gradient-descent training loop for the single weight w (target: f(x) = 4x+3).
testVal = 100
print(f'Prediction before training: f({testVal}) = {forward(testVal).item():.3f}')
learningRate = 0.01
numTrainings=25
for training in range(numTrainings):
    # Forward pass over the whole dataset, MSE loss, then backprop into w.
    y_exp=forward(X)
    error = loss(Y,y_exp)
    error.backward()
    # Manual SGD step; no_grad so the update itself is not tracked, and the
    # gradient is zeroed so it does not accumulate across iterations.
    with torch.no_grad():
        w -= learningRate* w.grad
        w.grad.zero_()
    print(f'training {training+1}: W = {w.item():.3f}, loss = {error.item():.3f}')
print(f'Prediction after all training of f({testVal}) = {forward(testVal).item():.3f}') | kylej21/PyTorchProjects | linearRegression/linearReg.py | linearReg.py | py | 818 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.tensor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_n... |
71053381947 | import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.propagate = False # do not propagate logs to previously defined root logger (if any).
formatter = logging.Formatter('%(asctime)s - %(levelname)s(%(name)s): %(message)s')
# console
consH = logging.StreamHandler()
consH.setFormatter(formatter)
consH.setLevel(logging.INFO)
logger.addHandler(consH)
# file handler
request_file_handler = True
log = logger
resume_result_json = True
class Bunch(dict):
    """A dict whose keys are also readable/writable as attributes (b.key == b['key'])."""
    def __init__(self, *args, **kwds):
        super(Bunch, self).__init__(*args, **kwds)
        # Alias the attribute dict to the mapping itself, so attribute access,
        # item access and iteration all see exactly the same data.
        self.__dict__ = self
AA_abb_dict = {"<unk>": 0, "<pad>": 1, "<start>": 2, "<eos>": 3, "A": 4, "C": 5, "D": 6, "E": 7,
"F": 8, "G": 9, "H": 10, "I": 11, "K": 12, "L": 13, "M": 14, "N": 15, "P": 16,
"Q": 17, "R": 18, "S": 19, "T": 20, "V": 21, "W": 22, "Y": 23}
AA_abb_dict_T = {v:k for k, v in AA_abb_dict.items()}
AA_dict = {"<unk>": 0, "<pad>": 1, "<start>": 2, "<eos>": 3, "ALA": 4, "CYS": 5, "ASP": 6, "GLU": 7,
"PHE": 8, "GLY": 9, "HIS": 10, "ILE": 11, "LYS": 12, "LEU": 13, "MET": 14, "ASN": 15,"PRO": 16,
"GLN": 17, "ARG": 18, "SER": 19, "THR": 20, "VAL": 21, "TRP": 22, "TYR": 23}
pep_max_length_uniprot = 40
pep_max_length_pepbdb = 40
EGCM_max_length = 400
process_pepbdb = True
process_uniprot = False
pepbdb_source = "/home/chens/data/pepbdb/pepbdb"
pepbdb_processed = 'pepbdb_sorted.json'
uniprot_yes_source = '/home/chens/data/uniprot/uniprot-reviewed_yes.fasta'
uniprot_no_source = '/home/chens/data/uniprot/uniprot-reviewed_no.fasta'
uniprot_processed = 'uniprot.json'
test_result = 'test_result.json'
EGCM_max_value = 100
EGCM_embeded_length = 50
train_mode = 'finetune' #or pretrain
pretrained_model = 'output/07-14/pretrain_model_57.pt'
fintuned_model = 'output/07-14-21/finetune_model_59.pt'
final_model = 'output/07-15-12/z_gen_model_59.pt'
savepath='output/{}'
tbpath = 'tb/default'
generated_savepath = 'generated.json'
batch_size = 16
total_epoch = 60
sample_num = 20
def _cfg_import_export(cfg_interactor, cfg_, prefix='', mode='fill_parser'):
    """ Iterate through cfg_ module/object. For known variables import/export
    from cfg_interactor (dict, argparser, or argparse namespace).

    Modes:
        'fill_parser' -- add one argparse option per scalar config value
        'fill_dict'   -- copy scalar values into the dict cfg_interactor
        'override'    -- copy values from cfg_interactor back onto cfg_
    Bunch-valued attributes are recursed into with a dotted name prefix.
    """
    for k in dir(cfg_):
        if k[0] == '_': continue # hidden
        v = getattr(cfg_, k)
        if type(v) in [float, str, int, bool]:
            if mode == 'fill_parser':
                cfg_interactor.add_argument('--{}{}'.format(prefix, k), type=type(v), help='default: {}'.format(v))
            elif mode == 'fill_dict':
                cfg_interactor['{}{}'.format(prefix, k)] = v
            elif mode == 'override':
                # only override when the (prefixed) key exists on the interactor
                prek = '{}{}'.format(prefix, k)
                if prek in cfg_interactor:
                    setattr(cfg_, k, getattr(cfg_interactor, prek))
        elif type(v) == Bunch: # recurse; descend into Bunch
            _cfg_import_export(cfg_interactor, v, prefix=prefix + k + '.', mode=mode)
def _override_config(args, cfg):
    """ Override cfg in place from command-line args.

    Delegates to _cfg_import_export in 'override' mode: any attribute of `args`
    whose (dotted) name matches a variable in `cfg` replaces that variable.

    NOTE: the previous docstring also promised loading overrides from the
    file named by args.config_json, but that was never implemented -- the
    value was read into a local and discarded. The dead read has been removed;
    implementing JSON-based overrides remains a TODO.
    """
    _cfg_import_export(args, cfg, mode='override')
# Training hyper-parameters for the VAE/WAE stage.
vae = Bunch(
    batch_size=1,
    lr=1e-3,
    # TODO lrate decay with scheduler
    s_iter=0,       # start iteration (non-zero when resuming)
    n_iter=200000,  # total training iterations
    # Regularization weight beta is annealed between these two anchor points
    # (the iters are re-anchored just below, relative to s_iter).
    beta=Bunch(
        start=Bunch(val=1.0, iter=0),
        end=Bunch(val=2.0, iter=10000)
    ),
    lambda_logvar_L1=0.0, # default from https://openreview.net/pdf?id=r157GIJvz
    lambda_logvar_KL=1e-3, # default from https://openreview.net/pdf?id=r157GIJvz
    z_regu_loss='mmdrf', # kl (vae) | mmd (wae) | mmdrf (wae)
    cheaplog_every=500, # cheap tensorboard logging eg training metrics
    expsvlog_every=20000, # expensive logging: model checkpoint, heldout set evals, word emb logging
    chkpt_path='./output/{}/{}_model_{}.pt',  # .format(run_tag, stage, step) -- confirm at call site
    clip_grad=5.0,  # gradient-norm clipping threshold
)
# Re-anchor the beta annealing window relative to the (possibly resumed) start
# iteration; annealing spans the first fifth of training.
vae.beta.start.iter = vae.s_iter
vae.beta.end.iter = vae.s_iter + vae.n_iter // 5
# Architecture hyper-parameters for the encoder (E), generator/decoder (G)
# and classifier/discriminator (C).
model = Bunch(
    z_dim=100,   # latent code size
    c_dim=2,     # condition/attribute code size
    emb_dim=150, # token embedding size
    pretrained_emb=None, # set True to load from dataset_unl.get_vocab_vectors()
    freeze_embeddings=False,
    flow=0,        # number of normalizing-flow steps (0 = disabled)
    flow_type='',
    E_args=Bunch(
        h_dim=80, # 20 for amp, 64 for yelp
        biGRU=True,
        layers=1,
        p_dropout=0.0
    ),
    G_args=Bunch(
        G_class='gru',  # selects which decoder below is used: 'gru' or deconv
        GRU_args=Bunch(
            # h_dim = (z_dim + c_dim) for now. TODO parametrize this?
            p_word_dropout=0.3,
            p_out_dropout=0.3,
            skip_connetions=False,  # NOTE(review): typo of "skip_connections" -- kept, downstream code reads this key
        ),
        deconv_args=Bunch(
            # decoder output length depends on which corpus this stage trains on
            max_seq_len=pep_max_length_pepbdb if train_mode=='finetune' else pep_max_length_uniprot,
            num_filters=100,
            kernel_size=4,
            num_deconv_layers=3,
            useRNN=False,
            temperature=1.0,  # softmax temperature for the output distribution
            use_batch_norm=True,
            num_conv_layers=2,
            add_final_conv_layer=True,
        ),
    ),
    C_args=Bunch(
        min_filter_width=3,
        max_filter_width=5,
        num_filters=100,
        dropout=0.5
    )
)
# config for the losses, constant during training & phases
losses = Bunch(
    wae_mmd=Bunch(
        sigma=7.0, # ~ O( sqrt(z_dim) )
        kernel='gaussian',
        # for method = rf (random-feature approximation of the MMD kernel)
        rf_dim=500,       # number of random features
        rf_resample=False # resample random features every step if True
    ),
)
| ChenShengsGitHub/structure-based-peptide-generator | cfg.py | cfg.py | py | 5,610 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.StreamHan... |
40238915211 |
from setuptools import setup, find_packages
# Runtime dependencies: buildbot itself, Debian packaging helpers, and the
# xunit XML parser used by the custom build steps.
requires = [
    'buildbot',
    'python-debian',
    'xunitparser',
]
# Standard src/-layout packaging for the buildbot-junit plugin.
setup(
    name='buildbot-junit',
    version='0.1',
    description='Junit for buildbot',
    author='Andrey Stolbuhin',
    author_email='an.stol99@gmail.com',
    url='https://github.com/ZeeeL/buildbot-junit',
    keywords='buildbot xunit junit steps shellcommand',
    packages=find_packages('src'),   # packages live under src/
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
)
| ZeeeL/buildbot-junit | setup.py | setup.py | py | 535 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 18,
"usage_type": "call"
}
] |
23378159417 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils as utils
LRELU_SLOPE = 0.1
def get_padding(kernel_size, dilation=1):
    """Padding that keeps a stride-1 dilated conv length-preserving ("same")."""
    # Total receptive growth of a dilated kernel is dilation * (kernel_size - 1);
    # half of it goes on each side.
    effective_span = kernel_size * dilation - dilation
    return int(effective_span / 2)
def init_weights(m, mean=0.0, std=0.01):
    """Re-initialize Conv1d weights in place from N(mean, std); other modules untouched.

    Intended to be passed to nn.Module.apply().
    """
    if not isinstance(m, nn.Conv1d):
        return
    m.weight.data.normal_(mean, std)
class res_block1(nn.Module):
    """HiFi-GAN ResBlock1: three (dilated conv, plain conv) pairs, each with
    leaky-ReLU pre-activations and a residual skip around the pair."""
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super().__init__()
        self.h = h  # hyper-parameter namespace (stored for parity; unused here)
        # Dilated convs: growing receptive field at constant resolution.
        self.convs1 = nn.ModuleList([
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                                        padding=get_padding(kernel_size, dilation[0]))),
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                                        padding=get_padding(kernel_size, dilation[1]))),
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                                        padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)
        # Non-dilated refinement convs paired one-to-one with convs1.
        self.convs2 = nn.ModuleList([
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
                                        padding=get_padding(kernel_size, 1))),
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
                                        padding=get_padding(kernel_size, 1))),
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=1,
                                        padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)
    def forward(self, x): # lrelu -> cnn1 -> lrelu -> cnn2 -> residual x
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            xt = c2(xt)
            x = xt + x  # residual connection
        return x
    def remove_weight_norm(self):
        # Strip the weight-norm reparameterization for inference/export.
        for l in self.convs1:
            utils.remove_weight_norm(l)
        for l in self.convs2:
            utils.remove_weight_norm(l)
class res_block2(nn.Module):
    """HiFi-GAN ResBlock2: lighter residual block -- two dilated convs, each
    with a leaky-ReLU pre-activation and a residual skip."""
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
        super().__init__()
        self.h = h  # hyper-parameter namespace (stored for parity; unused here)
        self.convs = nn.ModuleList([
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                                        padding=get_padding(kernel_size, dilation[0]))),
            utils.weight_norm(nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                                        padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)
    def forward(self, x):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c(xt)
            x = xt + x  # residual connection
        return x
    def remove_weight_norm(self):
        # Strip the weight-norm reparameterization for inference/export.
        for l in self.convs:
            utils.remove_weight_norm(l)
class generator(nn.Module):
    """HiFi-GAN generator: 80-channel mel-spectrogram -> 1-channel waveform,
    via stacked transposed-conv upsampling stages, each followed by a bank of
    multi-receptive-field residual blocks whose outputs are averaged."""
    def __init__(self, h):
        super().__init__()
        self.h = h
        self.num_kernels = len(h.resblock_kernel_sizes)  # resblocks per upsampling stage
        self.num_upsamples = len(h.upsample_rates)
        self.conv_pre = utils.weight_norm(nn.Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
        resblock = res_block1 if h.resblock == '1' else res_block2
        # One transposed conv per stage; channel count halves at each stage.
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(utils.weight_norm(
                nn.ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
                                   k, u, padding=(k-u)//2)))
        # num_kernels parallel resblocks per stage, stored flat; indexed as
        # resblocks[stage * num_kernels + j] in forward().
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel//(2**(i+1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(h, ch, k, d))
        # `ch` deliberately leaks out of the loop: channel count of the last stage.
        self.conv_post = utils.weight_norm(nn.Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
    def forward(self, x):
        x = self.conv_pre(x) # This is the first layer that upsamples the number of channels from 80 to 8192
        for i in range(self.num_upsamples): # Stacks the transpose-conv + resblocks 'num_upsamples' times.
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x) # Decreases the num of channels
            xs = None
            for j in range(self.num_kernels): # Each iteration inputs into the resblocks
                if xs is None:
                    xs = self.resblocks[i*self.num_kernels+j](x)
                else:
                    xs += self.resblocks[i*self.num_kernels+j](x)
            x = xs / self.num_kernels # In the end, all the individual outputs from the resblocks is meaned.
        # After all the resblocks, the final output is the dim of 32 in the current configuration.
        x = F.leaky_relu(x)
        x = self.conv_post(x) # Takes the 32 input channels and gives 1 channel of output
        x = torch.tanh(x)  # waveform samples bounded to [-1, 1]
        return x # Final output is (bs, 1, 2097152) for default config.
    def remove_weight_norm(self):
        # Strip weight norm from every layer before inference/export.
        print('Removing weight norm...')
        for l in self.ups:
            utils.remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        utils.remove_weight_norm(self.conv_pre)
        utils.remove_weight_norm(self.conv_post)
class discriminator_p(nn.Module):
    """Period discriminator: folds the 1-D waveform into a 2-D map of shape
    (time/period, period) and scores it with strided 2-D convs, so it is
    sensitive to periodic structure at one specific period."""
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super().__init__()
        self.period = period
        norm_f = utils.weight_norm if use_spectral_norm == False else utils.spectral_norm
        # Kernels/strides act on the time axis only (the second axis is the period).
        self.convs = nn.ModuleList([
            norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
    def forward(self, x):
        fmap = list()  # per-layer feature maps, consumed by the feature-matching loss
        b, c, t = x.shape
        # Reflect-pad on the right so t divides evenly by the period before folding.
        if t % self.period != 0:
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), 'reflect')
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)  # flatten to (batch, score vector)
        return x, fmap
class multi_period_discriminator(nn.Module):
    """Ensemble of period discriminators over the prime periods 2, 3, 5, 7, 11."""
    def __init__(self):
        super().__init__()
        self.discriminators = nn.ModuleList([
            discriminator_p(i) for i in [2, 3, 5, 7, 11]
        ])
    def forward(self, y, y_hat): # Takes actual out (y) and fake out (y_hat)
        # Returns scores and feature maps for real ('r') and generated ('g')
        # audio, one entry per sub-discriminator.
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = list(), list(), list(), list()
        for i, d in enumerate(self.discriminators): # each discriminator has a different kernel size (but 1 depth) to compute only 1 period of audio.
            y_d_r, fmap_r = d(y) # calculates discrimination score for real (hence, 'r'). Look, I didn't pick the variables names okay.
            y_d_g, fmap_g = d(y_hat) # 'g' stands for generated
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class discriminator_s(nn.Module):
    """Scale discriminator: grouped strided 1-D convs over the raw (or
    average-pooled) waveform; complements the period discriminators."""
    def __init__(self, use_spectral_norm=False):
        super().__init__()
        # Spectral norm is used only for the copy that sees un-pooled audio.
        norm_f = utils.weight_norm if use_spectral_norm == False else utils.spectral_norm
        self.convs = nn.ModuleList([
            norm_f(nn.Conv1d(1, 128, 15, 1, padding=7)),
            norm_f(nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)),
            norm_f(nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)),
            norm_f(nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)),
            norm_f(nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
            norm_f(nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
            norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1))
    def forward(self, x):
        fmap = list()  # per-layer feature maps for the feature-matching loss
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)  # flatten to (batch, score vector)
        return x, fmap
class multi_scale_discriminator(nn.Module):
    """Ensemble of three scale discriminators operating on progressively
    average-pooled versions of the waveform (x1, x2-pooled, x4-pooled)."""
    def __init__(self):
        super().__init__()
        self.discriminators = nn.ModuleList([
            discriminator_s(use_spectral_norm=True),  # sees raw audio
            discriminator_s(),
            discriminator_s(),
        ])
        # Pooling applied before the 2nd and 3rd discriminators.
        self.meanpools = nn.ModuleList([
            nn.AvgPool1d(4, 2, padding=2),
            nn.AvgPool1d(4, 2, padding=2)
        ])
    def forward(self, y, y_hat): # in MSD, you do not reshape the input data to differentiate between different period of the input audio.
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = list(), list(), list(), list()
        for i, d in enumerate(self.discriminators):
            if i != 0: # you do not average-pool the raw audio. Also, you use spectral_norm on the raw audio.
                y = self.meanpools[i-1](y) # average-pooling the inputs
                y_hat = self.meanpools[i-1](y_hat)
            y_d_r, fmap_r = d(y) # discrimination scores for the inputs
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r) # fmap are the feature maps. It's audio
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
    """Feature-matching loss: L1 distance between real and generated feature
    maps, summed over all discriminators and layers, scaled by 2 to weight
    its contribution to the overall generator loss."""
    total = sum(
        torch.mean(torch.abs(real_feat - gen_feat))
        for real_maps, gen_maps in zip(fmap_r, fmap_g)
        for real_feat, gen_feat in zip(real_maps, gen_maps)
    )
    return total * 2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss: real outputs are pulled toward 1, generated
    outputs toward 0.

    Returns (summed loss tensor, per-discriminator real losses as floats,
    per-discriminator generated losses as floats).
    """
    total = 0
    real_losses, fake_losses = [], []
    for real_out, fake_out in zip(disc_real_outputs, disc_generated_outputs):
        real_term = torch.mean((1 - real_out) ** 2)
        fake_term = torch.mean(fake_out ** 2)
        total = total + real_term + fake_term
        real_losses.append(real_term.item())
        fake_losses.append(fake_term.item())
    return total, real_losses, fake_losses
def generator_loss(disc_outputs):
    """LSGAN generator loss: push discriminator outputs on fakes toward 1.

    Returns (summed loss, list of per-discriminator loss tensors).
    """
    per_disc = [torch.mean((1 - out) ** 2) for out in disc_outputs]
    return sum(per_disc), per_disc
if __name__ == '__main__':
    # Smoke check: build the MPD ensemble and print each sub-discriminator's
    # period (2, 3, 5, 7, 11).
    mpd = multi_period_discriminator()
    for sub in mpd.discriminators:
        print(sub.period)
{
"api_name": "torch.nn.Conv1d",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
19124524287 | from google.appengine.ext import ndb
from components import utils
import gae_ts_mon
import config
import model
FIELD_BUCKET = 'bucket'  # common metric field: which bucket a build belongs to
# Override default target fields for app-global metrics.
GLOBAL_TARGET_FIELDS = {
    'job_name': '',  # module name
    'hostname': '',  # version
    'task_num': 0,  # instance ID
}
# Cumulative counters, incremented from request handlers via increment() below.
CREATE_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/created',
    description='Build creation',
)
START_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/started',
    description='Build start',
)
COMPLETE_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/completed',
    description='Build completion, including success, failure and cancellation',
)
HEARTBEAT_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/heartbeats',
    description='Failures to extend a build lease'
)
LEASE_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/leases',
    description='Successful build lease extension',
)
LEASE_EXPIRATION_COUNT = gae_ts_mon.CounterMetric(
    'buildbucket/builds/lease_expired',
    description='Build lease expirations'
)
# Gauges and distributions below are app-global; they are recomputed by
# update_global_metrics() rather than incremented per event.
CURRENTLY_PENDING = gae_ts_mon.GaugeMetric(
    'buildbucket/builds/pending',
    description='Number of pending builds',
)
CURRENTLY_RUNNING = gae_ts_mon.GaugeMetric(
    'buildbucket/builds/running',
    description='Number of running builds'
)
LEASE_LATENCY = gae_ts_mon.NonCumulativeDistributionMetric(
    'buildbucket/builds/never_leased_duration',
    description=(
        'Duration between a build is created and it is leased for the first time'),
)
SCHEDULING_LATENCY = gae_ts_mon.NonCumulativeDistributionMetric(
    'buildbucket/builds/scheduling_duration',
    description='Duration of a build remaining in SCHEDULED state',
)
def fields_for(build, **extra):
  """Returns the metric field dict for a build; placeholders when build is falsy.

  'builder' and 'user_agent' are extracted from the build's "key:value" tags;
  keyword overrides in **extra are merged on top.
  """
  if not build:
    fields = {
        'builder': '',
        'user_agent': '',
        FIELD_BUCKET: '<no bucket>',
    }
  else:
    tags = {}
    for tag in build.tags:
      key, value = tag.split(':', 1)
      tags[key] = value
    fields = {
        'builder': tags.get('builder', ''),
        'user_agent': tags.get('user_agent', ''),
        FIELD_BUCKET: build.bucket,
    }
  fields.update(extra)
  return fields
def increment(metric, build, **fields):  # pragma: no cover
  """Increments a counter metric, with fields derived from `build` via fields_for()."""
  metric.increment(fields_for(build, **fields))
def increment_complete_count(build):  # pragma: no cover
  """Increments COMPLETE_COUNT, split by result / failure / cancelation reason.

  `build` must be a COMPLETED build.
  """
  assert build
  assert build.status == model.BuildStatus.COMPLETED
  increment(
      COMPLETE_COUNT,
      build,
      result=str(build.result),
      failure_reason=str(build.failure_reason or ''),
      cancelation_reason=str(build.cancelation_reason or ''),
  )
@ndb.tasklet
def set_build_status_metric(metric, bucket, status):
  """Asynchronously sets `metric` to the count of builds in `bucket` with `status`."""
  q = model.Build.query(
      model.Build.bucket == bucket,
      model.Build.status == status)
  value = yield q.count_async()
  metric.set(value, {FIELD_BUCKET: bucket}, target_fields=GLOBAL_TARGET_FIELDS)
@ndb.tasklet
def set_build_latency(metric, bucket, must_be_never_leased):
  """Asynchronously sets `metric` to the distribution of ages of SCHEDULED
  builds in `bucket`.

  If must_be_never_leased, only builds that were never leased are counted.
  """
  q = model.Build.query(
      model.Build.bucket == bucket,
      model.Build.status == model.BuildStatus.SCHEDULED,
  )
  if must_be_never_leased:
    # '== True' is required: ndb builds the filter from the comparison object.
    q = q.filter(model.Build.never_leased == True)
  else:
    # Reuse the index that has never_leased
    q = q.filter(model.Build.never_leased.IN((True, False)))
  now = utils.utcnow()
  dist = gae_ts_mon.Distribution(gae_ts_mon.GeometricBucketer())
  for e in q.iter(projection=[model.Build.create_time]):
    latency = (now - e.create_time).total_seconds()
    dist.add(latency)
  if dist.count == 0:
    dist.add(0)  # report an explicit zero rather than an empty distribution
  metric.set(dist, {FIELD_BUCKET: bucket}, target_fields=GLOBAL_TARGET_FIELDS)
# Metrics that are per-app rather than per-instance.
# Recomputed from datastore by update_global_metrics() below.
GLOBAL_METRICS = [
    CURRENTLY_PENDING,
    CURRENTLY_RUNNING,
    LEASE_LATENCY,
    SCHEDULING_LATENCY,
]
def update_global_metrics():
  """Updates the metrics in GLOBAL_METRICS, one set of queries per bucket."""
  futures = []
  for b in config.get_buckets_async().get_result():
    # Kick off all tasklets concurrently; they are awaited below.
    futures.extend([
        set_build_status_metric(
            CURRENTLY_PENDING, b.name, model.BuildStatus.SCHEDULED),
        set_build_status_metric(
            CURRENTLY_RUNNING, b.name, model.BuildStatus.STARTED),
        set_build_latency(LEASE_LATENCY, b.name, True),
        set_build_latency(SCHEDULING_LATENCY, b.name, False),
    ])
  # Block until every tasklet completes; re-raises the first failure.
  for f in futures:
    f.check_success()
| mithro/chromium-infra | appengine/cr-buildbucket/metrics.py | metrics.py | py | 4,246 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gae_ts_mon.CounterMetric",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gae_ts_mon.CounterMetric",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gae_ts_mon.CounterMetric",
"line_number": 26,
"usage_type": "call"
},
{
"api_na... |
30078414055 | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical
print(tf.__version__)
train = pd.read_csv(r"sign_mnist_train.csv")
test = pd.read_csv(r"sign_mnist_test.csv")
print(train.head())
train_labels = train['label']
test_labels = test['label']
del train['label']
del test['label']
train_images = train.values
test_images = test.values
print(train_images.shape, test_images.shape)
# Normalize the data
x_train = train_images / 255.0
x_test = test_images / 255.0
# Reshaping the data from 1-D to 3-D as required through input by CNN's
x_train = x_train.reshape(-1,28,28,1)
x_test = x_test.reshape(-1,28,28,1)
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)
print("image shape-", x_train.shape, x_test.shape)
print("label shape-", y_train.shape, y_test.shape)
model = Sequential()
model.add(layers.Conv2D(32, (3,3), input_shape=(28,28,1), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(32, (3,3), activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(25, activation='softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, validation_data = (x_test, y_test), batch_size=128)
model.evaluate(x_test, y_test)
def plot_graphs(history, string):
    """Plot the training vs. validation curve of one history metric (e.g. 'loss')."""
    val_key = 'val_' + string
    plt.plot(history.history[string])
    plt.plot(history.history[val_key])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, val_key])
    plt.show()
# Plot training-vs-validation curves for accuracy and loss.
plot_graphs(history, 'accuracy')
plot_graphs(history, 'loss')
| daxjain789/Sign-Language-MNIST-with-CNN | sign_language.py | sign_language.py | py | 2,118 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.__version__",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflo... |
45483801886 | import pickle
import numpy as np
import random
import os
import pandas as pd
import yaml
import copy
from tqdm import tqdm
from . import utils
from . import visual
import xarray as xr
from .proxy import ProxyDatabase
from .gridded import Dataset
from .utils import (
pp,
p_header,
p_hint,
p_success,
p_fail,
p_warning,
cfg_abspath,
cwd_abspath,
geo_mean,
nino_indices,
calc_tpi,
global_hemispheric_means,
)
from .da import (
enkf_update_array,
cov_localization,
)
class ReconJob:
''' Reconstruction Job
General rule of loading parameters: load from the YAML first if available, then update with the parameters in the function calling,
so the latter has a higher priority
'''
    def __init__(self, configs=None, proxydb=None, prior=None, obs=None):
        # All components are optional here; they are usually populated later
        # by load_configs()/load_proxydb()/load_prior()/load_obs().
        self.configs = configs  # dict loaded from a YAML configuration file
        self.proxydb = proxydb  # ProxyDatabase of proxy records
        self.prior = prior      # gridded Dataset used as the model prior
        self.obs = obs          # gridded Dataset of instrumental observations
    def copy(self):
        ''' Return an independent deep copy of this job. '''
        return copy.deepcopy(self)
    def load_configs(self, cfg_path=None, job_dirpath=None, verbose=False):
        ''' Load the configuration YAML file
        self.configs will be updated
        Parameters
        ----------
        cfg_path : str
            the path of a configuration YAML file
        '''
        pwd = os.path.dirname(__file__)
        if cfg_path is None:
            # fall back to the configuration template shipped with the package
            cfg_path = os.path.abspath(os.path.join(pwd, './cfg/cfg_template.yml'))
        self.cfg_path = cfg_path
        if verbose: p_header(f'LMRt: job.load_configs() >>> loading reconstruction configurations from: {cfg_path}')
        self.configs = yaml.safe_load(open(cfg_path, 'r'))
        if verbose: p_success(f'LMRt: job.load_configs() >>> job.configs created')
        if job_dirpath is None:
            # relative job_dirpath in the YAML is resolved against the config file
            if os.path.isabs(self.configs['job_dirpath']):
                job_dirpath = self.configs['job_dirpath']
            else:
                job_dirpath = cfg_abspath(self.cfg_path, self.configs['job_dirpath'])
        else:
            # explicit argument is resolved against the current working directory
            job_dirpath = cwd_abspath(job_dirpath)
        self.configs['job_dirpath'] = job_dirpath
        os.makedirs(job_dirpath, exist_ok=True)
        if verbose:
            p_header(f'LMRt: job.load_configs() >>> job.configs["job_dirpath"] = {job_dirpath}')
            p_success(f'LMRt: job.load_configs() >>> {job_dirpath} created')
            pp.pprint(self.configs)
    def load_proxydb(self, path=None, verbose=False, load_df_kws=None):
        ''' Load the proxy database
        self.proxydb will be updated
        Parameters
        ----------
        proxydb_path : str
            if given, should point to a pickle file with a Pandas DataFrame underlying
        '''
        # update self.configs with not None parameters in the function calling
        if path is None:
            # relative path in configs is resolved against the config file
            if os.path.isabs(self.configs['proxydb_path']):
                path = self.configs['proxydb_path']
            else:
                path = cfg_abspath(self.cfg_path, self.configs['proxydb_path'])
        else:
            path = cwd_abspath(path)
        self.configs['proxydb_path'] = path
        if verbose: p_header(f'LMRt: job.load_proxydb() >>> job.configs["proxydb_path"] = {path}')
        # load proxy database
        proxydb = ProxyDatabase()
        proxydb_df = pd.read_pickle(self.configs['proxydb_path'])
        # copy() avoids mutating the caller's kwargs dict
        load_df_kws = {} if load_df_kws is None else load_df_kws.copy()
        proxydb.load_df(proxydb_df, ptype_psm=self.configs['ptype_psm'],
                        ptype_season=self.configs['ptype_season'], verbose=verbose, **load_df_kws)
        if verbose: p_success(f'LMRt: job.load_proxydb() >>> {proxydb.nrec} records loaded')
        proxydb.source = self.configs['proxydb_path']
        self.proxydb = proxydb
        if verbose: p_success(f'LMRt: job.load_proxydb() >>> job.proxydb created')
    def filter_proxydb(self, ptype_psm=None, dt=1, pids=None, verbose=False):
        ''' Filter the proxy database by PSM-supported proxy types, median
        time resolution dt, and (optionally) an explicit list of proxy IDs. '''
        # explicit argument takes precedence over the value already in configs
        if ptype_psm is None:
            ptype_psm = self.configs['ptype_psm']
        else:
            self.configs['ptype_psm'] = ptype_psm
            if verbose: p_header(f'LMRt: job.filter_proxydb() >>> job.configs["ptype_psm"] = {ptype_psm}')
        proxydb = self.proxydb.copy()
        if self.configs['ptype_psm'] is not None:
            ptype_list = list(self.configs['ptype_psm'].keys())
            if verbose: p_header(f'LMRt: job.filter_proxydb() >>> filtering proxy records according to: {ptype_list}')
            proxydb.filter_ptype(ptype_list, inplace=True)
        proxydb.filter_dt(dt, inplace=True)
        if pids is not None:
            self.configs['assim_pids'] = pids
            if verbose: p_header(f'LMRt: job.filter_proxydb() >>> job.configs["assim_pids"] = {pids}')
        if 'assim_pids' in self.configs and self.configs['assim_pids'] is not None:
            proxydb.filter_pids(self.configs['assim_pids'], inplace=True)
        if verbose: p_success(f'LMRt: job.filter_proxydb() >>> {proxydb.nrec} records remaining')
        self.proxydb = proxydb
    def seasonalize_proxydb(self, ptype_season=None, verbose=False):
        ''' Seasonalize proxy records according to the per-type seasonality map. '''
        # explicit argument takes precedence over the value already in configs
        if ptype_season is None:
            ptype_season = self.configs['ptype_season']
        else:
            self.configs['ptype_season'] = ptype_season
            if verbose: p_header(f'LMRt: job.seasonalize_proxydb() >>> job.configs["ptype_season"] = {ptype_season}')
        proxydb = self.proxydb.copy()
        if self.configs['ptype_season'] is not None:
            if verbose: p_header(f'LMRt: job.seasonalize_proxydb() >>> seasonalizing proxy records according to: {self.configs["ptype_season"]}')
            proxydb.seasonalize(self.configs['ptype_season'], inplace=True)
            if verbose: p_success(f'LMRt: job.seasonalize_proxydb() >>> {proxydb.nrec} records remaining')
        self.proxydb = proxydb
        if verbose: p_success(f'LMRt: job.seasonalize_proxydb() >>> job.proxydb updated')
    def load_prior(self, path_dict=None, varname_dict=None, verbose=False, anom_period=None):
        ''' Load model prior fields
        Parameters
        ----------
        path_dict: dict
            a dict of environmental variables
        varname_dict: dict
            a dict to map variable names, e.g. {'tas': 'sst'} means 'tas' is named 'sst' in the input NetCDF file
        '''
        # update self.configs with not None parameters in the function calling
        if path_dict is None:
            path_dict = cfg_abspath(self.cfg_path, self.configs['prior_path'])
            self.configs['prior_path'] = path_dict
        else:
            self.configs['prior_path'] = cwd_abspath(path_dict)
        if verbose: p_header(f'LMRt: job.load_prior() >>> job.configs["prior_path"] = {path_dict}')
        if anom_period is None:
            anom_period = self.configs['anom_period']
        else:
            self.configs['anom_period'] = anom_period
        if verbose: p_header(f'LMRt: job.load_prior() >>> job.configs["anom_period"] = {anom_period}')
        # default coordinate names; overridable via configs and the argument
        vn_dict = {
            'time': 'time',
            'lat': 'lat',
            'lon': 'lon',
        }
        if 'prior_varname' in self.configs:
            vn_dict.update(self.configs['prior_varname'])
        if varname_dict is not None:
            vn_dict.update(varname_dict)
        self.configs['prior_varname'] = vn_dict
        # load data
        if verbose: p_header(f'LMRt: job.load_prior() >>> loading model prior fields from: {self.configs["prior_path"]}')
        ds = Dataset()
        ds.load_nc(self.configs['prior_path'], varname_dict=self.configs['prior_varname'], anom_period=anom_period, inplace=True)
        if verbose:
            p_hint('LMRt: job.load_prior() >>> raw prior')
            print(ds)
        self.prior = ds
        if verbose: p_success(f'LMRt: job.load_prior() >>> job.prior created')
    def seasonalize_ds_for_psm(self, ds_type=None, seasonalized_ds_path=None, save_path=None, ptype_season=None, verbose=False):
        ''' Seasonalize the prior or obs dataset once for every distinct
        seasonality used by any proxy type, for later PSM calibration.
        ds_type : str, 'prior' or 'obs'
        '''
        # fast path: reuse a previously pickled dict of seasonalized datasets
        if seasonalized_ds_path is not None and os.path.exists(seasonalized_ds_path):
            with open(seasonalized_ds_path, 'rb') as f:
                if ds_type == 'prior':
                    self.seasonalized_prior = pickle.load(f)
                elif ds_type == 'obs':
                    self.seasonalized_obs = pickle.load(f)
                else:
                    raise ValueError('Wrong ds_type')
        else:
            if ptype_season is None:
                ptype_season = self.configs['ptype_season']
            else:
                self.configs['ptype_season'] = ptype_season
                if verbose: p_header(f'LMRt: job.seasonalize_ds_for_psm() >>> job.configs["ptype_season"] = {ptype_season}')
            # collect the union of all distinct seasonality definitions
            all_seasons = []
            for ptype, season in ptype_season.items():
                if isinstance(season[0], list):
                    # when ptype_season[pobj.ptype] contains multiple seasonality possibilities
                    for sn in season:
                        if sn not in all_seasons:
                            all_seasons.append(sn)
                else:
                    # when ptype_season[pobj.ptype] contains only one seasonality possibility
                    if season not in all_seasons:
                        all_seasons.append(season)
            # print(all_seasons)
            if ds_type == 'prior':
                ds = self.prior.copy()
            elif ds_type == 'obs':
                ds = self.obs.copy()
            else:
                raise ValueError('Wrong ds_type')
            # one seasonalized copy per distinct season, keyed by e.g. '6_7_8'
            seasonalized_ds = {}
            for season in all_seasons:
                if verbose: p_header(f'LMRt: job.seasonalize_ds_for_psm() >>> Seasonalizing variables from {ds_type} with season: {season}')
                season_tag = '_'.join(str(s) for s in season)
                seasonalized_ds[season_tag] = ds.seasonalize(season, inplace=False)
            if ds_type == 'prior':
                self.seasonalized_prior = seasonalized_ds
            elif ds_type == 'obs':
                self.seasonalized_obs = seasonalized_ds
            else:
                raise ValueError('Wrong ds_type')
            if save_path is not None:
                # cache the result so the slow seasonalization can be skipped next run
                with open(save_path, 'wb') as f:
                    pickle.dump(seasonalized_ds, f)
        if verbose: p_success(f'LMRt: job.seasonalize_ds_for_psm() >>> job.seasonalized_{ds_type} created')
    def seasonalize_prior(self, season=None, verbose=False):
        ''' Seasonalize the prior fields with a single season (list of months). '''
        # explicit argument takes precedence over the value already in configs
        if season is None:
            season = self.configs['prior_season']
        else:
            self.configs['prior_season'] = season
            if verbose: p_header(f'LMRt: job.seasonalize_prior() >>> job.configs["prior_season"] = {season}')
        ds = self.prior.copy()
        ds.seasonalize(self.configs['prior_season'], inplace=True)
        if verbose:
            p_hint(f'LMRt: job.seasonalize_prior() >>> seasonalized prior w/ season {season}')
            print(ds)
        self.prior = ds
        if verbose: p_success(f'LMRt: job.seasonalize_prior() >>> job.prior updated')
    def regrid_prior(self, ntrunc=None, verbose=False):
        ''' Regrid the prior fields via spectral truncation at wavenumber ntrunc. '''
        if ntrunc is None:
            ntrunc = self.configs['prior_regrid_ntrunc']
        self.configs['prior_regrid_ntrunc'] = ntrunc
        ds = self.prior.copy()
        ds.regrid(self.configs['prior_regrid_ntrunc'], inplace=True)
        if verbose:
            p_hint('LMRt: job.regrid_prior() >>> regridded prior')
            print(ds)
        self.prior = ds
        if verbose: p_success(f'LMRt: job.regrid_prior() >>> job.prior updated')
    def crop_prior(self, domain_range=None, verbose=False):
        ''' Take a smaller domain for reconstruction
        Parameters
        ----------
        domain_range : list
            [lat_min, lat_max, lon_min, lon_max]
        '''
        if domain_range is None:
            if 'prior_crop_domain_range' not in self.configs:
                # no cropping configured anywhere: record that explicitly
                self.configs['prior_crop_domain_range'] = None
            else:
                domain_range = self.configs['prior_crop_domain_range']
        else:
            self.configs['prior_crop_domain_range'] = domain_range
        if self.configs['prior_crop_domain_range'] is None:
            # None means "keep the full domain"; cropping is skipped entirely
            if verbose: p_success(f'LMRt: job.crop_prior() >>> job.prior not updated as the domain_range is set to None')
        else:
            ds = self.prior.copy()
            ds.crop(self.configs['prior_crop_domain_range'], inplace=True)
            if verbose:
                p_hint('LMRt: job.crop_prior() >>> cutted prior')
                print(ds)
            self.prior = ds
            if verbose: p_success(f'LMRt: job.crop_prior() >>> job.prior updated')
    def load_obs(self, path_dict=None, varname_dict=None, verbose=False, anom_period=None):
        ''' Load instrumental observations fields
        Parameters
        ----------
        path_dict: dict
            a dict of environmental variables
        varname_dict: dict
            a dict to map variable names, e.g. {'tas': 'sst'} means 'tas' is named 'sst' in the input NetCDF file
        '''
        if path_dict is None:
            obs_path = cfg_abspath(self.cfg_path, self.configs['obs_path'])
        else:
            obs_path = cwd_abspath(path_dict)
        self.configs['obs_path'] = obs_path
        if anom_period is None:
            anom_period = self.configs['anom_period']
        else:
            # NOTE(review): stored under 'obs_anom_period' but the fallback above
            # reads 'anom_period' -- confirm the key mismatch is intentional
            self.configs['obs_anom_period'] = anom_period
            if verbose: p_header(f'LMRt: job.load_obs() >>> job.configs["anom_period"] = {anom_period}')
        # default coordinate names; overridable via configs and the argument
        vn_dict = {
            'time': 'time',
            'lat': 'lat',
            'lon': 'lon',
        }
        if 'obs_varname' in self.configs:
            vn_dict.update(self.configs['obs_varname'])
        if varname_dict is not None:
            vn_dict.update(varname_dict)
        self.configs['obs_varname'] = vn_dict
        if verbose: p_header(f'LMRt: job.load_obs() >>> loading instrumental observation fields from: {self.configs["obs_path"]}')
        ds = Dataset()
        ds.load_nc(self.configs['obs_path'], varname_dict=vn_dict, anom_period=anom_period, inplace=True)
        self.obs = ds
        if verbose: p_success(f'LMRt: job.load_obs() >>> job.obs created')
def calibrate_psm(self, ptype_season=None,
                  seasonalized_prior_path=None, prior_loc_path=None,
                  seasonalized_obs_path=None, obs_loc_path=None,
                  calibed_psm_path=None, calib_period=None, verbose=False):
    ''' Calibrate the proxy system models (PSMs) against the instrumental obs.

    Seasonalizes the prior and the observation fields per proxy type, finds
    the nearest grid cell for every proxy record, and fits each record's PSM
    over ``calib_period``.  Intermediate products are cached as pickles
    under ``configs['job_dirpath']`` so reruns are cheap.

    Parameters
    ----------
    ptype_season : dict, optional
        Maps proxy type -> growth-season months; defaults to
        ``configs['ptype_season']``.
    seasonalized_prior_path, seasonalized_obs_path : str, optional
        Cache paths for the seasonalized prior/obs fields.
    prior_loc_path, obs_loc_path : str, optional
        Cache paths for the proxy-to-grid-cell location lookups.
    calibed_psm_path : str, optional
        Cache path for the calibrated PSM parameters.
    calib_period : list, optional
        [start_yr, end_yr] calibration window; defaults to
        ``configs['psm_calib_period']``.
    verbose : bool
        Print progress messages.
    '''
    if ptype_season is None:
        ptype_season = self.configs['ptype_season']
    else:
        self.configs['ptype_season'] = ptype_season
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["ptype_season"] = {ptype_season}')
    # keep only the seasons of proxy types that actually have a PSM assigned
    ptype_season = {k:self.configs['ptype_season'][k] for k in self.configs['ptype_psm'].keys()}
    # set paths for precalculated data
    # BUGFIX: this previously tested the misspelled key 'prepcalc', which can
    # never exist, so an already-populated configs['precalc'] dict was
    # unconditionally wiped here.
    if 'precalc' not in self.configs:
        self.configs['precalc'] = {}
    if seasonalized_prior_path is None:
        seasonalized_prior_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'seasonalized_prior.pkl'))
    self.configs['precalc']['seasonalized_prior_path'] = seasonalized_prior_path
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["seasonalized_prior_path"] = {seasonalized_prior_path}')
    if seasonalized_obs_path is None:
        seasonalized_obs_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'seasonalized_obs.pkl'))
    self.configs['precalc']['seasonalized_obs_path'] = seasonalized_obs_path
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["seasonalized_obs_path"] = {seasonalized_obs_path}')
    if prior_loc_path is None:
        prior_loc_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'prior_loc.pkl'))
    self.configs['precalc']['prior_loc_path'] = prior_loc_path
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["prior_loc_path"] = {prior_loc_path}')
    if obs_loc_path is None:
        obs_loc_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'obs_loc.pkl'))
    self.configs['precalc']['obs_loc_path'] = obs_loc_path
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["obs_loc_path"] = {obs_loc_path}')
    if calibed_psm_path is None:
        calibed_psm_path = os.path.abspath(os.path.join(self.configs['job_dirpath'], 'calibed_psm.pkl'))
    self.configs['precalc']['calibed_psm_path'] = calibed_psm_path
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["precalc"]["calibed_psm_path"] = {calibed_psm_path}')
    for ds_type, seasonalized_path, loc_path in zip(
        ['prior', 'obs'], [seasonalized_prior_path, seasonalized_obs_path], [prior_loc_path, obs_loc_path]):
        # seasonalize ds for PSM calibration
        self.seasonalize_ds_for_psm(ds_type=ds_type, ptype_season=ptype_season,
            seasonalized_ds_path=seasonalized_path, save_path=seasonalized_path, verbose=verbose)
        if ds_type == 'prior':
            ds = self.prior
            seasonalized_ds = self.seasonalized_prior
        elif ds_type == 'obs':
            ds = self.obs
            seasonalized_ds = self.seasonalized_obs
        # get modeled environmental variables at proxy locales from prior
        psm_types = set([v for k, v in self.configs['ptype_psm'].items()])
        if 'bilinear' in psm_types:
            # the bilinear PSM regresses against both temperature and precipitation
            var_names = ['tas', 'pr']
        else:
            var_names = ['tas']
        self.proxydb.find_nearest_loc(var_names, ds=ds, ds_type=ds_type, ds_loc_path=loc_path, save_path=loc_path, verbose=verbose)
        self.proxydb.get_var_from_ds(seasonalized_ds, ptype_season, ds_type=ds_type, verbose=verbose)
    # initialize PSM
    self.proxydb.init_psm(verbose=verbose)
    # calibrate PSM
    if calib_period is None:
        calib_period = self.configs['psm_calib_period']
    else:
        self.configs['psm_calib_period'] = calib_period
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> job.configs["psm_calib_period"] = {calib_period}')
    if verbose: p_header(f'LMRt: job.calibrate_psm() >>> PSM calibration period: {calib_period}')
    # reuse a previously calibrated PSM file when present, otherwise
    # calibrate from scratch and cache the result
    if calibed_psm_path is not None and os.path.exists(calibed_psm_path):
        self.proxydb.calib_psm(calib_period=calib_period, calibed_psm_path=calibed_psm_path, verbose=verbose)
    else:
        self.proxydb.calib_psm(calib_period=calib_period, save_path=calibed_psm_path, verbose=verbose)
def forward_psm(self, verbose=False):
    '''Run the calibrated PSMs forward to produce proxy estimates (Ye).

    Delegates to the proxy database, which applies each record's
    calibrated PSM to the prior fields.
    '''
    self.proxydb.forward_psm(verbose=verbose)
def gen_Xb(self, recon_vars=None, verbose=False):
    ''' Generate the background (prior) state matrix ``Xb``.

    Stacks the flattened fields of every reconstruction variable, sampled
    at ``self.prior_sample_years``, into a single (nstate, nens) matrix,
    along with a matching (nstate, 2) array of [lat, lon] coordinates and
    a map from variable name to its row range in ``Xb``.

    Parameters
    ----------
    recon_vars : str or list of str, optional
        Variable name(s) to reconstruct; defaults to
        ``configs['recon_vars']``.
    verbose : bool
        Print progress messages.

    Raises
    ------
    ValueError
        If ``gen_Ye()`` has not been run first (no prior sample years).
    '''
    if not hasattr(self, 'prior_sample_years'):
        raise ValueError('job.prior_sample_years not existing, please run job.gen_Ye() first!')
    if recon_vars is None:
        recon_vars = self.configs['recon_vars']
    else:
        self.configs['recon_vars'] = recon_vars
    if verbose: p_header(f'LMRt: job.gen_Xb() >>> job.configs["recon_vars"] = {recon_vars}')
    if type(recon_vars) is str:
        # contains only one variable
        recon_vars = [recon_vars]
    vn_1st = recon_vars[0]
    self.prior_sample_idx = [list(self.prior.fields[vn_1st].time).index(yr) for yr in self.prior_sample_years]
    if verbose: p_success(f'LMRt: job.gen_Xb() >>> job.prior_sample_idx created')
    nens = np.size(self.prior_sample_years)
    Xb_var_irow = {}  # index of rows in Xb to store the specific var
    Xb_blocks = []
    Xb_coord_blocks = []
    loc = 0
    for vn in recon_vars:
        nt, nlat, nlon = np.shape(self.prior.fields[vn].value)
        lats, lons = self.prior.fields[vn].lat, self.prior.fields[vn].lon
        lon2d, lat2d = np.meshgrid(lons, lats)
        fd_coords = np.ndarray((nlat*nlon, 2))
        fd_coords[:, 0] = lat2d.flatten()
        fd_coords[:, 1] = lon2d.flatten()
        fd = self.prior.fields[vn].value[self.prior_sample_idx]
        fd = np.moveaxis(fd, 0, -1)
        fd_flat = fd.reshape((nlat*nlon, nens))
        # PERF: collect per-variable blocks and concatenate once after the
        # loop; the previous np.concatenate-per-iteration copied the whole
        # matrix every time (quadratic in the number of variables).
        Xb_blocks.append(fd_flat)
        Xb_coord_blocks.append(fd_coords)
        Xb_var_irow[vn] = [loc, loc+nlat*nlon-1]
        loc += nlat*nlon
    self.Xb = np.concatenate(Xb_blocks, axis=0)
    self.Xb_coords = np.concatenate(Xb_coord_blocks, axis=0)
    self.Xb_var_irow = Xb_var_irow
    if verbose:
        p_success(f'LMRt: job.gen_Xb() >>> job.Xb created')
        p_success(f'LMRt: job.gen_Xb() >>> job.Xb_coords created')
        p_success(f'LMRt: job.gen_Xb() >>> job.Xb_var_irow created')
def gen_Ye(self, proxy_frac=None, nens=None, verbose=False, seed=0):
    ''' Generate the proxy-estimate (Ye) matrices and sample the prior years.

    Splits the proxy database into assimilated and evaluation sets, builds
    per-proxy Ye time series DataFrames, draws ``nens`` prior years at
    random (deterministically, via ``seed``) and stores the corresponding
    Ye ensemble matrices and their [lat, lon] coordinates on the job.

    Parameters
    ----------
    proxy_frac : float, optional
        Fraction of proxies to assimilate; defaults to
        ``configs['proxy_frac']``.
    nens : int, optional
        Ensemble size; defaults to ``configs['recon_nens']``.
    verbose : bool
        Print progress messages.
    seed : int
        Seed for both the proxy split and the prior-year sampling.
    '''
    if proxy_frac is None:
        proxy_frac = self.configs['proxy_frac']
    else:
        self.configs['proxy_frac'] = proxy_frac
    if verbose: p_header(f'LMRt: job.gen_Ye() >>> job.configs["proxy_frac"] = {proxy_frac}')
    if nens is None:
        nens = self.configs['recon_nens']
    else:
        self.configs['recon_nens'] = nens
    # BUGFIX: this progress message was mislabeled as coming from job.gen_Xb()
    if verbose: p_header(f'LMRt: job.gen_Ye() >>> job.configs["recon_nens"] = {nens}')
    self.proxydb.split(proxy_frac, verbose=verbose, seed=seed)
    vn_1st = list(self.prior.fields.keys())[0]
    time = self.prior.fields[vn_1st].time
    Ye_assim_df = pd.DataFrame(index=time)
    Ye_eval_df = pd.DataFrame(index=time)
    Ye_assim_lat = []
    Ye_assim_lon = []
    Ye_eval_lat = []
    Ye_eval_lon = []
    Ye_assim_coords = np.ndarray((self.proxydb.assim.nrec, 2))
    Ye_eval_coords = np.ndarray((self.proxydb.eval.nrec, 2))
    for pid, pobj in self.proxydb.assim.records.items():
        series = pd.Series(index=pobj.ye_time, data=pobj.ye_value)
        Ye_assim_df[pid] = series
        Ye_assim_lat.append(pobj.lat)
        Ye_assim_lon.append(pobj.lon)
    # keep only the years for which every assimilated proxy has a Ye value
    Ye_assim_df.dropna(inplace=True)
    Ye_assim_coords[:, 0] = Ye_assim_lat
    Ye_assim_coords[:, 1] = Ye_assim_lon
    for pid, pobj in self.proxydb.eval.records.items():
        series = pd.Series(index=pobj.ye_time, data=pobj.ye_value)
        Ye_eval_df[pid] = series
        Ye_eval_lat.append(pobj.lat)
        Ye_eval_lon.append(pobj.lon)
    Ye_eval_df.dropna(inplace=True)
    Ye_eval_coords[:, 0] = Ye_eval_lat
    Ye_eval_coords[:, 1] = Ye_eval_lon
    Ye_df = pd.concat([Ye_assim_df, Ye_eval_df], axis=1).dropna()
    self.Ye_df = Ye_df
    nt = len(Ye_df)
    self.Ye_assim_df = Ye_assim_df
    self.Ye_eval_df = Ye_eval_df
    # deterministic sampling of the prior years for reproducible runs
    random.seed(seed)
    sample_idx = random.sample(list(range(nt)), nens)
    self.prior_sample_years = Ye_df.index[sample_idx].values
    if verbose:
        p_success(f'LMRt: job.gen_Ye() >>> job.prior_sample_years created')
    # use self.prior_sample_idx for sampling
    self.Ye_assim = np.array(Ye_assim_df)[sample_idx].T
    self.Ye_eval = np.array(Ye_eval_df)[sample_idx].T
    self.Ye_assim_coords = Ye_assim_coords
    self.Ye_eval_coords = Ye_eval_coords
    if verbose:
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_df created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim_df created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval_df created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_assim_coords created')
        p_success(f'LMRt: job.gen_Ye() >>> job.Ye_eval_coords created')
def update_yr(self, target_yr, Xb_aug, Xb_aug_coords, recon_loc_rad, recon_timescale=1, verbose=False, debug=False):
    ''' Perform the serial EnKF update for a single target year.

    Parameters
    ----------
    target_yr : float
        Center of the assimilation window.
    Xb_aug : 2-D array
        Augmented background ensemble: the state-vector rows followed by
        the Ye rows of the assimilated and then the evaluated proxies.
    Xb_aug_coords : 2-D array
        [lat, lon] for every row of ``Xb_aug``.
    recon_loc_rad : float
        Covariance localization radius passed to ``cov_localization``.
    recon_timescale : int
        Width (in years) of the assimilation window around ``target_yr``.
    debug : bool
        Print diagnostics about large innovations and variance changes.

    Returns
    -------
    Xb : 2-D array
        The updated (analysis) ensemble after assimilating every proxy
        with observations inside the window.
    '''
    start_yr = target_yr - recon_timescale/2
    end_yr = target_yr + recon_timescale/2
    Xb = np.copy(Xb_aug)
    i = 0
    # serial assimilation: proxies are assimilated one at a time and the
    # analysis becomes the background for the next proxy
    for pid, pobj in self.proxydb.assim.records.items():
        mask = (pobj.time >= start_yr) & (pobj.time <= end_yr)
        nYobs = np.sum(mask)
        if nYobs == 0:
            i += 1
            continue  # skip to next proxy record
        # average of the nYobs observations inside the window
        Yobs = pobj.value[mask].mean()
        loc = cov_localization(recon_loc_rad, pobj, Xb_aug_coords)
        # this proxy's Ye lives in the augmented rows at the bottom of Xb;
        # index backwards from the end of the state vector
        Ye = Xb[i - (self.proxydb.assim.nrec+self.proxydb.eval.nrec)]
        # averaging nYobs obs shrinks the observation error variance
        ob_err = pobj.R / nYobs
        Xa = enkf_update_array(Xb, Yobs, Ye, ob_err, loc=loc, debug=debug)
        if debug:
            Xb_mean = Xb[:-(self.proxydb.assim.nrec+self.proxydb.eval.nrec)].mean()
            Xa_mean = Xa[:-(self.proxydb.assim.nrec+self.proxydb.eval.nrec)].mean()
            innov = Yobs - Ye.mean()
            if np.abs(innov / Yobs) > 1:
                print(pid, i - (self.proxydb.assim.nrec+self.proxydb.eval.nrec))
                print(f'\tXb_mean: {Xb_mean:.2f}, Xa_mean: {Xa_mean:.2f}')
                print(f'\tInnovation: {innov:.2f}, ob_err: {ob_err:.2f}, Yobs: {Yobs:.2f}, Ye_mean: {Ye.mean():.2f}')
        # sanity check: abort if the analysis variance has become non-finite
        Xbvar = Xb.var(axis=1, ddof=1)
        Xavar = Xa.var(axis=1, ddof=1)
        vardiff = Xavar - Xbvar
        if (not np.isfinite(np.min(vardiff))) or (not np.isfinite(np.max(vardiff))):
            raise ValueError('Reconstruction has blown-up. Exiting!')
        if debug: print('min/max change in variance: ('+str(np.min(vardiff))+','+str(np.max(vardiff))+')')
        i += 1
        Xb = Xa
    return Xb
def run_da(self, recon_period=None, recon_loc_rad=None, recon_timescale=None, verbose=False, debug=False):
    ''' Run the year-by-year Kalman-filter data assimilation.

    Assimilates every year in ``recon_period`` via :meth:`update_yr` and
    stores the reconstructed fields in ``self.recon_fields`` as
    {var_name: array of shape (nyear, nens, nlat, nlon)}.

    All keyword arguments default to the corresponding entries of
    ``self.configs``; passing a value updates the config as well.
    '''
    if recon_period is None:
        recon_period = self.configs['recon_period']
    else:
        self.configs['recon_period'] = recon_period
    if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_period"] = {recon_period}')
    if recon_timescale is None:
        recon_timescale = self.configs['recon_timescale']
    else:
        self.configs['recon_timescale'] = recon_timescale
    if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_timescale"] = {recon_timescale}')
    if recon_loc_rad is None:
        recon_loc_rad = self.configs['recon_loc_rad']
    else:
        self.configs['recon_loc_rad'] = recon_loc_rad
    if verbose: p_header(f'LMRt: job.run_da() >>> job.configs["recon_loc_rad"] = {recon_loc_rad}')
    recon_yrs = np.arange(recon_period[0], recon_period[-1]+1)
    # augment the state vector with the proxy estimates (Ye) so that they
    # get updated alongside the climate fields
    Xb_aug = np.append(self.Xb, self.Ye_assim, axis=0)
    Xb_aug = np.append(Xb_aug, self.Ye_eval, axis=0)
    Xb_aug_coords = np.append(self.Xb_coords, self.Ye_assim_coords, axis=0)
    Xb_aug_coords = np.append(Xb_aug_coords, self.Ye_eval_coords, axis=0)
    nt = np.size(recon_yrs)
    nrow, nens = np.shape(Xb_aug)
    Xa = np.ndarray((nt, nrow, nens))
    for yr_idx, target_yr in enumerate(tqdm(recon_yrs, desc='KF updating')):
        Xa[yr_idx] = self.update_yr(target_yr, Xb_aug, Xb_aug_coords, recon_loc_rad, recon_timescale, verbose=verbose, debug=debug)
    # split the flat state vector back into per-variable
    # (nyr, nens, nlat, nlon) fields using the row map built by gen_Xb()
    recon_fields = {}
    for vn, irow in self.Xb_var_irow.items():
        _, nlat, nlon = np.shape(self.prior.fields[vn].value)
        recon_fields[vn] = Xa[:, irow[0]:irow[-1]+1, :].reshape((nt, nlat, nlon, nens))
        recon_fields[vn] = np.moveaxis(recon_fields[vn], -1, 1)
    self.recon_fields = recon_fields
    if verbose: p_success(f'LMRt: job.run_da() >>> job.recon_fields created')
def save_recon(self, save_path, compress_dict={'zlib': True, 'least_significant_digit': 1}, verbose=False,
               output_geo_mean=False, target_lats=[], target_lons=[], output_full_ens=False, dtype=np.float32):
    ''' Save the reconstructed fields (and derived indices) to a NetCDF file.

    For each reconstructed variable the ensemble mean (or, with
    ``output_full_ens``, the full ensemble) is written, plus best-effort
    global/hemispheric means; for 'tas' the NINO/West-Pacific and Tripole
    indices are added when they can be computed.

    Parameters
    ----------
    save_path : str
        Destination NetCDF path.
    compress_dict : dict, optional
        Per-variable encoding passed to xarray ``to_netcdf``; None disables
        compression.
    output_geo_mean : bool
        Also output the mean over the (target_lats, target_lons) region.
    output_full_ens : bool
        Save the full ensemble instead of the ensemble mean.
    dtype : np.float32 or np.float64
        Floating-point type used for the saved arrays.
    '''
    output_dict = {}
    for vn, fd in self.recon_fields.items():
        nyr, nens, nlat, nlon = np.shape(fd)
        if output_full_ens:
            output_var = np.array(fd, dtype=dtype)
            output_dict[vn] = (('year', 'ens', 'lat', 'lon'), output_var)
        else:
            output_var = np.array(fd.mean(axis=1), dtype=dtype)
            output_dict[vn] = (('year', 'lat', 'lon'), output_var)
        lats, lons = self.prior.fields[vn].lat, self.prior.fields[vn].lon
        # best-effort: global/hemispheric means for every field
        try:
            gm_ens = np.ndarray((nyr, nens), dtype=dtype)
            nhm_ens = np.ndarray((nyr, nens), dtype=dtype)
            shm_ens = np.ndarray((nyr, nens), dtype=dtype)
            for k in range(nens):
                gm_ens[:,k], nhm_ens[:,k], shm_ens[:,k] = global_hemispheric_means(fd[:,k,:,:], lats)
            output_dict[f'{vn}_gm_ens'] = (('year', 'ens'), gm_ens)
            output_dict[f'{vn}_nhm_ens'] = (('year', 'ens'), nhm_ens)
            output_dict[f'{vn}_shm_ens'] = (('year', 'ens'), shm_ens)
        except Exception:  # BUGFIX: was a bare `except:` (also caught SystemExit/KeyboardInterrupt)
            if verbose: p_warning(f'LMRt: job.save_recon() >>> Global hemispheric means cannot be calculated')
        if vn == 'tas':
            try:
                nino_ind = nino_indices(fd, lats, lons)
                nino12 = nino_ind['nino1+2']
                nino3 = nino_ind['nino3']
                nino34 = nino_ind['nino3.4']
                nino4 = nino_ind['nino4']
                wpi = nino_ind['wpi']
                nino12 = np.array(nino12, dtype=dtype)
                nino3 = np.array(nino3, dtype=dtype)
                nino34 = np.array(nino34, dtype=dtype)
                nino4 = np.array(nino4, dtype=dtype)
                # NOTE(review): `wpi` is not cast to `dtype` like the NINO
                # indices above -- confirm whether that is intentional.
                output_dict['nino1+2'] = (('year', 'ens'), nino12)
                output_dict['nino3'] = (('year', 'ens'), nino3)
                output_dict['nino3.4'] = (('year', 'ens'), nino34)
                output_dict['nino4'] = (('year', 'ens'), nino4)
                output_dict['wpi'] = (('year', 'ens'), wpi)
            except Exception:  # BUGFIX: was a bare `except:`
                if verbose: p_warning(f'LMRt: job.save_recon() >>> NINO or West Pacific Indices cannot be calculated')
            # calculate tripole index (TPI)
            try:
                tpi = calc_tpi(fd, lats, lons)
                tpi = np.array(tpi, dtype=dtype)
                output_dict['tpi'] = (('year', 'ens'), tpi)
            except Exception:  # BUGFIX: was a bare `except:`
                if verbose: p_warning(f'LMRt: job.save_recon() >>> Tripole Index (TPI) cannot be calculated')
        if output_geo_mean:
            geo_mean_ts = geo_mean(fd, lats, lons, target_lats, target_lons)
            output_dict['geo_mean'] = (('year', 'ens'), geo_mean_ts)
    # NOTE(review): `lats`/`lons` below come from the *last* variable in the
    # loop; this assumes all reconstructed variables share one grid.
    ds = xr.Dataset(
        data_vars=output_dict,
        coords={
            'year': np.arange(self.configs['recon_period'][0], self.configs['recon_period'][1]+1),
            'ens': np.arange(nens),
            'lat': lats,
            'lon': lons,
        })
    if compress_dict is not None:
        encoding_dict = {}
        for k in output_dict.keys():
            encoding_dict[k] = compress_dict
        ds.to_netcdf(save_path, encoding=encoding_dict)
    else:
        ds.to_netcdf(save_path)
    if verbose: p_header(f'LMRt: job.save_recon() >>> Reconstructed fields saved to: {save_path}')
def prepare(self, job_dirpath=None, proxydb_path=None, ptype_psm=None, ptype_season=None, verbose=False,
            prior_path=None, prior_varname_dict=None, prior_season=None, prior_regrid_ntrunc=None,
            obs_path=None, obs_varname_dict=None, anom_period=None,
            calib_period=None, seasonalized_prior_path=None, seasonalized_obs_path=None,
            prior_loc_path=None, obs_loc_path=None, calibed_psm_path=None, prep_savepath=None):
    ''' Prepare everything needed for a reconstruction run.

    Loads and filters the proxy database, loads the prior and the
    instrumental observations, calibrates and forwards the PSMs, and
    seasonalizes/regrids the prior.  The fully prepared job is pickled to
    ``prep_savepath``; when that pickle already exists, the whole pipeline
    is short-circuited by unpickling it instead.
    '''
    if job_dirpath is None:
        job_dirpath = self.configs['job_dirpath']
    else:
        self.configs['job_dirpath'] = job_dirpath
    if verbose: p_header(f'LMRt: job.prepare() >>> job.configs["job_dirpath"] = {job_dirpath}')
    os.makedirs(job_dirpath, exist_ok=True)
    if prep_savepath is None:
        prep_savepath = os.path.join(job_dirpath, f'job.pkl')
    else:
        if 'precalc' not in self.configs:
            self.configs['precalc'] = {}
        self.configs['precalc']['prep_savepath'] = prep_savepath
        if verbose: p_header(f'LMRt: job.prepare() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
    if os.path.exists(prep_savepath):
        # fast path: a previously prepared job exists on disk; reuse its
        # proxy database, prior and obs instead of re-deriving them
        job_prep = pd.read_pickle(prep_savepath)
        if verbose: p_header(f'LMRt: job.prepare() >>> Prepration data loaded from: {prep_savepath}')
        self.proxydb = job_prep.proxydb
        self.prior = job_prep.prior
        self.obs = job_prep.obs
        del(job_prep)
    else:
        # load & process proxy database
        self.load_proxydb(path=proxydb_path, verbose=verbose)
        self.filter_proxydb(ptype_psm=ptype_psm, verbose=verbose)
        self.seasonalize_proxydb(ptype_season=ptype_season, verbose=verbose)
        # load prior & obs
        self.load_prior(path_dict=prior_path, varname_dict=prior_varname_dict, anom_period=anom_period, verbose=verbose)
        self.load_obs(path_dict=obs_path, varname_dict=obs_varname_dict, anom_period=anom_period, verbose=verbose)
        # calibrate & forward PSM
        self.calibrate_psm(
            seasonalized_prior_path=seasonalized_prior_path,
            seasonalized_obs_path=seasonalized_obs_path,
            prior_loc_path=prior_loc_path,
            obs_loc_path=obs_loc_path,
            calibed_psm_path=calibed_psm_path,
            calib_period=calib_period,
            verbose=verbose,
        )
        self.forward_psm(verbose=verbose)
        # seasonalize & regrid prior
        # the large seasonalized intermediates are dropped before pickling
        # to keep the saved job small
        del(self.seasonalized_prior)
        del(self.seasonalized_obs)
        self.seasonalize_prior(season=prior_season, verbose=verbose)
        self.regrid_prior(ntrunc=prior_regrid_ntrunc, verbose=verbose)
        # save result
        pd.to_pickle(self, prep_savepath)
        # NOTE(review): if prep_savepath was None above, configs['precalc']
        # may only exist because calibrate_psm() created it -- confirm a
        # KeyError cannot occur here.
        self.configs['precalc']['prep_savepath'] = prep_savepath
        if verbose:
            p_header(f'LMRt: job.prepare() >>> Prepration data saved to: {prep_savepath}')
            p_header(f'LMRt: job.prepare() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
def save(self, prep_savepath=None, verbose=False):
    ''' Pickle the whole job to disk.

    Large seasonalized intermediates are dropped first to keep the pickle
    small.  The destination path is recorded under
    ``configs['precalc']['prep_savepath']``.

    Parameters
    ----------
    prep_savepath : str, optional
        Destination path; defaults to ``<job_dirpath>/job.pkl``.
    verbose : bool
        Print progress messages.
    '''
    if hasattr(self, 'seasonalized_prior'):
        del(self.seasonalized_prior)
    if hasattr(self, 'seasonalized_obs'):
        del(self.seasonalized_obs)
    if prep_savepath is None:
        prep_savepath = os.path.join(self.configs['job_dirpath'], f'job.pkl')
    # BUGFIX: this previously tested the misspelled key 'prepcalc', which can
    # never exist, so an already-populated configs['precalc'] dict was wiped
    # on every save.
    if 'precalc' not in self.configs:
        self.configs['precalc'] = {}
    pd.to_pickle(self, prep_savepath)
    self.configs['precalc']['prep_savepath'] = prep_savepath
    if verbose:
        p_header(f'LMRt: job.save_job() >>> Prepration data saved to: {prep_savepath}')
        p_header(f'LMRt: job.save_job() >>> job.configs["precalc"]["prep_savepath"] = {prep_savepath}')
def run(self, recon_seeds=None, recon_vars=None, recon_period=None, recon_timescale=None, recon_loc_rad=None,
        nens=None, proxy_frac=None, verbose=False, save_configs=True,
        compress_dict={'zlib': True, 'least_significant_digit': 1},
        output_geo_mean=False, target_lats=[], target_lons=[],
        output_full_ens=False, dtype=np.float32):
    ''' Run the reconstruction over every Monte-Carlo seed.

    For each seed in ``recon_seeds``: sample the prior ensemble and split
    the proxies (:meth:`gen_Ye`), build the state matrix (:meth:`gen_Xb`),
    assimilate year by year (:meth:`run_da`) and save the reconstructed
    fields to ``job_r{seed}_recon.nc``.  Seeds whose output file already
    exists are skipped, so an interrupted run can be resumed.

    All keyword arguments default to the corresponding entries of
    ``self.configs``; passing a value updates the config as well.
    '''
    job_dirpath = self.configs["job_dirpath"]
    if recon_seeds is None:
        recon_seeds = self.configs['recon_seeds']
    else:
        self.configs['recon_seeds'] = np.array(recon_seeds).tolist()
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_seeds"] = {recon_seeds}')
    if recon_vars is None:
        recon_vars = self.configs['recon_vars']
    else:
        self.configs['recon_vars'] = recon_vars
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_vars"] = {recon_vars}')
    if type(recon_vars) is str:
        # contains only one variable
        recon_vars = [recon_vars]
    if nens is None:
        nens = self.configs['recon_nens']
    else:
        self.configs['recon_nens'] = nens
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_nens"] = {nens}')
    if proxy_frac is None:
        proxy_frac = self.configs['proxy_frac']
    else:
        self.configs['proxy_frac'] = proxy_frac
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["proxy_frac"] = {proxy_frac}')
    if recon_period is None:
        recon_period = self.configs['recon_period']
    else:
        self.configs['recon_period'] = recon_period
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_period"] = {recon_period}')
    if recon_timescale is None:
        recon_timescale = self.configs['recon_timescale']
    else:
        self.configs['recon_timescale'] = recon_timescale
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_timescale"] = {recon_timescale}')
    if recon_loc_rad is None:
        recon_loc_rad = self.configs['recon_loc_rad']
    else:
        self.configs['recon_loc_rad'] = recon_loc_rad
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_loc_rad"] = {recon_loc_rad}')
    # add settings for data saving to configs
    self.configs['save_settings'] = {}
    self.configs['save_settings']['compress_dict'] = compress_dict
    self.configs['save_settings']['output_geo_mean'] = output_geo_mean
    self.configs['save_settings']['target_lats'] = target_lats
    self.configs['save_settings']['target_lons'] = target_lons
    self.configs['save_settings']['output_full_ens'] = output_full_ens
    # dtype is stored as an int (32/64) so the configs stay YAML-serializable
    if dtype is np.float32:
        self.configs['save_settings']['dtype'] = 32
    elif dtype is np.float64:
        self.configs['save_settings']['dtype'] = 64
    else:
        raise ValueError('Wrong dtype!')
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["save_settings"] = {self.configs["save_settings"]}')
    os.makedirs(job_dirpath, exist_ok=True)
    if save_configs:
        cfg_savepath = os.path.join(job_dirpath, f'job_configs.yml')
        with open(cfg_savepath, 'w') as f:
            yaml.dump(self.configs, f)
        if verbose: p_header(f'LMRt: job.run() >>> job.configs saved to: {cfg_savepath}')
    for seed in recon_seeds:
        p_header(f'LMRt: job.run() >>> seed: {seed} | max: {recon_seeds[-1]}')
        recon_savepath = os.path.join(job_dirpath, f'job_r{seed:02d}_recon.nc')
        if os.path.exists(recon_savepath):
            # resumable runs: skip seeds that already have output on disk
            p_header(f'LMRt: job.run() >>> reconstruction existed at: {recon_savepath}')
            continue
        else:
            self.gen_Ye(proxy_frac=proxy_frac, nens=nens, seed=seed)
            self.gen_Xb(recon_vars=recon_vars)
            # persist the randomized prior/proxy indices so the run is reproducible
            idx_savepath = os.path.join(job_dirpath, f'job_r{seed:02d}_idx.pkl')
            pd.to_pickle([self.prior_sample_idx, self.proxydb.calibed_idx_assim, self.proxydb.calibed_idx_eval], idx_savepath)
            if verbose: p_header(f'LMRt: job.run() >>> randomized indices for prior and proxies saved to: {idx_savepath}')
            # NOTE(review): looks like a leftover debug print -- consider removing
            print(self.proxydb.assim)
            self.run_da(recon_period=recon_period, recon_timescale=recon_timescale, recon_loc_rad=recon_loc_rad)
            self.save_recon(recon_savepath, compress_dict=compress_dict, output_geo_mean=output_geo_mean, verbose=verbose,
                target_lats=target_lats, target_lons=target_lons, output_full_ens=output_full_ens, dtype=dtype)
    p_header(f'LMRt: job.run() >>> DONE!')
def run_cfg(self, cfg_path, job_dirpath=None, recon_seeds=None, verbose=False, save_configs=True):
    ''' Run a full reconstruction driven entirely by a YAML configuration file.

    Loads the configs, resolves all paths, prepares the job
    (:meth:`prepare`), optionally crops the prior domain, saves the
    prepared job, and finally runs the Monte-Carlo reconstruction
    (:meth:`run`).

    Parameters
    ----------
    cfg_path : str
        Path to the YAML config file.
    job_dirpath : str, optional
        Output directory; overrides ``configs['job_dirpath']``.
    recon_seeds : list of int, optional
        Overrides ``configs['recon_seeds']``.
    verbose : bool
        Print progress messages.
    save_configs : bool
        Dump the effective configs to ``job_configs.yml`` inside the run.
    '''
    self.load_configs(cfg_path, verbose=verbose)
    # resolve the job directory: explicit argument > absolute path in the
    # config > path relative to the config file
    if job_dirpath is None:
        if os.path.isabs(self.configs['job_dirpath']):
            job_dirpath = self.configs['job_dirpath']
        else:
            job_dirpath = cfg_abspath(self.cfg_path, self.configs['job_dirpath'])
    else:
        job_dirpath = cwd_abspath(job_dirpath)
    self.configs['job_dirpath'] = job_dirpath
    os.makedirs(job_dirpath, exist_ok=True)
    if verbose:
        p_header(f'LMRt: job.load_configs() >>> job.configs["job_dirpath"] = {job_dirpath}')
        p_success(f'LMRt: job.load_configs() >>> {job_dirpath} created')
    proxydb_path = cfg_abspath(cfg_path, self.configs['proxydb_path'])
    ptype_psm = self.configs['ptype_psm']
    ptype_season = self.configs['ptype_season']
    prior_path = cfg_abspath(cfg_path, self.configs['prior_path'])
    prior_varname_dict = self.configs['prior_varname']
    prior_season = self.configs['prior_season']
    prior_regrid_ntrunc = self.configs['prior_regrid_ntrunc']
    prior_crop_domain_range = self.configs['prior_crop_domain_range'] if 'prior_crop_domain_range' in self.configs else None
    obs_path = cfg_abspath(cfg_path, self.configs['obs_path'])
    obs_varname_dict = self.configs['obs_varname']
    anom_period = self.configs['anom_period']
    psm_calib_period = self.configs['psm_calib_period']
    # optional precalculated caches; fall back to None when absent
    # BUGFIX: was a bare `except:` (also caught SystemExit/KeyboardInterrupt)
    try:
        seasonalized_prior_path = self.configs['precalc']['seasonalized_prior_path']
        seasonalized_obs_path = self.configs['precalc']['seasonalized_obs_path']
        prior_loc_path = self.configs['precalc']['prior_loc_path']
        obs_loc_path = self.configs['precalc']['obs_loc_path']
        calibed_psm_path = self.configs['precalc']['calibed_psm_path']
        prep_savepath = self.configs['precalc']['prep_savepath']
    except Exception:
        seasonalized_prior_path = None
        seasonalized_obs_path = None
        prior_loc_path = None
        obs_loc_path = None
        calibed_psm_path = None
        prep_savepath = None
    if recon_seeds is None:
        recon_seeds = self.configs['recon_seeds']
    else:
        self.configs['recon_seeds'] = np.array(recon_seeds).tolist()
    if verbose: p_header(f'LMRt: job.run() >>> job.configs["recon_seeds"] = {recon_seeds}')
    recon_vars = self.configs['recon_vars']
    recon_period = self.configs['recon_period']
    recon_timescale = self.configs['recon_timescale']
    recon_loc_rad = self.configs['recon_loc_rad']
    recon_nens = self.configs['recon_nens']
    proxy_frac = self.configs['proxy_frac']
    # optional save settings; fall back to the defaults when absent
    # BUGFIX: was a bare `except:`
    try:
        compress_dict = self.configs['save_settings']['compress_dict']
        output_geo_mean = self.configs['save_settings']['output_geo_mean']
        target_lats = self.configs['save_settings']['target_lats']
        target_lons = self.configs['save_settings']['target_lons']
        output_full_ens = self.configs['save_settings']['output_full_ens']
        dtype_int = self.configs['save_settings']['dtype']
        if dtype_int == 32:
            dtype = np.float32
        elif dtype_int == 64:
            dtype = np.float64
        else:
            raise ValueError(f'Wrong dtype in: {cfg_path}! Should be either 32 or 64.')
    except Exception:
        compress_dict={'zlib': True, 'least_significant_digit': 1}
        output_geo_mean=False
        target_lats=[]
        target_lons=[]
        output_full_ens=False
        dtype=np.float32
    self.prepare(job_dirpath, prep_savepath=prep_savepath, proxydb_path=proxydb_path, ptype_psm=ptype_psm, ptype_season=ptype_season,
        prior_path=prior_path, prior_varname_dict=prior_varname_dict, prior_season=prior_season, prior_regrid_ntrunc=prior_regrid_ntrunc,
        obs_path=obs_path, obs_varname_dict=obs_varname_dict, anom_period=anom_period,
        calib_period=psm_calib_period, seasonalized_prior_path=seasonalized_prior_path, seasonalized_obs_path=seasonalized_obs_path,
        prior_loc_path=prior_loc_path, obs_loc_path=obs_loc_path, calibed_psm_path=calibed_psm_path, verbose=verbose)
    # crop the domain if set to
    if prior_crop_domain_range is not None:
        self.crop_prior(prior_crop_domain_range, verbose=verbose)
    self.save(prep_savepath=prep_savepath, verbose=verbose)
    self.run(recon_seeds=recon_seeds, recon_vars=recon_vars, recon_period=recon_period, save_configs=save_configs,
        recon_timescale=recon_timescale, recon_loc_rad=recon_loc_rad, nens=recon_nens, proxy_frac=proxy_frac, verbose=verbose,
        compress_dict=compress_dict, output_geo_mean=output_geo_mean, target_lats=target_lats, target_lons=target_lons,
        output_full_ens=output_full_ens, dtype=dtype)
| fzhu2e/LMRt | LMRt/reconjob.py | reconjob.py | py | 45,613 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "copy.deepcopy",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"lin... |
43347425408 | import numpy as np
import os
from collections import defaultdict, namedtuple
import re
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class Collection(object):
    """Loads and aggregates the ``.npz`` result files found in one directory.

    Each result file is named with eleven underscore-separated integers; the
    last five encode the run description (n_sources, dim_sources, dim_shared,
    dim_correlate, dim_latent) and the file stores the final per-run
    reconstruction errors for the exclusive ("sources") and shared parts.
    """
    def __init__(self, path):
        self.path = path
        self.name = path.strip("/").split("/")[-1]
        # run description -> list of RunData, one entry per repetition
        self.data = defaultdict(list)
        self.RunDescription = namedtuple('RunDescription', ['n_sources', 'dim_sources', 'dim_shared', 'dim_correlate', 'dim_latent'])
        self.RunData = namedtuple('RunData', ['sources', 'shared'])
        for filename in os.listdir(self.path):
            self.add_data(filename)

    def add_data(self, filename):
        """Parse *filename* and, when it matches the result-file pattern,
        load its arrays into ``self.data`` keyed by the run description."""
        match = re.match("[0-9]+_[0-9]+_[0-9]+_[0-9]+_[0-9]+_[0-9]+_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+).npz", filename)
        if match:
            run_description = self.RunDescription(*(int(x) for x in match.groups()))
            # BUGFIX: close the npz handle after reading -- np.load keeps the
            # underlying file open otherwise (use it as a context manager).
            with np.load(self.path + '/' + filename) as new_data:
                run_data = self.RunData(new_data["sources"], new_data["shared"])
            self.data[run_description].append(run_data)

    def get_final_reconstruction_errors_means_stds(self):
        """Return four dicts keyed by run description: mean and std of the
        final reconstruction error, for the exclusive ("sources") and the
        shared parts respectively.  The std is taken over repetitions of
        the per-repetition means."""
        sources_data = {
            run_description: np.array([run_data.sources for run_data in run_data_list])
            for run_description, run_data_list in self.data.items()
        }
        shared_data = {
            run_description: np.array([run_data.shared for run_data in run_data_list])
            for run_description, run_data_list in self.data.items()
        }
        sources_means = {
            run_description: np.mean(data)
            for run_description, data in sources_data.items()
        }
        sources_stds = {
            run_description: np.std(np.mean(data, axis=-1))
            for run_description, data in sources_data.items()
        }
        shared_means = {
            run_description: np.mean(data)
            for run_description, data in shared_data.items()
        }
        shared_stds = {
            run_description: np.std(np.mean(data, axis=-1))
            for run_description, data in shared_data.items()
        }
        return sources_means, sources_stds, shared_means, shared_stds

    def plot_wrt_latent_dim(self, ax, legend=True, lasts=3, inset=False, ylabel=False, title='exclusive'):
        """Plot mean reconstruction errors (exclusive vs shared) against the
        latent dimension on *ax*, with +/- 1 std bands, a vertical marker at
        the total ground-truth dimensionality, and an optional zoom inset on
        the last *lasts* points."""
        sources_means, sources_stds, shared_means, shared_stds = self.get_final_reconstruction_errors_means_stds()
        keys = list(sources_means.keys())
        keys.sort(key=lambda x: x.dim_latent)
        x = np.array([key.dim_latent for key in keys])
        sources_means = np.array([sources_means[key] for key in keys])
        sources_stds = np.array([sources_stds[key] for key in keys])
        shared_means = np.array([shared_means[key] for key in keys])
        shared_stds = np.array([shared_stds[key] for key in keys])
        # prepend the degenerate 0-dimensional latent point (error 1, std 0)
        x = np.concatenate([[0], x], axis=0)
        sources_means = np.concatenate([[1], sources_means], axis=0)
        sources_stds = np.concatenate([[0], sources_stds], axis=0)
        shared_means = np.concatenate([[1], shared_means], axis=0)
        shared_stds = np.concatenate([[0], shared_stds], axis=0)
        ax.plot(x, sources_means, color='b', linestyle='--', marker='o', label="exclusive")
        ax.plot(x, shared_means, color='r', linestyle='--', marker='o', label="shared")
        ax.fill_between(x, sources_means - sources_stds, sources_means + sources_stds, color='b', alpha=0.5)
        ax.fill_between(x, shared_means - shared_stds, shared_means + shared_stds, color='r', alpha=0.5)
        # vertical marker at the total ground-truth dimensionality
        ax.axvline(keys[0].dim_shared + (keys[0].n_sources * keys[0].dim_sources), color='k', linestyle='--')
        ax.set_xlabel("latent dimension")
        if ylabel:
            ax.set_ylabel(r"mean reconstruction errors $\tilde{r}_{m}$ and $\tilde{r}_{e}$")
        else:
            ax.set_yticks([])
        if title == 'exclusive':
            title = r"$d_{e} = " + "{}$".format(keys[0].dim_sources)
        elif title == 'n_sources':
            title = r"$n = {}$".format(keys[0].n_sources)
        ax.set_title(title)
        if legend:
            ax.legend(loc='center right')
        if inset:
            inset = inset_axes(ax, width="15%", height="30%", loc=1)
            inset.plot(x[-lasts:], sources_means[-lasts:], color='b', linestyle='--', marker='o', label="exclusive")
            inset.plot(x[-lasts:], shared_means[-lasts:], color='r', linestyle='--', marker='o', label="shared")
            inset.fill_between(x[-lasts:], sources_means[-lasts:] - sources_stds[-lasts:], sources_means[-lasts:] + sources_stds[-lasts:], color='b', alpha=0.5)
            inset.fill_between(x[-lasts:], shared_means[-lasts:] - shared_stds[-lasts:], shared_means[-lasts:] + shared_stds[-lasts:], color='r', alpha=0.5)
            inset.set_ylim([0, None])
if __name__ == '__main__':
    # NOTE(review): the previous demo called Collection.compute_fits() and
    # compute_fits_fixed_u0(), which do not exist on the class and raised
    # AttributeError.  Exercise the current public API instead.
    c = Collection('../data/trash/')
    sources_means, sources_stds, shared_means, shared_stds = \
        c.get_final_reconstruction_errors_means_stds()
    print(sources_means)
    print(shared_means)
| charleswilmot/lossy_compression | src/collection.py | collection.py | py | 5,139 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": ... |
15932161401 | import datetime
import time
import json
import six
from ..exceptions import HydraError, ResourceNotFoundError
from . import scenario, rules
from . import data
from . import units
from .objects import JSONObject
from ..util.permissions import required_perms
from hydra_base.lib import template, attributes
from ..db.model import Project, Network, Scenario, Node, Link, ResourceGroup,\
ResourceAttr, Attr, ResourceType, ResourceGroupItem, Dataset, Metadata, DatasetOwner,\
ResourceScenario, TemplateType, TypeAttr, Template, NetworkOwner, User, Rule
from sqlalchemy.orm import noload, joinedload
from .. import db
from sqlalchemy import func, and_, or_, distinct
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import aliased
from ..util import hdb
from sqlalchemy import case
from sqlalchemy.sql import null
from collections import namedtuple
from hydra_base import config
import logging
# Module-level logger for this file.
log = logging.getLogger(__name__)

# Python 2 and 3 compatible string checking: on Python 3 the builtin
# `unicode` does not exist, so alias it to `str`.
# TODO remove this when Python2 support is dropped.
try:
    unicode
except NameError:
    unicode = str
def _update_attributes(resource_i, attributes):
    """Synchronise the ResourceAttr rows of *resource_i* with *attributes*.

    For each incoming attribute: a negative id means "new", so a fresh
    resource attribute is added; otherwise the existing row is looked up
    and its ``attr_is_var`` flag updated.

    Returns
    -------
    dict
        Maps the incoming attribute id to the corresponding ResourceAttr
        instance (empty when *attributes* is None).
    """
    if attributes is None:
        return dict()
    attrs = {}
    resource_attribute_qry = db.DBSession.query(ResourceAttr)
    # filter by the appropriate foreign key for the resource kind
    if resource_i.ref_key == 'NETWORK':
        resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.network_id==resource_i.id)
    elif resource_i.ref_key == 'NODE':
        resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.node_id==resource_i.id)
    elif resource_i.ref_key == 'LINK':
        # NOTE(review): LINK/GROUP read resource_i.link_id / resource_i.group_id
        # while NETWORK/NODE read resource_i.id -- confirm the model really
        # exposes those attribute names on Link/ResourceGroup.
        resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.link_id==resource_i.link_id)
    elif resource_i.ref_key == 'GROUP':
        resource_attribute_qry = resource_attribute_qry.filter(ResourceAttr.group_id==resource_i.group_id)
    resource_attributes = resource_attribute_qry.all()
    attr_id_map = dict([(ra_i.id, ra_i) for ra_i in resource_attributes])
    #ra is for ResourceAttr
    for ra in attributes:
        if ra.id < 0:
            # negative ids mark attributes that do not exist yet
            ra_i = resource_i.add_attribute(ra.attr_id, ra.attr_is_var)
        else:
            ra_i = attr_id_map[ra.id]
            ra_i.attr_is_var = ra.attr_is_var
        attrs[ra.id] = ra_i
    return attrs
def get_scenario_by_name(network_id, scenario_name,**kwargs):
try:
scen = db.DBSession.query(Scenario).filter(and_(Scenario.network_id==network_id, func.lower(Scenario.id) == scenario_name.lower())).one()
return scen.id
except NoResultFound:
log.info("No scenario in network %s with name %s"\
% (network_id, scenario_name))
return None
def get_timing(time):
return datetime.datetime.now() - time
def _get_all_attributes(network):
"""
Get all the complex mode attributes in the network so that they
can be used for mapping to resource scenarios later.
"""
attrs = network.attributes
for n in network.nodes:
attrs.extend(n.attributes)
for l in network.links:
attrs.extend(l.attributes)
for g in network.resourcegroups:
attrs.extend(g.attributes)
return attrs
def _check_ra_duplicates(all_resource_attrs, resource_id_name_map):
"""
Check for any duplicate resource attributes before inserting
into the DB. This just helps to prevent an ugly DB contraint error
"""
unique_ra_check = {}
for ra in all_resource_attrs:
k = (_get_resource_id(ra), ra['attr_id'])
if unique_ra_check.get(k) is None:
unique_ra_check[k] = ra
else:
ref_key = ra['ref_key']
if ref_key == 'NODE':
ref_id = ra['node_id']
elif ref_key == 'LINK':
ref_id = ra['link_id']
elif ref_key == 'GROUP':
ref_id = ra['group_id']
elif ref_key == 'NETWORK':
ref_id = ra['network_id']
resource_name = resource_id_name_map[ref_id]
attr_id = ra['attr_id']
attr_name = db.DBSession.query(Attr.name).filter(Attr.id==attr_id).one()
raise HydraError(f"Duplicate Resource Attr specified: {resource_name} {attr_name}")
def _bulk_add_resource_attrs(network_id, ref_key, resources, resource_name_map, template_lookup=None):
log.info("Bulk adding resource attributes")
if template_lookup is None:
template_lookup = {}
start_time = datetime.datetime.now()
#List of resource attributes
resource_attrs = {}
#Default ra / dataset pairings.
defaults = {}
attr_lookup = {}
all_attrs = db.DBSession.query(Attr).all()
for a in all_attrs:
attr_lookup[a.id] = a
#First get all the attributes assigned from the csv files.
t0 = datetime.datetime.now()
for resource in resources:
#cast name as string here in case the name is a number
resource_i = resource_name_map[str(resource.name)]
resource_attrs[resource.id] = []
if resource.attributes is not None:
for ra in resource.attributes:
if attr_lookup.get(ra.attr_id) is None:
raise HydraError(f"Unable to process attribute {ra.attr_id} on resource {resource.name} as it does not exist")
resource_attrs[resource.id].append({
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'attr_id' : ra.attr_id,
'attr_is_var' : ra.attr_is_var,
})
logging.info("Resource attributes from resources added in %s",
(datetime.datetime.now() - t0))
#Now get all the attributes supposed to be on the resources based on the types.
t0 = time.time()
##the current user is validated, but some checks require admin permissions,
##so call as a user with all permissions
admin_id = config.get('DEFAULT', 'ALL_PERMISSION_USER', 1)
# template_lookup = {} #a lookup of all the templates used by the resource
typeattr_lookup = {} # a lookup from type ID to a list of typeattrs
#A lookup from type ID to the child template that it should be using.
#We assume that a resource can't have 2 type IDS from the same network.
type_child_template_id_lookup = {}
#Holds all the attributes supposed to be on a resource based on its specified
#type
resource_resource_types = []
resource_id_name_map = {}
network_child_template_id = None
checked_for_child_template = False
for resource in resources:
#cast name as string here in case the name is a number
resource_i = resource_name_map[str(resource.name)]
resource_id_name_map[resource_i.id] = str(resource.name)
existing_attrs = [ra['attr_id'] for ra in resource_attrs[resource.id]]
if resource.types is not None:
for resource_type in resource.types:
#Go through all the resource types and add the appropriate resource
#type entries
resource_type_id = resource_type.id
if resource_type.child_template_id is None:
if type_child_template_id_lookup.get(resource_type_id) is None:
if network_child_template_id is None and checked_for_child_template == False:
network_child_template_id = template.get_network_template(network_id, resource_type.id)#TODO this should be type_id
checked_for_child_template = True
#ok, so no child ID found. We need to just use the template
#ID of the type which was given
if network_child_template_id is None:
tt = template.get_templatetype(resource_type.id, user_id=admin_id)
network_child_template_id = tt.template_id
type_child_template_id_lookup[resource_type_id] = network_child_template_id
resource_type.child_template_id = type_child_template_id_lookup[resource_type_id]
ref_id = resource_i.id
if resource_type.id is None:
raise HydraError(f"Resource type on resource {resource_i.name} has no ID")
resource_resource_types.append(
{
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'type_id' : resource_type.id,#TODO this should be type_id
'child_template_id' : resource_type.child_template_id
}
)
#Go through all types in the resource and add attributes from these types
template_j = template_lookup.get(resource_type.child_template_id)
if template_j is None:
#it's OK to use user ID 1 here because the calling function has been
#validated for the calling user's permission to get the network
tt = template.get_templatetype(resource_type.id, user_id=admin_id)
template_j = template.get_template(resource_type.child_template_id, user_id=admin_id)
template_lookup[template_j.id] = template_j
for tt in template_j.templatetypes:
typeattr_lookup[tt.id] = tt.typeattrs
typeattrs = typeattr_lookup.get(resource_type.id, []) #TODO this should be type_id
for ta in typeattrs:
if ta.attr_id not in existing_attrs:
resource_attrs[resource.id].append({
'ref_key' : ref_key,
'node_id' : resource_i.id if ref_key == 'NODE' else None,
'link_id' : resource_i.id if ref_key == 'LINK' else None,
'group_id' : resource_i.id if ref_key == 'GROUP' else None,
'network_id' : resource_i.id if ref_key == 'NETWORK' else None,
'attr_id' : ta.attr_id,
'attr_is_var' : ta.attr_is_var,
})
existing_attrs.append(ta.attr_id)
if ta.default_dataset_id is not None:
defaults[(ref_id, ta.attr_id)] = {'dataset_id':ta.default_dataset_id}
if len(resource_resource_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, resource_resource_types)
logging.info("%s ResourceTypes inserted in %s secs", \
len(resource_resource_types), str(time.time() - t0))
logging.info("Resource attributes from types added in %s",
(datetime.datetime.now() - start_time))
if len(resource_attrs) > 0:
all_resource_attrs = []
for na in resource_attrs.values():
all_resource_attrs.extend(na)
_check_ra_duplicates(all_resource_attrs, resource_id_name_map)
if len(all_resource_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, all_resource_attrs)
logging.info("ResourceAttr insert took %s secs", str(time.time() - t0))
else:
logging.warning("No attributes on any %s....", ref_key.lower())
logging.info("Resource attributes insertion from types done in %s",\
(datetime.datetime.now() - start_time))
#Now that the attributes are in, we need to map the attributes in the DB
#to the attributes in the incoming data so that the resource scenarios
#know what to refer to.
res_qry = db.DBSession.query(ResourceAttr)
if ref_key == 'NODE':
res_qry = res_qry.join(Node).filter(Node.network_id == network_id)
elif ref_key == 'GROUP':
res_qry = res_qry.join(ResourceGroup).filter(ResourceGroup.network_id == network_id)
elif ref_key == 'LINK':
res_qry = res_qry.join(Link).filter(Link.network_id == network_id)
elif ref_key == 'NETWORK':
res_qry = res_qry.filter(ResourceAttr.network_id == network_id)
real_resource_attrs = res_qry.all()
logging.info("retrieved %s entries in %s",
len(real_resource_attrs), (datetime.datetime.now() - start_time))
resource_attr_dict = {}
for resource_attr in real_resource_attrs:
if ref_key == 'NODE':
ref_id = resource_attr.node_id
elif ref_key == 'GROUP':
ref_id = resource_attr.group_id
elif ref_key == 'LINK':
ref_id = resource_attr.link_id
elif ref_key == 'NETWORK':
ref_id = resource_attr.network_id
resource_attr_dict[(ref_id, resource_attr.attr_id)] = resource_attr
if defaults.get((ref_id, resource_attr.attr_id)):
defaults[(ref_id, resource_attr.attr_id)]['id'] = resource_attr.id
logging.info("Processing Query results took %s",
(datetime.datetime.now() - start_time))
resource_attrs = {}
for resource in resources:
iface_resource = resource_name_map[str(resource.name)]
if ref_key == 'NODE':
ref_id = iface_resource.node_id
elif ref_key == 'GROUP':
ref_id = iface_resource.group_id
elif ref_key == 'LINK':
ref_id = iface_resource.link_id
elif ref_key == 'NETWORK':
ref_id = iface_resource.id
if resource.attributes is not None:
for ra in resource.attributes:
resource_attrs[ra.id] = resource_attr_dict[(ref_id, ra.attr_id)]
logging.info("Resource attributes added in %s",\
(datetime.datetime.now() - start_time))
logging.debug(" resource_attrs size: %s",\
len(resource_attrs))
return resource_attrs, defaults, template_lookup
def _add_nodes_to_database(net_i, nodes):
#First add all the nodes
log.info("Adding nodes to network %s", net_i.id)
node_list = []
for node in nodes:
node_dict = {'network_id' : net_i.id,
'name' : node.name,
'description': node.description,
'layout' : node.get_layout(),
'x' : node.x,
'y' : node.y,
}
node_list.append(node_dict)
t0 = time.time()
if len(node_list):
db.DBSession.bulk_insert_mappings(Node, node_list)
db.DBSession.flush()
logging.info("Node insert took %s secs"% str(time.time() - t0))
def _add_nodes(net_i, nodes, template_lookup):
#check_perm(user_id, 'edit_topology')
start_time = datetime.datetime.now()
#List of resource attributes
node_attrs = {}
#Maps temporary node_ids to real node_ids
node_id_map = dict()
if nodes is None or len(nodes) == 0:
return node_id_map, node_attrs, {}
_add_nodes_to_database(net_i, nodes)
iface_nodes = dict()
for n_i in net_i.nodes:
if iface_nodes.get(n_i.name) is not None:
raise HydraError("Duplicate Node Name: %s"%(n_i.name))
iface_nodes[n_i.name] = n_i
for node in nodes:
#cast node.name as str here as a node name can sometimes be a number
node_id_map[node.id] = iface_nodes[str(node.name)]
node_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'NODE', nodes, iface_nodes, template_lookup)
log.info("Nodes added in %s", get_timing(start_time))
return node_id_map, node_attrs, defaults
def _add_links_to_database(net_i, links, node_id_map):
log.info("Adding links to network")
link_dicts = []
for link in links:
node_1 = node_id_map.get(link.node_1_id)
node_2 = node_id_map.get(link.node_2_id)
if node_1 is None or node_2 is None:
raise HydraError("Node IDS (%s, %s)are incorrect!"%(node_1, node_2))
link_dicts.append({'network_id' : net_i.id,
'name' : link.name,
'description' : link.description,
'layout' : link.get_layout(),
'node_1_id' : node_1.id,
'node_2_id' : node_2.id
})
if len(link_dicts) > 0:
db.DBSession.bulk_insert_mappings(Link, link_dicts)
def _add_links(net_i, links, node_id_map, template_lookup):
#check_perm(user_id, 'edit_topology')
start_time = datetime.datetime.now()
#List of resource attributes
link_attrs = {}
#Map negative IDS to their new, positive, counterparts.
link_id_map = dict()
if links is None or len(links) == 0:
return link_id_map, link_attrs, {}
#check for duplicate names:
link_names = []
duplicate_link_names = []
for link in links:
if link.name in link_names:
duplicate_link_names.append(link.name)
else:
link_names.append(link.name)
if len(duplicate_link_names) > 0:
raise HydraError(f"Duplicate link names: {duplicate_link_names}")
#Then add all the links.
#################################################################
_add_links_to_database(net_i, links, node_id_map)
###################################################################
log.info("Links added in %s", get_timing(start_time))
iface_links = {}
for l_i in net_i.links:
iface_links[str(l_i.name)] = l_i
log.info("Link Map created %s", get_timing(start_time))
for link in links:
link_id_map[link.id] = iface_links[str(link.name)]
log.info("Link ID Map created %s", get_timing(start_time))
link_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'LINK', links, iface_links, template_lookup)
log.info("Links added in %s", get_timing(start_time))
return link_id_map, link_attrs, defaults
def _add_resource_groups(net_i, resourcegroups, template_lookup):
start_time = datetime.datetime.now()
#List of resource attributes
group_attrs = {}
#Map negative IDS to their new, positive, counterparts.
group_id_map = dict()
if resourcegroups is None or len(resourcegroups)==0:
return group_id_map, group_attrs, {}
#Then add all the groups.
log.info("Adding groups to network")
group_dicts = []
if resourcegroups:
for group in resourcegroups:
group_dicts.append({'network_id' : net_i.id,
'name' : group.name,
'description' : group.description,
})
iface_groups = {}
if len(group_dicts) > 0:
db.DBSession.bulk_insert_mappings(ResourceGroup, group_dicts)
log.info("Resource Groups added in %s", get_timing(start_time))
for g_i in net_i.resourcegroups:
if iface_groups.get(g_i.name) is not None:
raise HydraError("Duplicate Resource Group: %s"%(g_i.name))
iface_groups[g_i.name] = g_i
for group in resourcegroups:
if group.id not in group_id_map:
group_i = iface_groups[group.name]
group_attrs[group.id] = []
for ra in group.attributes:
group_attrs[group.id].append({
'ref_key' : 'GROUP',
'group_id' : group_i.id,
'attr_id' : ra.attr_id,
'attr_is_var' : ra.attr_is_var,
})
group_id_map[group.id] = group_i
group_attrs, defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'GROUP', resourcegroups, iface_groups, template_lookup)
log.info("Groups added in %s", get_timing(start_time))
return group_id_map, group_attrs, defaults
@required_perms("add_network")
def add_network(network, **kwargs):
"""
Takes an entire network complex model and saves it to the DB. This
complex model includes links & scenarios (with resource data). Returns
the network's complex model.
As links connect two nodes using the node_ids, if the nodes are new
they will not yet have node_ids. In this case, use negative ids as
temporary IDS until the node has been given an permanent ID.
All inter-object referencing of new objects should be done using
negative IDs in the client.
The returned object will have positive IDS
"""
db.DBSession.autoflush = False
start_time = datetime.datetime.now()
log.debug("Adding network")
insert_start = datetime.datetime.now()
proj_i = db.DBSession.query(Project)\
.filter(Project.id == network.project_id).first()
if proj_i is None:
raise HydraError("Project ID is none. A project ID must be specified on the Network")
existing_net = db.DBSession.query(Network)\
.filter(Network.project_id == network.project_id,
Network.name == network.name).first()
if existing_net is not None:
raise HydraError(f"A network with the name {network.name} is already"
" in project {network.project_id}")
user_id = kwargs.get('user_id')
proj_i.check_write_permission(user_id)
net_i = Network()
net_i.project_id = network.project_id
net_i.name = network.name
net_i.description = network.description
net_i.created_by = user_id
net_i.projection = network.projection
net_i.layout = network.get_json('layout')
net_i.appdata = network.get_json('appdata')
network.id = net_i.id
db.DBSession.add(net_i)
db.DBSession.flush()
#These two lists are used for comparison and lookup, so when
#new attributes are added, these lists are extended.
#List of all the resource attributes
all_resource_attrs = {}
name_map = {network.name:net_i}
network_attrs, network_defaults, template_lookup = _bulk_add_resource_attrs(net_i.id, 'NETWORK', [network], name_map)
hdb.add_resource_types(net_i, network.types)
all_resource_attrs.update(network_attrs)
log.info("Network attributes added in %s", get_timing(start_time))
node_id_map, node_attrs, node_datasets = _add_nodes(net_i, network.nodes, template_lookup)
all_resource_attrs.update(node_attrs)
link_id_map, link_attrs, link_datasets = _add_links(net_i, network.links, node_id_map, template_lookup)
all_resource_attrs.update(link_attrs)
grp_id_map, grp_attrs, grp_datasets = _add_resource_groups(net_i, network.resourcegroups, template_lookup)
all_resource_attrs.update(grp_attrs)
defaults = list(grp_datasets.values()) + list(link_datasets.values()) \
+ list(node_datasets.values()) + list(network_defaults.values())
start_time = datetime.datetime.now()
scenario_names = []
if network.scenarios is not None:
log.info("Adding scenarios to network")
for s in network.scenarios:
log.info("Adding scenario %s", s.name)
if s.name in scenario_names:
raise HydraError("Duplicate scenario name: %s"%(s.name))
scen = Scenario()
scen.name = s.name
scen.description = s.description
scen.layout = s.get_layout()
scen.start_time = s.start_time
scen.end_time = s.end_time
scen.time_step = s.time_step
scen.created_by = user_id
scenario_names.append(s.name)
#extract the data from each resourcescenario
incoming_datasets = []
scenario_resource_attrs = []
for r_scen in s.resourcescenarios:
if all_resource_attrs.get(r_scen.resource_attr_id) is None:
raise HydraError(f"Couldn't find resource attribute {r_scen.resource_attr_id} "
f"as defined on resource scenario {r_scen}. "
f"Shot in the dark: "
f"Does the exporting network have duplicate attributes?")
ra = all_resource_attrs[r_scen.resource_attr_id]
incoming_datasets.append(r_scen.dataset)
scenario_resource_attrs.append(ra)
data_start_time = datetime.datetime.now()
for default in defaults:
scen.add_resource_scenario(JSONObject(default),
JSONObject({'id':default['dataset_id']}),
source=kwargs.get('app_name'))
datasets = data._bulk_insert_data(
incoming_datasets,
user_id,
kwargs.get('app_name')
)
log.info("Data bulk insert took %s", get_timing(data_start_time))
ra_start_time = datetime.datetime.now()
for i, ra in enumerate(scenario_resource_attrs):
scen.add_resource_scenario(ra, datasets[i], source=kwargs.get('app_name'))
log.info("Resource scenarios added in %s", get_timing(ra_start_time))
item_start_time = datetime.datetime.now()
if s.resourcegroupitems is not None:
for group_item in s.resourcegroupitems:
group_item_i = ResourceGroupItem()
group_item_i.group = grp_id_map[group_item.group_id]
group_item_i.ref_key = group_item.ref_key
if group_item.ref_key == 'NODE':
group_item_i.node = node_id_map[group_item.ref_id]
elif group_item.ref_key == 'LINK':
group_item_i.link = link_id_map[group_item.ref_id]
elif group_item.ref_key == 'GROUP':
group_item_i.subgroup = grp_id_map[group_item.ref_id]
else:
raise HydraError("A ref key of %s is not valid for a "
"resource group item."%group_item.ref_key)
scen.resourcegroupitems.append(group_item_i)
log.info("Group items insert took %s", get_timing(item_start_time))
net_i.scenarios.append(scen)
log.info("Scenario %s added", s.name)
log.info("Scenarios added in %s", get_timing(start_time))
net_i.set_owner(user_id)
db.DBSession.flush()
log.info("Insertion of network took: %s",(datetime.datetime.now()-insert_start))
return net_i
def _get_all_resource_attributes(network_id, template_id=None, include_non_template_attributes=False):
"""
Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
args:
network_id (int) The ID of the network from which to retrieve the attributes
template_id (int): Optional ID of a template, which when specified only returns
attributes relating to that template
include_non_template_attributes (bool): If template_id is specified and any
resource has attribtues which are NOT associated to any
network template, this flag indicates whether to return them or not.
returns:
A list of sqlalchemy result proxy objects
"""
base_qry = db.DBSession.query(
ResourceAttr.id.label('id'),
ResourceAttr.ref_key.label('ref_key'),
ResourceAttr.cr_date.label('cr_date'),
ResourceAttr.attr_is_var.label('attr_is_var'),
ResourceAttr.node_id.label('node_id'),
ResourceAttr.link_id.label('link_id'),
ResourceAttr.group_id.label('group_id'),
ResourceAttr.network_id.label('network_id'),
ResourceAttr.attr_id.label('attr_id'),
Attr.name.label('name'),
Attr.dimension_id.label('dimension_id'),
).filter(Attr.id==ResourceAttr.attr_id)
all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id == network_id)
all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id == network_id)
all_group_attribute_qry = base_qry.join(ResourceGroup)\
.filter(ResourceGroup.network_id == network_id)
network_attribute_qry = base_qry.filter(ResourceAttr.network_id == network_id)
x = time.time()
logging.info("Getting all attributes using execute")
attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry,
all_group_attribute_qry,
network_attribute_qry)
all_resource_attributes = attribute_qry.all()
log.info("%s attrs retrieved in %s", len(all_resource_attributes), time.time()-x)
logging.info("Attributes retrieved. Processing results...")
x = time.time()
rt_attribute_dict = {
'NODE' : {},
'LINK' : {},
'GROUP': {},
'NETWORK': {},
}
template_attr_lookup, all_network_typeattrs = _get_network_template_attribute_lookup(network_id)
for resource_attr in all_resource_attributes:
if template_id is not None:
#check if it's in the template. If not, it's either associated to another
#template or to no template
if resource_attr.attr_id not in template_attr_lookup.get(template_id, []):
#check if it's in any other template
if include_non_template_attributes is True:
#if it's associated to a template (but not this one because
#it wouldn't have reached this far) then ignore it
if resource_attr.attr_id in all_network_typeattrs:
continue
else:
#The attr is associated to another template.
continue
attr_dict = rt_attribute_dict[resource_attr.ref_key]
resourceid = _get_resource_id(resource_attr)
resourceattrlist = attr_dict.get(resourceid, [])
resourceattrlist.append(resource_attr)
attr_dict[resourceid] = resourceattrlist
logging.info("Attributes processed in %s", time.time()-x)
return rt_attribute_dict
def _get_resource_id(attr):
"""
return either the node, link, group or network ID of an attribute.
Whichever one is not None
"""
for resourcekey in ('node_id', 'link_id', 'network_id', 'group_id'):
if isinstance(attr, dict):
##this if statement is needed to continue the loop, rather than just
#returning attr.get(resourcekey)
if attr.get(resourcekey) is not None:
return attr[resourcekey]
else:
if getattr(attr, resourcekey) is not None:
return getattr(attr, resourcekey)
return None
def _get_network_template_attribute_lookup(network_id):
"""
Given a network ID, identify all the templates associated to the network
and build a dictionary of template_id: [attr_id, attr_id...]
"""
#First identify all templates associated to the network (assuming the network
#types are 100% representative if all templates linked to this network)
network_types = db.DBSession.query(TemplateType)\
.join(ResourceType, ResourceType.type_id == TemplateType.id)\
.filter(ResourceType.network_id == network_id).all()
template_ids = [t.template_id for t in network_types]
#Now with access to all templates, get all type attributes for all the templates.
network_typeattrs = db.DBSession.query(TemplateType.template_id.label('template_id'),\
TemplateType.id.label('type_id'),\
TypeAttr.attr_id.label('attr_id'))\
.join(TypeAttr, TypeAttr.type_id == TemplateType.id)\
.filter(TemplateType.template_id.in_(template_ids)).all()
typeattr_lookup = {}
all_network_typeattrs = []
for typeattr in network_typeattrs:
if typeattr.template_id not in typeattr_lookup:
typeattr_lookup[typeattr.template_id] = [typeattr.attr_id]
else:
typeattr_lookup[typeattr.template_id].append(typeattr.attr_id)
all_network_typeattrs.append(typeattr.attr_id)
return typeattr_lookup, all_network_typeattrs
def _get_all_templates(network_id, template_id):
"""
Get all the templates for the nodes, links and groups of a network.
Return these templates as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
"""
base_qry = db.DBSession.query(
ResourceType.ref_key.label('ref_key'),
ResourceType.node_id.label('node_id'),
ResourceType.link_id.label('link_id'),
ResourceType.group_id.label('group_id'),
ResourceType.network_id.label('network_id'),
ResourceType.child_template_id.label('child_template_id'),
Template.name.label('template_name'),
Template.id.label('template_id'),
TemplateType.id.label('type_id'),
TemplateType.parent_id.label('parent_id'),
TemplateType.layout.label('layout'),
TemplateType.name.label('type_name'),
).filter(TemplateType.id==ResourceType.type_id,
Template.id==TemplateType.template_id)
all_node_type_qry = base_qry.filter(Node.id==ResourceType.node_id,
Node.network_id==network_id)
all_link_type_qry = base_qry.filter(Link.id==ResourceType.link_id,
Link.network_id==network_id)
all_group_type_qry = base_qry.filter(ResourceGroup.id==ResourceType.group_id,
ResourceGroup.network_id==network_id)
network_type_qry = base_qry.filter(ResourceType.network_id==network_id)
#Filter the group attributes by template
if template_id is not None:
all_node_type_qry = all_node_type_qry.filter(Template.id==template_id)
all_link_type_qry = all_link_type_qry.filter(Template.id==template_id)
all_group_type_qry = all_group_type_qry.filter(Template.id==template_id)
x = time.time()
log.info("Getting all types")
type_qry = all_node_type_qry.union(all_link_type_qry, all_group_type_qry, network_type_qry)
all_types = type_qry.all()
log.info("%s types retrieved in %s", len(all_types), time.time()-x)
log.info("Attributes retrieved. Processing results...")
x = time.time()
node_type_dict = dict()
link_type_dict = dict()
group_type_dict = dict()
network_type_dict = dict()
#a lookup to avoid having to query for the same child type every time
child_type_lookup = {}
##the current user is validated, but some checks require admin permissions,
##so call as a user with all permissions
admin_id = config.get('DEFAULT', 'ALL_PERMISSION_USER', 1)
for t in all_types:
child_layout = None
child_name = None
#Load all the inherited columns like layout and name and set them
if t.parent_id is not None:
if t.type_id in child_type_lookup:
child_type = child_type_lookup[t.type_id]
else:
#no need to check for user credentials here as it's called from a
#function which has done that for us
child_type = template.get_templatetype(t.type_id, user_id=admin_id)
child_type_lookup[t.type_id] = child_type
#Now set the potentially missing columns
child_layout = child_type.layout
child_name = child_type.name
templatetype = JSONObject({'template_id' : t.template_id,
'id' : t.type_id,
'template_name' :t.template_name,
'layout' : child_layout if child_layout else t.layout,
'name' : child_name if child_name else t.type_name,
'child_template_id' : t.child_template_id})
if t.ref_key == 'NODE':
nodetype = node_type_dict.get(t.node_id, [])
nodetype.append(templatetype)
node_type_dict[t.node_id] = nodetype
elif t.ref_key == 'LINK':
linktype = link_type_dict.get(t.link_id, [])
linktype.append(templatetype)
link_type_dict[t.link_id] = linktype
elif t.ref_key == 'GROUP':
grouptype = group_type_dict.get(t.group_id, [])
grouptype.append(templatetype)
group_type_dict[t.group_id] = grouptype
elif t.ref_key == 'NETWORK':
nettype = network_type_dict.get(t.network_id, [])
nettype.append(templatetype)
network_type_dict[t.network_id] = nettype
all_types = {
'NODE' : node_type_dict,
'LINK' : link_type_dict,
'GROUP': group_type_dict,
'NETWORK': network_type_dict,
}
logging.info("Attributes processed in %s", time.time()-x)
return all_types
def _get_all_group_items(network_id):
"""
Get all the resource group items in the network, across all scenarios
returns a dictionary of dict objects, keyed on scenario_id
"""
base_qry = db.DBSession.query(ResourceGroupItem)
item_qry = base_qry.join(Scenario).filter(Scenario.network_id==network_id)
x = time.time()
logging.info("Getting all items")
all_items = item_qry.all()
log.info("%s groups jointly retrieved in %s", len(all_items), time.time()-x)
logging.info("items retrieved. Processing results...")
x = time.time()
item_dict = dict()
for item in all_items:
items = item_dict.get(item.scenario_id, [])
items.append(JSONObject(item))
item_dict[item.scenario_id] = items
logging.info("items processed in %s", time.time()-x)
return item_dict
def _get_nodes(network_id, template_id=None):
"""
Get all the nodes in a network
"""
extras = {'types':[], 'attributes':[]}
node_qry = db.DBSession.query(Node).filter(
Node.network_id == network_id,
Node.status == 'A').options(
noload(Node.network)
)
if template_id is not None:
node_qry = node_qry.filter(ResourceType.node_id == Node.id,
TemplateType.id == ResourceType.type_id,
TemplateType.template_id == template_id)
node_res = node_qry.all()
nodes = []
for n in node_res:
nodes.append(JSONObject(n, extras=extras))
return nodes
def _get_links(network_id, template_id=None):
"""
Get all the links in a network
"""
extras = {'types':[], 'attributes':[]}
link_qry = db.DBSession.query(Link).filter(
Link.network_id==network_id,
Link.status=='A').options(
noload(Link.network)
)
if template_id is not None:
link_qry = link_qry.filter(ResourceType.link_id==Link.id,
TemplateType.id==ResourceType.type_id,
TemplateType.template_id==template_id)
link_res = link_qry.all()
links = []
for l in link_res:
links.append(JSONObject(l, extras=extras))
return links
def _get_groups(network_id, template_id=None):
"""
Get all the resource groups in a network
"""
extras = {'types':[], 'attributes':[]}
group_qry = db.DBSession.query(ResourceGroup).filter(
ResourceGroup.network_id==network_id,
ResourceGroup.status=='A').options(
noload(ResourceGroup.network)
)
if template_id is not None:
group_qry = group_qry.filter(ResourceType.group_id == ResourceGroup.id,
TemplateType.id == ResourceType.type_id,
TemplateType.template_id == template_id)
group_res = group_qry.all()
groups = []
for g in group_res:
groups.append(JSONObject(g, extras=extras))
return groups
def _get_scenarios(network_id, include_data, include_results, user_id,
scenario_ids=None, include_metadata=False):
"""
Get all the scenarios in a network
"""
scen_qry = db.DBSession.query(Scenario).filter(
Scenario.network_id == network_id).options(
noload(Scenario.network)).filter(
Scenario.status == 'A')
if scenario_ids:
logging.info("Filtering by scenario_ids %s",scenario_ids)
scen_qry = scen_qry.filter(Scenario.id.in_(scenario_ids))
extras = {'resourcescenarios': [], 'resourcegroupitems': []}
scens_i = scen_qry.all()
scens = [JSONObject(s,extras=extras) for s in scens_i]
all_resource_group_items = _get_all_group_items(network_id)
#default to empty metadata
metadata = {}
for i, s in enumerate(scens):
s_i = scens_i[i]
s.resourcegroupitems = all_resource_group_items.get(s.id, [])
if include_data == True:
s.resourcescenarios = s_i.get_all_resourcescenarios(
user_id=user_id,
include_results=include_results,
include_metadata=include_metadata)
return scens
def get_network(network_id,
                include_attributes=True,
                include_data=False,
                include_results=True,
                scenario_ids=None,
                template_id=None,
                include_non_template_attributes=False,
                include_metadata=False,
                **kwargs):
    """
    Return a whole network as a dictionary.
    network_id: ID of the network to retrieve
    include_attributes (bool): include attributes to save on data
    include_data: (bool). Indicate whether scenario data is to be returned.
                  This has a significant speed impact as retrieving large amounts
                  of data can be expensive.
    include_results: (bool). If data is requested, this flag allows results
                  data to be ignored (attr is var), as this can often be very large.
    scenario_ids: list of IDS to be returned. Used if a network has multiple
                  scenarios but you only want one returned. Using this filter
                  will speed up this function call.
    template_id: Return the network with only attributes associated with this
                 template on the network, groups, nodes and links.
    include_non_template_attribute: Return attributes which are not associated to any template.
    include_metadata (bool): If data is included, then this flag indicates whether to include metadata.
                    Setting this to True may have performance implications

    raises:
        ResourceNotFoundError if the network does not exist.
    """
    log.debug("getting network %s"%network_id)
    user_id = kwargs.get('user_id')

    network_id = int(network_id)

    try:
        log.debug("Querying Network %s", network_id)
        #Load only the network row itself: every relationship is noload-ed
        #because nodes, links, groups, attributes, types and scenarios are
        #fetched by dedicated bulk queries below, which is far faster than
        #per-object lazy loading.
        net_i = db.DBSession.query(Network).filter(
            Network.id == network_id).options(
                noload(Network.scenarios)).options(
                    noload(Network.nodes)).options(
                        noload(Network.links)).options(
                            noload(Network.types)).options(
                                noload(Network.attributes)).options(
                                    noload(Network.resourcegroups)).one()

        net_i.check_read_permission(user_id)

        net = JSONObject(net_i)
        net.nodes = _get_nodes(network_id, template_id=template_id)
        net.links = _get_links(network_id, template_id=template_id)
        net.resourcegroups = _get_groups(network_id, template_id=template_id)
        net.owners = net_i.get_owners()

        if include_attributes in ('Y', True):
            #One bulk query for the attributes of every resource in the
            #network, keyed first by resource type, then by resource id.
            all_attributes = _get_all_resource_attributes(network_id,
                                                          template_id,
                                                          include_non_template_attributes)
            log.info("Setting attributes")
            net.attributes = all_attributes['NETWORK'].get(network_id, [])
            for node_i in net.nodes:
                node_i.attributes = all_attributes['NODE'].get(node_i.id, [])
            log.info("Node attributes set")
            for link_i in net.links:
                link_i.attributes = all_attributes['LINK'].get(link_i.id, [])
            log.info("Link attributes set")
            for group_i in net.resourcegroups:
                group_i.attributes = all_attributes['GROUP'].get(group_i.id, [])
            log.info("Group attributes set")

        log.info("Setting types")
        #Same bulk pattern for the resource types.
        all_types = _get_all_templates(network_id, template_id)
        net.types = all_types['NETWORK'].get(network_id, [])
        for node_i in net.nodes:
            node_i.types = all_types['NODE'].get(node_i.id, [])
        for link_i in net.links:
            link_i.types = all_types['LINK'].get(link_i.id, [])
        for group_i in net.resourcegroups:
            group_i.types = all_types['GROUP'].get(group_i.id, [])

        log.info("Getting scenarios")

        net.scenarios = _get_scenarios(network_id,
                                       include_data,
                                       include_results,
                                       user_id,
                                       scenario_ids,
                                       include_metadata=include_metadata)

    except NoResultFound:
        raise ResourceNotFoundError("Network (network_id=%s) not found." % network_id)

    return net
def get_networks(network_ids, **kwargs):
    """
    Get the list of networks specified in a list of network IDS

    args:
        network_ids (list(int)) : a list of network IDs
    returns:
        list(Network)
    """
    user_id = kwargs.get('user_id')

    #Materialise the query with .all() so (a) the return value is the list
    #the docstring promises rather than a lazy Query, and (b) the
    #permission loop and the caller's iteration do not each re-execute the
    #SQL.
    networks = db.DBSession.query(Network).filter(
        Network.id.in_(network_ids)).all()

    #Each network's read permission is checked for the requesting user.
    for n in networks:
        n.check_read_permission(user_id)

    return networks
def get_nodes(network_id, template_id=None, **kwargs):
    """
    Get all the nodes in a network.

    args:
        network_id (int): The network in which to search
        template_id (int): Only return nodes whose type is in this template.
    """
    user_id = kwargs.get('user_id')

    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
        network.check_read_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    #Active nodes only; eagerly load types and attributes, but not the
    #network relationship (the caller already has the network).
    qry = db.DBSession.query(Node)\
            .filter(Node.network_id == network_id, Node.status == 'A')\
            .options(noload(Node.network))\
            .options(joinedload(Node.types).joinedload(ResourceType.templatetype))\
            .options(joinedload(Node.attributes).joinedload(ResourceAttr.attr))

    if template_id is not None:
        qry = qry.filter(ResourceType.node_id == Node.id,
                         TemplateType.id == ResourceType.type_id,
                         TemplateType.template_id == template_id)

    return qry.all()
def get_links(network_id, template_id=None, **kwargs):
    """
    Get all the links in a network.

    args:
        network_id (int): The network in which to search
        template_id (int): Only return links whose type is in this template.
    """
    user_id = kwargs.get('user_id')

    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
        network.check_read_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    #Active links only; eagerly load types and attributes, but not the
    #network relationship (the caller already has the network).
    qry = db.DBSession.query(Link)\
            .filter(Link.network_id == network_id, Link.status == 'A')\
            .options(noload(Link.network))\
            .options(joinedload(Link.types).joinedload(ResourceType.templatetype))\
            .options(joinedload(Link.attributes).joinedload(ResourceAttr.attr))

    if template_id is not None:
        qry = qry.filter(ResourceType.link_id == Link.id,
                         TemplateType.id == ResourceType.type_id,
                         TemplateType.template_id == template_id)

    return qry.all()
def get_groups(network_id, template_id=None, **kwargs):
    """
    Get all the resource groups in a network.

    args:
        network_id (int): The network in which to search
        template_id (int): Only return resource groups whose type is in this template.
    """
    user_id = kwargs.get('user_id')

    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
        network.check_read_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    #Active groups only; eagerly load types and attributes, but not the
    #network relationship (the caller already has the network).
    qry = db.DBSession.query(ResourceGroup)\
            .filter(ResourceGroup.network_id == network_id,
                    ResourceGroup.status == 'A')\
            .options(noload(ResourceGroup.network))\
            .options(joinedload(ResourceGroup.types).joinedload(ResourceType.templatetype))\
            .options(joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr))

    if template_id is not None:
        qry = qry.filter(ResourceType.group_id == ResourceGroup.id,
                         TemplateType.id == ResourceType.type_id,
                         TemplateType.template_id == template_id)

    return qry.all()
def get_network_simple(network_id,**kwargs):
    """
    Fetch a network with its attributes and types, but without nodes,
    links, groups or scenario data.

    raises:
        ResourceNotFoundError if the network does not exist.
    """
    try:
        n = db.DBSession.query(Network).filter(Network.id==network_id).options(joinedload(Network.attributes).joinedload(ResourceAttr.attr)).one()
        #Touch the lazy-loaded relationships so they are populated on the
        #object before it is returned.
        n.types
        for t in n.types:
            t.templatetype.typeattrs
        return n
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id,))
def get_node(node_id, scenario_id=None, **kwargs):
    """
    Fetch a single node, with its attributes and types eagerly loaded.

    If scenario_id is given, each attribute which has data in that
    scenario gets a `resourcescenario` attribute attached to it.

    raises:
        ResourceNotFoundError if the node does not exist.
    """
    try:
        n = db.DBSession.query(Node).filter(Node.id==node_id).options(joinedload(Node.attributes).joinedload(ResourceAttr.attr)).one()
        #Touch the lazy-loaded relationships so they are populated before
        #the ORM object is converted to a JSONObject below.
        n.types
        for t in n.types:
            t.templatetype.typeattrs
            t.templatetype.template
            #set this for easy access later by client
            #t.templatetype.template_name = t.templatetype.template.name

            for ta in t.templatetype.typeattrs:
                if ta.default_dataset_id:
                    ta.default_dataset
                    ta.default_dataset.metadata
                    ta.default_dataset.unit

    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found"%(node_id,))

    n = JSONObject(n)

    if scenario_id is not None:
        res_scens = scenario.get_resource_data('NODE', node_id, scenario_id, None, **kwargs)
        rs_dict = {}
        for rs in res_scens:
            rs_dict[rs.resource_attr_id] = JSONObject(rs)

        #Attach each attribute's data (if any) from the requested scenario.
        for ra in n.attributes:
            if rs_dict.get(ra.id):
                ra.resourcescenario = rs_dict[ra.id]

    return n
def get_link(link_id, scenario_id=None, **kwargs):
    """
    Fetch a single link, with its attributes and types eagerly loaded.

    If scenario_id is given, each attribute which has data in that
    scenario gets a `resourcescenario` attribute attached to it.

    raises:
        ResourceNotFoundError if the link does not exist.
    """
    try:
        l = db.DBSession.query(Link).filter(Link.id==link_id).options(joinedload(Link.attributes).joinedload(ResourceAttr.attr)).one()
        #Touch the lazy-loaded relationships so they are populated before
        #the ORM object is converted to a JSONObject below.
        l.types
        for t in l.types:
            #lazy load the type's template
            t.templatetype.template
            #set the template name on the type
            t.templatetype.template_name = t.templatetype.template.name
            t.templatetype.typeattrs
            for ta in t.templatetype.typeattrs:
                if ta.default_dataset_id:
                    ta.default_dataset
                    ta.default_dataset.metadata
                    ta.default_dataset.unit

    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found"%(link_id,))

    l = JSONObject(l)

    if scenario_id is not None:
        res_scens = scenario.get_resource_data('LINK', link_id, scenario_id, None, **kwargs)
        rs_dict = {}
        for rs in res_scens:
            rs_dict[rs.resource_attr_id] = JSONObject(rs)

        #Attach each attribute's data (if any) from the requested scenario.
        for ra in l.attributes:
            if rs_dict.get(ra.id):
                ra.resourcescenario = rs_dict[ra.id]

    return l
def get_resourcegroup(group_id, scenario_id=None, **kwargs):
    """
    Fetch a single resource group, with its attributes and types eagerly
    loaded.

    If scenario_id is given, each attribute which has data in that
    scenario gets a `resourcescenario` attribute attached to it.

    raises:
        ResourceNotFoundError if the group does not exist.
    """
    try:
        rg = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_id).options(joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr)).one()
        #Touch the lazy-loaded relationships so they are populated before
        #the ORM object is converted to a JSONObject below.
        rg.types
        for t in rg.types:
            #lazy load the type's template
            t.templatetype.template
            #set the template name on the type
            t.templatetype.template_name = t.templatetype.template.name
            t.templatetype.typeattrs
            for ta in t.templatetype.typeattrs:
                if ta.default_dataset_id is not None:
                    ta.default_dataset
                    ta.default_dataset.metadata
                    ta.default_dataset.unit

    except NoResultFound:
        raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id,))

    rg = JSONObject(rg)

    if scenario_id is not None:
        res_scens = scenario.get_resource_data('GROUP', group_id, scenario_id, None, **kwargs)
        rs_dict = {}
        for rs in res_scens:
            rs_dict[rs.resource_attr_id] = JSONObject(rs)

        #Attach each attribute's data (if any) from the requested scenario.
        for ra in rg.attributes:
            if rs_dict.get(ra.id):
                ra.resourcescenario = rs_dict[ra.id]

    return rg
def get_node_by_name(network_id, node_name,**kwargs):
    """
    Look up a single node in a network by its name.

    raises:
        ResourceNotFoundError if no matching node exists.
    """
    try:
        #Fixed: the relationship attribute is `ResourceAttr.attr` (as used
        #by every other query in this module); the original referenced a
        #non-existent `ResourceAttr.Attr`, which raised AttributeError.
        n = db.DBSession.query(Node).filter(Node.name==node_name,
                                         Node.network_id==network_id).\
            options(joinedload(Node.attributes).joinedload(ResourceAttr.attr)).one()
        return n
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found in network %s"%(node_name, network_id,))
def get_link_by_name(network_id, link_name,**kwargs):
    """
    Look up a single link in a network by its name.

    raises:
        ResourceNotFoundError if no matching link exists.
    """
    qry = db.DBSession.query(Link)\
            .filter(Link.name == link_name, Link.network_id == network_id)\
            .options(joinedload(Link.attributes).joinedload(ResourceAttr.attr))
    try:
        return qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found in network %s"%(link_name, network_id))
def get_resourcegroup_by_name(network_id, group_name,**kwargs):
    """
    Look up a single resource group in a network by its name.

    raises:
        ResourceNotFoundError if no matching group exists.
    """
    qry = db.DBSession.query(ResourceGroup)\
            .filter(ResourceGroup.name == group_name,
                    ResourceGroup.network_id == network_id)\
            .options(joinedload(ResourceGroup.attributes).joinedload(ResourceAttr.attr))
    try:
        return qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("ResourceGroup %s not found in network %s"%(group_name,network_id))
def get_network_by_name(project_id, network_name,**kwargs):
    """
    Return a whole network as a complex model.

    The name match is case-insensitive: both sides are lowercased before
    a LIKE comparison.

    raises:
        ResourceNotFoundError if no network with that name exists in the
        project.
    """
    try:
        res = db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one()
        #'Y' -> include attributes; None is passed for include_data
        #(treated as falsy, so no scenario data is loaded).
        net = get_network(res.id, 'Y', None, **kwargs)
        return net
    except NoResultFound:
        raise ResourceNotFoundError("Network with name %s not found"%(network_name))
def network_exists(project_id, network_name,**kwargs):
    """
    Check whether a network with the given name exists in the given
    project (case-insensitive name match).

    returns:
        'Y' if the network exists, 'N' otherwise.
    """
    try:
        db.DBSession.query(Network.id).filter(func.lower(Network.name).like(network_name.lower()), Network.project_id == project_id).one()
        return 'Y'
    except NoResultFound:
        return 'N'
@required_perms("edit_network")
def update_network(network,
                   update_nodes = True,
                   update_links = True,
                   update_groups = True,
                   update_scenarios = True,
                   **kwargs):
    """
    Update an entire network

    args:
        network: the new state of the network (JSONObject-like), including
            optional nodes, links, resourcegroups and scenarios.
        update_nodes / update_links / update_groups / update_scenarios
            (bool): allow callers to skip whole sections of the update.
    returns:
        the updated network, re-read from the DB.
    raises:
        ResourceNotFoundError if the network, or a referenced scenario,
        does not exist.
    """
    log.info("Updating Network %s", network.name)
    user_id = kwargs.get('user_id')
    #check_perm('update_network')
    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network.id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Network with id %s not found"%(network.id))

    net_i.project_id = network.project_id
    net_i.name = network.name
    net_i.description = network.description
    net_i.projection = network.projection
    net_i.layout = network.get_json('layout')
    net_i.appdata = network.get_json('appdata')

    all_resource_attrs = {}
    new_network_attributes = _update_attributes(net_i, network.attributes)
    all_resource_attrs.update(new_network_attributes)
    hdb.add_resource_types(net_i, network.types)

    #Maps temporary node_ids to real node_ids
    node_id_map = dict()

    if network.nodes is not None and update_nodes is True:
        log.info("Updating nodes")
        t0 = time.time()

        #First add all the nodes
        node_id_map = dict([(n.id, n) for n in net_i.nodes])
        for node in network.nodes:
            #If we get a negative or null node id, we know
            #it is a new node.
            if node.id is not None and node.id > 0:
                n = node_id_map[node.id]
                n.name = node.name
                n.description = node.description
                n.x = node.x
                n.y = node.y
                n.status = node.status
                n.layout = node.get_layout()
            else:
                log.info("Adding new node %s", node.name)
                n = net_i.add_node(node.name,
                                   node.description,
                                   node.get_layout(),
                                   node.x,
                                   node.y)
                net_i.nodes.append(n)
                node_id_map[n.id] = n

            all_resource_attrs.update(_update_attributes(n, node.attributes))
            hdb.add_resource_types(n, node.types)
        log.info("Updating nodes took %s", time.time() - t0)

    link_id_map = dict()
    if network.links is not None and update_links is True:
        log.info("Updating links")
        t0 = time.time()
        #NOTE(review): keyed on l.link_id here while nodes use n.id --
        #confirm the Link model exposes `link_id`.
        link_id_map = dict([(l.link_id, l) for l in net_i.links])
        for link in network.links:
            node_1 = node_id_map[link.node_1_id]
            node_2 = node_id_map[link.node_2_id]

            if link.id is None or link.id < 0:
                log.info("Adding new link %s", link.name)
                l = net_i.add_link(link.name,
                                   link.description,
                                   link.get_layout(),
                                   node_1,
                                   node_2)
                net_i.links.append(l)
                link_id_map[link.id] = l
            else:
                l = link_id_map[link.id]
                l.name = link.name
                #Fixed: previously assigned to a misspelled
                #`link_descripion` attribute, so the description was never
                #actually persisted.
                l.description = link.description
                l.node_a = node_1
                l.node_b = node_2
                l.layout = link.get_layout()

            all_resource_attrs.update(_update_attributes(l, link.attributes))
            hdb.add_resource_types(l, link.types)
        log.info("Updating links took %s", time.time() - t0)

    group_id_map = dict()
    #Next all the groups
    if network.resourcegroups is not None and update_groups is True:
        log.info("Updating groups")
        t0 = time.time()
        group_id_map = dict([(g.group_id, g) for g in net_i.resourcegroups])
        for group in network.resourcegroups:
            #If we get a negative or null group id, we know
            #it is a new group.
            if group.id is not None and group.id > 0:
                g_i = group_id_map[group.id]
                g_i.name = group.name
                g_i.description = group.description
                g_i.status = group.status
            else:
                log.info("Adding new group %s", group.name)
                g_i = net_i.add_group(group.name,
                                      group.description,
                                      group.status)
                #Fixed: the original appended `net_i` (the network itself)
                #to its own resourcegroups collection instead of the newly
                #created group.
                net_i.resourcegroups.append(g_i)
                group_id_map[g_i.group_id] = g_i

            all_resource_attrs.update(_update_attributes(g_i, group.attributes))
            hdb.add_resource_types(g_i, group.types)
            group_id_map[group.id] = g_i
        log.info("Updating groups took %s", time.time() - t0)

    errors = []
    if network.scenarios is not None and update_scenarios is True:
        for s in network.scenarios:
            add_scenario = False
            if s.id is not None:
                if s.id > 0:
                    try:
                        scen_i = db.DBSession.query(Scenario).filter(Scenario.id==s.id).one()
                        if scen_i.locked == 'Y':
                            errors.append('Scenario %s was not updated as it is locked'%(s.id))
                            continue
                        scenario.update_scenario(s, flush=False, **kwargs)
                    except NoResultFound:
                        raise ResourceNotFoundError("Scenario %s not found"%(s.id))
                else:
                    add_scenario = True
            else:
                add_scenario = True

            if add_scenario is True:
                log.info("Adding new scenario %s to network", s.name)
                scenario.add_scenario(network.id, s, **kwargs)

    db.DBSession.flush()

    updated_net = get_network(network.id, summary=True, **kwargs)
    return updated_net
@required_perms("edit_network")
def move_network(network_id, target_project_id, **kwargs):
    """
    Move a network to the project with `target_project_id`
    """
    log.info(f"Moving {network_id} to {target_project_id}")

    user_id = kwargs.get('user_id')

    qry = db.DBSession.query(Network).filter(Network.id == network_id)
    try:
        network = qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("Network with id %s not found"%(network_id))

    network.check_write_permission(user_id)
    network.project_id = target_project_id

    db.DBSession.flush()
    return JSONObject(network)
def update_resource_layout(resource_type, resource_id, key, value, **kwargs):
    """
    Set a single key/value pair in a resource's layout and persist it.

    returns:
        the full, updated layout dict.
    """
    log.info("Updating %s %s's layout with {%s:%s}", resource_type, resource_id, key, value)
    resource = get_resource(resource_type, resource_id, **kwargs)

    layout = {} if resource.layout is None else json.loads(resource.layout)
    layout[key] = value
    resource.layout = json.dumps(layout)

    db.DBSession.flush()

    return layout
def get_resource(resource_type, resource_id, **kwargs):
    """
    Dispatch to the appropriate getter for a resource.

    args:
        resource_type (str): 'NODE', 'LINK', 'GROUP' or 'NETWORK'
            (case-insensitive).
        resource_id (int): the ID of the resource.
    returns:
        the resource, or None if resource_type is not recognised
        (matching the original fall-through behaviour).
    """
    resource_type = resource_type.upper()
    if resource_type == 'NODE':
        return get_node(resource_id, **kwargs)
    elif resource_type == 'LINK':
        return get_link(resource_id, **kwargs)
    elif resource_type == 'GROUP':
        return get_resourcegroup(resource_id, **kwargs)
    elif resource_type == 'NETWORK':
        return get_network_simple(resource_id, **kwargs)
def set_network_status(network_id,status,**kwargs):
    """
    Set a network's status flag (e.g. 'A' activates the network).
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'delete_network')
    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    network.check_write_permission(user_id)
    network.status = status

    db.DBSession.flush()
    return 'OK'
def get_network_extents(network_id,**kwargs):
    """
    Given a network, return its maximum extents.
    This would be the minimum x value of all nodes,
    the minimum y value of all nodes,
    the maximum x value of all nodes and
    maximum y value of all nodes.

    @returns NetworkExtents object
    """
    rs = db.DBSession.query(Node.x, Node.y).filter(Node.network_id==network_id).all()
    if len(rs) == 0:
        #No nodes: no extent can be computed. Wrapped in JSONObject for
        #consistency with the populated case below (the original returned
        #a plain dict here).
        return JSONObject(dict(
            network_id = network_id,
            min_x=None,
            max_x=None,
            min_y=None,
            max_y=None,
        ))

    # Compute min/max extent of the network, ignoring nodes with no
    # coordinate.
    x = [r.x for r in rs if r.x is not None]
    if len(x) > 0:
        x_min = min(x)
        x_max = max(x)
    else:
        # Default x extent if all None values
        x_min, x_max = 0, 1

    y = [r.y for r in rs if r.y is not None]
    if len(y) > 0:
        y_min = min(y)
        y_max = max(y)
    else:
        # Default y extent if all None values
        y_min, y_max = 0, 1

    ne = JSONObject(dict(
        network_id = network_id,
        min_x=x_min,
        max_x=x_max,
        min_y=y_min,
        max_y=y_max,
    ))
    return ne
#########################################
def add_nodes(network_id, nodes,**kwargs):
    """
    Add nodes to network

    args:
        network_id (int): the network to add the nodes to.
        nodes (list): the node objects to add; names must be unique within
            the request.
    returns:
        the full list of Node ORM objects now in the network.
    raises:
        HydraError on a duplicate node name;
        ResourceNotFoundError if the network does not exist.
    """
    start_time = datetime.datetime.now()

    names=[] # used to check uniqueness of node name
    for n_i in nodes:
        if n_i.name in names:
            raise HydraError("Duplicate Node Name: %s"%(n_i.name))
        names.append(n_i.name)

    user_id = kwargs.get('user_id')

    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    _add_nodes_to_database(net_i, nodes)

    #NOTE(review): this self-assignment looks like a no-op -- presumably
    #intended to mark the network as modified; confirm whether it can be
    #removed.
    net_i.project_id = net_i.project_id
    db.DBSession.flush()

    node_s = db.DBSession.query(Node).filter(Node.network_id == network_id).all()

    #Maps temporary node_ids to real node_ids
    node_id_map = dict()
    iface_nodes = dict()
    #Nodes are matched back to the request objects by name, which is
    #unique within the network after the check above.
    for n_i in node_s:
        iface_nodes[n_i.name] = n_i

    for node in nodes:
        node_id_map[node.id] = iface_nodes[node.name]

    _bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)

    log.info("Nodes added in %s", get_timing(start_time))

    return node_s
##########################################################################
def add_links(network_id, links,**kwargs):
    '''
    add links to network

    args:
        network_id (int): the network to add the links to.
        links (list): the link objects to add; names must be unique within
            the request.
    returns:
        the full list of Link ORM objects now in the network.
    raises:
        HydraError on a duplicate link name;
        ResourceNotFoundError if the network does not exist.
    '''
    start_time = datetime.datetime.now()

    user_id = kwargs.get('user_id')

    names = [] # used to check uniqueness of link name before saving links to database
    for l_i in links:
        if l_i.name in names:
            raise HydraError("Duplicate Link Name: %s"%(l_i.name))
        names.append(l_i.name)

    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    node_id_map=dict()
    for node in net_i.nodes:
        node_id_map[node.id] = node

    _add_links_to_database(net_i, links, node_id_map)

    #NOTE(review): this self-assignment looks like a no-op -- presumably
    #intended to mark the network as modified; confirm whether it can be
    #removed.
    net_i.project_id = net_i.project_id
    db.DBSession.flush()

    link_s = db.DBSession.query(Link).filter(Link.network_id == network_id).all()
    iface_links = {}
    for l_i in link_s:
        iface_links[l_i.name] = l_i

    _bulk_add_resource_attrs(net_i.id, 'LINK', links, iface_links)

    #Fixed log message: this is the links function, but the original log
    #line said "Nodes added".
    log.info("Links added in %s", get_timing(start_time))

    return link_s
#########################################
def add_node(network_id, node, **kwargs):
    """
    Add a node to a network:

    args:
        network_id (int): the network to add the node to.
        node: the node object to add (name, description, layout, x, y,
            attributes, types).
    returns:
        the new Node ORM object, with attributes loaded.
    raises:
        ResourceNotFoundError if the network does not exist.
    """
    user_id = kwargs.get('user_id')

    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    new_node = net_i.add_node(node.name, node.description, node.layout, node.x, node.y)

    hdb.add_resource_attributes(new_node, node.attributes)

    db.DBSession.flush()

    if node.types is not None and len(node.types) > 0:
        res_types = []
        res_attrs = []
        res_scenarios = {}
        for typesummary in node.types:
            ra, rt, rs = template.set_resource_type(new_node,
                                                    typesummary.id,
                                                    network_id=network_id,
                                                    **kwargs)
            if rt is not None:
                res_types.append(rt)#rt is one object
            res_attrs.extend(ra)#ra is a list of objects
            res_scenarios.update(rs)

        if len(res_types) > 0:
            db.DBSession.bulk_insert_mappings(ResourceType, res_types)
        if len(res_attrs) > 0:
            db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)

            #bulk_insert_mappings does not return the generated IDs, so
            #re-query the most recently inserted resource attrs.
            #NOTE(review): this assumes no concurrent inserts happen
            #between the bulk insert and this query -- confirm.
            new_res_attrs = db.DBSession.query(ResourceAttr)\
                    .order_by(ResourceAttr.id.desc())\
                    .limit(len(res_attrs)).all()

            #Wire the default-dataset resource scenarios up to the newly
            #created resource attr IDs.
            all_rs = []
            for ra in new_res_attrs:
                ra_id = ra.id
                if ra.attr_id in res_scenarios:
                    rs_list = res_scenarios[ra.attr_id]
                    for rs in rs_list:
                        rs_list[rs]['resource_attr_id'] = ra_id
                        all_rs.append(rs_list[rs])

            if len(all_rs) > 0:
                db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)

    db.DBSession.refresh(new_node)

    #lazy load attributes
    new_node.attributes

    return new_node
#########################################################################
def update_node(node, flush=True, **kwargs):
    """
    Update a node.

    If new attributes are present, they will be added to the node.
    The non-presence of attributes does not remove them.

    The flush argument indicates whether dbsession.flush should be called. THis
    is set to False when update_node is called from another function which does
    the flush.
    """
    user_id = kwargs.get('user_id')
    try:
        node_i = db.DBSession.query(Node).filter(Node.id == node.id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found"%(node.id))

    node_i.network.check_write_permission(user_id)

    #Only overwrite fields the caller actually supplied.
    if node.name is not None:
        node_i.name = node.name
    if node.x is not None:
        node_i.x = node.x
    if node.y is not None:
        node_i.y = node.y
    if node.description is not None:
        node_i.description = node.description
    if node.layout is not None:
        node_i.layout = node.get_layout()

    if node.attributes is not None:
        _update_attributes(node_i, node.attributes)

    if node.types is not None:
        hdb.add_resource_types(node_i, node.types)

    if flush is True:
        db.DBSession.flush()

    return node_i
def update_nodes(nodes,**kwargs):
    """
    Update multiple nodes.

    If new attributes are present, they will be added to the node.
    The non-presence of attributes does not remove them.

    %TODO:merge this with the 'update_nodes' functionality in the 'update_netework'
    function, so we're not duplicating functionality. D.R.Y!

    returns: a list of updated nodes
    """
    user_id = kwargs.get('user_id')
    #Defer the flush until every node has been processed.
    updated_nodes = [update_node(n, flush=False, user_id=user_id) for n in nodes]

    db.DBSession.flush()

    return updated_nodes
def set_node_status(node_id, status, **kwargs):
    """
    Set the status of a node and cascade the same status to every link
    attached to it.
    """
    user_id = kwargs.get('user_id')
    try:
        node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found"%(node_id))

    node_i.network.check_write_permission(user_id)

    node_i.status = status

    #A link dangling from an inactive node makes no sense, so its status
    #follows the node's.
    for link in list(node_i.links_to) + list(node_i.links_from):
        link.status = status

    db.DBSession.flush()

    return node_i
def _unique_data_qry(count=1):
    """
    Build (but do not execute) a query for ResourceScenarios whose dataset
    is referenced exactly `count` times in total.

    args:
        count (int): the number of references a dataset must have to be
            included (default 1, i.e. datasets used in only one place).
    returns:
        an un-executed SQLAlchemy Query.
    """
    rs = aliased(ResourceScenario)

    #Subquery: dataset IDs which appear exactly `count` times across all
    #resource scenarios.
    subqry = db.DBSession.query(
        rs.dataset_id,
        func.count(rs.dataset_id).label('dataset_count')).\
            group_by(rs.dataset_id).\
            having(func.count(rs.dataset_id) == count).\
            subquery()

    #Join back to the resource scenarios carrying those datasets.
    unique_data = db.DBSession.query(rs).\
        join(subqry,
             and_(rs.dataset_id==subqry.c.dataset_id)
            ).\
        filter(
            rs.resource_attr_id == ResourceAttr.id
        )
    return unique_data
def delete_network(network_id, purge_data,**kwargs):
    """
    Call the original purge network call for backward compatibility

    args:
        network_id (int): the network to delete.
        purge_data ('Y'/'N'): passed straight through to purge_network.
    returns:
        'OK'
    """
    #Kept as a thin alias so existing callers of `delete_network` keep
    #working.
    return purge_network(network_id, purge_data, **kwargs)
def purge_network(network_id, purge_data,**kwargs):
    """
    Remove a network from DB completely
    Use purge_data to try to delete the data associated with only this network.
    If no other resources link to this data, it will be deleted.
    """
    user_id = kwargs.get('user_id')

    qry = db.DBSession.query(Network).filter(Network.id == network_id)
    try:
        network = qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    log.info("Deleting network %s, id=%s", network.name, network_id)

    network.check_write_permission(user_id)
    db.DBSession.delete(network)
    db.DBSession.flush()
    return 'OK'
def _purge_datasets_unique_to_resource(ref_key, ref_id):
    """
    Delete datasets that are used *only* by the given resource.

    Find the number of times a a resource and dataset combination
    occurs. If this equals the number of times the dataset appears, then
    we can say this dataset is unique to this resource, therefore it can be deleted

    args:
        ref_key (str): 'NODE', 'LINK' or 'GROUP'.
        ref_id (int): the ID of the resource being purged.
    """
    count_qry = db.DBSession.query(ResourceScenario.dataset_id,
                                   func.count(ResourceScenario.dataset_id)).group_by(
                                       ResourceScenario.dataset_id).filter(
                                           ResourceScenario.resource_attr_id==ResourceAttr.id)

    #Query.filter returns a *new* query, so the result must be reassigned.
    #(The original discarded these filtered queries, meaning the count was
    #never actually restricted to the resource in question.)
    if ref_key == 'NODE':
        count_qry = count_qry.filter(ResourceAttr.node_id == ref_id)
    elif ref_key == 'LINK':
        count_qry = count_qry.filter(ResourceAttr.link_id == ref_id)
    elif ref_key == 'GROUP':
        count_qry = count_qry.filter(ResourceAttr.group_id == ref_id)

    count_rs = count_qry.all()

    for dataset_id, count in count_rs:
        #Total number of usages of this dataset anywhere in the DB.
        full_dataset_count = db.DBSession.query(ResourceScenario)\
                .filter(ResourceScenario.dataset_id==dataset_id).count()

        if full_dataset_count == count:
            """First delete all the resource scenarios"""
            datasets_rs_to_delete = db.DBSession.query(ResourceScenario)\
                    .filter(ResourceScenario.dataset_id==dataset_id).all()
            for dataset_rs in datasets_rs_to_delete:
                db.DBSession.delete(dataset_rs)

            """Then delete all the datasets"""
            dataset_to_delete = db.DBSession.query(Dataset)\
                    .filter(Dataset.id == dataset_id).one()
            log.info("Deleting %s dataset %s (%s)",\
                ref_key, dataset_to_delete.name, dataset_to_delete.id)
            db.DBSession.delete(dataset_to_delete)
def delete_node(node_id, purge_data,**kwargs):
    """
    Remove node from DB completely
    If there are attributes on the node, use purge_data to try to
    delete the data. If no other resources link to this data, it
    will be deleted.
    """
    user_id = kwargs.get('user_id')
    try:
        node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found"%(node_id))

    #Remove the node's group memberships first so no orphan items remain.
    group_items = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.node_id == node_id).all()
    for item in group_items:
        db.DBSession.delete(item)

    if purge_data == 'Y':
        _purge_datasets_unique_to_resource('NODE', node_id)

    log.info("Deleting node %s, id=%s", node_i.name, node_id)

    node_i.network.check_write_permission(user_id)
    db.DBSession.delete(node_i)
    db.DBSession.flush()
    return 'OK'
def add_link(network_id, link,**kwargs):
    """
    Add a link to a network

    args:
        network_id (int): the network to add the link to.
        link: the link object to add (name, description, layout, node IDs,
            attributes, types).
    returns:
        the new Link ORM object, with attributes loaded.
    raises:
        ResourceNotFoundError if the network or either end node does not
        exist.
    """
    user_id = kwargs.get('user_id')

    #check_perm(user_id, 'edit_topology')
    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    try:
        node_1 = db.DBSession.query(Node).filter(Node.id==link.node_1_id).one()
        node_2 = db.DBSession.query(Node).filter(Node.id==link.node_2_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Nodes for link not found")

    link_i = net_i.add_link(link.name, link.description, link.layout, node_1, node_2)

    hdb.add_resource_attributes(link_i, link.attributes)
    db.DBSession.flush()

    if link.types is not None and len(link.types) > 0:
        res_types = []
        res_attrs = []
        res_scenarios = {}
        for typesummary in link.types:
            ra, rt, rs = template.set_resource_type(link_i,
                                                    typesummary.id,
                                                    network_id=network_id,
                                                    **kwargs)
            res_types.append(rt)
            res_attrs.extend(ra)
            res_scenarios.update(rs)#rs is a dict

        if len(res_types) > 0:
            db.DBSession.bulk_insert_mappings(ResourceType, res_types)
        if len(res_attrs) > 0:
            db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)

            #bulk_insert_mappings does not return the generated IDs, so
            #re-query the most recently inserted resource attrs.
            #NOTE(review): this assumes no concurrent inserts happen
            #between the bulk insert and this query -- confirm.
            new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()

            #Wire the default-dataset resource scenarios up to the newly
            #created resource attr IDs.
            all_rs = []
            for ra in new_res_attrs:
                ra_id = ra.id
                if ra.attr_id in res_scenarios:
                    rs_list = res_scenarios[ra.attr_id]
                    for rs in rs_list:
                        rs_list[rs]['resource_attr_id'] = ra_id
                        all_rs.append(rs_list[rs])

            if len(all_rs) > 0:
                db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)

    db.DBSession.refresh(link_i)

    #lazy load attributes
    link_i.attributes

    return link_i
@required_perms("edit_network")
def update_links(links, **kwargs):
    """Update several links in one call, flushing the session once at the end."""
    log.info("Updating %s links", len(links))

    for link in links:
        update_link(link, flush=False, **kwargs)

    db.DBSession.flush()
def update_link(link, flush=False, **kwargs):
    """
    Update a link.

    Each field is optional: only the fields the caller supplied (i.e.
    those that are not None) are written to the database.
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'edit_topology')
    try:
        link_i = db.DBSession.query(Link).filter(Link.id == link.id).one()
        link_i.network.check_write_permission(user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found"%(link.id))

    #Simple scalar fields copy straight across when present.
    for field in ('name', 'node_1_id', 'node_2_id', 'description'):
        new_value = getattr(link, field)
        if new_value is not None:
            setattr(link_i, field, new_value)

    if link.layout is not None:
        link_i.layout = link.get_layout()

    if link.attributes is not None:
        hdb.add_resource_attributes(link_i, link.attributes)

    if link.types is not None:
        hdb.add_resource_types(link_i, link.types)

    if flush is True:
        db.DBSession.flush()

    return link_i
def set_link_status(link_id, status, **kwargs):
    """
    Set the status of a link

    returns:
        the updated link, for consistency with set_node_status and
        set_group_status (the original returned None; returning the link
        is backward compatible).
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'edit_topology')
    try:
        link_i = db.DBSession.query(Link).filter(Link.id == link_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found"%(link_id))

    link_i.network.check_write_permission(user_id)

    link_i.status = status
    db.DBSession.flush()

    return link_i
def delete_link(link_id, purge_data,**kwargs):
    """
    Remove link from DB completely
    If there are attributes on the link, use purge_data to try to
    delete the data. If no other resources link to this data, it
    will be deleted.

    returns:
        'OK', for consistency with delete_node and delete_network (the
        original returned None; this is backward compatible).
    """
    user_id = kwargs.get('user_id')
    try:
        link_i = db.DBSession.query(Link).filter(Link.id == link_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Link %s not found"%(link_id))

    #Remove the link's group memberships first so no orphan items remain.
    group_items = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.link_id==link_id).all()
    for gi in group_items:
        db.DBSession.delete(gi)

    if purge_data == 'Y':
        _purge_datasets_unique_to_resource('LINK', link_id)

    log.info("Deleting link %s, id=%s", link_i.name, link_id)

    link_i.network.check_write_permission(user_id)
    db.DBSession.delete(link_i)
    db.DBSession.flush()

    return 'OK'
def add_group(network_id, group,**kwargs):
    """
    Add a resourcegroup to a network

    args:
        network_id (int): the network to add the group to.
        group: the group object to add (name, description, status,
            attributes, types).
    returns:
        the new ResourceGroup ORM object, with attributes loaded.
    raises:
        ResourceNotFoundError if the network does not exist.
    """
    user_id = kwargs.get('user_id')
    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    res_grp_i = net_i.add_group(group.name, group.description, group.status)

    hdb.add_resource_attributes(res_grp_i, group.attributes)

    db.DBSession.flush()
    if group.types is not None and len(group.types) > 0:
        res_types = []
        res_attrs = []
        res_scenarios = {}
        for typesummary in group.types:
            ra, rt, rs = template.set_resource_type(res_grp_i,
                                                    typesummary.id,
                                                    network_id=network_id,
                                                    **kwargs)
            res_types.append(rt)
            res_attrs.extend(ra)
            res_scenarios.update(rs)#rs is a dict

        if len(res_types) > 0:
            db.DBSession.bulk_insert_mappings(ResourceType, res_types)
        if len(res_attrs) > 0:
            db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)

            #bulk_insert_mappings does not return the generated IDs, so
            #re-query the most recently inserted resource attrs.
            #NOTE(review): this assumes no concurrent inserts happen
            #between the bulk insert and this query -- confirm.
            new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()

            #Wire the default-dataset resource scenarios up to the newly
            #created resource attr IDs.
            all_rs = []
            for ra in new_res_attrs:
                ra_id = ra.id
                if ra.attr_id in res_scenarios:
                    rs_list = res_scenarios[ra.attr_id]
                    for rs in rs_list:
                        rs_list[rs]['resource_attr_id'] = ra_id
                        all_rs.append(rs_list[rs])

            if len(all_rs) > 0:
                db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)

    db.DBSession.refresh(res_grp_i)

    #lazy load attributes
    res_grp_i.attributes

    return res_grp_i
def update_group(group,**kwargs):
    """
    Update a group.

    If new attributes are present, they will be added to the group.
    The non-presence of attributes does not remove them.

    Args:
        group: An object with at least an ``id``; ``name``, ``description``,
            ``attributes`` and ``types`` are applied only if supplied.

    Returns:
        The updated ResourceGroup ORM object.

    Raises:
        ResourceNotFoundError: If the group does not exist.
    """
    user_id = kwargs.get('user_id')
    try:
        group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group.id).one()
    except NoResultFound:
        raise ResourceNotFoundError("group %s not found"%(group.id))

    group_i.network.check_write_permission(user_id)

    # Only overwrite fields which were actually supplied.
    # (idiom fix: `is not None` rather than `!= None`)
    group_i.name = group.name if group.name is not None else group_i.name
    # NOTE: an empty-string description is deliberately treated as
    # "not supplied" here (truthiness test) -- preserved behaviour.
    group_i.description = group.description if group.description else group_i.description

    if group.attributes is not None:
        _update_attributes(group_i, group.attributes)

    if group.types is not None:
        hdb.add_resource_types(group_i, group.types)

    db.DBSession.flush()

    return group_i
def set_group_status(group_id, status, **kwargs):
    """
    Set the status flag of a resource group (for example 'X' to
    soft-delete it, 'A' to reactivate it).
    """
    requesting_user = kwargs.get('user_id')

    group_qry = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id)
    try:
        target_group = group_qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id))

    # Status changes require write access on the owning network.
    target_group.network.check_write_permission(requesting_user)
    target_group.status = status

    db.DBSession.flush()

    return target_group
def delete_group(group_id, purge_data,**kwargs):
    """
    Remove a group from the DB completely.

    If there are attributes on the group, use purge_data='Y' to try to
    delete the associated datasets. If no other resources link to that
    data, it will be deleted.

    Raises:
        ResourceNotFoundError: If the group does not exist.
    """
    user_id = kwargs.get('user_id')
    try:
        group_i = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == group_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Group %s not found"%(group_id))

    # Remove any group items referencing this group before deleting it.
    group_items = db.DBSession.query(ResourceGroupItem).filter(
                                        ResourceGroupItem.group_id==group_id).all()
    for gi in group_items:
        db.DBSession.delete(gi)

    if purge_data == 'Y':
        _purge_datasets_unique_to_resource('GROUP', group_id)

    log.info("Deleting group %s, id=%s", group_i.name, group_id)

    group_i.network.check_write_permission(user_id)
    db.DBSession.delete(group_i)
    db.DBSession.flush()
def get_scenarios(network_id,**kwargs):
    """
    Return every scenario belonging to the given network.
    """
    requesting_user = kwargs.get('user_id')

    network_qry = db.DBSession.query(Network).filter(Network.id == network_id)
    try:
        network = network_qry.one()
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    network.check_read_permission(user_id=requesting_user)

    return network.scenarios
def validate_network_topology(network_id,**kwargs):
    """
    Check for the presence of orphan nodes in a network.

    Returns the set of active node IDs which are not attached to any
    active link.
    """
    requesting_user = kwargs.get('user_id')
    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
        network.check_write_permission(user_id=requesting_user)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    # All active ('A') nodes in the network.
    active_node_ids = {n.node_id for n in network.nodes if n.status == 'A'}

    # All nodes referenced by at least one active link.
    connected_node_ids = set()
    for link in network.links:
        if link.status != 'A':
            continue
        connected_node_ids.add(link.node_1_id)
        connected_node_ids.add(link.node_2_id)

    return active_node_ids - connected_node_ids
def get_resource(resource_type, resource_id, **kwargs):
    """
    Fetch a node, link, group or network by ID, depending on
    *resource_type* (case-insensitive: 'NODE', 'LINK', 'GROUP'
    or 'NETWORK').
    """
    user_id = kwargs.get('user_id')

    ref_key = resource_type.upper()
    if ref_key == 'NODE':
        return get_node(resource_id, **kwargs)
    if ref_key == 'LINK':
        return get_link(resource_id, **kwargs)
    if ref_key == 'GROUP':
        return get_resourcegroup(resource_id, **kwargs)
    if ref_key == 'NETWORK':
        return get_network_simple(resource_id, **kwargs)
def get_resources_of_type(network_id, type_id, **kwargs):
    """
    Return the nodes, links and resource groups in a network which have
    the given template type. Each returned object is tagged with a
    ``ref_key`` attribute ('NODE', 'LINK' or 'GROUP') so callers can
    tell them apart in the combined list.
    """
    typed_nodes = db.DBSession.query(Node).join(ResourceType).filter(
        Node.network_id==network_id, ResourceType.type_id==type_id).all()
    typed_links = db.DBSession.query(Link).join(ResourceType).filter(
        Link.network_id==network_id, ResourceType.type_id==type_id).all()
    typed_groups = db.DBSession.query(ResourceGroup).join(ResourceType).filter(
        ResourceGroup.network_id==network_id, ResourceType.type_id==type_id).all()

    # Tag each resource with its kind before merging the lists.
    for resource in typed_nodes:
        resource.ref_key = 'NODE'
    for resource in typed_links:
        resource.ref_key = 'LINK'
    for resource in typed_groups:
        resource.ref_key = 'GROUP'

    return typed_nodes + typed_links + typed_groups
def clean_up_network(network_id, **kwargs):
    """
    Purge any deleted nodes, links, resourcegroups and scenarios in a given
    network. "Deleted" means resources whose status is 'X'; these are
    removed from the DB entirely.
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'delete_network')
    try:
        log.debug("Querying Network %s", network_id)
        # Load only the network shell: scenarios / nodes / links / groups
        # are noload()-ed because they are queried separately below, while
        # the template types are eagerly joined.
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).\
                options(noload(Network.scenarios)).options(noload(Network.nodes)).options(noload(Network.links)).options(
                    noload(Network.resourcegroups)).options(
                        joinedload(Network.types)\
                        .joinedload(ResourceType.templatetype)\
                        .joinedload(TemplateType.template)
                    ).one()
        net_i.attributes  # touch to force-load the attributes relationship

        #Define the basic resource queries: everything flagged 'X'
        node_qry = db.DBSession.query(Node).filter(Node.network_id==network_id).filter(Node.status=='X').all()
        link_qry = db.DBSession.query(Link).filter(Link.network_id==network_id).filter(Link.status=='X').all()
        group_qry = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==network_id).filter(ResourceGroup.status=='X').all()
        scenario_qry = db.DBSession.query(Scenario).filter(Scenario.network_id==network_id).filter(Scenario.status=='X').all()

        for n in node_qry:
            db.DBSession.delete(n)
        for l in link_qry:
            db.DBSession.delete(l)
        for g in group_qry:
            db.DBSession.delete(g)
        for s in scenario_qry:
            db.DBSession.delete(s)

    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found"%(network_id))

    db.DBSession.flush()
    return 'OK'
def get_all_node_data(network_id, scenario_id, node_ids=None, include_metadata=False, **kwargs):
    """
    Return the data for all nodes in a network, for one scenario.

    Args:
        network_id (int): The network to query.
        scenario_id (int): The scenario whose data is returned.
        node_ids (list(int)): Optional subset of node IDs to restrict to.
        include_metadata (bool): If True, also attach each dataset's
            metadata (expensive). Defaults to False.

    Returns:
        A list of JSONObjects, one per resource attribute, each with keys
        'id', 'attr_id', 'attr_name' and 'resourcescenario'.
    """
    # Bug fix: the include_metadata argument was previously ignored -- a
    # hard-coded 'N' (which is not `True`, so metadata was never fetched)
    # was always passed down instead of the caller's flag.
    resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'NODE', ref_ids=node_ids, include_metadata=include_metadata, **kwargs)

    node_data = []

    for rs in resource_scenarios:
        resource_attr = JSONObject({
            'id': rs.resourceattr.id,
            'attr_id' : rs.resourceattr.attr_id,
            'attr_name' : rs.resourceattr.attr.name,
            'resourcescenario': rs
        })
        node_data.append(resource_attr)

    return node_data
def get_all_link_data(network_id, scenario_id, link_ids=None, include_metadata=False, **kwargs):
    """
    Return the data for all links in a network, for one scenario.

    Args:
        network_id (int): The network to query.
        scenario_id (int): The scenario whose data is returned.
        link_ids (list(int)): Optional subset of link IDs to restrict to.
        include_metadata (bool): If True, also attach each dataset's
            metadata (expensive). Defaults to False.

    Returns:
        A list of JSONObjects, one per resource attribute, each with keys
        'id', 'attr_id', 'attr_name' and 'resourcescenario'.
    """
    # Bug fix: the include_metadata argument was previously ignored -- a
    # hard-coded 'N' (which is not `True`, so metadata was never fetched)
    # was always passed down instead of the caller's flag.
    resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'LINK', ref_ids=link_ids, include_metadata=include_metadata, **kwargs)

    link_data = []

    for rs in resource_scenarios:
        resource_attr = JSONObject({
            'id': rs.resourceattr.id,
            'attr_id' : rs.resourceattr.attr_id,
            'attr_name' : rs.resourceattr.attr.name,
            'resourcescenario': rs
        })
        link_data.append(resource_attr)

    return link_data
def get_all_group_data(network_id, scenario_id, group_ids=None, include_metadata=False, **kwargs):
    """
    Return the data for all resource groups in a network, for one scenario.

    Args:
        network_id (int): The network to query.
        scenario_id (int): The scenario whose data is returned.
        group_ids (list(int)): Optional subset of group IDs to restrict to.
        include_metadata (bool): If True, also attach each dataset's
            metadata (expensive). Defaults to False.

    Returns:
        A list of JSONObjects, one per resource attribute, each with keys
        'id', 'attr_id', 'attr_name' and 'resourcescenario'.
    """
    # Bug fix: the include_metadata argument was previously ignored -- a
    # hard-coded 'N' (which is not `True`, so metadata was never fetched)
    # was always passed down instead of the caller's flag.
    resource_scenarios = get_attributes_for_resource(network_id, scenario_id, 'GROUP', ref_ids=group_ids, include_metadata=include_metadata, **kwargs)

    group_data = []

    for rs in resource_scenarios:
        resource_attr = JSONObject({
            'id': rs.resourceattr.id,
            'attr_id' : rs.resourceattr.attr_id,
            'attr_name' : rs.resourceattr.attr.name,
            'resourcescenario': rs
        })
        group_data.append(resource_attr)

    return group_data
def get_attributes_for_resource(network_id, scenario_id, ref_key, ref_ids=None, include_metadata=False, **kwargs):
    """
    Return the ResourceScenario objects for all resources of kind *ref_key*
    in the given network / scenario.

    Args:
        network_id (int): The network to query.
        scenario_id (int): The scenario to query (must belong to the network).
        ref_key (string): 'NODE', 'LINK' or 'GROUP'.
        ref_ids (list(int)): Optional subset of resource IDs to return data for.
        include_metadata (bool): If True, attach dataset metadata to each
            returned resource scenario.

    Raises:
        HydraError: If the network or scenario does not exist.
    """
    try:
        db.DBSession.query(Network).filter(Network.id==network_id).one()
    except NoResultFound:
        raise HydraError("Network %s does not exist"%network_id)

    try:
        db.DBSession.query(Scenario).filter(Scenario.id==scenario_id, Scenario.network_id==network_id).one()
    except NoResultFound:
        raise HydraError("Scenario %s not found."%scenario_id)

    rs_qry = db.DBSession.query(ResourceScenario).filter(
                ResourceAttr.id==ResourceScenario.resource_attr_id,
                ResourceScenario.scenario_id==scenario_id,
                ResourceAttr.ref_key==ref_key)\
            .join(ResourceScenario.dataset)

    log.info("Querying %s data",ref_key)
    # Only push the ID filter into SQL for smallish lists -- some DB
    # backends limit the size of an IN clause (hence the 999 cap). Larger
    # lists are filtered in python below.
    if ref_ids is not None and len(ref_ids) < 999:
        if ref_key == 'NODE':
            rs_qry = rs_qry.filter(ResourceAttr.node_id.in_(ref_ids))
        elif ref_key == 'LINK':
            rs_qry = rs_qry.filter(ResourceAttr.link_id.in_(ref_ids))
        elif ref_key == 'GROUP':
            rs_qry = rs_qry.filter(ResourceAttr.group_id.in_(ref_ids))

    all_resource_scenarios = rs_qry.all()
    log.info("Data retrieved")

    resource_scenarios = []
    dataset_ids = []
    if ref_ids is not None:
        log.info("Pulling out requested info")
        # Filter in python (covers lists too long for SQL) and record the
        # dataset IDs so the metadata lookup below can be narrowed.
        for rs in all_resource_scenarios:
            ra = rs.resourceattr
            if ref_key == 'NODE':
                if ra.node_id in ref_ids:
                    resource_scenarios.append(rs)
                    if rs.dataset_id not in dataset_ids:
                        dataset_ids.append(rs.dataset_id)
            elif ref_key == 'LINK':
                if ra.link_id in ref_ids:
                    resource_scenarios.append(rs)
                    if rs.dataset_id not in dataset_ids:
                        dataset_ids.append(rs.dataset_id)
            elif ref_key == 'GROUP':
                if ra.group_id in ref_ids:
                    resource_scenarios.append(rs)
                    if rs.dataset_id not in dataset_ids:
                        dataset_ids.append(rs.dataset_id)
            else:
                # Bug fix: this branch previously appended the ResourceAttr
                # (ra) instead of the ResourceScenario, which broke the
                # rs.dataset access at the bottom of this function. It now
                # mirrors the branches above.
                resource_scenarios.append(rs)
                if rs.dataset_id not in dataset_ids:
                    dataset_ids.append(rs.dataset_id)
        log.info("Requested info pulled out.")
    else:
        resource_scenarios = all_resource_scenarios

    log.info("Retrieved %s resource attrs", len(resource_scenarios))

    if include_metadata is True:
        metadata_qry = db.DBSession.query(Metadata).filter(
            ResourceAttr.ref_key == ref_key,
            ResourceScenario.resource_attr_id == ResourceAttr.id,
            ResourceScenario.scenario_id == scenario_id,
            Dataset.id == ResourceScenario.dataset_id,
            Metadata.dataset_id == Dataset.id)

        log.info("Querying node metadata")
        all_metadata = metadata_qry.all()
        log.info("Node metadata retrieved")

        metadata = []
        if ref_ids is not None:
            # Keep only the metadata belonging to the requested datasets.
            for m in all_metadata:
                if m.dataset_id in dataset_ids:
                    metadata.append(m)
        else:
            metadata = all_metadata

        log.info("%s metadata items retrieved", len(metadata))
        # Group metadata rows per dataset so they can be attached below.
        metadata_dict = {}
        for m in metadata:
            if metadata_dict.get(m.dataset_id):
                metadata_dict[m.dataset_id].append(m)
            else:
                metadata_dict[m.dataset_id] = [m]

    for rs in resource_scenarios:
        d = rs.dataset
        if d.hidden == 'Y':
            # Hidden datasets are only visible with explicit read
            # permission; otherwise blank out the value.
            try:
                d.check_read_permission(kwargs.get('user_id'))
            except:
                d.value = None
                d.metadata = []
        else:
            if include_metadata is True:
                rs.dataset.metadata = metadata_dict.get(d.id, [])

    return resource_scenarios
def get_all_resource_attributes_in_network(attr_id, network_id, include_resources=True, **kwargs):
    """
    Find every resource attribute in the network matching the supplied attr_id

    Args:
        attr_id (int): The attribute on which to match
        network_id (int): The ID of the network to search
        include_resources (bool): A flag to indicate whether to return the
            resource that the resource attribute belongs to.
            Including resources can have a performance implication

    Returns:
        List of JSONObjects, each with a generic ``resource`` attribute
        set to the owning node, link, group or network.

    Raises:
        HydraError if the attr_id or network_id do not exist
    """
    user_id = kwargs.get('user_id')

    # Verify the attribute exists before running the expensive join below.
    try:
        a = db.DBSession.query(Attr).filter(Attr.id == attr_id).one()
    except NoResultFound:
        raise HydraError("Attribute %s not found"%(attr_id,))

    # Match RAs owned by any resource kind in this network. The outer joins
    # plus joinedload pull the owning resources in a single round trip.
    ra_qry = db.DBSession.query(ResourceAttr).filter(
        ResourceAttr.attr_id == attr_id,
        or_(Network.id == network_id,
            Node.network_id == network_id,
            Link.network_id == network_id,
            ResourceGroup.network_id == network_id)
    ).outerjoin(ResourceAttr.node)\
    .outerjoin(ResourceAttr.link)\
    .outerjoin(ResourceAttr.network)\
    .outerjoin(ResourceAttr.resourcegroup)\
    .options(joinedload(ResourceAttr.node))\
    .options(joinedload(ResourceAttr.link))\
    .options(joinedload(ResourceAttr.resourcegroup))\
    .options(joinedload(ResourceAttr.network))

    resourceattrs = ra_qry.all()

    json_ra = []
    # Convert to JSONObjects and set a generic 'resource' attribute
    # pointing at whichever owning resource is non-null.
    for ra in resourceattrs:
        ra_j = JSONObject(ra, extras={'node':JSONObject(ra.node) if ra.node else None,
                                      'link':JSONObject(ra.link) if ra.link else None,
                                      'resourcegroup':JSONObject(ra.resourcegroup) if ra.resourcegroup else None,
                                      'network':JSONObject(ra.network) if ra.network else None})
        if ra_j.node is not None:
            ra_j.resource = ra_j.node
        elif ra_j.link is not None:
            ra_j.resource = ra_j.link
        elif ra_j.resourcegroup is not None:
            ra_j.resource = ra_j.resourcegroup
        elif ra.network is not None:
            ra_j.resource = ra_j.network

        json_ra.append(ra_j)

    return json_ra
def get_all_resource_data(scenario_id, include_metadata=False, page_start=None, page_end=None, **kwargs):
    """
    A function which returns the data for all resources in a network.

    Args:
        scenario_id (int): The scenario whose data is returned.
        include_metadata (bool): If True, also fetch dataset metadata
            (can be slow for large networks).
        page_start (int): Optional start index for paging the results.
        page_end (int): Optional end index for paging the results.

    Returns:
        A list of 'ResourceData' namedtuples, one per resource scenario.
    """
    # One flat query joining RS -> dataset -> attr -> owning resource.
    # The `case` expression picks the owning resource's name, whichever
    # kind of resource it is.
    rs_qry = db.DBSession.query(
                ResourceAttr.attr_id,
                Attr.name.label('attr_name'),
                ResourceAttr.id.label('resource_attr_id'),
                ResourceAttr.ref_key,
                ResourceAttr.network_id,
                ResourceAttr.node_id,
                ResourceAttr.link_id,
                ResourceAttr.group_id,
                ResourceAttr.project_id,
                ResourceAttr.attr_is_var,
                ResourceScenario.scenario_id,
                ResourceScenario.source,
                Dataset.id.label('dataset_id'),
                Dataset.name.label('dataset_name'),
                Dataset.value,
                Dataset.unit_id,
                Dataset.hidden,
                Dataset.type,
                null().label('metadata'),
                case(
                    (ResourceAttr.node_id != None, Node.name),
                    (ResourceAttr.link_id != None, Link.name),
                    (ResourceAttr.group_id != None, ResourceGroup.name),
                    (ResourceAttr.network_id != None, Network.name),
                ).label('ref_name'),
            ).join(ResourceScenario, ResourceScenario.resource_attr_id==ResourceAttr.id)\
            .join(Dataset, ResourceScenario.dataset_id==Dataset.id).\
            join(Attr, ResourceAttr.attr_id==Attr.id).\
            outerjoin(Node, ResourceAttr.node_id==Node.id).\
            outerjoin(Link, ResourceAttr.link_id==Link.id).\
            outerjoin(ResourceGroup, ResourceAttr.group_id==ResourceGroup.id).\
            outerjoin(Network, ResourceAttr.network_id==Network.id).\
            filter(ResourceScenario.scenario_id==scenario_id)

    all_resource_data = rs_qry.all()

    # Apply in-memory paging if requested.
    if page_start is not None and page_end is None:
        all_resource_data = all_resource_data[page_start:]
    elif page_start is not None and page_end is not None:
        all_resource_data = all_resource_data[page_start:page_end]

    log.info("%s datasets retrieved", len(all_resource_data))

    if include_metadata is True:
        metadata_qry = db.DBSession.query(
            distinct(Metadata.dataset_id).label('dataset_id'),
            Metadata.key,
            Metadata.value).filter(
                ResourceScenario.resource_attr_id == ResourceAttr.id,
                ResourceScenario.scenario_id == scenario_id,
                Dataset.id == ResourceScenario.dataset_id,
                Metadata.dataset_id == Dataset.id)

        log.info("Querying node metadata")
        metadata = metadata_qry.all()
        log.info("%s metadata items retrieved", len(metadata))

        # Group the metadata rows by dataset so they can be attached below.
        metadata_dict = {}
        for m in metadata:
            if metadata_dict.get(m.dataset_id):
                metadata_dict[m.dataset_id].append(m)
            else:
                metadata_dict[m.dataset_id] = [m]

    return_data = []
    for ra in all_resource_data:
        ra_dict = ra._asdict()
        if ra.hidden == 'Y':
            # Hidden datasets are only visible to users with explicit read
            # permission; otherwise blank the value out.
            try:
                d = db.DBSession.query(Dataset).filter(
                    Dataset.id == ra.dataset_id
                ).options(noload(Dataset.metadata)).one()
                d.check_read_permission(kwargs.get('user_id'))
            except:
                ra_dict['value'] = None
                ra_dict['metadata'] = []
        else:
            if include_metadata is True:
                ra_dict['metadata'] = metadata_dict.get(ra.dataset_id, [])

        return_data.append(namedtuple('ResourceData', ra_dict.keys())(**ra_dict))

    log.info("Returning %s datasets", len(return_data))

    return return_data
def clone_network(network_id,
                  recipient_user_id=None,
                  new_network_name=None,
                  new_network_description=None,
                  project_id=None,
                  project_name=None,
                  new_project=True,
                  include_outputs=False,
                  scenario_ids=[],
                  creator_is_owner=False,
                  **kwargs):
    """
    Create an exact clone of the specified network for the specified user.

    If project_id is specified, put the new network in there.
    Otherwise create a new project with the specified name and put it in there.

    Args:
        network_id (int): The network to clone.
        recipient_user_id (int): The user who will own the clone
            (defaults to the requesting user).
        new_network_name (string): Name for the clone; defaults to the
            source network's name, suffixed with a count if taken.
        new_network_description (string): Optional new description.
        project_id (int): Target project for the clone.
        project_name (string): Name for a newly created project.
        new_project (bool): If True and no project_id is given, create a
            new project to hold the clone.
        include_outputs (bool): If True, result (output) data is cloned too.
        scenario_ids (list(int)): If non-empty, only these scenarios are
            cloned.  NOTE(review): mutable default argument -- safe here
            only because it is never mutated.
        creator_is_owner (Bool) : The user who creates the network isn't added as an owner
            (won't have an entry in tNetworkOwner and therefore won't see the network in 'get_project')

    Returns:
        int: The ID of the newly created network.
    """
    user_id = kwargs['user_id']

    ex_net = db.DBSession.query(Network).filter(Network.id==network_id).one()

    ex_net.check_read_permission(user_id)

    if recipient_user_id is None:
        recipient_user_id = user_id

    if project_id is None and new_project == True:
        log.info("Creating a new project for cloned network")

        ex_proj = db.DBSession.query(Project).filter(Project.id==ex_net.project_id).one()

        user = db.DBSession.query(User).filter(User.id==user_id).one()

        project = Project()
        if project_name is None or project_name=="":
            project_name=ex_proj.name + " (Cloned by %s)" % user.display_name

        #check a project with this name doesn't already exist:
        ex_project = db.DBSession.query(Project).filter(Project.name == project_name,
                                                        Project.created_by == user_id).all()

        #If it exists, use it.
        if len(ex_project) > 0:
            project=ex_project[0]
        else:
            project.name = project_name
            project.created_by = user_id

            if creator_is_owner is True and user_id != recipient_user_id:
                project.set_owner(user_id)

            if recipient_user_id is not None:
                project.set_owner(recipient_user_id)

            db.DBSession.add(project)
            db.DBSession.flush()

        project_id = project.id

    elif project_id is None:
        log.info("Using current project for cloned network")
        project_id = ex_net.project_id

    if new_network_name is None or new_network_name == "":
        new_network_name = ex_net.name

    log.info('Cloning Network...')

    #Find if there are any networks with this name in the project already;
    #if so, suffix the new name with a count to keep it unique.
    ex_network = db.DBSession.query(Network).filter(Network.project_id == project_id,
                                                    Network.name.like(
                                                        f"{new_network_name}%")).all()
    if len(ex_network) > 0:
        new_network_name = f"{new_network_name} ({str(len(ex_network))})"

    newnet = Network()

    newnet.project_id = project_id
    newnet.name = new_network_name
    newnet.description = ex_net.description if new_network_description is None else new_network_description
    newnet.layout = ex_net.layout
    newnet.status = ex_net.status
    newnet.projection = ex_net.projection
    newnet.created_by = user_id

    #if true, the creator will see this network in their project.networks.
    if creator_is_owner is True and user_id != recipient_user_id:
        newnet.set_owner(user_id)

    #set the owner to the recipient. This can be either the requesting user id (user_id)
    #or an explicitly defined user.
    newnet.set_owner(recipient_user_id)

    db.DBSession.add(newnet)

    db.DBSession.flush()

    newnetworkid = newnet.id

    # Clone the constituent resources in dependency order: nodes first,
    # then links (need node IDs), groups, resource attributes, types,
    # scenarios (need RA IDs) and finally rules (need scenario IDs).
    log.info('CLoning Nodes')
    node_id_map = _clone_nodes(network_id, newnetworkid, user_id)

    log.info('Cloning Links')
    link_id_map = _clone_links(network_id, newnetworkid, node_id_map, user_id)

    log.info('CLoning Groups')
    group_id_map = _clone_groups(network_id,
                                 newnetworkid,
                                 node_id_map,
                                 link_id_map,
                                 user_id)

    log.info("Cloning Resource Attributes")
    ra_id_map = _clone_resourceattrs(network_id,
                                     newnetworkid,
                                     node_id_map,
                                     link_id_map,
                                     group_id_map,
                                     newnet.project_id,
                                     ex_net.project_id,
                                     user_id)

    log.info("Cloning Resource Types")
    _clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)

    log.info('Cloning Scenarios')
    scenario_id_map = _clone_scenarios(network_id,
                                       newnetworkid,
                                       ra_id_map,
                                       node_id_map,
                                       link_id_map,
                                       group_id_map,
                                       user_id,
                                       include_outputs=include_outputs,
                                       scenario_ids=scenario_ids)

    _clone_rules(
        network_id,
        newnetworkid,
        node_id_map,
        link_id_map,
        group_id_map,
        scenario_id_map,
        user_id)

    db.DBSession.flush()

    return newnetworkid
def _clone_rules(old_network_id, new_network_id, node_id_map, link_id_map, group_id_map, scenario_id_map, user_id):
    """
    Clone all rules attached to the network itself, and to its nodes,
    links and resource groups, onto the newly cloned network.

    The *_id_map arguments translate old resource/scenario IDs into the
    IDs of their clones.
    """
    # Rules scoped to the network itself.
    rules.clone_resource_rules('NETWORK',
                               old_network_id,
                               target_ref_key='NETWORK',
                               target_ref_id=new_network_id,
                               scenario_id_map=scenario_id_map,
                               user_id=user_id)

    node_rules = db.DBSession.query(Rule).join(Node).filter(Node.network_id==old_network_id).all()
    for node_rule in node_rules:
        rules.clone_rule(node_rule.id,
                         target_ref_key='NODE',
                         target_ref_id=node_id_map[node_rule.node_id],
                         scenario_id_map=scenario_id_map,
                         user_id=user_id)

    link_rules = db.DBSession.query(Rule).join(Link).filter(Link.network_id==old_network_id).all()
    for link_rule in link_rules:
        rules.clone_rule(link_rule.id,
                         target_ref_key='LINK',
                         target_ref_id=link_id_map[link_rule.link_id],
                         scenario_id_map=scenario_id_map,
                         user_id=user_id)

    group_rules = db.DBSession.query(Rule).join(ResourceGroup).filter(ResourceGroup.network_id==old_network_id).all()
    for group_rule in group_rules:
        # Bug fix: a stray positional argument (group_rule.node_id) was
        # previously passed here, conflicting with the target_ref_key
        # keyword and breaking group-rule cloning. The call now mirrors
        # the node/link cases above.
        rules.clone_rule(group_rule.id,
                         target_ref_key='GROUP',
                         target_ref_id=group_id_map[group_rule.group_id],
                         scenario_id_map=scenario_id_map,
                         user_id=user_id)
def _clone_nodes(old_network_id, new_network_id, user_id):
    """
    Copy every node from one network into another.

    Returns:
        dict: mapping of each old node ID to its newly created node ID.
    """
    source_nodes = db.DBSession.query(Node).filter(Node.network_id==old_network_id).all()

    # Remember which source node each name belongs to, so the new IDs can
    # be matched back to their sources after the bulk insert.
    name_to_old_id = {}
    insert_mappings = []
    for source_node in source_nodes:
        insert_mappings.append(dict(
            network_id=new_network_id,
            name = source_node.name,
            description = source_node.description,
            x = source_node.x,
            y = source_node.y,
            layout = source_node.layout,
            status = source_node.status,
        ))
        name_to_old_id[source_node.name] = source_node.node_id

    db.DBSession.bulk_insert_mappings(Node, insert_mappings)
    db.DBSession.flush()

    # bulk_insert_mappings does not return the new IDs, so re-query the
    # cloned network's nodes and match them to their sources by name.
    cloned_nodes = db.DBSession.query(Node).filter(Node.network_id==new_network_id).all()

    return {name_to_old_id[cloned.name]: cloned.node_id for cloned in cloned_nodes}
def _clone_links(old_network_id, new_network_id, node_id_map, user_id):
    """
    Copy every link from one network into another, re-pointing each link's
    endpoints at the cloned nodes via *node_id_map*.

    Returns:
        dict: mapping of each old link ID to its newly created link ID.
    """
    source_links = db.DBSession.query(Link).filter(Link.network_id==old_network_id).all()

    # Remember which source link each name belongs to, so the new IDs can
    # be matched back to their sources after the bulk insert.
    name_to_old_id = {}
    insert_mappings = []
    for source_link in source_links:
        insert_mappings.append(dict(
            network_id=new_network_id,
            name = source_link.name,
            description = source_link.description,
            node_1_id = node_id_map[source_link.node_1_id],
            node_2_id = node_id_map[source_link.node_2_id],
            layout = source_link.layout,
            status = source_link.status,
        ))
        name_to_old_id[source_link.name] = source_link.id

    db.DBSession.bulk_insert_mappings(Link, insert_mappings)
    db.DBSession.flush()

    # bulk_insert_mappings does not return the new IDs, so re-query the
    # cloned network's links and match them to their sources by name.
    cloned_links = db.DBSession.query(Link).filter(Link.network_id==new_network_id).all()

    return {name_to_old_id[cloned.name]: cloned.link_id for cloned in cloned_links}
def _clone_groups(old_network_id, new_network_id, node_id_map, link_id_map, user_id):
    """
    Copy every resource group from one network into another.

    Returns:
        dict: mapping of each old group ID to its newly created group ID.
    """
    groups = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==old_network_id).all()
    newgroups = []
    old_group_name_map = {}
    id_map = {}
    for ex_g in groups:
        new_g = dict(
            network_id=new_network_id,
            name = ex_g.name,
            # Bug fix: was `ex_g.group_description`, which is not an
            # attribute of ResourceGroup (the rest of this module uses
            # `.description`, cf. update_group) and raised AttributeError.
            description = ex_g.description,
            status = ex_g.status,
        )
        newgroups.append(new_g)
        old_group_name_map[ex_g.name] = ex_g.id

    db.DBSession.bulk_insert_mappings(ResourceGroup, newgroups)
    db.DBSession.flush()

    # bulk_insert_mappings does not return the new IDs, so re-query the
    # cloned network's groups and map old IDs to new IDs by name.
    groups = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==new_network_id).all()
    for g in groups:
        old_group_id = old_group_name_map[g.name]
        id_map[old_group_id] = g.group_id

    return id_map
def _clone_attributes(network_id, newnetworkid, exnet_project_id, newnet_project_id, user_id):
    """
    Clone the attributes scoped to a network and its project when cloning
    a network.
    """
    #first find any attributes which are scoped to the source network, and scope them to the parent project if the source
    #and target are in the same project, otherwise clone all the scoped attributes.

    #find any attributes scoped directly to the source
    network_scoped_attrs = attributes.get_attributes(network_id=network_id, user_id=user_id)
    project_scoped_attrs = []  # NOTE(review): assigned but never used
    #get all the attributes scoped to the project of the source network (if it's not the same project as the target)
    if exnet_project_id != newnet_project_id:
        new_attributes = []
        exnet_project_scoped_attrs = attributes.get_attributes(project_id=exnet_project_id, user_id=user_id)
        for a in exnet_project_scoped_attrs:
            # re-scope each project-level attribute to the target project
            a.project_id = newnet_project_id
            new_attributes.append(a)

        for a in network_scoped_attrs:
            #the networks are in different projects, so clone the attributes
            a = JSONObject(a)
            a.network_id = newnetworkid
            new_attributes.append(a)

        attributes.add_attributes(new_attributes, user_id=user_id)
    else:
        for a in network_scoped_attrs:
            #the networks are in the same project, so re-scope the attribute
            #to the project, so it is shared by the networks
            a.network_id=None
            a.project_id=exnet_project_id
            attributes.update_attribute(a)
def _clone_resourceattrs(network_id, newnetworkid, node_id_map, link_id_map, group_id_map, exnet_project_id, newnet_project_id, user_id):
    """
    Clone every resource attribute in the source network (network-, node-,
    link- and group-scoped) into the cloned network.

    Returns:
        dict: mapping of old ResourceAttr ID -> new ResourceAttr ID, used
        later when cloning resource scenarios.
    """
    #clone any attributes which are scoped to a network or to the network's project (if the networks)
    #are in different projects.
    _clone_attributes(network_id, newnetworkid, exnet_project_id, newnet_project_id, user_id)

    log.info("Cloning Network Attributes")
    network_ras = db.DBSession.query(ResourceAttr).filter(ResourceAttr.network_id==network_id)
    id_map = {}
    new_ras = []
    old_ra_name_map = {}
    for ra in network_ras:
        new_ras.append(dict(
            network_id=newnetworkid,
            node_id=None,
            group_id=None,
            link_id=None,
            ref_key='NETWORK',
            attr_id=ra.attr_id,
            attr_is_var=ra.attr_is_var,
        ))
        #key is (network_id, node_id, link_id, group_id, attr_id) -- only one
        #of the four resource IDs can be non-null for a given row
        old_ra_name_map[(newnetworkid, None, None, None, ra.attr_id)] = ra.id

    log.info("Cloning Node Attributes")
    node_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.node_id==Node.id, Node.network_id==network_id)).all()
    for ra in node_ras:
        new_ras.append(dict(
            node_id=node_id_map[ra.node_id],
            network_id=None,
            link_id=None,
            group_id=None,
            attr_id=ra.attr_id,
            attr_is_var=ra.attr_is_var,
            ref_key=ra.ref_key,
        ))
        old_ra_name_map[(None, node_id_map[ra.node_id], None, None, ra.attr_id)] = ra.id

    log.info("Cloning Link Attributes")
    link_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.link_id==Link.id, Link.network_id==network_id)).all()
    for ra in link_ras:
        new_ras.append(dict(
            link_id=link_id_map[ra.link_id],
            network_id=ra.network_id,
            node_id=ra.node_id,
            group_id=ra.group_id,
            attr_id=ra.attr_id,
            attr_is_var=ra.attr_is_var,
            ref_key=ra.ref_key,
        ))
        old_ra_name_map[(None, None, link_id_map[ra.link_id], None, ra.attr_id)] = ra.id

    log.info("Cloning Group Attributes")
    group_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.group_id==ResourceGroup.id, ResourceGroup.network_id==network_id)).all()
    for ra in group_ras:
        new_ras.append(dict(
            group_id=group_id_map[ra.group_id],
            network_id=ra.network_id,
            link_id=ra.link_id,
            node_id=ra.node_id,
            attr_id=ra.attr_id,
            attr_is_var=ra.attr_is_var,
            ref_key=ra.ref_key,
        ))
        old_ra_name_map[(None, None, None, group_id_map[ra.group_id], ra.attr_id)] = ra.id

    log.info("Inserting new resource attributes")
    db.DBSession.bulk_insert_mappings(ResourceAttr, new_ras)
    db.DBSession.flush()
    log.info("Insertion Complete")

    log.info("Getting new RAs and building ID map")
    # bulk_insert_mappings does not return the new IDs, so re-query each
    # resource kind in the cloned network and match rows back to their
    # sources via the (network, node, link, group, attr) key built above.
    new_network_ras = db.DBSession.query(ResourceAttr).filter(ResourceAttr.network_id==newnetworkid).all()
    for ra in new_network_ras:
        id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id

    new_node_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.node_id==Node.id, Node.network_id==newnetworkid)).all()
    for ra in new_node_ras:
        id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id

    new_link_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.link_id==Link.id, Link.network_id==newnetworkid)).all()
    for ra in new_link_ras:
        id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id

    new_group_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.group_id==ResourceGroup.id, ResourceGroup.network_id==newnetworkid)).all()
    for ra in new_group_ras:
        id_map[old_ra_name_map[(ra.network_id, ra.node_id, ra.link_id, ra.group_id, ra.attr_id)]] = ra.id

    log.info("ID map completed. Returning")
    return id_map
def _clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map):
    """
    Copy the template-type assignments (ResourceType rows) of the source
    network, its nodes, links and groups onto the cloned resources.

    The node/link/group ID maps point each cloned type row at the
    corresponding cloned resource.
    """
    log.info("Cloning Network Types")
    network_rts = db.DBSession.query(ResourceType).filter(ResourceType.network_id==network_id)
    new_ras = []
    for rt in network_rts:
        new_ras.append(dict(
            ref_key=rt.ref_key,
            network_id=newnetworkid,
            node_id=rt.node_id,
            link_id=rt.link_id,
            group_id=rt.group_id,
            type_id=rt.type_id,
            child_template_id=rt.child_template_id,
        ))
    log.info("Cloning Node Types")
    node_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.node_id==Node.id, Node.network_id==network_id))
    for rt in node_rts:
        new_ras.append(dict(
            ref_key=rt.ref_key,
            network_id=rt.network_id,
            node_id=node_id_map[rt.node_id],
            link_id=rt.link_id,
            group_id=rt.group_id,
            type_id=rt.type_id,
            child_template_id=rt.child_template_id,
        ))
    log.info("Cloning Link Types")
    link_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.link_id==Link.id, Link.network_id==network_id))
    for rt in link_rts:
        new_ras.append(dict(
            ref_key=rt.ref_key,
            network_id=rt.network_id,
            node_id=rt.node_id,
            link_id=link_id_map[rt.link_id],
            group_id=rt.group_id,
            type_id=rt.type_id,
            child_template_id=rt.child_template_id,
        ))
    log.info("Cloning Group Types")
    group_rts = db.DBSession.query(ResourceType).filter(and_(ResourceType.group_id==ResourceGroup.id, ResourceGroup.network_id==network_id))
    for rt in group_rts:
        new_ras.append(dict(
            ref_key=rt.ref_key,
            network_id=rt.network_id,
            node_id=rt.node_id,
            link_id=rt.link_id,
            group_id=group_id_map[rt.group_id],
            type_id=rt.type_id,
            child_template_id=rt.child_template_id,
        ))

    log.info("Inserting new resource types")
    # Insert all the cloned type rows in a single bulk operation.
    db.DBSession.bulk_insert_mappings(ResourceType, new_ras)
    db.DBSession.flush()
    log.info("Insertion Complete")
def _clone_scenarios(network_id,
                     newnetworkid,
                     ra_id_map,
                     node_id_map,
                     link_id_map,
                     group_id_map,
                     user_id,
                     include_outputs=False,
                     scenario_ids=None):
    """
    Clone the active scenarios of a network into the cloned network.

    Args:
        ra_id_map / node_id_map / link_id_map / group_id_map (dict):
            Translations from old resource IDs to their clones' IDs.
        include_outputs (bool): If True, result data is cloned too.
        scenario_ids (list(int)): If given and non-empty, only these
            scenarios are cloned.

    Returns:
        dict: mapping of old scenario ID -> new scenario ID.
    """
    # Fix: avoid a mutable default argument; None means "clone everything".
    if scenario_ids is None:
        scenario_ids = []

    scenarios = db.DBSession.query(Scenario).filter(Scenario.network_id == network_id).all()

    id_map = {}

    for scenario in scenarios:
        #if scenario_ids are specified (the list is not empty) then filter out
        #the scenarios not specified.
        if len(scenario_ids) > 0 and scenario.id not in scenario_ids:
            log.info("Not cloning scenario %s", scenario.id)
            continue

        # Only active scenarios ('A') are cloned; deleted ones are skipped.
        if scenario.status == 'A':
            new_scenario_id = _clone_scenario(scenario,
                                              newnetworkid,
                                              ra_id_map,
                                              node_id_map,
                                              link_id_map,
                                              group_id_map,
                                              user_id,
                                              include_outputs=include_outputs)
            id_map[scenario.id] = new_scenario_id

    return id_map
def _clone_scenario(old_scenario,
                    newnetworkid,
                    ra_id_map,
                    node_id_map,
                    link_id_map,
                    group_id_map,
                    user_id,
                    include_outputs=False):
    """
    Clone a single scenario into the new network: copies the scenario row,
    its resource scenarios (data) and its resource group items.

    Args:
        old_scenario: The source Scenario ORM object.
        include_outputs (bool): If True, result data (attr_is_var == 'Y')
            is also copied; by default only input data is cloned.

    Returns:
        int: The ID of the newly created scenario.
    """
    log.info("Adding scenario shell to get scenario ID")
    news = Scenario()
    news.network_id = newnetworkid
    news.name = old_scenario.name
    news.description = old_scenario.description
    news.layout = old_scenario.layout
    news.start_time = old_scenario.start_time
    news.end_time = old_scenario.end_time
    news.time_step = old_scenario.time_step
    news.parent_id = old_scenario.parent_id
    news.created_by = user_id

    db.DBSession.add(news)
    db.DBSession.flush()

    scenario_id = news.id
    log.info("New Scenario %s created", scenario_id)

    log.info("Getting old resource scenarios for scenario %s", old_scenario.id)
    old_rscen_qry = db.DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id == old_scenario.id,
        ResourceAttr.id == ResourceScenario.resource_attr_id,
    )

    #Filter out output data unless explicitly requested not to.
    if include_outputs is not True:
        old_rscen_qry = old_rscen_qry.filter(ResourceAttr.attr_is_var == 'N')

    old_rscen_rs = old_rscen_qry.all()

    new_rscens = []
    for old_rscen in old_rscen_rs:
        # Datasets are shared, not copied -- only the RS rows are new,
        # re-pointed at the cloned resource attributes via ra_id_map.
        new_rscens.append(dict(
            dataset_id=old_rscen.dataset_id,
            scenario_id=scenario_id,
            resource_attr_id=ra_id_map[old_rscen.resource_attr_id],
        ))

    log.info("Inserting new resource scenarios")
    db.DBSession.bulk_insert_mappings(ResourceScenario, new_rscens)
    log.info("Insertion Complete")

    log.info("Getting old resource group items for scenario %s", old_scenario.id)
    old_rgis = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.scenario_id == old_scenario.id).all()

    new_rgis = []
    for old_rgi in old_rgis:
        # .get() is used because only one of node/link/subgroup is set
        # on any given item; the others map None -> None.
        new_rgis.append(dict(
            ref_key=old_rgi.ref_key,
            node_id=node_id_map.get(old_rgi.node_id),
            link_id=link_id_map.get(old_rgi.link_id),
            subgroup_id=group_id_map.get(old_rgi.subgroup_id),
            group_id=group_id_map.get(old_rgi.group_id),
            scenario_id=scenario_id,
        ))

    db.DBSession.bulk_insert_mappings(ResourceGroupItem, new_rgis)

    return scenario_id
@required_perms("edit_network")
def apply_unit_to_network_rs(network_id, unit_id, attr_id, scenario_id=None, **kwargs):
    """
    Set the unit on all the datasets in a network which have the
    supplied attribute.

    args:
        network_id (int): The network whose datasets are to be updated
        unit_id (int): The unit ID to set on the network's datasets
        attr_id (int): The attribute ID
        scenario_id (int) (optional): Supplied if only datasets in a
        specific scenario are to be affected
    returns:
        None
    raises:
        ValidationError if the supplied unit is incompatible with the attribute's dimension
    """
    #Now get all the RS associated to both the attr and network.
    network_rs_query = db.DBSession.query(ResourceScenario).filter(
        Scenario.network_id == network_id,
        ResourceScenario.scenario_id == Scenario.id,
        ResourceScenario.resource_attr_id == ResourceAttr.id,
        ResourceAttr.attr_id == attr_id)

    if scenario_id is not None:
        # Bug fix: Query.filter() returns a *new* query. The previous code
        # discarded the result, so the scenario filter was never applied
        # and every scenario's datasets were updated.
        network_rs_query = network_rs_query.filter(Scenario.id == scenario_id)

    network_rs_list = network_rs_query.all()

    #Get the attribute in question so we can check its dimension
    attr_i = db.DBSession.query(Attr).filter(Attr.id == attr_id).one()

    #now check whether the supplied unit can be applied by comparing it to the attribute's dimension
    units.check_unit_matches_dimension(unit_id, attr_i.dimension_id)

    #set the unit ID for each of the resource scenarios
    for network_rs in network_rs_list:
        network_rs.dataset.unit_id = unit_id
| hydraplatform/hydra-base | hydra_base/lib/network.py | network.py | py | 127,911 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "hydra_base.lib.attributes",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "... |
70338202429 | #!/usr/bin/env python3
"""
The main program that will be run on the Raspberry Pi,
which is the controller for the pharmacy client.
DINs of drugs on this pharmacy should be specified in din.cfg
"""
# these libraries come with python
import logging
import datetime
import struct
import asyncio
import json
import base64
# please run setup.sh first to install these libraries
import numpy as np
import cv2
import face_recognition
import aiohttp
# constant: endpoint for the web API
api_endpoint = 'https://example.com/arka'
# constant: current api version
api_version = '0'
# local drug database
din_to_motor = {}
async def dispense(din):
    """
    Try to dispense the drug with the given DIN.

    Raises KeyError if the DIN is not stocked in this pharmacy.
    Returns True if dispensing succeeded; hardware control is not yet
    implemented, so this currently always reports failure.
    """
    # Validate the DIN and look up the motor that serves it. The motor id
    # is bound with an underscore because the hardware driver does not
    # exist yet; the lookup is kept for its KeyError side effect.
    _motor = din_to_motor[din]
    # TODO: actuate _motor here and report the real outcome.
    return False
async def report_dispensed(auth_token, drugs_dispensed):
    """
    Reports back to the server that drugs were dispensed... later.

    The report is delayed 30 seconds so dispensing can finish and the
    drugs_dispensed list is fully populated, then retried every 30 seconds
    until the API confirms it.
    """
    # get the timestamp NOW
    ts = datetime.datetime.utcnow().timestamp()
    # wait until dispensing should be done
    await asyncio.sleep(30)
    # if nothing was dispensed, easy
    if not drugs_dispensed:
        logging.log(logging.INFO, 'No drug dispensing to report')
        return True
    logging.log(logging.DEBUG, 'Now trying to report drug dispensed')
    # start a HTTP session
    async with aiohttp.ClientSession() as session:
        logging.log(logging.DEBUG, 'HTTP session started from report_dispensed')
        # build the json object to send
        data_send = {
            'version': api_version,
            'id': auth_token,
            'din': drugs_dispensed,
            'timestamp': ts
        }
        # response is assumed none until we get something
        data_response = None
        # it's not done until we've confirmed it's done
        while data_response is None:
            # connect to the api!
            async with session.get(
                api_endpoint + '/user/pharmacy_done',
                json = data_send
            ) as response:
                # BUGFIX: ClientResponse.json() is a coroutine and must be
                # awaited; without the await, data_response was a coroutine
                # object and the subscriptions below would raise TypeError.
                data_response = await response.json()
                if data_response['version'] != api_version:
                    raise AssertionError('Incorrect API version encountered in report_dispensed')
                elif not data_response['success']:
                    logging.log(logging.INFO, 'API endpoint said drug dispense report failed for whatever reason')
                    data_response = None
                    await asyncio.sleep(30)
    logging.log(logging.INFO, 'Drug delivery report completed and confirmed')
def pack_fingerprint(fingerprint):
    """
    Serialize a 128-dimension face fingerprint vector into 256 bytes.

    Each component, expected in [-1, 1], is quantized to a little-endian
    signed 16-bit integer. Some precision is lost in the conversion.
    """
    # Reject out-of-range components up front.
    if np.any(fingerprint > 1):
        raise ValueError('Fingerprint contains value greater than 1')
    if np.any(fingerprint < -1):
        raise ValueError('Fingerprint contains value less than -1')
    # 1 - 2^-53 is the largest double strictly below 1; scaling by it keeps
    # a component of exactly 1.0 from overflowing past the int16 maximum.
    safety_scale = 1 - 2 ** -53
    if safety_scale >= 1:
        raise AssertionError('Fingerprint packing uses incorrect scaling factor')
    # Map [-1, 1] onto the signed 16-bit range and truncate towards -inf.
    quantized = np.array(np.floor(fingerprint * (safety_scale * 2 ** 15)), dtype=np.int16)
    # 128 values, 16-bit integer, little endian -> 256 bytes.
    return struct.pack('<128h', *quantized)
async def main_step(capture):
    """
    One iteration of the main loop: capture a frame, fingerprint a face,
    authenticate against the server and dispense any valid prescriptions.

    Contains the code for the main loop.
    A return here will act as a continue in the loop.
    """
    # wait for either user to press the button or a certain number of seconds to pass
    await asyncio.sleep(1)
    logging.log(logging.DEBUG, 'Now trying to capture an image')
    # capture an image
    succeeded, pixels = capture.read()
    logging.log(logging.DEBUG, 'Image capture completed, and it ' + ('succeeded' if succeeded else 'failed'))
    # this line explains itself well
    if not succeeded:
        return
    # OpenCV uses BGR as its output format but we want RGB
    pixels = cv2.cvtColor(pixels, cv2.COLOR_BGR2RGB)
    logging.log(logging.DEBUG, 'Image colour channels changed to RGB')
    # find face locations in the image
    face_boxes = face_recognition.face_locations(pixels, model='hog')
    num_faces = len(face_boxes)
    logging.log(logging.DEBUG, 'Found ' + str(num_faces) + 'faces in the image')
    # no faces means nothing to do
    if num_faces == 0:
        return
    # TODO filter faces so only 1 is left, or else give up
    # generate the 128-vector as face fingerprint
    fingerprints = face_recognition.face_encodings(pixels, face_boxes)
    fingerprint = fingerprints[0]
    logging.log(logging.DEBUG, 'Face fingerprint was generated')
    # pack the fingerprint as bytes
    packed_fingerprint = pack_fingerprint(fingerprint)
    logging.log(logging.INFO, 'Packed face fingerprint as ' + packed_fingerprint.hex())
    # start a HTTP session
    async with aiohttp.ClientSession() as session:
        logging.log(logging.DEBUG, 'HTTP session started from main_step')
        # build the json object to send
        # BUGFIX: b64encode returns bytes, which json.dumps cannot serialize;
        # decode to an ASCII string for the JSON payload
        data_send = {
            'version': api_version,
            'fingerprint': base64.b64encode(packed_fingerprint).decode('ascii')
        }
        # response is assumed none until we get something
        data_response = None
        # connect to the api!
        async with session.get(
            api_endpoint + '/user/pharmacy_get',
            json = data_send
        ) as response:
            logging.log(logging.DEBUG, 'Sent face fingerprint to authenticate')
            # get the response as json
            data_response = await response.json()
            logging.log(logging.DEBUG, 'Decoded response data as JSON')
        # continue if it succeeded
        # BUGFIX: the response was previously read through an undefined name
        # `data`, which raised NameError; it is called data_response here
        if data_response is not None and data_response.get('success', None) and data_response['version'] == api_version:
            logging.log(logging.DEBUG, 'Authenticated and prescription data acquired')
            # the authentication token for this session
            auth_token = data_response['id']
            # make a list of drugs that were dispensed
            drugs_dispensed = []
            # BUGFIX: schedule the delayed report concurrently instead of
            # awaiting it immediately -- the old code blocked for 30 seconds
            # *before* dispensing and always reported an empty list
            report_task = asyncio.create_task(report_dispensed(auth_token, drugs_dispensed))
            # loop over all valid prescriptions
            for pres in data_response['prescriptions']:
                # get the DIN of the drug
                din = pres['din']
                # is this drug in this pharmacy?
                if din in din_to_motor:
                    logging.log(logging.INFO, 'Attempting to dispense drug with DIN ' + din)
                    # try to dispense it
                    drug_was_dispensed = await dispense(din)
                    if drug_was_dispensed:
                        logging.log(logging.INFO, 'Drug dispense reported success')
                        drugs_dispensed.append(din)
                    else:
                        logging.log(logging.INFO, 'Drug dispense reported failure')
            # wait for the delayed report to finish before returning so the
            # task is not garbage-collected mid-flight
            await report_task
async def main_async():
    """
    Actual main function to be used in production.

    Runs the capture/authenticate/dispense loop forever until a
    KeyboardInterrupt (SIGINT) is received.
    """
    # log timing information
    logging.log(logging.INFO, 'Starting main function | Current UTC time is ' + str(datetime.datetime.utcnow()))
    # set up the video capture object
    capture = cv2.VideoCapture(0)
    try:
        # the main loop
        while True:
            # log some timing information
            logging.log(logging.DEBUG, 'Starting the main loop | Current UTC time is ' + str(datetime.datetime.utcnow()))
            # try block to prevent errors from breaking the program
            try:
                # special function represents the code of the main loop
                await main_step(capture)
            except KeyboardInterrupt:
                # the user intends to stop the program, so we respect this
                logging.log(logging.INFO, 'Exiting main loop because a keyboard interrupt (SIGINT) was received')
                # BUGFIX: re-raise the active exception instead of raising a
                # fresh KeyboardInterrupt (preserves the traceback)
                raise
            except Exception as exc:
                # any other error must not break the program
                logging.log(logging.ERROR, exc)
    finally:
        # BUGFIX: release the camera even when the loop exits via
        # KeyboardInterrupt; previously these lines were unreachable on exit
        capture.release()
        # say bye bye
        logging.log(logging.WARNING, 'Exiting main function, program is ending | Current UTC time is ' + str(datetime.datetime.utcnow()))
def main():
    """
    Entry point to the program.

    Loads the DIN -> motor mapping from din.cfg into the module-level
    database, then hands control over to the asynchronous main loop.
    """
    # Each config line is "<din> <motor>" separated by whitespace.
    with open('din.cfg', 'r') as config:
        for raw_line in config:
            din, motor = raw_line.strip().split()
            din_to_motor[din] = motor
    asyncio.run(main_async())
def main_test():
    """
    Previous main function left over from testing. Will be removed when it is no longer useful.

    Captures one frame from the default camera, runs face detection and
    fingerprinting on it, and prints diagnostic information to stdout.
    """
    print('start of program')
    # Device 0 is the default camera.
    cap = cv2.VideoCapture(0)
    print('camera initialized')
    for _ in range(1):
        print('start of main loop')
        # try to capture an image
        # image is a 3d array: (Y, X, bgr)
        ret, frame = cap.read()
        print('image captured')
        # reorder to RGB
        # not necessary to do it this way but it works
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        print('image converted to rgb')
        # we must first detect and locate faces within the image
        # this is separate from the face fingerprinting
        face_boxes = face_recognition.face_locations(frame,
            model='hog')
        print('faces detected in image')
        # face_recognition library includes a premade AI
        # this will spit out a 1d array with 128 floating point entries
        # they seem to be within [-1, 1] and average at 0
        # this fingerprint is a summary of the features of the faces
        # we will later transform this vector and then send that to the server for processing
        fingerprints = face_recognition.face_encodings(frame, face_boxes)
        print('face fingerprints generated')
        print(f'created {len(fingerprints)} fingerprints')
        # Dump basic statistics for every fingerprint found in the frame.
        for index, fingerprint in enumerate(fingerprints):
            print('-'*40)
            print(f'data of fingerprint #{index}')
            print(f'is a vector with shape {fingerprint.shape} and type {fingerprint.dtype}')
            print(f'min is {np.min(fingerprint)}')
            print(f'max is {np.max(fingerprint)}')
            print(f'mean is {np.mean(fingerprint)}')
            print('raw data')
            print(fingerprint)
    print('main loop exited')
    print('cleaning up')
    cap.release()
    cv2.destroyAllWindows()
    print('bye bye!')
# standard way to invoke main but only if this script is run as the program and not a library
if __name__ == '__main__':
    # NOTE(review): this still runs the test harness; switch to main() for production use.
    main_test()
| alimzhan2000/arka_project_on_python | drug_delivering_code.py | drug_delivering_code.py | py | 11,430 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "asyncio.sleep",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging... |
5892500320 | import tkinter
import requests
import ujson
import datetime
from PIL import ImageTk,Image
from tkinter import ttk
from concurrent import futures
# pip install: requests, pillow, ujson
#region Static Requests
# Riot API key appended to every authenticated request below.
key = 0000000000 #<-- Riot developer key needed.
# ----------- Request Session -----------
# One persistent session per endpoint family so TCP connections are reused.
sessionSummoner = requests.Session()
sessionRank = requests.Session()
sessionMatch = requests.Session()
sessionMatchList = requests.Session()
# ----------- Current Patch -----------
# Data Dragon lists patch versions newest-first, so index 0 is the live patch.
patchesJson = requests.get("https://ddragon.leagueoflegends.com/api/versions.json")
patches = ujson.loads(patchesJson.text)
currentPatch = patches[0]
# ----------- Static League Data -----------
# Static summoner-spell and map metadata, fetched once at startup.
summonerSpellJsonData = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{currentPatch}/data/en_US/summoner.json")
summonerSpellRawData = ujson.loads(summonerSpellJsonData.text)["data"]
mapListJsonData = requests.get("https://static.developer.riotgames.com/docs/lol/maps.json")
mapListRawData = ujson.loads(mapListJsonData.text)
#endregion
# ----------- Main window -----------
root = tkinter.Tk()
root.title("League Quick Data")
root.iconbitmap("LQ.ico")
root.configure(background = "black")
root.resizable(False, False)
#region Languages
class ChangeRegion:
    """Holds per-region UI text and toggles the BR/NA region buttons."""

    def __init__(self, languageDict = None, buttonSearchLang = None, sessionRegionLang = None, matchResultLang = None):
        # languageDict maps each translatable element to its [BR, NA] variants.
        self.languageDict = languageDict
        self.buttonSearchLang = buttonSearchLang
        self.sessionRegionLang = sessionRegionLang
        self.matchResultLang = matchResultLang

    def _apply(self, variant):
        # Select the BR (0) or NA (1) variant of every translatable string.
        self.buttonSearchLang = self.languageDict["searchButton"][variant]
        self.sessionRegionLang = self.languageDict["sessionRegion"][variant]
        self.matchResultLang = self.languageDict["gameResult"][variant]

    def CreateDict(self):
        """Build the translation table and default every field to BR."""
        self.languageDict = {"searchButton":["BUSCAR", "SEARCH"], "sessionRegion":["BR", "NA"], "gameResult":[["VITORIA", "DERROTA"], ["VICTORY", "DEFEAT"]]}
        self._apply(0)

    def RegionNA(self, buttonSearch, buttonBR, buttonNA):
        """Switch the UI to the NA region and highlight the NA button."""
        self._apply(1)
        buttonSearch.configure(text = self.buttonSearchLang)
        buttonBR.configure(background = "black")
        buttonNA.configure(background = "#10293f")

    def RegionBR(self, buttonSearch, buttonBR, buttonNA):
        """Switch the UI to the BR region and highlight the BR button."""
        self._apply(0)
        buttonSearch.configure(text = self.buttonSearchLang)
        buttonBR.configure(background = "#10293f")
        buttonNA.configure(background = "black")

regionMethods = ChangeRegion()
regionMethods.CreateDict()
#endregion
# ----------- Search Button -----------
# A Label nested in a thin Frame fakes a flat button with a coloured border.
searchButtonBorder = tkinter.Frame(root, background = "#048195")
searchButtonBorder.grid(row = 0, column = 2, sticky = "nswe")
searchButtonBorder.grid_columnconfigure(0, weight = 1)
searchButton = tkinter.Label(searchButtonBorder, text = "BUSCAR", font = ("", 8, "bold"), background = "black", foreground = "white", borderwidth = 3)
searchButton.grid(row = 0, column = 0, sticky = "nswe", padx = 2, pady = 2)
# ----------- Region Buttons -----------
# BR / NA toggle buttons; the handlers retranslate the search button too.
languageFrame = tkinter.Frame(root, width = 10, background = "#024e64")
languageFrame.grid(row = 0, column = 4, sticky = "e")
brButton = tkinter.Button(languageFrame,
                          width = 3,
                          text = "BR",
                          font = ("Arial", 9, "bold"),
                          activebackground = "#07141f",
                          activeforeground = "white",
                          foreground = "white",
                          background = "black",
                          relief = "ridge",
                          borderwidth = 0,
                          command = lambda: regionMethods.RegionBR(searchButton, brButton, naButton))
brButton.grid(row = 0, column = 0, padx = 1, pady = 1)
naButton = tkinter.Button(languageFrame,
                          width = 3,
                          text = "NA",
                          font = ("Arial", 9, "bold"),
                          activebackground = "#07141f",
                          activeforeground = "white",
                          foreground = "white",
                          background = "black",
                          relief = "ridge",
                          borderwidth = 0,
                          command = lambda: regionMethods.RegionNA(searchButton, brButton, naButton))
naButton.grid(row = 0, column = 1, padx = 1, pady = 1)
# Default to the BR region at startup.
regionMethods.RegionBR(searchButton, brButton, naButton)
# ----------- Scrollbar Style -----------
# The classic theme allows recolouring; map() styles the pressed state.
style = ttk.Style()
style.theme_use("classic")
style.map("TScrollbar", background=[('pressed', '!focus', '#ae914b')], relief=[('pressed', 'flat')])
style.configure("TScrollbar", troughcolor = "black", relief = "flat", background = "#775829", arrowsize = 0, width = 5, borderwidth = 0)
#region Entries
# ----------- Summoner-name input fields -----------
# Five identical entry widgets, one per player column. Built in a loop to
# avoid repeating the same option set five times; the individual names
# player1..player5 are kept so any external references keep working.
_ENTRY_OPTIONS = dict(
    width = 22,
    background = "black",
    foreground = "white",
    borderwidth = 0,
    highlightthickness = 2,
    highlightcolor = "#775829",
    highlightbackground = "#775829",
    insertbackground = "light grey",
    insertborderwidth = 1,
    relief = "ridge")
playerArray = []
for _column in range(5):
    _entry = tkinter.Entry(root, **_ENTRY_OPTIONS)
    _entry.grid(row = 1, column = _column, sticky = "we")
    playerArray.append(_entry)
# Preserve the original per-widget names for backward compatibility.
player1, player2, player3, player4, player5 = playerArray
#endregion
#region Gui creation methods
# Placeholder slots (index == player column) filled in as widgets are built.
scrollBarArray = [0, 0, 0, 0, 0]
# ----------- Frame Buttons -----------
playerHistoryButtonArray = [0, 0, 0, 0, 0]
def CreateButtonBG():
    """Draw the row-2 background strip behind the per-player history buttons.

    The strip shows the text "Null" only when every summoner-name entry is
    empty, mirroring the original first-non-empty scan.
    """
    if any(entry.get() for entry in playerArray):
        strip = tkinter.Label(root, background = "black", foreground = "white")
    else:
        strip = tkinter.Label(root, background = "black", foreground = "white", text = "Null")
    strip.grid(row = 2, columnspan = 5, sticky = "nswe")
def CreateHistoryButton(playerNumber):
    """Create the bordered history button for one player column and store it."""
    border = tkinter.Frame(root, background = "#ae914b")
    border.grid(row = 2, column = playerNumber, sticky = "we")
    border.grid_columnconfigure(0, weight = 1)
    # The button label shows whatever name is currently typed in the entry.
    history_button = tkinter.Label(
        border,
        text = playerArray[playerNumber].get(),
        font = ("", 9, "bold"),
        background = "#0e191d",
        foreground = "#fff6d6",
        borderwidth = 2)
    history_button.grid(row = 0, column = 0, sticky = "we", padx = 1, pady = 1)
    # Keep a reference so other code can reconfigure this button later.
    playerHistoryButtonArray[playerNumber] = history_button
# ----------- Frames -----------
# Placeholder slots (index == player column) for the scrollable containers.
scrollableMainFrameArray = [0, 0, 0, 0, 0]
historyFrameArray = [0, 0, 0, 0, 0]
def CreateHistoryFrame(playerNumber):
    """Build the scrollable match-history area for one player column."""
    scrollableFrame = tkinter.Frame(root, height = 450, width = 680, background = "black")
    scrollableFrame.grid(row = 4, columnspan = 5, sticky = "nsew")
    scrollableFrame.grid_columnconfigure((0, 1), weight = 1)
    # A canvas is needed because plain tkinter frames cannot scroll.
    canvasLayout = tkinter.Canvas(scrollableFrame, height = 450, width = 680, background = "black", highlightthickness = 0, scrollregion = (0, 0, 0, 980))
    canvasLayout.grid(row=0, column = 0, sticky = "nsew")
    historyFrame = tkinter.Frame(canvasLayout, height = 450, width = 680, background = "black")
    # Embed the inner frame in the canvas so its contents can be scrolled.
    canvasLayout.create_window((0, 0), window = historyFrame, anchor = "nw")
    scrollbar = ttk.Scrollbar(scrollableFrame, orient = "vertical", command = canvasLayout.yview)
    scrollbar.grid(row = 0, column = 1, sticky = "nse", padx = (4, 3), pady = (0, 3))
    canvasLayout.configure(yscrollcommand = scrollbar.set)
    # ----------- Scroll Function -----------
    def MouseWheelMove(event):
        # Windows reports mouse-wheel movement in multiples of 120.
        canvasLayout.yview_scroll(-1 * (event.delta // 120), "units")
    scrollbar.bind_all("<MouseWheel>", MouseWheelMove)
    scrollableMainFrameArray[playerNumber] = scrollableFrame
    historyFrameArray[playerNumber] = historyFrame
# ----------- Match Previews -----------
# One list of match-preview frames per player column.
playerMatchArray = [0, 0, 0, 0, 0]
def CreateMatchPreview(playerNumber):
    """Create the profile summary plus ten empty match frames for a player."""
    matchArray = []
    for i in range(11):
        if i == 0:
            # Slot 0 is the profile summary header, not a match row.
            ProfileSummary.CreateProfileFrame(playerNumber)
        else:
            match = tkinter.Frame(historyFrameArray[playerNumber], height = 85, width = 680, background = "black")
            match.grid(pady = (6, 0), columnspan = 5)
            match.grid_rowconfigure((0,1) , weight = 1)
            match.grid_columnconfigure((0,1,2,3) , weight = 1)
            # Stop the frame from shrinking to fit its future children.
            match.grid_propagate(False)
            matchArray.append(match)
    playerMatchArray[playerNumber] = matchArray
#endregion
#region Classes
# Shared decorative images, loaded once from the application directory.
championCircleFrame = ImageTk.PhotoImage(Image.open("circlebig.png").resize((75, 75)))
levelCircleFrame = ImageTk.PhotoImage(Image.open("circlesma.png").resize((23, 23)))
minionIcon = ImageTk.PhotoImage(Image.open("minion.png").resize((11, 13)))
goldIcon = ImageTk.PhotoImage(Image.open("gold.png").resize((15, 12)))
# Sprite caches: one list per player column, one sub-list per match preview.
# tkinter only displays PhotoImages that stay referenced, so these lists
# also keep the images alive.
itemList1 = [[],[],[],[],[],[],[],[],[],[]]
itemList2 = [[],[],[],[],[],[],[],[],[],[]]
itemList3 = [[],[],[],[],[],[],[],[],[],[]]
itemList4 = [[],[],[],[],[],[],[],[],[],[]]
itemList5 = [[],[],[],[],[],[],[],[],[],[]]
spellList1 = [[],[],[],[],[],[],[],[],[],[]]
spellList2 = [[],[],[],[],[],[],[],[],[],[]]
spellList3 = [[],[],[],[],[],[],[],[],[],[]]
spellList4 = [[],[],[],[],[],[],[],[],[],[]]
spellList5 = [[],[],[],[],[],[],[],[],[],[]]
championList = [[],[],[],[],[],[],[],[],[],[]]
summaryChampionIconArray = [[],[],[],[],[]]
profileSummaryArray = [0, 0, 0, 0, 0]
# ----------- Get Data -----------
class SummaryStats:
    """Aggregated results of a player's (up to) ten most recent matches."""
    def __init__(self, matchesWon = None, matchesLost = None, averageKill = None, averageDeath = None, averageAssist = None, championDictOrder = None):
        self.matchesWon = matchesWon
        self.matchesLost = matchesLost
        self.averageKill = averageKill
        self.averageDeath = averageDeath
        self.averageAssist = averageAssist
        # Champion summary entries sorted by games played, most-played first.
        self.championDictOrder = championDictOrder
    def GetSummaryWins(self, matchRawDataArray, playerPuuid): #Recent win/lose/winrate
        """Count the player's wins and losses over the supplied matches."""
        self.matchesWon = 0
        self.matchesLost = 0
        for i in range(10):
            # Guard against accounts with fewer than ten matches on record.
            if len(matchRawDataArray) >= i + 1:
                participants = matchRawDataArray[i]["metadata"]["participants"]
                if matchRawDataArray[i]["info"]["participants"][participants.index(playerPuuid)]["win"]:
                    self.matchesWon += 1
                else:
                    self.matchesLost += 1
    def GetSummaryKda(self, matchRawDataArray, playerPuuid): #Player kda
        """Accumulate kill/death/assist totals.

        NOTE(review): despite the "average" attribute names, these hold the
        *sums* over the matches; any averaging happens elsewhere - confirm.
        """
        self.averageKill = 0
        self.averageDeath = 0
        self.averageAssist = 0
        for i in range(10):
            if len(matchRawDataArray) >= i + 1:
                participants = matchRawDataArray[i]["metadata"]["participants"]
                self.averageKill += matchRawDataArray[i]["info"]["participants"][participants.index(playerPuuid)]["kills"]
                self.averageDeath += matchRawDataArray[i]["info"]["participants"][participants.index(playerPuuid)]["deaths"]
                self.averageAssist += matchRawDataArray[i]["info"]["participants"][participants.index(playerPuuid)]["assists"]
    def GetSummaryChampions(self, matchRawDataArray, playerPuuid, player):
        """Summarize per-champion results and fetch up to three champion icons.

        Builds championDict[name] = [[kills, deaths, assists], wins, losses],
        sorts it descending by games played into championDictOrder, and
        appends the icons of the top three champions to the module-level
        summaryChampionIconArray for the given player column.
        """
        championDict = {}
        participantsArray = []
        championPlayedArray = []
        # ----------- Recent Champion Names -----------
        for i in range(10):
            if len(matchRawDataArray) >= i + 1:
                participants = matchRawDataArray[i]["metadata"]["participants"]
                participantsArray.append(participants)
                championPlayedArray.append(matchRawDataArray[i]["info"]["participants"][participants.index(playerPuuid)]["championName"])
        # ----------- Match Result -----------
        # Tally wins/losses per champion; championIndex tracks the match row.
        championIndex = 0
        for i in championPlayedArray:
            if i in championDict:
                if matchRawDataArray[championIndex]["info"]["participants"][participantsArray[championIndex].index(playerPuuid)]["win"]:
                    championDict[i][1] += 1
                else:
                    championDict[i][2] += 1
            else:
                if matchRawDataArray[championIndex]["info"]["participants"][participantsArray[championIndex].index(playerPuuid)]["win"]:
                    championDict[i] = [[0, 0, 0], 1, 0]
                else:
                    championDict[i] = [[0, 0, 0], 0, 1]
            championIndex += 1
        # ----------- Recent Champion Names -----------
        # Second pass: accumulate the K/D/A totals per champion.
        for i in range(10):
            if len(matchRawDataArray) >= i + 1:
                championName = matchRawDataArray[i]["info"]["participants"][participantsArray[i].index(playerPuuid)]["championName"]
                championDict[championName][0][0] += matchRawDataArray[i]["info"]["participants"][participantsArray[i].index(playerPuuid)]["kills"]
                championDict[championName][0][1] += matchRawDataArray[i]["info"]["participants"][participantsArray[i].index(playerPuuid)]["deaths"]
                championDict[championName][0][2] += matchRawDataArray[i]["info"]["participants"][participantsArray[i].index(playerPuuid)]["assists"]
        # ----------- Sort Dictionary -----------
        # Hand-rolled bubble sort, descending by games played (wins + losses).
        self.championDictOrder = [[key, value] for (key, value) in championDict.items()]
        for i in range(len(championDict)):
            aux = 0
            for j in range(len(championDict) - 1):
                if (self.championDictOrder[j][1][1] + self.championDictOrder[j][1][2]) < (self.championDictOrder[j + 1][1][1] + self.championDictOrder[j + 1][1][2]):
                    aux = self.championDictOrder[j + 1]
                    self.championDictOrder[j + 1] = self.championDictOrder[j]
                    self.championDictOrder[j] = aux
        # ----------- Champion Icon -----------
        # Load icons for the three most-played champions, downloading any
        # that are missing from the on-disk datadragon/ cache.
        for i in range(3):
            if len(self.championDictOrder) >= i + 1:
                try:
                    image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.championDictOrder[i][0]}.png").resize((32,32)))
                except:
                    response = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{currentPatch}/img/champion/{self.championDictOrder[i][0]}.png")
                    if response.status_code == 200:
                        open(f"datadragon/{self.championDictOrder[i][0]}.png", 'wb').write(response.content)
                        image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.championDictOrder[i][0]}.png").resize((32, 32)))
                summaryChampionIconArray[player].append(image)
class PlayerStats:
    """Identity and ranked standing of a single summoner."""

    def __init__(self, playerPuuid = None, encryptedSummonerId = None, playerRank = None):
        self.playerPuuid = playerPuuid #"puuid" - summoner api
        self.encryptedSummonerId = encryptedSummonerId #"id" - summoner api
        self.playerRank = playerRank #"tier + rank" - leagueV4 api

    def PlayerDataRequest(self, name):
        """Fetch the puuid and encrypted summoner id for a summoner name.

        Returns 1 on success and 0 when the API response does not contain
        the expected fields (unknown name, invalid key, rate limit, ...).
        """
        if regionMethods.sessionRegionLang == "BR":
            playerJsonData = sessionSummoner.get(f"https://br1.api.riotgames.com/lol/summoner/v4/summoners/by-name/{name}?api_key={key}")
        else:
            playerJsonData = sessionSummoner.get(f"https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/{name}?api_key={key}")
        playerRawData = ujson.loads(playerJsonData.text)
        try:
            self.playerPuuid = playerRawData["puuid"]
            self.encryptedSummonerId = playerRawData["id"]
        except KeyError:
            # Error payloads carry a "status" object instead of summoner data.
            return 0
        else:
            return 1

    def PlayerRankRequest(self):
        """Fetch and format the ranked tier for the stored summoner id."""
        if regionMethods.sessionRegionLang == "BR":
            playerRankJsonData = sessionRank.get(f"https://br1.api.riotgames.com/lol/league/v4/entries/by-summoner/{self.encryptedSummonerId}?api_key={key}")
        else:
            playerRankJsonData = sessionRank.get(f"https://na1.api.riotgames.com/lol/league/v4/entries/by-summoner/{self.encryptedSummonerId}?api_key={key}")
        playerRankRawData = ujson.loads(playerRankJsonData.text)
        try:
            tier = playerRankRawData[0]["tier"]
            # BUGFIX: the original condition `tier == "MASTER" or "GRANDMASTER"
            # or "CHALLANGER"` was always true (non-empty strings are truthy),
            # so the division was never displayed for any tier. Apex tiers
            # have no division; everything else shows "TIER DIVISION".
            if tier in ("MASTER", "GRANDMASTER", "CHALLENGER"):
                self.playerRank = tier
            else:
                self.playerRank = tier + " " + playerRankRawData[0]["rank"]
        except (IndexError, KeyError):
            # An empty entries list means the player has no ranked placement.
            self.playerRank = "Unranked"
class MatchStatsChampion:
    """Champion played by one participant, its level and its 60x60 icon."""
    def __init__(self, championId = None, championLevel = None):
        self.championId = championId #"championId" - match api
        self.championLevel = championLevel #"champLevel" - #match api
    def MatchStatsChampionRequest(self, matchRawData, playerKey, player):
        """Read the champion name/level for playerKey and cache its icon."""
        participants = matchRawData["metadata"]["participants"]
        # Despite the attribute name, this stores the champion *name* string
        # ("championName"), which is what Data Dragon icon URLs are keyed by.
        self.championId = matchRawData["info"]["participants"][participants.index(playerKey)]["championName"]
        self.championLevel = matchRawData["info"]["participants"][participants.index(playerKey)]["champLevel"]
        try:
            # Use the on-disk icon cache when available.
            image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.championId}.png").resize((60,60)))
        except:
            # Cache miss: download the icon from Data Dragon and store it.
            response = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{currentPatch}/img/champion/{self.championId}.png")
            if response.status_code == 200:
                open(f"datadragon/{self.championId}.png", 'wb').write(response.content)
                image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.championId}.png").resize((60,60)))
        # Append to the module-level cache so tkinter keeps the image alive.
        championList[player].append(image)
class MatchStatsSpells:
    """Summoner spells of one participant and their 18x18 icon sprites."""
    def __init__(self, spellArrayIds = None, spellSpriteName = None):
        self.spellArrayIds = spellArrayIds #["Summoner1Id", "Summoner2Id"] - #match api
        self.spellSpriteName = spellSpriteName #[spells[0], spells[1]] - #key in http://ddragon.leagueoflegends.com/cdn/11.19.1/data/en_US/summoner.json
    def MatchStatsSpellsRequest(self, matchRawData, playerKey):
        """Resolve both summoner-spell ids to their Data Dragon sprite names."""
        participants = matchRawData["metadata"]["participants"]
        self.spellArrayIds = [0, 0]
        self.spellSpriteName = [0, 0]
        self.spellArrayIds[0] = matchRawData["info"]["participants"][participants.index(playerKey)]["summoner1Id"]
        self.spellArrayIds[1] = matchRawData["info"]["participants"][participants.index(playerKey)]["summoner2Id"]
        # Reverse lookup: the static spell data is keyed by spell name, with
        # the numeric id stored under "key" (as a string).
        for spellDict in summonerSpellRawData.values():
            if spellDict["key"] == f"{self.spellArrayIds[0]}":
                self.spellSpriteName[0] = (spellDict["id"])
            # NOTE(review): because of this elif, if both spell slots carried
            # the same id only slot 0 would be resolved - confirm intended.
            elif spellDict["key"] == f"{self.spellArrayIds[1]}":
                self.spellSpriteName[1] = (spellDict["id"])
    def GetSpellSprites(self, player, preview):
        """Load (downloading and disk-caching if needed) both spell icons.

        Appends the PhotoImages to the module-level spellListN for the given
        player column / preview row and returns that player's whole list.
        """
        for i in range(2):
            # Slot value 0 means the spell id was never resolved to a name.
            if self.spellSpriteName[i] != 0:
                try:
                    # Use the on-disk icon cache when available.
                    image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.spellSpriteName[i]}.png").resize((18, 18)))
                except:
                    # Cache miss: fetch the icon from Data Dragon.
                    response = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{currentPatch}/img/spell/{self.spellSpriteName[i]}.png")
                    if response.status_code == 200:
                        open(f"datadragon/{self.spellSpriteName[i]}.png", 'wb').write(response.content)
                        image = ImageTk.PhotoImage(Image.open(f"datadragon/{self.spellSpriteName[i]}.png").resize((18, 18)))
                if player == 0:
                    spellList1[preview].append(image)
                elif player == 1:
                    spellList2[preview].append(image)
                elif player == 2:
                    spellList3[preview].append(image)
                elif player == 3:
                    spellList4[preview].append(image)
                elif player == 4:
                    spellList5[preview].append(image)
        if player == 0:
            return spellList1
        elif player == 1:
            return spellList2
        elif player == 2:
            return spellList3
        elif player == 3:
            return spellList4
        elif player == 4:
            return spellList5
class MatchStatsItems:
    """Item build of one participant in a match, plus its icon sprites."""

    def __init__(self, itemArray = None):
        # itemArray holds the seven item ids ("item0".."item6"); 0 = empty slot.
        self.itemArray = itemArray

    def MatchStatsItemsRequests(self, matchRawData, playerKey):
        """Read the seven item ids for the participant with puuid playerKey."""
        slot = matchRawData["metadata"]["participants"].index(playerKey)
        participant = matchRawData["info"]["participants"][slot]
        self.itemArray = [participant[f"item{n}"] for n in range(7)]

    def GetItemSprites(self, player, preview):
        """Load (downloading and disk-caching if needed) every item icon.

        Appends the PhotoImages to the module-level itemListN for the given
        player column / preview row and returns that player's whole list
        (None for an out-of-range player, as before).
        """
        targets = {0: itemList1, 1: itemList2, 2: itemList3, 3: itemList4, 4: itemList5}
        for item_id in self.itemArray:
            # Item id 0 marks an empty inventory slot.
            if item_id == 0:
                continue
            try:
                # Use the on-disk icon cache when available.
                sprite = ImageTk.PhotoImage(Image.open(f"datadragon/{item_id}.png").resize((32, 32)))
            except:
                # Cache miss: fetch the icon from Data Dragon.
                response = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{currentPatch}/img/item/{item_id}.png")
                if response.status_code == 200:
                    open(f"datadragon/{item_id}.png", 'wb').write(response.content)
                    sprite = ImageTk.PhotoImage(Image.open(f"datadragon/{item_id}.png").resize((32, 32)))
            bucket = targets.get(player)
            if bucket is not None:
                bucket[preview].append(sprite)
        return targets.get(player)
class MatchStatsPlayer:
    """Per-participant combat stats extracted from a match payload."""

    def __init__(self, playerKills = None, playerDeaths = None, playerAssists = None, playerMinions = None, playerGold = None):
        self.playerKills = playerKills #"kill" - match api
        self.playerDeaths = playerDeaths #"death" - match api
        self.playerAssists = playerAssists #"assist" - match api
        self.playerMinions = playerMinions #"totalMinionsKilled" - match api
        self.playerGold = playerGold #"goldEarned" - match api

    def MatchStatsPlayerRequest(self, matchRawData, playerKey):
        """Copy kills/deaths/assists/CS/gold for the participant playerKey."""
        slot = matchRawData["metadata"]["participants"].index(playerKey)
        stats = matchRawData["info"]["participants"][slot]
        self.playerKills = stats["kills"]
        self.playerDeaths = stats["deaths"]
        self.playerAssists = stats["assists"]
        self.playerMinions = stats["totalMinionsKilled"]
        # Format gold with "." thousands separators (e.g. 12345 -> "12.345").
        self.playerGold = '{:,}'.format(stats["goldEarned"]).replace(",", ".")

    def ScoreConstructor(self):
        """Return the "K / D / A" display string."""
        return f"{self.playerKills} / {self.playerDeaths} / {self.playerAssists}"
class MatchStatsGame:
    """Match-level metadata: map, mode, creation date, duration and result."""

    def __init__(self, mapId = None, mapName = None, gameMode = None, gameCreation = None, gameDuration = None, matchResult = None):
        self.mapId = mapId #"mapId" - match api
        self.mapName = mapName #mapId > mapName https://static.developer.riotgames.com/docs/lol/maps.json
        self.gameMode = gameMode #"gameMode" - match api
        self.gameCreation = gameCreation #"gameCreation" - match api - unix to date
        self.gameDuration = gameDuration #"gameDuration" - match api - seconds or milliseconds
        self.matchResult = matchResult #"win" - match api

    @staticmethod
    def _FormatDuration(seconds):
        # "H:MM:SS" with the hour dropped when it is zero ("MM:SS").
        text = str(datetime.timedelta(seconds = seconds))
        return text[2:] if text[0] == "0" else text

    def MatchModeRequest(self, matchRawData):
        """Resolve the map name (via the static map list) and the game mode."""
        self.mapId = matchRawData["info"]["mapId"]
        names = [entry["mapName"] for entry in mapListRawData if entry["mapId"] == self.mapId]
        self.mapName = names[0]
        self.gameMode = matchRawData["info"]["gameMode"]

    def MatchTimeRequest(self, matchRawData):
        """Fill in the region-formatted creation date and the game duration."""
        created = datetime.datetime.fromtimestamp(matchRawData["info"]["gameCreation"] / 1000)
        date_format = '%d / %m / %Y' if regionMethods.sessionRegionLang == "BR" else '%m / %d / %Y'
        self.gameCreation = created.strftime(date_format)
        # Payloads that carry gameEndTimestamp report gameDuration in
        # seconds; older ones report it in milliseconds.
        if "gameEndTimestamp" in matchRawData["info"]:
            self.gameDuration = self._FormatDuration(matchRawData["info"]["gameDuration"])
        else:
            self.gameDuration = self._FormatDuration(matchRawData["info"]["gameDuration"] // 1000)

    def GetMatchResult(self, matchRawData, playerKey):
        """Store the localized VICTORY/DEFEAT string for playerKey."""
        slot = matchRawData["metadata"]["participants"].index(playerKey)
        won = matchRawData["info"]["participants"][slot]["win"]
        self.matchResult = regionMethods.matchResultLang[0] if won else regionMethods.matchResultLang[1]
# ----------- Create Assets -----------
class ProfileSummary:
    """Builders for the per-player summary header (name/rank, recent W/L, top champions)."""
    def CreateProfileFrame(playerNumber):
        """Create the 680x60 summary container and register it in profileSummaryArray."""
        profileSummaryFrame = tkinter.Frame(historyFrameArray[playerNumber], height = 60, width = 680, background = "black")
        profileSummaryFrame.grid(columnspan = 5)
        profileSummaryFrame.grid_propagate(False)
        profileSummaryFrame.grid_rowconfigure(0, weight = 1)
        profileSummaryFrame.grid_columnconfigure((0, 1), weight = 1)
        profileSummaryArray[playerNumber] = profileSummaryFrame
    def CreateNameRank(profileSummaryArray, name, rank):
        """Render summoner name and rank (parameter is a single frame despite its name)."""
        nameRankFrame = tkinter.Frame(profileSummaryArray, height = 38, width = 135, background = "black")
        nameRankFrame.grid(row = 0, column = 0, sticky = "w")
        nameRankFrame.grid_propagate(False)
        nameRankFrame.grid_rowconfigure((0, 1), weight = 1)
        nameRankFrame.grid_columnconfigure(0, weight = 1)
        nameLabel = tkinter.Label(nameRankFrame, text = name, font = ("", 10, "bold"), background = "black", foreground = "white", borderwidth = 0, highlightthickness = 0)
        nameLabel.grid(row = 0, column = 0, sticky = "swe", pady = (0, 0))
        rankLabel = tkinter.Label(nameRankFrame, text = rank, font = ("", 10, "bold"), background = "black", foreground = "white", borderwidth = 0, highlightthickness = 0)
        rankLabel.grid(row = 1, column = 0, sticky = "nwe", pady = (0, 0))
        # Gold divider line under the name/rank block
        frameLine = tkinter.Frame(nameRankFrame, height = 1, width = 120, background = "#775829")
        frameLine.grid(row = 2, column = 0, pady = (5, 0))
    def CreateRecentMatches(profileSummaryArray, recentWinValue, recentLossValue, averageKill, averageDeath, averageAssist):
        """Render recent win/loss totals, average KDA and a segmented W/L bar.

        average* values arrive pre-multiplied by 10 — they are divided by 10
        here for display (one decimal of precision).
        """
        # ----------- Recent Matches Stats -----------
        recentMatchesStats = tkinter.Frame(profileSummaryArray, height = 110, width = 152, background = "black")
        recentMatchesStats.grid(row = 0, column = 1, sticky = "w", pady = (7, 0))
        recentMatchesStats.grid_propagate(False)
        recentMatchesStats.grid_rowconfigure((0, 1), weight = 1)
        recentMatchesStats.grid_columnconfigure((0, 1), weight = 1)
        # ----------- Player Performance (Recent Matches Stats) -----------
        recentPerformance = tkinter.Frame(recentMatchesStats, height = 30, width = 150)
        recentPerformance.grid(row = 0, column = 0)
        recentPerformance.grid_propagate(False)
        recentPerformance.grid_rowconfigure((0, 1), weight = 1)
        recentPerformance.grid_columnconfigure((0), weight = 1)
        winrate = f"{recentWinValue} / {recentLossValue}"
        kda = f"{averageKill / 10} / {averageDeath / 10} / {averageAssist / 10}"
        recentWinrateLabel = tkinter.Label(recentPerformance, text = winrate, font = ("", 11, "bold"), background = "black", foreground = "white")
        recentWinrateLabel.grid(row = 0, column = 0, sticky = "we")
        averageKdaLabel = tkinter.Label(recentPerformance, text = kda, font = ("", 8, "bold"), background = "black", foreground = "white")
        averageKdaLabel.grid(row = 1, column = 0, sticky = "we")
        # ----------- Winrate Stats (Recent Matches Stats) -----------
        winrateGraph = tkinter.Frame(recentMatchesStats, height = 22, width = 150, background = "black", highlightthickness = 0, borderwidth = 0)
        winrateGraph.grid(row = 1, column = 0, pady = (0, 4))
        winrateGraph.grid_propagate(False)
        winrateGraph.grid_columnconfigure((0, 1, 2), weight = 1)
        winrateGraph.grid_rowconfigure(0, weight = 1)
        recentWinsLabel = tkinter.Label(winrateGraph, text = f"{recentWinValue} V", font = ("", 10, "bold"), background = "black", foreground = "deep sky blue", borderwidth = 0,
        highlightthickness = 0)
        recentWinsLabel.grid(row = 0, column = 0, sticky = "e")
        kdaBar = tkinter.Frame(winrateGraph, height = 15, width = 80, highlightthickness = 0, borderwidth = 0)
        kdaBar.grid(row = 0, column = 1)
        recentLossesLabel = tkinter.Label(winrateGraph, text = f"{recentLossValue} D", font = ("", 10, "bold"), background = "black", foreground = "red", borderwidth = 0,
        highlightthickness = 0)
        recentLossesLabel.grid(row = 0, column = 2, sticky = "w")
        # One 8px segment per win (blue) followed by one per loss (red)
        for i in range(recentWinValue):
            filledColor = tkinter.Canvas(kdaBar, height = 15, width = 8, background = "deep sky blue", highlightthickness = 0, borderwidth = 0)
            filledColor.grid(row = 0, column = i)
        for i in range(recentLossValue):
            filledColor = tkinter.Canvas(kdaBar, height = 15, width = 8, background = "red", highlightthickness = 0, borderwidth = 0)
            filledColor.grid(row = 0, column = recentWinValue + i)
        # ----------- Vertical Line (Recent Matches Stats) -----------
        frameLine = tkinter.Frame(recentMatchesStats, height = 110, width = 1, background = "#775829")
        frameLine.grid(row = 0,rowspan = 2, column = 1,sticky = "ns")
    def CreateRecentChampion(profileSummaryArray, championDict, championIconArray):
        """Render up to three most-played champions with icon, winrate and KDA.

        championDict entries look like (championId, [[kills, deaths, assists], wins, losses])
        — assumed from the indexing below; confirm against GetSummaryChampions.
        """
        recentChampionsFrame = tkinter.Frame(profileSummaryArray, height = 34, width = 381, background = "black")
        recentChampionsFrame.grid(row = 0, column = 2)
        recentChampionsFrame.grid_propagate(False)
        recentChampionsFrame.grid_columnconfigure((0, 1, 2), weight = 1)
        recentChampionsFrame.grid_rowconfigure(0, weight = 1)
        for i in range(3):
            if len(championDict) >= i + 1:
                # ----------- Champion Data -----------
                championWinrate = f"{championDict[i][1][1]} / {championDict[i][1][2]}"
                championWinrate = championWinrate + " (" + str("{:.0f}".format((championDict[i][1][1] / (championDict[i][1][1] + championDict[i][1][2])) * 100)) + "%)"
                championAverageKill = "{:.1f}".format(championDict[i][1][0][0] / (championDict[i][1][1] + championDict[i][1][2]))
                championAverageDeath = "{:.1f}".format(championDict[i][1][0][1] / (championDict[i][1][1] + championDict[i][1][2]))
                championAverageAssist = "{:.1f}".format(championDict[i][1][0][2] / (championDict[i][1][1] + championDict[i][1][2]))
                championKda = f"{championAverageKill} / {championAverageDeath} / {championAverageAssist}"
                # ----------- Recent Played Champion -----------
                mostPlayedChampion = tkinter.Frame(recentChampionsFrame, height = 34, width = 127, background = "black")
                mostPlayedChampion.grid(row = 0, column = i)
                mostPlayedChampion.grid_propagate(False)
                mostPlayedChampion.grid_columnconfigure((0, 1), weight = 1)
                mostPlayedChampion.grid_rowconfigure(0, weight = 1)
                # ----------- Champion Icon (Recent Played Champion) -----------
                championBorder = tkinter.Frame(mostPlayedChampion, height = 34, width = 34, background = "#775829", borderwidth = 0, highlightthickness = 0)
                championBorder.grid(row = 0, column = 0, sticky = "w")
                championIcon = tkinter.Canvas(championBorder, height = 32, width = 32, background = "black", borderwidth = 0, highlightthickness = 0)
                championIcon.grid(row = 0, column = 0, padx = 1, pady = 1)
                championIcon.create_image((16, 16), image = championIconArray[i])
                # ----------- Champion Stats Label (Recent Played Champion) -----------
                championStats = tkinter.Frame(mostPlayedChampion, height = 34, width = 84, background = "black", borderwidth = 0)
                championStats.grid(row = 0, column = 1, padx = (0, 6), sticky = "w")
                championStats.grid_propagate(False)
                championStats.grid_columnconfigure(0, weight = 1)
                championStats.grid_rowconfigure((0, 1), weight = 1)
                championWinrateLabel = tkinter.Label(championStats, text = championWinrate, font = ("Arial Narrow", 10, "bold"), background = "black", foreground = "white")
                championWinrateLabel.grid(row = 0, column = 0, sticky = "w" )
                championKdaLabel = tkinter.Label(championStats, text = championKda, font = ("Arial Narrow", 10, "bold"), background = "black", foreground = "white")
                championKdaLabel.grid(row = 1, column = 0, sticky = "w")
class MatchPreview:
    """Builders for one match row of the history list (champion, result, items, times)."""
    def ChampionCircle(frameNumber, championImage, playerLevel):
        """Draw the champion portrait inside a circular frame with the level badge."""
        circle = tkinter.Canvas(frameNumber, height = 85, width = 85, background = "black", highlightthickness = 0)
        circle.grid(row = 0, column = 0)
        circle.create_image((42, 42), image = championImage)
        circle.create_image((42, 42), image = championCircleFrame)
        circle.create_image((65, 62), image = levelCircleFrame)
        circle.create_text((65, 63), text = playerLevel, fill = "#918c83", font = ("", 8, "bold"))
    def GamemodeResult(frameNumber, matchResult, gameMode, spellArray, preview):
        """Render victory/defeat text, game mode and the two summoner spells."""
        gamemodeResultFrame = tkinter.Frame(frameNumber, height = 63, width = 110, background = "black")
        gamemodeResultFrame.grid(row = 0, column = 1, pady = (14, 0), sticky= "nwe")
        gamemodeResultFrame.grid_rowconfigure((0 , 1, 2), weight = 1)
        gamemodeResultFrame.grid_propagate(False)
        # ----------- Match Result -----------
        # Red when the localized "defeat" text matches, blue otherwise
        matchResultLabel = tkinter.Label(gamemodeResultFrame, text = matchResult, background = "black",
        foreground = "red" if matchResult == regionMethods.matchResultLang[1] else "deep sky blue", borderwidth = 0, font = ("", 10, "bold")) #text = matchResult/gameMode
        matchResultLabel.grid(row = 0, column = 0, sticky = "nw")
        # ----------- Gamemode -----------
        matchGamemodeLabel = tkinter.Label(gamemodeResultFrame, text = gameMode, background = "black", foreground = "#918c83", borderwidth = 0,
        font = ("", 9, "bold")) #text = matchResult/gameMode
        matchGamemodeLabel.grid(row = 1, column = 0, sticky= "nw", pady = (0,3))
        # ----------- Spell Sprites -----------
        spellFrame = tkinter.Frame(gamemodeResultFrame, height = 18, width = 36, background = "#775829", borderwidth = 0)
        spellFrame.grid(row = 2, column = 0, sticky = "nw", pady = (0, 3))
        for i in range(2):
            if len(spellArray[preview]) >= i + 1:
                if i == 1:
                    spellSprite = tkinter.Canvas(spellFrame, height = 18, width = 18 , highlightthickness = 0, borderwidth = 0)
                    spellSprite.grid(row = 0, column = i, padx = 1, pady = 1)
                else:
                    spellSprite = tkinter.Canvas(spellFrame, height = 18, width = 18 , highlightthickness = 0, borderwidth = 0)
                    spellSprite.grid(row = 0, column = i, padx = (1,0), pady = 1)
                spellSprite.create_image((9, 9), image = spellArray[preview][i])
    def PlayerResult(frameNumber, gold, totalMinion, score, itemArray, preview):
        """Render the 7 item slots, the K/D/A score, minion count and gold earned."""
        playerResultFrame = tkinter.Frame(frameNumber, height = 64, width = 192, background = "black", borderwidth = 0)
        playerResultFrame.grid(row = 0, column = 2, pady = (16, 0), padx = (20, 20), sticky = "n")
        # ----------- Items -----------
        itemFrame = tkinter.Frame(playerResultFrame, height = 32, width = 192, background = "#775829", borderwidth = 0)
        itemFrame.grid(row = 0, column = 0)
        # Always draw 7 slots; fill only the ones the player actually had
        for i in range(7):
            if i == 6:
                itemSprite = tkinter.Canvas(itemFrame, height = 32, width = 32 , background = "black", highlightthickness = 0, borderwidth = 0)
                itemSprite.grid(row = 0, column = i, padx = 1, pady = 1)
            else:
                itemSprite = tkinter.Canvas(itemFrame, height = 32, width = 32 , background = "black", highlightthickness = 0, borderwidth = 0)
                itemSprite.grid(row = 0, column = i, padx = (1,0), pady = 1)
            if i < len(itemArray[preview]):
                itemSprite.create_image((16,16), image = itemArray[preview][i])
        # ----------- Score -----------
        scoreFrame = tkinter.Frame(playerResultFrame, height = 11, width = 192, background = "black", borderwidth = 0)
        scoreFrame.grid(row = 1, column = 0, pady = (9, 0), sticky = "swe")
        scoreFrame.grid_columnconfigure((0, 1, 2), weight = 1)
        kdaLabel = tkinter.Label(scoreFrame, text = score, background = "black", foreground = "#918c83", font = ("Heuristica", 11,"bold"), borderwidth = 0)
        kdaLabel.grid(row = 0, column = 0, sticky = "w")
        # ----------- Minions -----------
        minionFrame = tkinter.Frame(scoreFrame, background = "black")
        minionFrame.grid(row = 0, column = 1)
        minionLabel = tkinter.Label(minionFrame, text = totalMinion, background = "black", foreground = "#918c83", font = ("", 11,"bold"), borderwidth = 0)
        minionLabel.grid(row = 0, column = 0, padx = (0, 2))
        minionCanvas = tkinter.Canvas(minionFrame, background = "black", highlightthickness = 0, height = 16, width = 16)
        minionCanvas.grid(row = 0, column = 1)
        minionCanvas.create_image((8, 7), image = minionIcon)
        # ----------- Gold -----------
        goldFrame = tkinter.Frame(scoreFrame, background = "black")
        goldFrame.grid(row = 0, column = 2, sticky = "e")
        goldLabel = tkinter.Label(goldFrame, text = gold, background = "black", foreground = "#918c83",font = ("", 11,"bold"), borderwidth = 0)
        goldLabel.grid(row = 0, column = 0, padx = (0, 4))
        goldCanvas = tkinter.Canvas(goldFrame, background = "black", highlightthickness = 0, height = 17, width = 17)
        goldCanvas.grid(row = 0, column = 1)
        goldCanvas.create_image((8, 8), image = goldIcon)
    def TimeData(frameNumber, mapName, gameDuration, gameCreation):
        """Render the map name plus duration/date of the match."""
        dataFrame = tkinter.Frame(frameNumber, height = 85, width = 100, background = "black", borderwidth = 0)
        dataFrame.grid(row = 0, column = 3, pady = 5, sticky = "nswe")
        dataFrame.grid_rowconfigure((0, 1), weight=1)
        dataFrame.grid_columnconfigure((0), weight=1)
        dataFrame.grid_propagate(False)
        mapLabel = tkinter.Label(dataFrame, text = mapName, background = "black", font = ("", 9, "bold"), foreground = "#918c83")
        mapLabel.grid(row = 0, column = 0, sticky = "w")
        dateTimeLabel = tkinter.Label(dataFrame, text = f"{gameDuration} · {gameCreation}", font = ("", 9, "bold"), background = "black", foreground = "#918c83")
        dateTimeLabel.grid(row = 1, column = 0, pady = (0, 20), sticky = "w")
    def PreviewLine(frameNumber):
        """Draw the horizontal separator under one match row."""
        line = tkinter.Frame(frameNumber, height = 1, width = 800, background = "#7d6f4b", borderwidth = 0)
        line.grid(row = 0, columnspan = 6, sticky = "swe")
#endregion
#region Match Data
# One list of raw match payloads per tracked player (index 0..4)
matchDataArray = [[], [], [], [], []]
def MatchDataRequest(match):
    """Fetch and parse one match-v5 payload from a full request URL."""
    matchJsonData = sessionMatch.get(match)
    matchRawData = ujson.loads(matchJsonData.text)
    return matchRawData
def MatchListDataRequest(playerPuuid, player):
    """Fetch the player's last 10 match ids, then the matches in parallel.

    Returns 0 when the player has no matches; otherwise fills
    matchDataArray[player] (return value unused).
    """
    matchListJsonData = sessionMatchList.get(f"https://americas.api.riotgames.com/lol/match/v5/matches/by-puuid/{playerPuuid}/ids?start=0&count=10&api_key={key}")
    matchListRawData = ujson.loads(matchListJsonData.text)
    multithreadMatchList = []
    for i in range(10):
        if len(matchListRawData) >= i + 1:
            multithreadMatchList.append(f"https://americas.api.riotgames.com/lol/match/v5/matches/{matchListRawData[i]}?api_key={key}")
    if len(matchListRawData) == 0:
        return 0
    # One worker per match keeps the 10 downloads concurrent
    with futures.ThreadPoolExecutor(max_workers = 10) as executor:
        for request in executor.map(MatchDataRequest, multithreadMatchList):
            matchDataArray[player].append(request)
def ChangeFrame(player):
    """Raise the scrollable history frame matching a 'playerN' identifier."""
    frameIndexById = {
        "player1": 0,
        "player2": 1,
        "player3": 2,
        "player4": 3,
        "player5": 4,
    }
    frameIndex = frameIndexById.get(player)
    if frameIndex is not None:
        scrollableMainFrameArray[frameIndex].tkraise()
#endregion
#region Instantiation
# One full set of stats/request helper objects per tracked player (1..5).
# The per-player arrays at the bottom are what the rest of the app indexes.
playerSummaryStats1 = SummaryStats()
playerStats1 = PlayerStats()
matchStatsChampion1 = MatchStatsChampion()
matchStatsSpells1 = MatchStatsSpells()
matchStatsItems1 = MatchStatsItems()
matchStatsPlayer1 = MatchStatsPlayer()
matchStatsGame1 = MatchStatsGame()
playerSummaryStats2 = SummaryStats()
playerStats2 = PlayerStats()
matchStatsChampion2 = MatchStatsChampion()
matchStatsSpells2 = MatchStatsSpells()
matchStatsItems2 = MatchStatsItems()
matchStatsPlayer2 = MatchStatsPlayer()
matchStatsGame2 = MatchStatsGame()
playerSummaryStats3 = SummaryStats()
playerStats3 = PlayerStats()
matchStatsChampion3 = MatchStatsChampion()
matchStatsSpells3 = MatchStatsSpells()
matchStatsItems3 = MatchStatsItems()
matchStatsPlayer3 = MatchStatsPlayer()
matchStatsGame3 = MatchStatsGame()
playerSummaryStats4 = SummaryStats()
playerStats4 = PlayerStats()
matchStatsChampion4 = MatchStatsChampion()
matchStatsSpells4 = MatchStatsSpells()
matchStatsItems4 = MatchStatsItems()
matchStatsPlayer4 = MatchStatsPlayer()
matchStatsGame4 = MatchStatsGame()
playerSummaryStats5 = SummaryStats()
playerStats5 = PlayerStats()
matchStatsChampion5 = MatchStatsChampion()
matchStatsSpells5 = MatchStatsSpells()
matchStatsItems5 = MatchStatsItems()
matchStatsPlayer5 = MatchStatsPlayer()
matchStatsGame5 = MatchStatsGame()
#endregion
playerSummaryStatsArray = [playerSummaryStats1, playerSummaryStats2, playerSummaryStats3, playerSummaryStats4, playerSummaryStats5]
playerStatsArray = [playerStats1, playerStats2, playerStats3, playerStats4, playerStats5]
statsChampionArray = [matchStatsChampion1, matchStatsChampion2, matchStatsChampion3, matchStatsChampion4, matchStatsChampion5]
statsSpellsArray = [matchStatsSpells1, matchStatsSpells2, matchStatsSpells3, matchStatsSpells4, matchStatsSpells5]
statsItemsArray = [matchStatsItems1, matchStatsItems2, matchStatsItems3, matchStatsItems4, matchStatsItems5]
matchStatsPlayerArray = [matchStatsPlayer1, matchStatsPlayer2, matchStatsPlayer3, matchStatsPlayer4, matchStatsPlayer5]
statsGameArray = [matchStatsGame1, matchStatsGame2, matchStatsGame3, matchStatsGame4, matchStatsGame5]
def AssignHistoryButton(player):
    """Wire one player's history tab button to frame switching and tab highlighting."""
    if 0 <= player <= 4:
        button = playerHistoryButtonArray[player]
        # Bind the index as a default argument so each lambda keeps its own value
        button.bind("<Button-1>", lambda event, p = player: ChangeFrame(f"player{p + 1}"))
        button.bind("<Button-1>", lambda event, p = player: ChangeEntry(event, p), add = "+")
def DestroyOldApp():
    """Tear down all widgets and cached sprite/match data from the previous search."""
    for playerIndex in range(5):
        summaryChampionIconArray[playerIndex].clear()
        if playerHistoryButtonArray[playerIndex] != 0:
            scrollableMainFrameArray[playerIndex].destroy()
        matchDataArray[playerIndex].clear()
        profileSummaryArray[playerIndex].destroy()
    perPreviewCaches = (
        itemList1, itemList2, itemList3, itemList4, itemList5,
        spellList1, spellList2, spellList3, spellList4, spellList5,
        championList,
    )
    for cache in perPreviewCaches:
        for previewIndex in range(10):
            cache[previewIndex].clear()
def AppBuilder(event):
    """Rebuild the whole UI for every non-empty player entry (search handler)."""
    lastColor = []
    DestroyOldApp()
    # ----------- UI Creation -----------
    CreateButtonBG()
    for i in range(5):
        # NOTE(review): `and " "` is always truthy, so this is effectively
        # just `!= ""` — confirm whether whitespace-only names should be skipped.
        if playerArray[i].get() != "" and " ":
            if playerStatsArray[i].PlayerDataRequest(playerArray[i].get()) == 0:
                pass
            elif MatchListDataRequest(playerStatsArray[i].playerPuuid, i) == 0:
                pass
            else:
                CreateHistoryButton(i)
                CreateHistoryFrame(i)
                CreateMatchPreview(i)
                AssignHistoryButton(i)
                playerStatsArray[i].PlayerRankRequest()
                playerSummaryStatsArray[i].GetSummaryWins(matchDataArray[i], playerStatsArray[i].playerPuuid)
                playerSummaryStatsArray[i].GetSummaryKda(matchDataArray[i], playerStatsArray[i].playerPuuid)
                playerSummaryStatsArray[i].GetSummaryChampions(matchDataArray[i], playerStatsArray[i].playerPuuid, i)
                ProfileSummary.CreateNameRank(profileSummaryArray[i], playerArray[i].get(), playerStatsArray[i].playerRank)
                ProfileSummary.CreateRecentMatches(profileSummaryArray[i], playerSummaryStatsArray[i].matchesWon, playerSummaryStatsArray[i].matchesLost,
                playerSummaryStatsArray[i].averageKill, playerSummaryStatsArray[i].averageDeath, playerSummaryStatsArray[i].averageAssist)
                ProfileSummary.CreateRecentChampion(profileSummaryArray[i], playerSummaryStatsArray[i].championDictOrder, summaryChampionIconArray[i])
                lastColor.append(playerHistoryButtonArray[i])
        # Highlight the last created tab once all entries were processed.
        # NOTE(review): raises IndexError if every entry is empty — confirm.
        elif i == 4:
            lastColor[len(lastColor) - 1].configure(background = "#042937")
    for player in range(5):
        if playerHistoryButtonArray[player] != 0:
            # NOTE(review): assumes 10 matches were fetched; fewer would
            # raise IndexError on matchDataArray[player][preview] — confirm.
            for preview in range(10):
                # ----------- Data Requests -----------
                statsChampionArray[player].MatchStatsChampionRequest(matchDataArray[player][preview], playerStatsArray[player].playerPuuid, player)
                statsSpellsArray[player].MatchStatsSpellsRequest(matchDataArray[player][preview], playerStatsArray[player].playerPuuid)
                statsItemsArray[player].MatchStatsItemsRequests(matchDataArray[player][preview], playerStatsArray[player].playerPuuid)
                matchStatsPlayerArray[player].MatchStatsPlayerRequest(matchDataArray[player][preview], playerStatsArray[player].playerPuuid)
                statsGameArray[player].GetMatchResult(matchDataArray[player][preview], playerStatsArray[player].playerPuuid)
                statsGameArray[player].MatchModeRequest(matchDataArray[player][preview])
                statsGameArray[player].MatchTimeRequest(matchDataArray[player][preview])
                # ----------- UI Elements -----------
                MatchPreview.ChampionCircle(playerMatchArray[player][preview], championList[player][preview], statsChampionArray[player].championLevel)
                MatchPreview.GamemodeResult(playerMatchArray[player][preview], statsGameArray[player].matchResult, statsGameArray[player].gameMode,
                statsSpellsArray[player].GetSpellSprites(player, preview), preview)
                MatchPreview.PlayerResult(playerMatchArray[player][preview], matchStatsPlayerArray[player].playerGold, matchStatsPlayerArray[player].playerMinions,
                matchStatsPlayerArray[player].ScoreConstructor(), statsItemsArray[player].GetItemSprites(player, preview), preview)
                MatchPreview.TimeData(playerMatchArray[player][preview], statsGameArray[player].mapName, statsGameArray[player].gameDuration, statsGameArray[player].gameCreation)
                MatchPreview.PreviewLine(playerMatchArray[player][preview])
def ChangeSearch(event):
    """Flash the search button colour on press and restore it on release."""
    colorByEvent = {"ButtonPress": "#07141f", "ButtonRelease": "black"}
    eventName = str(event.type)
    if eventName in colorByEvent:
        searchButton.config(background = colorByEvent[eventName])
def ChangeEntry(event, player):
    """Highlight the selected player's tab and dim every other existing tab."""
    for index in range(5):
        button = playerHistoryButtonArray[index]
        if index == player:
            button.configure(background = "#042937")
        elif button != 0:
            button.configure(background = "black")
# Search triggers: clicking the button (with press/release colour feedback)
# or pressing Enter in any of the five name entries.
searchButton.bind("<Button-1>", ChangeSearch)
searchButton.bind("<Button-1>", AppBuilder, add = "+")
searchButton.bind("<ButtonRelease>", ChangeSearch)
player1.bind("<Return>", AppBuilder)
player2.bind("<Return>", AppBuilder)
player3.bind("<Return>", AppBuilder)
player4.bind("<Return>", AppBuilder)
player5.bind("<Return>", AppBuilder)
# Blocks until the window is closed
root.mainloop()
#pyinstaller --onefile --noconsole MainFile.py
| WandersonKnight/League-Quick-Data | MainFile.py | MainFile.py | py | 54,615 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.Session",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.Session",... |
12424083867 | __author__ = "Vanessa Sochat, Alec Scott"
__copyright__ = "Copyright 2021-2022, Vanessa Sochat and Alec Scott"
__license__ = "Apache-2.0"
from .command import Command
import json
# Every command must:
# 1. subclass Command
# 2. defined what container techs supported for (class attribute) defaults to all
# 3. define run function with kwargs
class Size(Command):
    supported_for = ["docker", "podman"]
    pre_message = "Sizing Container..."

    def run(self, **kwargs):
        """
        Get a container size.

        Runs `<tech> container ls -s` filtered by name and returns the
        reported "Size" field on success.
        """
        # Always run this first to make sure container tech is valid
        self.check(**kwargs)

        # These are both required for docker/podman
        container_name = self.kwargs["container_name"]

        out, err = self.execute_host(
            [
                self.tech,
                "container",
                "ls",
                "-s",
                "--filter",
                "name=%s" % container_name,
                "--format",
                '"{{ json .}}"',
            ]
        )
        if err:
            return self.return_failure(err)
        out = out.strip().strip('"')
        # No matching container yields empty output; json.loads would raise
        # on it, so fail explicitly instead of crashing.
        if not out:
            return self.return_failure("No container found named %s" % container_name)
        out = json.loads(out)
        return self.return_success(out["Size"])
class InspectContainer(Command):
    supported_for = ["docker", "podman"]
    pre_message = "Inspecting Container..."

    def run(self, **kwargs):
        """
        Inspect a container fully, or specific sections
        """
        # Always run this first to make sure container tech is valid
        self.check(**kwargs)

        # These are both required for docker/podman
        container_name = self.kwargs["container_name"]

        # inspect defaults to labels and environment
        if self.args:
            # NOTE(review): `result` is overwritten on every iteration, so with
            # several sections only the last one's result survives — confirm
            # whether results should be aggregated instead.
            for section in self.args:
                result = self.run_command(
                    [
                        self.tech,
                        "inspect",
                        "--format",
                        "{{json .%s }}" % section.capitalize(),
                        container_name,
                    ]
                )
        else:
            result = self.run_command([self.tech, "inspect", container_name])
        if result:
            return result
        return self.return_success()
| syspack/paks | paks/commands/inspect.py | inspect.py | py | 2,263 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "command.Command",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "command.Command",
"line_number": 47,
"usage_type": "name"
}
] |
33022799224 | from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
# NOTE(review): Django <1.10-style configuration — patterns() and string view
# references were removed in later Django versions.
admin.autodiscover()
urlpatterns = patterns('',
    url(r'^$', 'yj.views.home'),
    url(r'^api/', include('api.urls')),
    # Include an application:
    # url(r'^app_name/', include('app_name.urls', namespace="app_name")),
    url(r'^admin/', include(admin.site.urls)),
)
# Serve static assets in development
urlpatterns += staticfiles_urlpatterns()
| bob1b/yj | yj/urls.py | urls.py | py | 477 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.patterns",
"line_number": 7,
"usage_type": "call"
},
{
"api... |
9836602574 | import matplotlib.pyplot as plt
import numpy as np
# Quadrupole potential as a function of angle.
k = 9.0e9    # Coulomb constant (N*m^2/C^2)
q = 1.9e-19  # charge magnitude (C)
d = 1.0e1    # charge separation
t = np.linspace(0, 2 * np.pi, 10000)
i = 1        # radial distance
# Fix: original line read "V=V=(...)" — a duplicated assignment typo.
V = (3 * k * q * (d ** 2) / (2 * (i ** 3))) * np.cos(2 * t)
plt.plot(t, V, color='black')
plt.xlabel('theta')
plt.ylabel('Potential')
plt.show()
{
"api_name": "numpy.linspace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"lin... |
70789372028 | # Counting element
# Given an integer array, count element x such that x + 1 is also in array.If there're duplicates in array, count them separately.
# Example 1:
# Input: {1, 2, 3}
# Output: 2
# Explanation:
# First element is 1 + 1 = 2 (2 is present in an array)
# Second element is 2 + 1 = 3 (3 is present in an array)
# Third element is 3 + 1 = 4 (4 is not present in an array)
#
# Example 2:
# Input: {1, 1, 3, 3, 5, 5, 7, 7}
# Output: 0
#
# Example 3:
# Input: {1, 3, 2, 3, 5, 0}
# Output: 3
# Explanation:
# 1 + 1 = 2 (Exist)
# 3 + 1 = 4 (Not exist)
# 2 + 1 = 3 (Exist)
# 3 + 1 = 4 (Not exist)
# 5 + 1 = 6 (Not exist)
# 0 + 1 = 1 (Exist)
#
# Example 4:
# Input: {1, 1, 2, 2}
# Output: 2
from collections import defaultdict
class Solution(object):
    def countElements(self, arr):
        """Count elements x (duplicates counted separately) where x + 1 is also in arr."""
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        # A value contributes its full frequency whenever value + 1 occurs at all.
        return sum(freq for value, freq in counts.items() if value + 1 in counts)
| deepk777/leetcode | 30-day-challenge-2020/April/week1/day7-counting-element.py | day7-counting-element.py | py | 1,238 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 38,
"usage_type": "call"
}
] |
39943332700 | from decouple import config
from logic import bet
# Starting bankroll read via python-decouple from the MY_MONEY setting
My_Money = int(config('MY_MONEY'))
while True:
    print('you have ' + str(My_Money))
    print('do you wanna play? (yes or no)')
    a = input('')
    if a.strip() == 'no':
        print('you are out of the game')
        break
    elif a.strip() == 'yes':
        b = int(input('guess the number from 1 to 30 '))
        g = int(input('your bet '))
        # Stake is deducted up front; bet() returns the winnings to add back
        My_Money -= g
        My_Money += bet(b, g)
    else:
        print('yes or no')
| aliiiiaa/hw5 | 25-2_Aliia_Abyllkasymova_hw_5.py | 25-2_Aliia_Abyllkasymova_hw_5.py | py | 509 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "decouple.config",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logic.bet",
"line_number": 16,
"usage_type": "call"
}
] |
30906506351 | import mailchimp_marketing as MailchimpMarketing
from mailchimp_marketing.api_client import ApiClientError
def survey_monkey_distribute_daily(**kwargs):
    """Configure a Mailchimp client and ping the API as a health check.

    Expects kwargs: 'api_key' and 'server' (Mailchimp datacenter prefix).
    """
    api_key = kwargs['api_key']
    server = kwargs['server']
    try:
        client = MailchimpMarketing.Client()
        client.set_config({
            "api_key": api_key,
            "server": server
        })
        response = client.ping.get()
        print(response)
    except ApiClientError as error:
        print(error)
# NOTE(review): 'client' is local to the function above, so these module-level
# lines raise NameError at import time — confirm whether they should run inside
# the function after a successful ping.
x = client.campaigns.replicate('df4d22a9b2')['id']
client.campaigns.send(x)
{
"api_name": "mailchimp_marketing.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mailchimp_marketing.api_client.ApiClientError",
"line_number": 16,
"usage_type": "name"
}
] |
33369908821 | """
A script to extract IuPS addresses from an RNC CMExport file
Works with Huawei RNC CMExport
By Tubagus Rizal
2017
"""
import xml.etree.ElementTree as ET
import glob
import pdb
def getRncInfo(xmlroot):
    """Extract the RNC's fdn/name/neID attributes into a dict from a CMExport tree."""
    # XML attribute name -> key in the returned dict
    fieldMap = {"fdn": "fdn", "name": "name", "neID": "neid"}
    rnc = {}
    for rncInfo in xmlroot.findall(".//*[@className='BSC6900UMTSNE']/attr"):
        key = fieldMap.get(rncInfo.attrib["name"])
        if key is not None:
            rnc[key] = rncInfo.text
    return rnc
def getIuPSIpAddress(xmlroot):
    """Return the unique IP addresses of IP paths whose interface type is IUPS.

    Fix: the original scanned one flat list of <attr> nodes across every
    IPPATH element, so `ipAddress` could be unbound (NameError) or stale from
    a previous path when attributes appeared out of order. Attributes are now
    paired per IPPATH element.
    """
    iupsIpAddr = []
    for ipPath in xmlroot.findall(".//*[@className='BSC6900UMTSIPPATH']"):
        ipAddress = None
        isIuPS = False
        for attr in ipPath.findall("attr"):
            if attr.attrib["name"] == "IPADDR":
                ipAddress = attr.text
            elif attr.attrib["name"] == "ITFT" and attr.text == "IUPS":
                isIuPS = True
        # Deduplicate while preserving discovery order
        if isIuPS and ipAddress is not None and ipAddress not in iupsIpAddr:
            iupsIpAddr.append(ipAddress)
    return iupsIpAddr
def main():
    """Parse every CMExport XML under the fixed folder and print RNC + IuPS info."""
    # NOTE(review): hard-coded Windows path — presumably site-specific; confirm.
    xmlFolder = "D:\\1000-MyDocuments\\100-Projects\\098-ProximusCFT\\TAADisttributor\\3G-OSS"
    xmlFiles = [file for file in glob.glob(xmlFolder + "/**/*.xml", recursive=True)]
    for xmlFile in xmlFiles:
        tree = ET.parse(xmlFile)
        root = tree.getroot()
        #print result
        rnc = getRncInfo(root)
        for key, value in rnc.items():
            print (key, value, ",", end=" ")
        iupsIpAddr = getIuPSIpAddress(root)
        for item in iupsIpAddr:
            print ( "IuPS: ", item, end=" ")
        print("\n")
if __name__ == "__main__":
    main()
| trizal/python-CMExportReader | getIuPS.py | getIuPS.py | py | 1,817 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 47,
"usage_type": "name"
}
] |
22049716249 | from os import name
import sys
import requests
import time
import threading
sys.path.append('../')
from DeskFoodModels.DeskFoodLib import Item, OrderStatus, Order
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QCheckBox, QComboBox, QDialog, QApplication, QListWidget, QMenu, QPushButton, QStackedWidget, QTextBrowser, QWidget
from urllib.request import urlopen
import json
from DeskFoodModels import firebaseAuth
#---Global Variables---#
#orderID = ""
#order = Order()
userID = ""
#--------------------Login Window--------------------
class loginScreen(QDialog):
    """Login window: authenticates against Firebase and routes the user.

    The special account ``admin@admin.com`` is sent to the kitchen
    management menu; every other user goes to the customer/runner choice.
    """

    def __init__(self):
        super(loginScreen, self).__init__()
        loadUi("Login.ui", self)
        self.loginButton.clicked.connect(self.login)
        self.registerButton.clicked.connect(self.register)
        # Mask the password as the user types it.
        self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)

    def login(self):
        """Attempt to sign in; on success route to the proper screen."""
        self.username = self.emailEdit.text()
        self.password = self.passwordEdit.text()
        self.user = firebaseAuth.login(self.username, self.password)
        global userID
        if self.user:
            # Bug fix: only read the uid after a successful login.  The old
            # code indexed self.user["localId"] unconditionally and crashed
            # when firebaseAuth.login returned a falsy value.
            userID = self.user["localId"]
            # Bug fix (was flagged with a BUG comment): compare the email
            # case-insensitively so "Admin@admin.com" still reaches the
            # kitchen menu instead of the customer screens.
            if self.username.strip().lower() == "admin@admin.com":
                self.acceptadmin()
            else:
                self.accept()
        else:
            # Failed login: clear the form and let the user retry.
            self.emailEdit.setText("")
            self.passwordEdit.setText("")
            self.emailEdit.setFocus()
            # self.errorLabel.setText("Invalid username or password")

    def register(self):
        """Switch to the registration screen."""
        kScreen = registerScreen()
        widget.addWidget(kScreen)
        widget.setCurrentIndex(widget.currentIndex() + 1)

    def accept(self):
        """Route a regular user to the customer/runner choice screen."""
        kScreen = customerORRunner()
        widget.addWidget(kScreen)
        widget.setCurrentIndex(widget.currentIndex() + 1)

    def acceptadmin(self):
        """Route the admin account to the kitchen management menu."""
        kScreen = kitchenMenu()
        widget.addWidget(kScreen)
        widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Register Window--------------------
class registerScreen(QDialog):
def __init__(self):
super(registerScreen, self).__init__()
loadUi("SignUp.ui", self)
self.registerButton.clicked.connect(self.register)
self.registerButton.setEnabled(False)
self.passwordEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.passwordConfirmEdit.setEchoMode(QtWidgets.QLineEdit.Password)
self.termsAndConditionsRadioButton.toggled.connect(self.enableRegisterButton)
def register(self):
self.username = self.userNameEdit.text()
self.password = self.passwordEdit.text()
self.passwordConfirm = self.passwordConfirmEdit.text()
self.email = self.emailEdit.text()
if self.username != "" and self.password != "" and self.passwordConfirm != "":
if self.password == self.passwordConfirm:
self.user = firebaseAuth.register(self.email, self.password, self.username)
if self.user:
global userID
userID = self.user["localId"]
self.accept()
self.passwordEdit.setText("")
self.passwordConfirmEdit.setText("")
self.userNameEdit.setFocus()
#self.errorLabel.setText("Invalid username or password")
def enableRegisterButton(self):
if self.termsAndConditionsRadioButton.isChecked():
self.registerButton.setEnabled(True)
else:
self.registerButton.setEnabled(False)
def accept(self):
kScreen = customerORRunner()
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Customer or Runner Window---------------
class customerORRunner(QDialog):
    """Lets the signed-in user choose the customer or the runner role."""

    def __init__(self):
        super(customerORRunner, self).__init__()
        loadUi("customerORrunner.ui", self)
        self.customerBTN.clicked.connect(self.customer)
        self.runnerBTN.clicked.connect(self.runner)

    def _show(self, screen):
        # Push the next screen onto the stacked widget and display it.
        widget.addWidget(screen)
        widget.setCurrentIndex(widget.currentIndex() + 1)

    def customer(self):
        """Open the customer ordering flow."""
        self._show(orderWindow())

    def runner(self):
        """Open the runner's order-pickup flow."""
        self._show(RunnerPickOrder())
#--------------------Runner Pick Orders Window--------------------
class RunnerPickOrder(QDialog):
def __init__(self):
super(RunnerPickOrder, self).__init__()
loadUi("RunnerPickOrder.ui", self)
self.loadOrders()
self.returnBTN.clicked.connect(self.goBack)
self.orderList.itemDoubleClicked.connect(self.orderDetails)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadOrders(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders/Status/Ready"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
# Clear orderList
self.orderList.clear()
# iterate over the data and append the id of the orders to a list
for i in range(len(data_json)):
self.orderList.addItem(data_json[i]['order_id'])
def orderDetails(self):
# Switch to the order details window
kScreen = RunnerOrderDetails(orderID=self.orderList.currentItem().text())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Runner Order Details Window--------------------
class RunnerOrderDetails(QDialog):
    """Detail view of one order for a runner.

    Shows every field of the order and drives the delivery state machine:
    Ready -> On the way -> Delivered via the single status button.
    """

    def __init__(self, orderID):
        super(RunnerOrderDetails, self).__init__()
        loadUi("RunnerOrderDetails.ui", self)
        self.returnBTN.clicked.connect(self.goBack)
        self.setCustomer(orderID)
        self.setOrder(orderID)
        self.setOrderItems(orderID)
        self.setDeliveryLocation(orderID)
        self.setOrderStatus(orderID)
        self.setOrderTotal(orderID)
        self.setOrderInstructions(orderID)
        self.statusButton.clicked.connect(self.changeStatusToEnRoute)

    def _fetch(self, orderID, field):
        # Helper: GET one field of the order from the backend and decode it.
        url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/" + field
        response = urlopen(url)
        return json.loads(response.read())

    def goBack(self):
        """Return to the previous screen."""
        widget.setCurrentIndex(widget.currentIndex() - 1)
        widget.removeWidget(self)

    def setCustomer(self, orderID):
        # Show which customer placed the order.
        self.customerIDLabel.setText(self._fetch(orderID, "UserID"))

    def setOrder(self, orderID):
        # The label doubles as storage for the order id used by the
        # status-change handlers below.
        self.orderIDLabel.setText(orderID)

    def setOrderItems(self, orderID):
        # Populate the items list with the items in the order.
        self.itemsList.addItems(self._fetch(orderID, "Items"))

    def setDeliveryLocation(self, orderID):
        self.deliveryLocationLabel.setText(self._fetch(orderID, "DeliveryLocation"))

    def setOrderStatus(self, orderID):
        self.orderStatusLabel.setText(self._fetch(orderID, "Status"))

    def setOrderTotal(self, orderID):
        self.orderTotalLabel.setText("$" + str(self._fetch(orderID, "Total")))

    def setOrderInstructions(self, orderID):
        self.orderInstructionsLabel.setText(self._fetch(orderID, "Instructions"))

    def changeStatusToEnRoute(self):
        """Mark the order as on the way and claim it for this runner."""
        orderID = self.orderIDLabel.text()
        # Update the order status to en route.
        requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.ON_THE_WAY.value)
        # Update the order RunnerID to the current runner.
        requests.put("http://localhost:8000/Orders/" + orderID + "/RunnerID" + "?runnerId=" + userID)
        self.setOrderStatus(orderID)
        self.statusButton.setText("Confirm Delivery")
        # Bug fix: disconnect the old slot before wiring the new one.
        # Qt accumulates connections, so without this a later click would
        # fire BOTH handlers and re-claim an already delivered order.
        self.statusButton.clicked.disconnect(self.changeStatusToEnRoute)
        self.statusButton.clicked.connect(self.changeStatusToDelivered)

    def changeStatusToDelivered(self):
        """Mark the order as delivered and return to the order list."""
        orderID = self.orderIDLabel.text()
        # Update the order status to delivered.
        requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.DELIVERED.value)
        self.setOrderStatus(orderID)
        # Switch back to the RunnerPickOrder window and refresh it.
        widget.setCurrentIndex(widget.currentIndex() - 1)
        widget.currentWidget().loadOrders()
        widget.removeWidget(self)
#--------------------Order Window----------------------------
class orderWindow(QDialog):
def __init__(self):
super(orderWindow, self).__init__()
loadUi("Order.ui", self)
self.subtotalText.setText("0")
self.loadKitchens()
self.kitchensList.itemDoubleClicked.connect(self.loadMenu)
self.returnBTN.clicked.connect(self.goBack)
self.menuList.itemDoubleClicked.connect(self.addToOrder)
self.finishBTN.clicked.connect(self.finish)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadKitchens(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.kitchensList.addItems(data_json)
def loadMenu(self):
nameOfKitchen = self.kitchensList.currentItem().text()
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.menuList.clear()
myArray = data_json.keys()
for val in myArray:
if (data_json[val]["Available"]):
self.menuList.addItem(str(val) + ": $" + "%0.2f" % float(data_json[val]["Price"]))
def addToOrder(self):
itemToAdd = self.menuList.currentItem().text()
temp = itemToAdd.split(':')
itemToAdd2 = temp[0]
self.orderList.addItem(itemToAdd2)
subtotal = float(self.subtotalText.toPlainText())
temp2 = itemToAdd.split('$')
subtotal2 = float(temp2[1])
subtotal = round(subtotal + subtotal2, 2)
self.subtotalText.setText( "%0.2f" % subtotal )
tax = round(subtotal * .08, 2)
self.taxText.setText( "%0.2f" % tax)
subtotal = float(self.subtotalText.toPlainText())
self.totalText.setText( "%0.2f" % round(tax + subtotal, 2) )
def finish(self):
kScreen = OrderConfirmaiton(self.orderList, self.totalText.toPlainText())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Order Confirmation Window----------------------------
class OrderConfirmaiton(QDialog):
def __init__(self, orderList, total):
super(OrderConfirmaiton, self).__init__()
loadUi("OrderConfirmation.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.ConfirmBTN.clicked.connect(self.finish)
for i in range(orderList.count()):
self.orderItemList.addItem(orderList.item(i).text())
self.TotalField.setText(total)
self.DeliveryLocation.returnPressed.connect(self.enableConfirmButton)
# The Button should not be enabled until the user has entered their location
self.ConfirmBTN.setEnabled(False)
#Method to enable the confirm button
def enableConfirmButton(self):
# Check if the location is empty
if self.DeliveryLocation.text() != "":
self.ConfirmBTN.setEnabled(True)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
orderItems = []
for i in range(self.orderItemList.count()):
orderItems.append(self.orderItemList.item(i).text())
order = Order(
user_id = userID,
delivery_location = self.DeliveryLocation.text(),
items = orderItems,
total = self.TotalField.text(),
instructions = self.Instructions.text()
)
kScreen = paymentWindow(order)
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Payment Window--------------------
class paymentWindow(QDialog):
def __init__(self, order):
super(paymentWindow, self).__init__()
loadUi("Payment.ui", self)
self.setWindowTitle("Payment")
self.studentID.setHidden(True)
self.returnBTN.clicked.connect(self.goBack)
self.studentIDCheck.clicked.connect(self.clickSID)
self.debitcreditCheck.clicked.connect(self.clickDCC)
# Don't know why this works but stack overflow says it does
self.finishBTN.clicked.connect(lambda: self.finish(order))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def clickSID(self):
self.studentIDCheck.setChecked(1)
self.debitcreditCheck.setChecked(0)
self.fullName.setHidden(True)
self.ccNumber.setHidden(True)
self.expDate.setHidden(True)
self.CVV.setHidden(True)
self.nameInput.setHidden(True)
self.dccInput.setHidden(True)
self.expInput.setHidden(True)
self.cvvInput.setHidden(True)
self.studentID.setHidden(False)
self.idInput.setHidden(False)
def clickDCC(self):
self.studentIDCheck.setChecked(0)
self.debitcreditCheck.setChecked(1)
self.studentID.setHidden(True)
self.idInput.setHidden(True)
self.fullName.setHidden(False)
self.ccNumber.setHidden(False)
self.expDate.setHidden(False)
self.CVV.setHidden(False)
self.nameInput.setHidden(False)
self.dccInput.setHidden(False)
self.expInput.setHidden(False)
self.cvvInput.setHidden(False)
def finish(self, order):
#Stores the orderID that's created in the database
r = requests.post("http://127.0.0.1:8000/CreateNewOrder", order.json())
print(r.text)
kScreen = statusWindow()
widget.addWidget(kScreen)
kScreen.textOrderID.setText(r.text)
kScreen.textOrderID.hide()
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Order Status Window--------------------
class statusWindow(QDialog):
    """Shows the live status of the customer's order by polling the API."""

    def __init__(self):
        # Polling flag: set to 0 to stop the background loop.
        self.x = 1
        super(statusWindow, self).__init__()
        loadUi("OrderStatus.ui", self)
        self.setWindowTitle("Order Status")
        # Poll in a daemon thread so closing the app does not hang on it.
        # NOTE(review): updating Qt widgets from a worker thread is unsafe;
        # a QTimer on the GUI thread would be the proper mechanism - confirm.
        threading.Thread(target=self.update, daemon=True).start()
        # self.homeBTN.clicked.connect(self.home)

    def update(self):
        """Poll the order status once a second while self.x is truthy.

        Bug fix: the previous implementation recursed into itself every
        second, which exhausts Python's recursion limit after roughly a
        thousand polls; a plain loop runs indefinitely.
        """
        while self.x == 1:
            print("Updating Order Status")
            self.orderStatus()
            time.sleep(1)

    def orderStatus(self):
        """Fetch the current status from the backend and update the label."""
        # NOTE: the hidden orderID text box keeps the surrounding quotes
        # from the JSON response, so strip them before building the URL.
        url = "http://127.0.0.1:8000/Orders/" + self.textOrderID.toPlainText().replace('"', "") + "/Status"
        response = urlopen(url)
        data_json = json.loads(response.read())
        if (data_json == OrderStatus.PENDING.value):
            self.statusLBL.setText("Order is pending!")
        elif (data_json == OrderStatus.PREPARING.value):
            self.statusLBL.setText("Preparing the order!")
        elif (data_json == OrderStatus.COOKING.value):
            self.statusLBL.setText("Cooking Order!")
        elif (data_json == OrderStatus.READY.value):
            self.statusLBL.setText("Order is ready!")
        elif (data_json == OrderStatus.ON_THE_WAY.value):
            self.statusLBL.setText("Order is on the way!")
        elif (data_json == OrderStatus.DELIVERED.value):
            self.statusLBL.setText("Order is delivered!")
        else:
            self.statusLBL.setText("Something went wrong!")
#--------------------Select Option Window--------------------
class kitchenMenu(QDialog):
    """Admin menu: entry point to every kitchen-management screen."""

    def __init__(self):
        super(kitchenMenu, self).__init__()
        loadUi("KitchenMenu.ui", self)
        self.AddItemBTN.clicked.connect(self.addItem)
        self.updatePriceBTN.clicked.connect(self.updatePrice)
        self.updateAvailabilityBTN.clicked.connect(self.updateAvailability)
        self.viewKitchensBTN.clicked.connect(self.viewKitchens)
        self.RemoveItemBTN.clicked.connect(self.removeItem)
        self.orderDetailsBTN.clicked.connect(self.viewOrders)

    def _open(self, screen):
        # Push the requested screen onto the stacked widget and show it.
        widget.addWidget(screen)
        widget.setCurrentIndex(widget.currentIndex() + 1)

    def addItem(self):
        """Open the add-item screen."""
        self._open(kitchenAddItem())

    def updatePrice(self):
        """Open the price-update screen."""
        self._open(kitchenUpdatePrice())

    def updateAvailability(self):
        """Open the availability-update screen."""
        self._open(kitchenUpdateAvailability())

    def viewKitchens(self):
        """Open the kitchens/menu browser."""
        self._open(KitchensScreen())

    def removeItem(self):
        """Open the remove-item screen."""
        self._open(KitchenRemoveItem())

    def viewOrders(self):
        """Open the pending-orders list."""
        self._open(KitchenSeeOrders())
#--------------------Kitchens See orders window--------------------
class KitchenSeeOrders(QDialog):
def __init__(self):
super(KitchenSeeOrders, self).__init__()
loadUi("KitchenOrderDetails.ui", self)
self.loadOrders()
self.returnBTN.clicked.connect(self.goBack)
self.orderList.itemDoubleClicked.connect(self.orderDetails)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadOrders(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders/Status/Pending"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
# Clear orderList
self.orderList.clear()
# iterate over the data and append the id of the orders to a list
for i in range(len(data_json)):
self.orderList.addItem(data_json[i]['order_id'])
def orderDetails(self):
# Switch to the order details window
kScreen = KitchenSeeOrdersDetails(orderID=self.orderList.currentItem().text())
widget.addWidget(kScreen)
widget.setCurrentIndex(widget.currentIndex() + 1)
#--------------------Expanded Kitchens order details window--------------------
class KitchenSeeOrdersDetails(QDialog):
def __init__(self, orderID):
super(KitchenSeeOrdersDetails, self).__init__()
loadUi("OrderDetail.ui", self)
self.setCustomer(orderID)
self.setOrder(orderID)
self.setOrderItems(orderID)
self.setDeliveryLocation(orderID)
self.setOrderStatus(orderID)
self.setOrderTotal(orderID)
self.setOrderInstructions(orderID)
self.statusButton.clicked.connect(self.changeStatusToCooking)
# Set the customer label to the userID of the order
def setCustomer(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/UserID"
response = urlopen(url)
userID = json.loads(response.read())
self.customerIDLabel.setText(userID)
# Set the order label to the orderID of the order
def setOrder(self, orderID):
self.orderIDLabel.setText(orderID)
# Populate the items list with the items in the order
def setOrderItems(self, orderID):
# parameter for urlopen
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Items"
response = urlopen(url)
data_json = json.loads(response.read())
self.itemsList.addItems(data_json)
# Set the delivery location label to the delivery location of the order
def setDeliveryLocation(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/DeliveryLocation"
response = urlopen(url)
data_json = json.loads(response.read())
self.deliveryLocationLabel.setText(data_json)
# Set the order status label to the order status of the order
def setOrderStatus(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Status"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderStatusLabel.setText(data_json)
# Set the order total label to the order total of the order
def setOrderTotal(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Total"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderTotalLabel.setText(str(data_json))
# Set the order instructions label to the order instructions of the order
def setOrderInstructions(self, orderID):
url = "http://127.0.0.1:8000/Orders" + "/" + orderID + "/Instructions"
response = urlopen(url)
data_json = json.loads(response.read())
self.orderInstructionsLabel.setText(data_json)
def changeStatusToCooking(self):
orderID = self.orderIDLabel.text()
#Update the order status to cooking
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.COOKING.value)
self.setOrderStatus(orderID)
self.statusButton.setText("Complete Order")
self.statusButton.clicked.connect(self.completeOrder)
def completeOrder(self):
orderID = self.orderIDLabel.text()
#Update the order status to complete
r = requests.put("http://localhost:8000/Orders/" + orderID + "/Status" + "?status=" + OrderStatus.READY.value)
self.setOrderStatus(orderID)
#Switch back to the kitchenorders window
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.currentWidget().loadOrders()
widget.removeWidget(self)
#--------------------Kitchens Add Item Window--------------------
class kitchenAddItem(QDialog):
def __init__(self):
super(kitchenAddItem, self).__init__()
loadUi("KitchenAddItem.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.freshensCheck.clicked.connect(self.unclickF)
self.deliCheck.clicked.connect(self.unclickD)
self.pizzaCheck.clicked.connect(self.unclickP)
self.burgerCheck.clicked.connect(self.unclickB)
self.marketCheck.clicked.connect(self.unclickM)
self.finishBTN.clicked.connect(self.finish)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def unclickF(self):
self.freshensCheck.setChecked(1)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickD(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(1)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickP(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(1)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
def unclickB(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(1)
self.marketCheck.setChecked(0)
def unclickM(self):
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(1)
def finish(self):
if(len(self.textName.toPlainText()) > 0):
if(len(self.textCost.toPlainText()) > 0):
if((self.freshensCheck.checkState()) or (self.deliCheck.checkState()) or (self.pizzaCheck.checkState()) or (self.burgerCheck.checkState()) or self.marketCheck.checkState()):
available = False
if(self.checkBox.checkState()): available = True
if(self.freshensCheck.checkState()): mykitchen = "Freshens"
if(self.deliCheck.checkState()): mykitchen = "Deli"
if(self.pizzaCheck.checkState()): mykitchen = "Pizza"
if(self.burgerCheck.checkState()): mykitchen = "Burgers"
if(self.marketCheck.checkState()): mykitchen = "Market"
item = Item(name = self.textName.toPlainText(), price = self.textCost.toPlainText(), available = available)
r = requests.put("http://localhost:8000/AddToMenu/" + mykitchen, item.json())
self.textName.setText("")
self.textCost.setText("")
self.textDecription.setText("")
self.checkBox.setChecked(0)
self.freshensCheck.setChecked(0)
self.deliCheck.setChecked(0)
self.pizzaCheck.setChecked(0)
self.burgerCheck.setChecked(0)
self.marketCheck.setChecked(0)
#--------------------Kitchens Remove Item Window--------------------
class KitchenRemoveItem(QDialog):
def __init__(self):
super(KitchenRemoveItem, self).__init__()
loadUi("KitchenRemoveItem.ui", self)
self.ReturnButton.clicked.connect(self.goBack)
self.ConfirmButton.clicked.connect(self.RemoveItem)
self.fillBTN.clicked.connect(self.fillItems)
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def RemoveItem(self):
if(len(self.itemBox.currentText()) > 0):
r = requests.delete("http://localhost:8000/RemoveItemFromMenu/" + self.kitchenBox.currentText() + "/" +self.itemBox.currentText())
#--------------------Kitchens Update Price Window--------------------
class kitchenUpdatePrice(QDialog):
def __init__(self):
super(kitchenUpdatePrice, self).__init__()
loadUi("KitchenUpdatePrice.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.finishBTN.clicked.connect(self.finish)
self.fillBTN.clicked.connect(self.fillItems)
self.fillPriceBTN.clicked.connect(self.fillPrice)
#fill kitchen combo box
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def fillPrice(self):
nameOfKitchen = self.kitchenBox.currentText()
nameOfItem = self.itemBox.currentText()
#NOTE: this is a bit of a hack, but it works. Essentially the URL does not like spaces in the item name, so I had to replace them with '%20'.
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen + "/" + nameOfItem.replace(' ', '%20') + "/Price"
response = urlopen(url)
data_json = json.loads(response.read())
self.textCost.setText(str(data_json))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
if(len(self.itemBox.currentText()) > 0):
r = requests.put("http://localhost:8000/UpdateItemPrice/" + self.kitchenBox.currentText() + "/" + self.itemBox.currentText() + "?price=" + self.textCost.toPlainText())
self.textCost.setText("")
#--------------------Kitchens Update Availability Window--------------------
class kitchenUpdateAvailability(QDialog):
def __init__(self):
super(kitchenUpdateAvailability, self).__init__()
loadUi("KitchenUpdateAvailability.ui", self)
self.returnBTN.clicked.connect(self.goBack)
self.finishBTN.clicked.connect(self.finish)
self.fillBTN.clicked.connect(self.fillItems)
url = "http://127.0.0.1:8000/Kitchens"
response = urlopen(url)
data_json = json.loads(response.read())
self.kitchenBox.addItems(data_json)
def fillItems(self):
nameOfKitchen = self.kitchenBox.currentText()
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
response = urlopen(url)
data_json = json.loads(response.read())
self.itemBox.clear()
self.itemBox.addItems(list(data_json.keys()))
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def finish(self):
if(len(self.itemBox.currentText()) > 0):
if(self.checkBox.checkState()):
available = True
else: available = False
r = requests.put("http://localhost:8000/UpdateItemAvailability/" + self.kitchenBox.currentText() + "/" + self.itemBox.currentText() + "?availability=" + str(available))
#--------------------Kitchens and Menu Window--------------------
class KitchensScreen(QDialog):
def __init__(self):
super(KitchensScreen, self).__init__()
loadUi("ListOfKitchens.ui", self)
self.loadKitchens()
self.kitchensList.itemDoubleClicked.connect(self.loadMenu)
self.locationLabel.setText("Campus Center Market")
self.returnBTN.clicked.connect(self.goBack)
def goBack(self):
widget.setCurrentIndex(widget.currentIndex() - 1)
widget.removeWidget(self)
def loadKitchens(self):
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens"
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
self.kitchensList.addItems(data_json)
def loadMenu(self):
nameOfKitchen = self.kitchensList.currentItem().text()
print(nameOfKitchen)
# parameter for urlopen
url = "http://127.0.0.1:8000/Kitchens/" + nameOfKitchen
# store the response of URL
response = urlopen(url)
# storing the JSON response
# # from url in data
data_json = json.loads(response.read())
print(data_json)
self.menuList.clear()
self.menuList.addItems(list(data_json.keys()))
#--------------------MAIN--------------------
#Setting up App
app = QApplication(sys.argv)
loginScreen = loginScreen()
widget = QStackedWidget()
widget.addWidget(loginScreen)
widget.setFixedHeight(800)
widget.setFixedWidth(1200)
widget.show()
try:
sys.exit(app.exec_())
except:
print("Exiting") | YY0NII/DeskFood | Frontend/Main.py | Main.py | py | 33,421 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi"... |
37370950188 | import argparse
import openai
import json
import time
from tqdm.auto import tqdm
from settings import settings
from textwrap import dedent
def evaluate(dataset: str, gold_path, log_file: str):
    """
    Returns the average score for the dataset.

    Args:
        dataset: Path to the json dataset with the generated surveys
        gold_path: Path to the json dataset with the ground-truth surveys
        log_file: Path to save the per-section evaluation results
            (one JSON object per line)

    Returns:
        Average score for the dataset
    """
    def _get_score(generated_content, ground_truth):
        # Build the grading prompt and ask the LLM judge for a score.
        # Note: reads survey_name / survey_section from the enclosing
        # loops below (closure over the loop variables).
        if ground_truth == "":
            raise ValueError("Ground truth is empty")
        prompt = (
            f"{base_prompt}\nSurvey Name: {survey_name.strip()}\nSurvey Section: {survey_section.strip()}\nContent: {generated_content.strip()}\nGround Truth Text: {ground_truth}\nEvaluation Form (scores ONLY)\nScore:"
        )
        score = get_llm_score(prompt)
        return score

    with open(dataset, "r") as f:
        data = json.load(f)
    with open(gold_path, "r", encoding="utf8") as f:
        gold_data = json.load(f)

    with open(log_file, "w") as f:
        all_scores = []
        for survey_name in data:
            for survey_section, content in tqdm(data[survey_name].items(), desc=f"Evaluating {survey_name}"):
                # Sections with subsections are scored per subsection and
                # the section score is their average.
                if content.get("subsections"):
                    all_sub_scores = []
                    for sub_name, sub_content in tqdm(content.get("subsections").items(), desc=f"Subsections"):
                        generated_content = sub_content["content"]
                        # Assumes the gold file mirrors the generated
                        # survey/section/subsection structure -- TODO confirm.
                        ground_truth = gold_data[survey_name][survey_section]["subsections"][sub_name]["content"]
                        sub_score = _get_score(generated_content, ground_truth)
                        all_sub_scores.append(sub_score)
                        # Log each subsection result as one JSON line.
                        json.dump({"survey_name": survey_name, "survey_section": survey_section, "subsection": sub_name, "score": sub_score}, f)
                        f.write("\n")
                    score = sum(all_sub_scores)/len(all_sub_scores)
                else:
                    generated_content = content["content"]
                    ground_truth = gold_data[survey_name][survey_section]["content"]
                    score = _get_score(generated_content, ground_truth)
                all_scores.append(score)
                json.dump({"survey_name": survey_name, "survey_section": survey_section, "content": generated_content, "score": score}, f)
                f.write("\n")
    return sum(all_scores)/len(all_scores)
def get_llm_score(prompt, tries=0):
    """Ask the LLM judge to score one section; returns a float in [1, 5].

    Samples ``settings.n`` completions and returns the expectation of the
    predicted class (each class value weighted by its empirical
    probability among the samples).  Retries up to 6 times with an
    increasing back-off when the OpenAI call fails.
    """
    system_prompt = dedent("""
        You will be given a text written for a survey section and a ground truth section.
        Your task is to rate the content of the survey section on one metric comparing this text with the ground truth which has the maximum score.
        Please make sure you read and understand the instructions carefully.
        Please keep the document open while reviewing, and refer to it as needed.""")
    try:
        response = openai.ChatCompletion.create(
            model=settings.model,
            messages=[
                {"role": "system", "content": system_prompt.strip()},
                {"role": "user", "content": prompt},
            ],
            max_tokens=settings.max_tokens,
            temperature=settings.temperature,
            top_p=settings.top_p,
            n=settings.n,
        )
    except Exception as e:
        # Back off harder on every retry (60s, 70s, ...) to ride out
        # rate limits before giving up after 6 attempts.
        time.sleep(60 + 10*tries)
        print(f"Retrying {tries+1} time")
        if tries < 6:
            return get_llm_score(prompt, tries+1)
        else:
            raise e
    # NOTE(review): int() raises if the model replies with anything but a
    # bare digit -- presumably the prompt constrains the output; confirm.
    all_predictions = [int(item["message"]["content"]) for item in response.choices]
    # Scores are the sum of probabilities for each class multiplied by the class value
    scores = sum(all_predictions.count(i)/len(all_predictions) * i for i in range(1, 6))
    return scores
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc1/proc1.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc1_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc2/proc2.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc2_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc3/proc3.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc3_eval.json
# python evaluate.py --dataset /home/thales/Documents/AutoSurvey/test/proc4/proc4.json --gold /home/thales/Documents/AutoSurvey/data/dataset/survey_3.json --logs proc4_eval.json
if __name__ == "__main__":
    # CLI: paths to the generated surveys, the gold surveys, and the log file.
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--dataset", type=str, required=True, help="Path to the json dataset")
    argparser.add_argument("--gold", type=str, required=True, help="Path to the json dataset")
    argparser.add_argument("--logs", type=str, default="evaluation_results.json", help="Path to save the evaluation results")
    args = argparser.parse_args()
    openai.api_key = settings.openai_key
    # Shared grading rubric prepended to every evaluation prompt
    # (read as a global by evaluate()'s inner _get_score).
    base_prompt = dedent("""Evaluation Steps:
1 - Carefully read the content to identify the main topic and key points.
2 - Evaluate whether the content adequately addresses the main topic stated in the title and provides a comprehensive technical description of it.
3 - Assign a score to the text on a scale of 1 to 5, where 1 represents the lowest score and 5 represents the highest score, according to the Evaluation Criteria.""")
    # NOTE(review): the average is computed but never printed or saved.
    average_score = evaluate(args.dataset, args.gold, args.logs)
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number"... |
12388726621 | import os, sys
import subprocess
import json
import uproot
import awkward as ak
from coffea import processor, util, hist
from coffea.nanoevents import NanoEventsFactory, NanoAODSchema
from boostedhiggs import HbbPlotProcessor
from distributed import Client
from lpcjobqueue import LPCCondorCluster
from dask.distributed import performance_report
from dask_jobqueue import HTCondorCluster, SLURMCluster
env_extra = [
f"export PYTHONPATH=$PYTHONPATH:{os.getcwd()}",
]
cluster = LPCCondorCluster(
transfer_input_files=["boostedhiggs"],
ship_env=True,
memory="8GB",
image="coffeateam/coffea-dask:0.7.11-fastjet-3.3.4.0rc9-ga05a1f8"
)
cluster.adapt(minimum=1, maximum=50)
client = Client(cluster)
print("Waiting for at least one worker...") # noqa
client.wait_for_workers(1)
year = sys.argv[1]
with performance_report(filename="dask-report.html"):
# get list of input files
infiles = subprocess.getoutput("ls infiles/"+year+"*.json").split()
for this_file in infiles:
if "bsm" in this_file:
continue
index = this_file.split("_")[1].split(".json")[0]
print(this_file, index)
if "qcd" in index or "higgs" in index or "data" in index or 'top' in index:
continue
uproot.open.defaults["xrootd_handler"] = uproot.source.xrootd.MultithreadedXRootDSource
p = HbbPlotProcessor(year=year,jet_arbitration='ddb')
args = {'savemetrics':True, 'schema':NanoAODSchema}
output = processor.run_uproot_job(
this_file,
treename="Events",
processor_instance=p,
executor=processor.dask_executor,
executor_args={
"client": client,
# "skipbadfiles": args.skipbadfiles,
"schema": processor.NanoAODSchema,
"retries": 50,
},
chunksize=100000,
# maxchunks=args.max,
)
outfile = 'outfiles-plots/'+str(year)+'_dask_'+index+'.coffea'
util.save(output, outfile)
| jennetd/hbb-coffea | vbf-scripts/submit-plots-dask.py | submit-plots-dask.py | py | 2,206 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "lpcjobqueue.LPCCondorCluster",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "distributed.Client",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.argv",
... |
2705587377 | from __future__ import annotations
import abc
from collections import ChainMap
from typing import Any, ClassVar, Optional, Type, TypeVar
import attr
from basic_notion import exc
from basic_notion.utils import set_to_dict, del_from_dict
def _get_attr_keys_for_cls(
members: dict[str, Any],
only_editable: bool = False,
only_derived: bool = False,
) -> dict[str, tuple[str, ...]]:
from basic_notion.attr import ItemAttrDescriptor
result: dict[str, tuple[str, ...]] = dict()
for name, prop in members.items():
if not isinstance(prop, ItemAttrDescriptor):
continue
if only_editable and not prop.editable:
continue
if only_derived and not prop.derived:
continue
attr_key: tuple[str, ...]
try:
attr_key = prop.key
except AttributeError:
attr_key = (name,)
result[name] = attr_key
return result
class NotionItemBaseMetaclass(abc.ABCMeta):
# abc.ABCMeta is needed here for the abc.ABC functionality
"""Metaclass that adds ``__notion_attr_keys__`` to all ``NotionItemBase`` subclasses"""
def __new__(cls, name: str, bases: tuple[type, ...], dct: dict):
attr_keys_name = '__notion_attr_keys__'
editable_keys_name = '__notion_editable_keys__'
derived_keys_name = '__notion_derived_keys__'
base_attr_key_maps = tuple(
getattr(base, attr_keys_name) # type: ignore
for base in bases if type(base) is cls
)
base_editable_key_maps = tuple(
getattr(base, editable_keys_name) # type: ignore
for base in bases if type(base) is cls
)
base_derived_key_maps = tuple(
getattr(base, derived_keys_name) # type: ignore
for base in bases if type(base) is cls
)
attr_keys = dict(ChainMap(_get_attr_keys_for_cls(dct), *base_attr_key_maps))
editable_keys = dict(ChainMap(_get_attr_keys_for_cls(dct, only_editable=True), *base_editable_key_maps))
derived_keys = dict(ChainMap(_get_attr_keys_for_cls(dct, only_derived=True), *base_derived_key_maps))
# Added to __dict__
dct[attr_keys_name] = attr_keys
dct[editable_keys_name] = editable_keys
dct[derived_keys_name] = derived_keys
new_cls = super().__new__(cls, name, bases, dct)
return new_cls
_ITEM_TV = TypeVar('_ITEM_TV', bound='NotionItemBase')
@attr.s(slots=True)
class NotionItemBase(metaclass=NotionItemBaseMetaclass):
__notion_attr_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
__notion_editable_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
__notion_derived_keys__: dict[str, tuple[str, ...]] = None # type: ignore # defined in metaclass
OBJECT_TYPE_KEY_STR: ClassVar[str] = ''
OBJECT_TYPE_STR: ClassVar[str] = ''
_data: Optional[dict[str, Any]] = attr.ib(kw_only=True, default=None)
@classmethod
@property
def attr_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_attr_keys__
@classmethod
@property
def editable_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_editable_keys__
@classmethod
@property
def derived_keys(cls) -> dict[str, tuple[str, ...]]:
return cls.__notion_derived_keys__
@property
def data(self) -> dict:
if self._data is None:
raise exc.ItemHasNoData(f'Object {type(self).__name__} has no data')
return self._data
@classmethod
def _make_inst_attr_dict(cls, kwargs: dict[str, Any]) -> dict:
data: dict[str, Any] = {}
for name, key in cls.editable_keys.items(): # type: ignore
if name not in kwargs:
continue
value = kwargs[name]
# Get attr descriptor and its `set_converter` callable
# (if it exists) to convert the value into its serializable form
prop = getattr(cls, name)
set_converter = prop.set_converter
if set_converter is not None:
value = set_converter(value)
set_to_dict(data, key, value)
return data
@classmethod
def _make_inst_dict(cls, kwargs: dict[str, Any]) -> dict:
data = {}
if cls.OBJECT_TYPE_KEY_STR and cls.OBJECT_TYPE_STR:
data[cls.OBJECT_TYPE_KEY_STR] = cls.OBJECT_TYPE_STR
data.update(cls._make_inst_attr_dict(kwargs))
return data
@classmethod
def make(cls: Type[_ITEM_TV], **kwargs: Any) -> _ITEM_TV:
"""Generate instance from attributes"""
data = cls._make_inst_dict(kwargs)
return cls(data=data)
def clear_derived_attrs(self) -> None:
for name, other_key in self.attr_keys.items():
if name in self.derived_keys:
# Is not editable
del_from_dict(self.data, other_key)
| altvod/basic-notion | src/basic_notion/base.py | base.py | py | 4,973 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "basic_notion.attr.ItemAttrDescriptor",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "abc.ABCMeta",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "... |
15419264437 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 9 14:52:17 2022
@author: elie
"""
#################### SCALING ####################
import os
os.chdir('/home/elie/Documents/Tecnico/2ND_PERIOD/DS/PROJECT/CODE/')
from pandas import read_csv, DataFrame, concat, unique
from pandas.plotting import register_matplotlib_converters
from matplotlib.pyplot import subplots, show, figure, savefig
from ds_charts import get_variable_types, multiple_line_chart, plot_evaluation_results
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from numpy import ndarray
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
register_matplotlib_converters()
############################# GET DATA ###############################
path = "Data/"
file = path+"air_quality_tabular_without_na"
#file = path+"nyc_car_crash_without_na"
filename = file+".csv"
data = read_csv(filename, na_values='', parse_dates=True, infer_datetime_format=True)
# SPLIT DATA BASED ON TYPE OF VARIABLE
variable_types = get_variable_types(data)
numeric_vars = variable_types['Numeric']
symbolic_vars = variable_types['Symbolic']
boolean_vars = variable_types['Binary']
df_nr = data[numeric_vars]
df_sb = data[symbolic_vars]
df_bool = data[boolean_vars]
# remove symbolic values before computation : date, time, id
data = data.drop(symbolic_vars, axis=1)
############################# NORMALIZATION ###############################
# Z SCORE
transf = StandardScaler(with_mean=True, with_std=True, copy=True).fit(df_nr)
tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)
norm_data_zscore = concat([tmp, df_sb, df_bool], axis=1)
#norm_data_zscore.to_csv(f'{file}_scaled_zscore.csv', index=False)
print(norm_data_zscore.describe())
norm_data_zscore = norm_data_zscore.drop(symbolic_vars, axis=1)
# MIN MAX SCALER
transf = MinMaxScaler(feature_range=(0, 1), copy=True).fit(df_nr)
tmp = DataFrame(transf.transform(df_nr), index=data.index, columns= numeric_vars)
norm_data_minmax = concat([tmp, df_sb, df_bool], axis=1)
#norm_data_minmax.to_csv(f'{file}_scaled_minmax.csv', index=False)
print(norm_data_minmax.describe())
norm_data_minmax = norm_data_minmax.drop(symbolic_vars, axis=1)
# fig, axs = subplots(1, 3, figsize=(20,10),squeeze=False)
# axs[0, 0].set_title('Original data')
# data.boxplot(ax=axs[0, 0])
# axs[0, 1].set_title('Z-score normalization')
# norm_data_zscore.boxplot(ax=axs[0, 1])
# axs[0, 2].set_title('MinMax normalization')
# norm_data_minmax.boxplot(ax=axs[0, 2])
# show()
################################## KNN ##################################
nb_rows = norm_data_zscore.shape[0]
sample_pct = 0.33
norm_data_zscore = norm_data_zscore.sample(n=round(nb_rows*sample_pct), random_state=1)
norm_data_minmax = norm_data_minmax.sample(n=round(nb_rows*sample_pct), random_state=1)
potential_cols = ["ALARM"]
nvalues = [10, 15, 20, 25, 30, 35, 40, 45]
dist = ['manhattan', 'euclidean', 'chebyshev']
values = {}
best = (0, '')
last_best = 0
for c in potential_cols:
target = c
y = norm_data_zscore.pop(target).values
X_train, X_test, y_train, y_test = train_test_split(norm_data_zscore, y, test_size=0.33, random_state=42)
labels = unique(y_train)
labels.sort()
for d in dist:
yvalues = []
for n in nvalues:
knn = KNeighborsClassifier(n_neighbors=n, metric=d)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
current_accuracy = accuracy_score(y_test, y_pred)
yvalues.append(current_accuracy)
print("For column : "+str(c)+" Accuracy with n = "+str(n)+ " and distance : "+str(d)+" => "+str(current_accuracy))
# if yvalues[-1] > last_best:
# best = (n, d)
# last_best = yvalues[-1]
values[d] = yvalues
figure()
multiple_line_chart(nvalues, values, title='KNN variants', xlabel='n', ylabel='accuracy', percentage=True)
#savefig('images/{file_tag}_knn_study.png')
show()
# figure()
# multiple_line_chart(nvalues, values, title='KNN variants', xlabel='n', ylabel='accuracy', percentage=True)
# #savefig('images/{file_tag}_knn_study.png')
# show()
# print('Best results with %d neighbors and %s'%(best[0], best[1]))
# ###### CONFUSION MATRIX #######
clf = knn = KNeighborsClassifier(n_neighbors=6, metric="manhattan")
clf.fit(X_train, y_train)
train_pred = clf.predict(X_train)
test_pred = clf.predict(X_test)
plot_evaluation_results(labels, y_train, train_pred, y_test, test_pred)
#savefig('images/{file_tag}_knn_best.png')
show()
############################### TEST ###################################"
# # GET THE PREPROCESSED DATA WITHOUT NA
# path = "Data/"
# #file = path+"air_quality_tabular_without_na"
# file = path+"NYC_collisions_tabular"
# filename = file+".csv"
# data_bis = read_csv(filename, na_values='', parse_dates=True, infer_datetime_format=True)
# for col in data_bis.columns:
# print("COL : "+str(col))
# print("/////////////////")
# print(data_bis[col].value_counts())
# print("\n\n\n")
| elielevy3/DATA_SCIENCE_TECNICO | lab_3.py | lab_3.py | py | 5,182 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.chdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.plotting.register_matplotlib_converters",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name"... |
41682544130 | """add unique index for modalities
Revision ID: 3cccf6a0af7d
Revises: ba3bae2b5e27
Create Date: 2018-01-05 14:28:03.194013
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cccf6a0af7d'
down_revision = 'ba3bae2b5e27'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_monday_modality_name'), 'modality', ['name'], unique=True, schema='monday')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_monday_modality_name'), table_name='modality', schema='monday')
# ### end Alembic commands ###
| MondayHealth/provider-import | alembic/versions/3cccf6a0af7d_add_unique_index_for_modalities.py | 3cccf6a0af7d_add_unique_index_for_modalities.py | py | 749 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alembic.op.create_index",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_index... |
30386260395 | #!/usr/bin/env python3
import os
import urllib
import requests
import config
def dump_stories():
new_stories = 0
num_stories = 0
r = requests.get(
"https://i.instagram.com/api/v1/feed/reels_tray/",
cookies=config.instagram_cookies, headers=config.instagram_headers).json()
for user in r['tray']:
user_dir = "./stories/{0!s}-{1!s}".format(user['user']['username'], user['id'])
if not os.path.exists(user_dir):
os.makedirs(user_dir)
print("[*] dumping " + user['user']['username'])
user_stories = requests.get(
"https://i.instagram.com/api/v1/feed/user/{0!s}/reel_media/".format(user['id']),
cookies=config.instagram_cookies, headers=config.instagram_headers).json()
for item in user_stories['items']:
num_stories += 1
if 'video_versions' in item:
url = item['video_versions'][0]['url']
else:
url = item['image_versions2']['candidates'][0]['url']
filename = url.split('/')[-1].split('?')[0]
file_path = user_dir + '/' + filename
if not os.path.isfile(file_path):
new_stories += 1
print(" + " + filename)
urllib.request.urlretrieve(url, file_path)
else:
print(" - " + filename)
return len(r['tray']), num_stories, new_stories
def send_notification(message):
requests.post(
"https://api.pushover.net/1/messages.json",
data={"token": config.pushover_app_token,
"user": config.pushover_user_token,
"title": "instadump",
"message": message})
if __name__ == "__main__":
num_users, num_stories, new_stories = dump_stories()
message = "{0!s} stories ({1!s} new)\n{2!s} users".format(num_stories, new_stories, num_users)
if config.pushover_app_token and config.pushover_user_token:
send_notification(message)
else:
print(message)
| bl1nk/instadump | instadump.py | instadump.py | py | 2,019 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.instagram_cookies",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "config.instagram_headers",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name... |
3967140891 | import torch
def batch_horizontal_flip(tensor, device):
"""
:param tensor: N x C x H x W
:return:
"""
inv_idx = torch.arange(tensor.size(3) - 1, -1, -1).long().to(device)
img_flip = tensor.index_select(3, inv_idx)
return img_flip
def euclidean_dist(x: torch.Tensor, y: torch.Tensor):
"""
Args:
x: pytorch Variable, with shape [m, d]
y: pytorch Variable, with shape [n, d]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(beta=1, alpha=-2, mat1=x, mat2=y.t())
dist.clamp_(min=1e-12)
dist.sqrt_() # for numerical stability
return dist
if __name__ == '__main__':
a = torch.tensor([[0., 0.]])
b = torch.tensor([[1., 1.]])
dist = euclidean_dist(a, b)
print(dist)
a = torch.randn(4, 2048, 16, 4)
b = torch.tensor([[1., 1.]])
dist = euclidean_dist(a, b)
print(dist)
| clw5180/reid-baseline | utils/tensor_utils.py | tensor_utils.py | py | 1,068 | python | en | code | null | github-code | 6 | [
{
"api_name": "torch.arange",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.pow",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number"... |
23642650864 | # -*- coding:utf-8 -*-
#@Time : 2020/4/27 16:05
#@Author: Triomphe
#@File : vulscan.py
import importlib
import os
import sys
from PyQt5.QtCore import QObject, pyqtSignal
from vulscan.port_scan import portscan
from modules.mod_get_rootPath import get_root_path
sys.path.append(os.path.abspath(
os.path.dirname(__file__))+'/vuldb')
#根目录
ROOT_PATH =get_root_path()
class Vulscan(QObject):
_signal =pyqtSignal(dict)
_signal_finish=pyqtSignal()
script_plugin_list=[]
open_prot_list=[]
def __init__(self,target_ip):
super(Vulscan, self).__init__()
#文件位置
self.root_path =get_root_path()
self.target_ip =target_ip
self.init()
def init(self):
file_list = os.listdir(self.root_path + '/vulscan/vuldb')
for filename in file_list:
try:
if filename.endswith('.py') and filename.split('.')[1] == 'py':
self.script_plugin_list.append(filename.split('.')[0])
except Exception as e:
print("error : "+str(e))
#给每个插件设置 声明根目录
#开始进行扫描
def start_scan(self):
try:
self.open_prot_list=portscan(self.target_ip)
except Exception as e:
print(e)
self.open_prot_list=['80']
self.poc_check()
#漏洞验证
def poc_check(self):
for plugin in self.script_plugin_list:
res=importlib.import_module(plugin)
setattr(res,"ROOT_PATH",ROOT_PATH)
#先使用默认端口,如果存在就不使用端口扫描的进行检测
result_info=res.check(self.target_ip)
if result_info!=None:
text=res.get_plugin_info()
text['result_info']=result_info
self._signal.emit(text)
else:
#使用masscan 扫描后对所有存活端口进行扫描
for port in self.open_prot_list:
result_info=res.check(self.target_ip,port=port)
if result_info != None and result_info !="":
text=res.get_plugin_info()
text['result_info']=result_info
self._signal.emit(text)
#表示完成了.
self._signal_finish.emit()
| TriompheL/Ratel | vulscan/vulscan.py | vulscan.py | py | 2,337 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
29249896795 | import matplotlib.pyplot as plt
import seaborn as sns
from table import create_table
import pandas as pd
import streamlit as st
import plotly.tools as tls
import plotly.figure_factory as ff
import numpy as np
import plotly.express as px
from download import report_downlaoder
import os
st.image('Somaiya Header.png',width=500)
st.title('Result analysis')
st.subheader('KJ somaiya institute of engineering and IT')
st.sidebar.title('Welcome to the result analyser App')
st.sidebar.markdown('<html><body style="background-color:yellow;"> You can Do <b>Visual</b> or <b>Database</b> analysis as you wish after filling the parameters select as required </body></html>'
,unsafe_allow_html=True)
Analyse_type=st.sidebar.radio('Analyser',('Visual','Database Analyser','Reports'))
filename = st.text_input('Enter a file path:','Sem4.pdf')
Semester=st.text_input("Enter the No of semester",4)
Subject_no =st.text_input("enter the number of subjects",5)
Labs_no =st.text_input("enter the number of Labs",5)
@st.cache(persist=True)
def load_data():
theory_df,pracs_df=create_table(filename,Subject_no,Labs_no)
return theory_df,pracs_df
cleandf=load_data()[0]
pracsdf=load_data()[1]
#pie chart
explode=[0.1,0]
colours=['lightgreen','Red']
fig=px.pie(cleandf,labels=['Pass','Fail'],names='Pass/fail',title='Passing and failing percentage')
if Analyse_type=='Visual':
#Pie chart
st.markdown('<html><h1><body style="background-color:orange;">Pie chart</body></h1></html>',unsafe_allow_html=True)
#fig=cleandf['Pass/fail'].value_counts().plot(kind='pie',labels=['pass','fail'],autopct='%1.1f%%',startangle=140,
# explode=explode,shadow=True,colors=colours,figsize=(5,5))
st.plotly_chart(fig)
#Bar chart
st.markdown('<html><h1><body style="background-color:pink;">Bar charts</body></h1></html>',unsafe_allow_html=True)
plt.style.use('bmh')
colors=['green','slateblue','mediumorchid','gold','darkorange','coral','yellow']
k=1
for i in range(int(Subject_no)):
fig=plt.figure()
#cleandf.iloc[:,k].plot(kind='hist',bins=3,color=colors[k])
sns.distplot(cleandf.iloc[:,k],color=colors[k],norm_hist=True)
plt.xlabel(f'Marks in {cleandf.columns[k]}')
plt.ylabel('No of students')
try:
plotly_fig = tls.mpl_to_plotly(fig)
except:
subject=cleandf.columns[k]
plotly_fig=px.histogram(cleandf,x=subject,histnorm='probability density',opacity=0.8,title=f'Marks in {cleandf.columns[k]}',color_discrete_sequence=['indianred'] )
st.plotly_chart(plotly_fig)
k=k+1
if(k>int(Subject_no)):
break
#Bar chart Pracicals
st.markdown('<html><h1><body style="background-color:cyan;">Bar charts for practicals</body></h1></html>',unsafe_allow_html=True)
plt.style.use('bmh')
colors=['green','slateblue','mediumorchid','gold','darkorange','coral','yellow']
k=1
for i in range(int(Subject_no)):
fig=plt.figure()
#cleandf.iloc[:,k].plot(kind='hist',bins=3,color=colors[k])
sns.distplot(pracsdf.iloc[:,k],color=colors[k],norm_hist=True)
plt.xlabel(f'Marks in {pracsdf.columns[k]}')
plt.ylabel('No of students')
try:
plotly_fig = tls.mpl_to_plotly(fig)
except:
subject=pracsdf.columns[k]
plotly_fig=px.histogram(pracsdf,x=subject,histnorm='probability density',opacity=0.8,title=f'Marks in {pracsdf.columns[k]}',color_discrete_sequence=['indianred'] )
st.plotly_chart(plotly_fig)
k=k+1
if(k>int(Subject_no)):
break
#Database
if Analyse_type=='Database Analyser':
st.markdown('<html><h1><body style="background-color:Grey;">Database Analysis</body></h1></html>',
unsafe_allow_html=True)
from database import create_database,query_execute
create_database(cleandf,pracsdf,Semester)
st.subheader(f'SQL Theory table for sem {Semester}' )
query=st.text_input("enter a query for the sql databse",f'SELECT * FROM Sem_{Semester}_theory_results')
#query=f'SELECT * FROM Sem_{Semester}_theory_results
output=query_execute(query)
st.dataframe(output)
st.subheader(f'SQL practical table for sem {Semester}')
query=st.text_input("enter a query for the sql databse",f'SELECT * FROM Sem_{Semester}_pracs_results')
#query=f'SELECT * FROM Sem_{Semester}_pracs_results'
output_pracs=query_execute(query)
st.dataframe(output_pracs)
if Analyse_type=='Reports':
#First class
st.markdown('<html><h1><body style="background-color:cyan;">First class students</body></h1></html>',
unsafe_allow_html=True)
FC=cleandf[cleandf['CGPA']>=7.75]
fc_students=FC.shape[0]
st.dataframe(FC)
st.write(f' There are {fc_students} students in first class')
#Second class
st.markdown('<html><h1><body style="background-color:cyan;">Second class students</body></h1></html>',
unsafe_allow_html=True)
SC=cleandf[(cleandf['CGPA']>=6.75) & (cleandf['CGPA']<=7.74)]
st.dataframe(SC)
sc_students=SC.shape[0]
st.write(f' There are {sc_students} students in second class')
#pass class
st.markdown('<html><h1><body style="background-color:cyan;">pass class students</body></h1></html>',
unsafe_allow_html=True)
PC=cleandf[(cleandf['CGPA']>=4.00) & (cleandf['CGPA']<=5.74)]
st.dataframe(PC)
pc_students=PC.shape[0]
st.write(f' There are {pc_students} students in pass class')
#Top 5 scorers
st.markdown('<html><h1><body style="background-color:blue;">Toppers</body></h1></html>',
unsafe_allow_html=True)
no_students = st.number_input('Number of students ', 6)
column = 'CGPA'
column=st.selectbox('select an attribute',
tuple(cleandf.columns[1:])
)
bottom = False
toppers = cleandf[column].sort_values(ascending=bottom).values
toppers_report = cleandf[cleandf[column].isin(toppers)].sort_values(by=[column], ascending=False)
st.dataframe(toppers_report)
report = report_downlaoder(FC,SC,PC,toppers_report,fig)
st.sidebar.subheader('Click On reports to generate a report and get an option to download one')
st.sidebar.markdown(report,unsafe_allow_html=True)
| rahulthaker/Result-analysis | Analysis.py | Analysis.py | py | 6,347 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.image",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar... |
71276866109 | import setuptools
with open("README.md", "r") as file:
long_description = file.read()
with open("requirements.txt", "r") as file:
required_packages = file.read().splitlines()
setuptools.setup(
name="labscribe",
version="0.4.7",
author="Jay Morgan",
author_email="jay.p.morgan@outlook.com",
description="A small package for managing python experiment scripts",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jaypmorgan/labscribe",
packages=setuptools.find_packages(),
python_requires=">=3",
install_requires=required_packages,
include_package_data=True,
package_data={"labscribe": ["labscribe/data/*.sql"]})
| jaypmorgan/labscribe | setup.py | setup.py | py | 729 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 18,
"usage_type": "call"
}
] |
38829812575 | from django.shortcuts import render , get_object_or_404 , get_list_or_404
from django.contrib.auth.decorators import login_required
from .models import Members
# Create your views here.
@login_required(login_url="/")
def onemember_record(request , name):
objlist = get_list_or_404(Members , name = name)
objlist = objlist[::-1]
cd = totalcd(objlist)
next_pay = next_month_pay(objlist)
last_loanmonth = loan_last_month(objlist)
context = {
"objectlist" : objlist[1:],
"user":name,
"totalcd" : cd,
"nextpay" : next_pay,
"loan_end" : last_loanmonth
}
return render(request , "members/one_member_record.html" , context)
def totalcd(record):
total_cd = 0
for i in record[1:]:
total_cd += i.cd
return total_cd
def next_month_pay(record):
nextmonthpay = record[0].total
return nextmonthpay
def loan_last_month(record):
bal = record[0].loan_bal
install = record[0].installment
month = record[0].month
mon , yr = month.split("-")
if bal == 0:
return None
no_of_months = round(bal/install)
if int(no_of_months) + int(mon) <= 12:
end_month = f"{int(mon)+int(no_of_months)}-{yr}"
year = 0
while no_of_months > 12:
no_of_months -= 12
year += 1
end_month = f"{no_of_months}-{int(yr)+int(year)}"
return end_month
| hiteshkhatana/khatana-society-django | members/views.py | views.py | py | 1,267 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.get_list_or_404",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Members",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 27,
"usage_type": "call"
},
{
"api_n... |
11169927859 | from rest_framework import serializers
from review_app.models import FarmersMarket, Vendor
class FarmersMarketSerializer(serializers.ModelSerializer):
rating = serializers.ReadOnlyField(source='get_rating')
class Meta:
model = FarmersMarket
fields = ['id', 'fm_name', 'fm_description', 'rating', 'fm_picture_url', 'fm_banner_picture_url',
'fm_contact_name', 'fm_contact_email', 'fm_website', 'fm_facility_type',
'fm_county', 'fm_address', 'fm_lat', 'fm_long', 'fm_programs_accepted',
'fm_phone', 'fm_seasons_of_operation', 'fm_handicap_accessible', 'fm_updated']
class VendorSerializer(serializers.ModelSerializer):
rating = serializers.ReadOnlyField(source='get_rating')
class Meta:
model = Vendor
fields = ['id', 'at_farmers_market', 'vendor_name', 'vendor_description', 'rating', 'vendor_contact_name',
'vendor_contact_email', 'vendor_website', 'vendor_phone', 'vendor_type',
'vendor_picture_url', 'vendor_banner_picture_url', 'vendor_updated']
| dhcrain/FatHen | fm_api/serializers.py | serializers.py | py | 1,092 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ReadOnlyField",
"line_number": 6,
"us... |
19972078722 | from PIL import Image
from torchvision import transforms
import torch
import numpy as np
import pandas as pd
import sys
sys.path.append("d:\\Codes\\AI\\kaggle\\kaggle-CIFAR-10\\")
def loadImages():
# image list
images = np.zeros((300000, 3, 32, 32))
print("begining loading images")
i = 0
while True:
print(i)
try:
# open a image
imageLabel = i + 1
img_path = "datas/test/" + str(imageLabel) + ".png"
img = Image.open(img_path)
except FileNotFoundError: # 没有该图片或者图片读取完成
break
else:
# transfer image type into numpy
img = np.array(img)
img = torch.from_numpy(img)
img = img.transpose(0, 2)
img = img.transpose(1, 2)
images[i, :, :, :] = img
i += 1
return images
images = loadImages()
np.save("test_images.npy", images)
| rowenci/kaggle-CIFAR-10 | submission/testProcessing.py | testProcessing.py | py | 947 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_nu... |
71968210427 | from django.conf.urls import url
from mainapp import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.view_category, name='category'),
url(r'^search_dictionary/$', views.search_dictionary, name="search_dictionary"),
url(r'^search/$', views.search, name='search'),
url(r'^dictionary/', views.dictionary, name='dictionary'),
url(r'^local_help/', views.local_help, name='local_help'),
url(r'^talk_to_someone/', views.talk_to_someone, name='talk_to_someone'),
url(r'^search_questions/$', views.search_questions, name="search_questions")
] | Gystark/Tech4Justice2016 | mainapp/urls.py | urls.py | py | 637 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mainapp.views.index",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "mainapp.views",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.conf.... |
16172136194 | import numpy as np
from my_function import smooth_curve
from my_cnet import SimpleConvNet
from mnist import load_mnist
from my_optimizer import Adam
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
(x_train,t_train),(x_test,t_test) = load_mnist(flatten=False)
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
"""
epoch 全覆盖次数
mini_batch_size 批处理数据数
train_size 训练数据数
iter_per_epoch 一次全覆盖批处理次数
max_iter 整个训练批处理次数
optimizer 梯度更新选择Adam算法
current_epoch 目前进行的epoch次数
"""
epoch = 20
mini_batch_size = 100
train_size = x_train.shape[0]
iter_per_epoch = max(train_size/mini_batch_size,1)
iter_per_epoch = int(iter_per_epoch)#变为整数
max_iter = epoch*iter_per_epoch
optimizer = Adam(lr = 0.001)
current_epoch = 0
"""
画图参数
"""
train_loss_list = []
train_acc_list = []
test_acc_list = []
print("开始训练请等待...")
# Main training loop: one mini-batch gradient step per iteration.
for i in range(max_iter):
    # Sample a random mini-batch (with replacement) from the training set.
    batch_mask = np.random.choice(train_size,mini_batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    grads = network.gradient(x_batch,t_batch)
    grads = optimizer.update(network.params,grads)
    loss = network.loss(x_batch,t_batch)
    train_loss_list.append(loss)
    # Once per epoch: report accuracy (note: fires at the START of each epoch,
    # including i == 0, so epoch-1 accuracy is measured before any real training).
    if i %iter_per_epoch==0 :
        current_epoch += 1
        # use only the first 1000 samples to estimate accuracy (saves time)
        x_train_simple,t_train_simple = x_train[:1000],t_train[:1000]
        x_test_sample,t_test_sample = x_test[:1000],t_test[:1000]
        train_acc = network.accuracy(x_train_simple,t_train_simple)
        test_acc = network.accuracy(x_test_sample,t_test_sample)
        # On the final epoch, build a row-normalized confusion matrix heatmap.
        if current_epoch == 20 :
            cm = confusion_matrix(t_test_sample,np.argmax(network.predict(x_test_sample), axis=1))
            cmn = cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]
            cmn = np.around(cmn,decimals=2)
            plt.figure(figsize=(8, 8))
            sns.heatmap(cmn, annot=True, cmap='Blues')
            plt.ylim(0, 10)
            plt.xlabel('Predicted labels')
            plt.ylabel('True labels')
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("=== epoch : "+str(current_epoch)+", train acc:"+str(train_acc)+",test acc:"+str(test_acc)+" ===")
# network.save_parms("params.pkl")
print("训练结束,您的损失函数值已经降低到"+str(train_loss_list[-1])+"下面开始作图")
"""
画图
"""
plt.figure("loss")
x = np.arange(len(train_loss_list))
y = np.array(smooth_curve(train_loss_list))
plt.plot(x,y)
plt.xlabel("mini_batch")
plt.ylabel("loss")
plt.figure("accuracy")
x = np.arange(len(train_acc_list))
y1 = np.array(train_acc_list)
y2 = np.array(test_acc_list)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.plot(x,y1,label="train_accuracy")
plt.plot(x,y2,label="test_accuracy")
plt.legend()
plt.show()
| kang9kang/DL-learning | cnn/my_cnn_train.py | my_cnn_train.py | py | 3,081 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "mnist.load_mnist",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "my_cnet.SimpleConvNet",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "my_optimizer.Adam",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random... |
26043642636 | from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import List, cast
from pants.backend.project_info import dependents
from pants.backend.project_info.dependents import Dependents, DependentsRequest
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import resolve_conflicting_options
from pants.engine.addresses import Address, Addresses
from pants.engine.collection import Collection
from pants.engine.internals.graph import Owners, OwnersRequest
from pants.engine.internals.mapper import SpecsFilter
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import UnexpandedTargets
from pants.option.option_types import EnumOption, StrOption
from pants.option.option_value_container import OptionValueContainer
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import help_text
from pants.vcs.git import GitWorktree
class DependentsOption(Enum):
    """How far to expand changed targets into the targets that depend on them."""

    NONE = "none"
    DIRECT = "direct"
    TRANSITIVE = "transitive"
@dataclass(frozen=True)
class ChangedRequest:
    """Request to compute the targets affected by a set of changed source files.

    ``sources`` are the changed file paths; ``dependents`` controls whether
    (and how deeply) dependents of the owning targets are included.
    """

    sources: tuple[str, ...]
    dependents: DependentsOption
class ChangedAddresses(Collection[Address]):
    """The addresses of the targets affected by the requested change."""

    pass
@rule
async def find_changed_owners(
    request: ChangedRequest, specs_filter: SpecsFilter
) -> ChangedAddresses:
    """Map changed source files to the addresses of affected targets.

    Finds the targets owning ``request.sources`` and, depending on
    ``request.dependents``, expands the result to direct or transitive
    dependents before applying the global specs filter.
    """
    no_dependents = request.dependents == DependentsOption.NONE
    owners = await Get(
        Owners,
        OwnersRequest(
            request.sources,
            # If `--changed-dependents` is used, we cannot eagerly filter out root targets. We
            # need to first find their dependents, and only then should we filter. See
            # https://github.com/pantsbuild/pants/issues/15544
            filter_by_global_options=no_dependents,
            # Changing a BUILD file might impact the targets it defines.
            match_if_owning_build_file_included_in_sources=True,
        ),
    )
    if no_dependents:
        return ChangedAddresses(owners)

    # See https://github.com/pantsbuild/pants/issues/15313. We filter out target generators because
    # they are not useful as aliases for their generated targets in the context of
    # `--changed-since`. Including them makes it look like all sibling targets from the same
    # target generator have also changed.
    #
    # However, we also must be careful to preserve if target generators are direct owners, which
    # happens when a generated file is deleted.
    owner_target_generators = FrozenOrderedSet(
        addr.maybe_convert_to_target_generator() for addr in owners if addr.is_generated_target
    )
    dependents = await Get(
        Dependents,
        DependentsRequest(
            owners,
            transitive=request.dependents == DependentsOption.TRANSITIVE,
            include_roots=False,
        ),
    )
    # Union of the direct owners and their dependents, minus the generator aliases.
    result = FrozenOrderedSet(owners) | (dependents - owner_target_generators)
    if specs_filter.is_specified:
        # Finally, we must now filter out the result to only include what matches our tags, as the
        # last step of https://github.com/pantsbuild/pants/issues/15544.
        #
        # Note that we use `UnexpandedTargets` rather than `Targets` or `FilteredTargets` so that
        # we preserve target generators.
        result_as_tgts = await Get(UnexpandedTargets, Addresses(result))
        result = FrozenOrderedSet(
            tgt.address for tgt in result_as_tgts if specs_filter.matches(tgt)
        )
    return ChangedAddresses(result)
@dataclass(frozen=True)
class ChangedOptions:
    """A wrapper for the options from the `Changed` Subsystem.

    This is necessary because parsing of these options happens before conventional subsystems are
    configured, so the normal mechanisms like `Subsystem.rules()` would not work properly.
    """

    since: str | None
    diffspec: str | None
    dependents: DependentsOption

    @classmethod
    def from_options(cls, options: OptionValueContainer) -> ChangedOptions:
        """Build from raw option values, resolving the deprecated `--dependees` alias."""
        dependents = resolve_conflicting_options(
            old_option="dependees",
            new_option="dependents",
            old_scope=Changed.options_scope,
            new_scope=Changed.options_scope,
            old_container=options,
            new_container=options,
        )
        return cls(options.since, options.diffspec, dependents)

    @property
    def provided(self) -> bool:
        """True if the user requested change detection via `--since` or `--diffspec`."""
        return bool(self.since) or bool(self.diffspec)

    def changed_files(self, git_worktree: GitWorktree) -> list[str]:
        """Determines the files changed according to SCM/workspace and options."""
        if self.diffspec:
            # A diffspec restricts the diff to the named commit range/SHA/ref.
            return cast(
                List[str], git_worktree.changes_in(self.diffspec, relative_to=get_buildroot())
            )

        # Otherwise diff against `--since` (or the worktree's current revision).
        changes_since = self.since or git_worktree.current_rev_identifier
        return cast(
            List[str],
            git_worktree.changed_files(
                from_commit=changes_since, include_untracked=True, relative_to=get_buildroot()
            ),
        )
class Changed(Subsystem):
    """Options controlling Git-based change detection (the `--changed-*` flags)."""

    options_scope = "changed"
    help = help_text(
        f"""
        Tell Pants to detect what files and targets have changed from Git.
        See {doc_url('advanced-target-selection')}.
        """
    )

    since = StrOption(
        default=None,
        help="Calculate changes since this Git spec (commit range/SHA/ref).",
    )
    diffspec = StrOption(
        default=None,
        help="Calculate changes contained within a given Git spec (commit range/SHA/ref).",
    )
    dependents = EnumOption(
        default=DependentsOption.NONE,
        help="Include direct or transitive dependents of changed targets.",
    )
    # Deprecated alias for `dependents`; conflicts between the two are resolved
    # in `ChangedOptions.from_options`.
    dependees = EnumOption(
        default=DependentsOption.NONE,
        help="Include direct or transitive dependents of changed targets.",
        removal_version="2.23.0.dev0",
        removal_hint="Use --dependents instead",
    )
def rules():
    """Expose this module's rules together with those of the `dependents` backend."""
    collected = list(collect_rules())
    collected.extend(dependents.rules())
    return collected
| pantsbuild/pants | src/python/pants/vcs/changed.py | changed.py | py | 6,103 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pants.backend.project_info.dependents",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": ... |
11896448439 | from unittest import mock
from django.http import HttpRequest
from google_optimize.utils import _parse_experiments, get_experiments_variants
def test_parses_single_experiment_cookie():
    """A _gaexp cookie holding one experiment maps its id to the variant index."""
    req = HttpRequest()
    req.COOKIES.update({"_gaexp": "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"})
    parsed = _parse_experiments(req)
    assert parsed == {"utSuKi3PRbmxeG08en8VNw": 1}
def test_parses_multiple_experiment_cookies():
    """Two experiments separated by '!' in the cookie are both parsed."""
    cookie_value = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1!7IXTpXmLRzKwfU-Eilh_0Q.18166.0"
    req = HttpRequest()
    req.COOKIES["_gaexp"] = cookie_value
    expected = {"3x8_BbSCREyqtWm1H1OUrQ": 1, "7IXTpXmLRzKwfU-Eilh_0Q": 0}
    assert _parse_experiments(req) == expected
def test_parses_without_cookie():
    """Parsing a request with no _gaexp cookie yields None."""
    assert _parse_experiments(HttpRequest()) is None
@mock.patch("logging.Logger.warning")
def test_logs_missing_gaexp_cookie(logger):
request = HttpRequest()
get_experiments_variants(request, [{"id": "abc"}])
logger.assert_called_with("Missing _ga_exp cookie")
@mock.patch("logging.Logger.error")
def test_logs_no_settings(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "test"
get_experiments_variants(request, None)
logger.assert_called_with("Setting GOOGLE_OPTIMIZE_EXPERIMENTS not defined")
@mock.patch("logging.Logger.error")
def test_logs_failed_cookie_parsing(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "test"
get_experiments_variants(request, [{"id": "abc"}])
logger.assert_called_with("Failed to parse _gaexp %s", "test")
@mock.patch("logging.Logger.warning")
def test_logs_settings_missing_experiment_id(logger):
request = HttpRequest()
request.COOKIES["_gaexp"] = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1"
get_experiments_variants(request, [{"test": "test"}])
logger.assert_called_with("experiment id not found in experiment settings")
@mock.patch("logging.Logger.warning")
def test_logs_experiment_id_not_in_cookies(logger):
request = HttpRequest()
gaexp = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1"
experiment_id = "test"
request.COOKIES["_gaexp"] = gaexp
get_experiments_variants(request, [{"id": experiment_id}])
logger.assert_called_with(
"experiment id %s not found in experiments cookie %s", experiment_id, gaexp
)
def test_parses_single_experiment():
    """Experiment and variant aliases from the settings are applied to the result."""
    req = HttpRequest()
    req.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
    settings = [
        {
            "id": "utSuKi3PRbmxeG08en8VNw",
            "alias": "redesign",
            "variant_aliases": {0: "old_design", 1: "new_design"},
        }
    ]
    assert get_experiments_variants(req, settings) == {"redesign": "new_design"}
def test_parses_multiple_experiments():
    """Aliases from the settings are applied to every experiment in the cookie."""
    request = HttpRequest()
    request.COOKIES[
        "_gaexp"
    ] = "GAX1.2.3x8_BbSCREyqtWm1H1OUrQ.18166.1!7IXTpXmLRzKwfU-Eilh_0Q.18166.0"
    experiments = [
        {
            "id": "3x8_BbSCREyqtWm1H1OUrQ",
            "alias": "redesign_page",
            "variant_aliases": {0: "old_design", 1: "new_design"},
        },
        {
            "id": "7IXTpXmLRzKwfU-Eilh_0Q",
            "alias": "resign_header",
            "variant_aliases": {0: "old_header", 1: "new_header"},
        },
    ]
    values = get_experiments_variants(request, experiments)
    assert values == {"redesign_page": "new_design", "resign_header": "old_header"}
def test_parses_experiments_without_variant_aliases():
    """Without variant aliases the raw variant index is returned."""
    req = HttpRequest()
    req.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
    settings = [{"id": "utSuKi3PRbmxeG08en8VNw", "alias": "redesign"}]
    result = get_experiments_variants(req, settings)
    assert result == {"redesign": 1}
def test_parses_experiments_without_experiment_alias():
    """Without an experiment alias the raw experiment id is used as the key."""
    req = HttpRequest()
    req.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
    result = get_experiments_variants(req, [{"id": "utSuKi3PRbmxeG08en8VNw"}])
    assert result == {"utSuKi3PRbmxeG08en8VNw": 1}
| danihodovic/django-google-optimize | tests/test_utils.py | test_utils.py | py | 4,156 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.http.HttpRequest",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "google_optimize.utils._parse_experiments",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.http.HttpRequest",
"line_number": 16,
"usage_type": "call"
},
... |
41236219255 | """myProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Demo/learning apps, each mounted under its own URL prefix.
    path('students/', include('students.urls')),
    path("sers/", include("sers.urls")),
    path("model_serializer/", include("model_serializer.urls")),
    path("school/", include("school.urls")),
    path("req/", include("req.urls")),
    path("demo/", include("view.urls")),
    path("generic/", include("generic.urls")),
    path("mixin/", include("mixin.urls")),
    path("mixin-generic/", include("mixin_generic.urls")),
    path("viewset/", include("viewset.urls")),
    path("generic-viewset/", include("generic_viewset.urls")),
    path("mixin-generic-viewset/", include("mixin_generic_viewset.urls")),
    path("mixin-generic-viewset-router/", include("mixin_generic_viewset_router.urls")),
    path("authenticate_permission/", include("authenticate_permission.urls")),
    path("throttle_test/", include("throttle_test.urls")),
    path("filter/", include("filter.urls")),
    path("pagination/", include("pagination.urls")),
    path("exception/", include("exception.urls")),
    # API documentation: DRF's coreapi docs and drf-yasg.
    path("docs/", include_docs_urls(title="站点doc")),
    path("docs-drf-yasg/", include("drf_yasg_doc.urls")),
]
| beishangongzi/myProject | myProject/urls.py | urls.py | py | 1,926 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "... |
42862238176 | import pgzrun
import random
import time
import pygame.time
# import pygame
TITLE = "Brickbreaker"
# initial score is 0
# time is use to get the initial time and stores in the variable 'start time'
score = 0
# as ball hits the brick, score changes by 10
score_point = 10
start_time = time.time ()
elapsed_time = 0
# setting the size of the game window and number of bricks per row
WIDTH = 640
HEIGHT = 480
PADDLE_HEIGHT = 1
BRICKS_PER_ROW = 10
# setting the paddle initial position
paddle = Actor("paddlered.png")
paddle.x = 320
paddle.y = 440
# choosing the ball type and setting the initial ball position
ball = Actor("ballgrey.png")
ball.x = 320
ball.y = 340
# setting the initial speed
ball_x_speed = 2
ball_y_speed = 2
bricks = []
# placing the bricks in the screen
current_brick_pos_x = 64 / 2
current_brick_pos_y = 32 / 2
# Brick sprites are 64 by 32
# defining the code for different types of bricks
brick_sprites = ["element_green_rectangle.png", "element_yellow_rectangle.png", "element_red_rectangle.png"]
middle_brick = ["element_grey_rectangle.png"]
# this will be used to check if the game is over or not so that it can be used to restart and set everything back to its orignal position
game_over_box = False
# Pygame Zero frame callback: everything shown on screen is drawn here.
def draw():
    """Render the background, paddle, ball, bricks, HUD, and game-over overlay."""
    global start_time
    screen.fill((100, 149, 237))
    paddle.draw()
    ball.draw()
    for brick in bricks:
        brick.draw()
    # HUD: score bottom-left, elapsed time bottom-right.
    screen.draw.text("Score: " + str(score), bottomleft=(10, 480), color="red")
    update_elapsed_time()
    screen.draw.text("Time: " + str(elapsed_time), bottomright = (630, 480), color = "red")
    # Draws the game-over message (and handles restart input) once the round ends.
    game_over()
def update_elapsed_time():
    """Recompute the whole seconds elapsed since the round began."""
    global elapsed_time
    # Difference between the current wall-clock time and the round's start time.
    elapsed_time = int(time.time() - start_time)
def update_paddle():
    """Move the paddle: mouse motion takes priority, otherwise the A/D keys."""
    global paddle
    # If the mouse moved horizontally this frame, snap the paddle to the cursor.
    if pygame.mouse.get_rel()[0] != 0:
        paddle.x = pygame.mouse.get_pos()[0]
    # Otherwise fall back to the keyboard (A = left, D = right), clamped to the window.
    else:
        if keyboard.a:
            if (paddle.x - 4 > + 52):
                paddle.x = paddle.x - 4
        if keyboard.d:
            if (paddle.x + 4 < 640 - 48):
                paddle.x = paddle.x + 4
# Advances the ball and resolves all of its collisions for this frame.
def update_ball():
    """Move the ball; bounce off walls/paddle/bricks and update score/game-over state."""
    global ball_x_speed
    global ball_y_speed
    global score
    global game_over_box
    ball.x = ball.x + ball_x_speed
    ball.y = ball.y + ball_y_speed
    # Bounce off the left/right walls (multiplying by -1 reverses direction).
    if (ball.x > WIDTH - 16) or (ball.x < 0):
        ball_x_speed = ball_x_speed * -1
    # Bounce off the top/bottom edges.
    if (ball.y > HEIGHT - 16) or (ball.y < 0):
        ball_y_speed = ball_y_speed * -1
    # Bounce off the paddle.
    if ball.colliderect(paddle):
        ball_y_speed = ball_y_speed * -1
    # Reaching the bottom edge ends the round; game_over() draws the overlay.
    if (ball.y > HEIGHT - 16):
        game_over_box = True
    for brick in bricks:
        # A hit removes the brick, reflects the ball vertically, and awards points.
        if ball.colliderect(brick):
            bricks.remove(brick)
            ball_y_speed = ball_y_speed * -1
            score = score + score_point
def update():
    """Pygame Zero per-frame callback: advance the paddle, ball, and clock."""
    update_paddle()
    update_ball()
    update_elapsed_time()
# this function is used to create the row of bricks with the given sprite and position
def place_brick_row(sprite, pos_x, pos_y):
    """Append one row of `BRICKS_PER_ROW` bricks to the global `bricks` list.

    Every brick uses `sprite` except the middle one, whose image is drawn from
    `middle_brick`.
    """
    # BUG FIX: the original reused a single variable (`any_brick`) both as the
    # middle-brick index and as the chosen sprite name, so later iterations
    # silently compared an int against a str. Use two distinct names instead.
    middle_index = BRICKS_PER_ROW // 2
    for i in range(BRICKS_PER_ROW):
        brick = Actor(sprite)
        brick.x = pos_x + i * 64  # brick sprites are 64px wide
        brick.y = pos_y
        if i == middle_index:
            brick.image = random.choice(middle_brick)
        bricks.append(brick)
# Build the initial wall: one row per colour, each row 32px below the previous.
for brick_sprite in brick_sprites:
    place_brick_row(brick_sprite, current_brick_pos_x, current_brick_pos_y)
    current_brick_pos_y += 32
def game_over():
    """Draw the game-over overlay and restart the round when Enter is pressed."""
    if game_over_box:
        message = "Game Over"
        restart_game = "Press Enter to Restart"
        message_width = len(message) * 30
        message_height = 50
        # Red banner centred on screen with the two messages.
        screen.draw.filled_rect(
            Rect(WIDTH / 2 - message_width / 2, HEIGHT / 2 - message_height / 2, message_width, message_height),
            (255, 0, 0))
        screen.draw.text(message, center=(WIDTH / 2, HEIGHT / 2), fontsize=40, color="white")
        screen.draw.text(restart_game, center=(WIDTH / 2, HEIGHT / 1.5), fontsize=40, color="white")
        # Pressing Enter resets everything for a new round.
        if keyboard.RETURN:
            restart()
# reset everything back as usual
def restart():
    """Reset score, clock, ball, paddle, and rebuild the brick wall for a new round."""
    global score, ball_x_speed, ball_y_speed, game_over_box, current_brick_pos_x, current_brick_pos_y, bricks, start_time, elapsed_time
    score = 0
    start_time = time.time()
    elapsed_time = 0
    ball.x = 320
    ball.y = 340
    ball_x_speed = 2
    ball_y_speed = 2
    paddle.x = 320
    paddle.y = 440
    bricks = []
    # Rebuild the wall from its original anchor. (The original code assigned
    # `current_brick_pos_y` twice in a row; the redundant duplicate is removed.)
    current_brick_pos_x = 64 / 2
    current_brick_pos_y = 32 / 2
    for brick_sprite in brick_sprites:
        place_brick_row(brick_sprite, current_brick_pos_x, current_brick_pos_y)
        current_brick_pos_y += 32
    game_over_box = False
pgzrun.go() | Nirrdsh/py-game | Assignment.py | Assignment.py | py | 6,033 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pygame.time.mouse.get_rel",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame.time.mouse",
... |
32177625426 | from unittest import mock
from itertools import product
import pytest
@pytest.mark.parametrize(
    'user_agent, session',
    product(
        [None, mock.Mock()],
        [None, mock.Mock()]
    )
)
def test_init(user_agent, session):
    """Constructor applies default headers/session only when none are given."""
    with mock.patch('Raitonoberu.raitonoberu.aiohttp') as m_aio:
        from Raitonoberu.raitonoberu import Raitonoberu
        # run
        obj = Raitonoberu(user_agent, session)
        # test
        # BUG FIX: these comparisons were previously bare expressions whose
        # results were discarded, so the test could never fail.
        if user_agent is None:
            assert obj.headers == {"User-Agent": "Raitonoberu"}
        else:
            assert obj.headers == user_agent
        if session is None:
            assert obj.session == m_aio.ClientSession.return_value
            m_aio.ClientSession.assert_called_once_with(headers=obj.headers)
        else:
            assert obj.session == session
def test_del():
    """Deleting the instance closes its session exactly once."""
    fake_session = mock.Mock()
    with mock.patch('Raitonoberu.raitonoberu.Raitonoberu.__init__', return_value=None):
        from Raitonoberu.raitonoberu import Raitonoberu
        instance = Raitonoberu()
    instance.session = fake_session
    # run
    del instance
    # test
    fake_session.close.assert_called_once_with()
@pytest.mark.asyncio
@pytest.mark.parametrize('term', ['term'])
async def test_get_search_page(term):
    """The search result URL points at a novelupdates series page.

    NOTE(review): hits the real novelupdates.com site, so this test is
    network-dependent.
    """
    from Raitonoberu.raitonoberu import Raitonoberu
    obj = Raitonoberu()
    # run
    res = await obj.get_search_page(term=term)
    # test
    # the actual result with 'term' as input is
    # 'http://www.novelupdates.com/series/the-last-apostle/'
    assert res.startswith('http://www.novelupdates.com/series/')
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'term, exp_res',
    [
        (
            'smiling proud wanderer',
            {
                'aliases': [
                    'Laughing in the Wind',
                    'State of Divinity',
                    'The Peerless Gallant Errant',
                    'The Proud and Gallant Wanderer',
                    'Xiao Ao Jiang Hu',
                    '笑傲江湖'
                ],
                'artists': None,
                'authors': ['Jin Yong'],
                'completely_translated': True,
                'cover': 'http://cdn.novelupdates.com/images/2017/02/IMG_2801.jpg',
                'description': (
                    'The Smiling, Proud Wanderer is a wuxia novel by Jin Yong (Louis Cha). '
                    'It was first serialised in Hong Kong in the newspaper Ming Pao '
                    'from 20 April 1967 to 12 October 1969. The Chinese title of the novel, '
                    'Xiao Ao Jiang Hu, '
                    'literally means to live a carefree life in a mundane world of strife. '
                    'Alternate English translations of the title include '
                    'Laughing in the Wind, '
                    'The Peerless Gallant Errant, and The Proud and Gallant Wanderer. '
                    'Another alternative title, State of Divinity, '
                    'is used for some of the novel’s adaptations.'
                ),
                'english_publisher': None,
                'genre': ['Action', 'Adventure', 'Martial Arts', 'Wuxia'],
                'language': 'Chinese',
                'licensed': False,
                'link': 'http://www.novelupdates.com/series/smiling-proud-wanderer/',
                'novel_status': '4 Volumes (Completed)\n40 Chapters (Completed)',
                'publisher': 'Ming Pao',
                'related_series': None,
                'tags': [
                    'Adapted To Drama', 'Adapted to Manhua', 'Adapted To Movie', 'Betrayal',
                    'Misunderstandings', 'Politics', 'Revenge', 'Special Abilities'
                ],
                'title': 'Smiling Proud Wanderer',
                'type': 'Chinese Novel',
                'year': '1967'
            }
        )
    ]
)
async def test_get_first_search_result(term, exp_res):
    """The first search result for the term matches the full expected record.

    NOTE(review): network-dependent and brittle — any site-side edit to the
    novel's page will break the fixture above.
    """
    from Raitonoberu.raitonoberu import Raitonoberu
    obj = Raitonoberu()
    # run
    res = await obj.get_first_search_result(term=term)
    # test
    assert res == exp_res
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'term, exp_res',
    [
        (
            'I shall seal the heavens',
            [
                'Xian Ni (Shared Universe)',
                'Beseech The Devil (Shared Universe)',
                'Against Heaven (Shared Universe)',
                'A Will Eternal (Shared Universe)'
            ]
        ),
        ('Curing incurable diseases with semen', None),
        (
            'S.A.O.',
            [
                'Sword Art Online Alternative – Gun Gale Online (Spin-Off)',
                'Sword Art Online – Progressive (Spin-Off)',
                'Mahouka Koukou no Rettousei x Sword Art Online (Spin-Off)',
                'Sword Art Online Alternative – Clover’s Regret (Spin-Off)',
            ]
        ),
    ]
)
async def test_related_series(term, exp_res):
    """test related series category."""
    from Raitonoberu.raitonoberu import Raitonoberu
    obj = Raitonoberu()
    # run
    res = await obj.get_first_search_result(term=term)
    # test
    # BUG FIX: this comparison was previously a bare expression whose result
    # was discarded, so the test could never fail.
    assert res['related_series'] == exp_res
@pytest.mark.asyncio
@pytest.mark.parametrize(
    'term, exp_res',
    [
        ['Curing incurable diseases with semen', None],
        ['S.A.O.', 'Yen Press'],
        ['I shall seal the heavens', None],
    ]
)
async def test_english_publisher(term, exp_res):
    """test the english publisher field."""
    from Raitonoberu.raitonoberu import Raitonoberu
    obj = Raitonoberu()
    # run
    res = await obj.get_first_search_result(term=term)
    # test
    # BUG FIX: this comparison was previously a bare expression whose result
    # was discarded, so the test could never fail.
    assert res['english_publisher'] == exp_res
| byronvanstien/Raitonoberu | tests/test_raitonoberu.py | test_raitonoberu.py | py | 5,643 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "unittest.mock.patch",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "Raitonoberu.raitonoberu.Raitonoberu",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.