id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6672346 | from flask import Flask, render_template, jsonify, redirect, request, send_from_directory
from lxml import html
from bs4 import BeautifulSoup as bs
from bson.json_util import dumps
from re import sub
from decimal import Decimal
import requests
import json
import pymongo
import pandas as pd
import numpy as np
import csv
import os
import io
app = Flask(__name__)
# search for house for sale in the provided city
# example kansas-city_rb, overland-park_rb
def get_zillow_data(city):
# use Mongo db
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.zillow
collection = db.listings
# remove old zillow data
collection.drop()
collection = db.listings
# set valid request headers
req_headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.8',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
}
# get data from this url
with requests.Session() as s:
url = 'https://www.zillow.com/homes/for_sale/' + city + '/?fromHomePage=true&shouldFireSellPageImplicitClaimGA=false&fromHomePageTab=buy'
r = s.get(url, headers=req_headers)
# display return code 200=good
print(r)
# scrape the content
soup = bs(r.content, 'lxml')
price = soup.find_all('span', {'class': 'zsg-photo-card-price'})
info = soup.find_all('span', {'class': 'zsg-photo-card-info'})
address = soup.find_all('span', {'itemprop': 'address'})
price_listing = []
bedroom_listing = []
bathroom_listing = []
sqft_listing = []
description_listing = []
for index, value in enumerate(price):
try:
price_value = int('{:.0f}'.format(Decimal(sub(r'[^\d.]', '', value.text))))
print("price = ", price_value)
price_listing.append(price_value)
except:
print("price data error")
price_value = 0
info_value = info[index].text.split(" ")
print(len(info_value))
try:
bedroom_count = int(float(info_value[0].split(" ")[0]))
print("bed rooms = ", bedroom_count)
bedroom_listing.append(bedroom_count)
except:
print("bedroom data error")
bedroom_count = 0
try:
bathroom_count = int(info_value[3].split(" ")[0])
print("bath rooms = ", bathroom_count)
bathroom_listing.append(bathroom_count)
except:
print("bathroom data error")
bathroom_count = 0
try:
sqft_count = int('{:.0f}'.format(Decimal(sub(r'[^\d.]', '', info_value[6].split(" ")[0]))))
print("sqft = ", sqft_count)
sqft_listing.append(sqft_count)
except:
print("sqft data error")
sqft_count = 0
description_listing.append(str(bedroom_count) + " bds " + str(bathroom_count) + " ba: " + address[index].text)
each_listing = {"price": price_value, "bedroom": bedroom_count, "bathroom": bathroom_count, "sqft": sqft_count, "address": address[index].text}
collection.insert_one(each_listing)
data = [{
"x": price_listing,
"y": sqft_listing,
"mode": "markers",
"text": description_listing,
"marker": {
"color": "rgb(219, 64, 82)",
"size": 20,
"line": {
"color": "white",
"width": 0.5
}
},
"type": "scatter"
}]
return jsonify(data)
# calculate KC graduation rate in 2016
def get_graduation_rate():
csv_path = os.path.join("data", "KCDataSet3.csv")
kcdata = pd.read_csv(csv_path)
kcdata = kcdata.replace(0, np.NaN)
graduation_rate = kcdata["Graduation Rate"].mean()
rate = "%.2f" % round(graduation_rate,2)
return rate
# home page
@app.route("/")
def home():
return send_from_directory("templates", "index.html")
# get graduation rate for the dashboard
@app.route("/dashboard")
def dashboard_data():
return get_graduation_rate()
# build data for the plotly chart from Zillow housing data
@app.route("/zillow")
def get_data():
city = "kansas-city_rb"
return get_zillow_data(city)
@app.route('/<path:path>')
def send_static_file(path):
return send_from_directory("", path)
if __name__ == "__main__":
app.run(debug=True)
| StarcoderdataPython |
5111716 | import sqlalchemy as sqlalchemy
from kafka import KafkaConsumer, TopicPartition
from json import loads
from contextlib import closing
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import psycopg2
from sqlalchemy import Integer, String, Column, Sequence, create_engine, table, VARCHAR
class XactionConsumer:
def __init__(self):
self.consumer = KafkaConsumer('bank-customer-events',
bootstrap_servers=['localhost:9092'],
# auto_offset_reset='earliest',
value_deserializer=lambda m: loads(m.decode('ascii')))
## These are two python dictionarys
# Ledger is the one where all the transaction get posted
self.ledger = {}
# custBalances is the one where the current blance of each customer
# account is kept.
self.custBalances = {}
self.limit = -5000
# self.wth_sum = 0
# self.dep_sum = 0
self.total = 0
# THE PROBLEM is every time we re-run the Consumer, ALL our customer
# data gets lost!
# add a way to connect to your database here.
self.engine = create_engine('postgresql+psycopg2://rich:Coder20!@localhost:5432/kafka',
encoding='latin1', echo=True)
self.conn = self.engine.connect()
#Go back to the readme.
def handleMessages(self):
for message in self.consumer:
message = message.value
print('{} received'.format(message))
self.ledger[message['custid']] = message
# add message to the transaction table in your SQL usinf SQLalchemy
if message['custid'] not in self.custBalances:
self.custBalances[message['custid']] = 0
if message['type'] == 'dep':
self.custBalances[message['custid']] += message['amt']
self.total += message['amt']
if message['type'] == 'wth' and self.total < self.limit:
print(f"amt total:{self.total} is less than or equal to the limit of -5000:")
# if message['type'] == 'wth' and self.custBalances[message['custid']] < self.limit:
# print('withdraw limit reach')
else:
self.custBalances[message['custid']] -= message['amt']
self.total -= message['amt']
# print(self.custBalances)
# for self.custBalances[message['custid']] in self.custBalances:
# if message['amt'] <= self.limit:
# print(f"cust. id {message['custid']}: minimum balance exceeded")
# for key, value in self.custBalances.items():
# if value <= self.limit:
# print(f"amt total:{value} is less than or equal to the limit of -5000:")
# print(key, value)
if __name__ == "__main__":
c = XactionConsumer()
c.handleMessages() | StarcoderdataPython |
33833 | #python -m marbles test_semantic_columns.py
import unittest
from marbles.mixins import mixins
import pandas as pd
import requests
from pyspark.sql import SparkSession
import psycopg2 as pg
import pandas as pd
import marbles
from pyspark.sql.types import StructType, StructField, StringType
import psycopg2 as pg
#from src.features.build_features import crear_features
from src import(
MY_USER,
MY_PASS,
MY_HOST,
MY_PORT,
MY_DB,
)
def get_clean_data_test():
clean_rita = StructType([StructField('year', StringType(), True),
StructField('quarter', StringType(), True),
StructField('month', StringType(), True),
StructField('dayofmonth', StringType(), True),
StructField('dayofweek', StringType(), True),
StructField('flightdate', StringType(), True),
StructField('reporting_airline', StringType(), True),
StructField('dot_id_reporting_airline', StringType(), True),
StructField('iata_code_reporting_airline', StringType(), True),
StructField('tail_number', StringType(), True),
StructField('flight_number_reporting_airline', StringType(), True),
StructField('originairportid', StringType(), True),
StructField('originairportseqid', StringType(), True),
StructField('origincitymarketid', StringType(), True),
StructField('origin', StringType(), True),
StructField('origincityname', StringType(), True),
StructField('originstate', StringType(), True),
StructField('originstatefips', StringType(), True),
StructField('originstatename', StringType(), True),
StructField('originwac', StringType(), True),
StructField('destairportid', StringType(), True),
StructField('destairportseqid', StringType(), True),
StructField('destcitymarketid', StringType(), True),
StructField('dest', StringType(), True),
StructField('destcityname', StringType(), True),
StructField('deststate', StringType(), True),
StructField('deststatefips', StringType(), True),
StructField('deststatename', StringType(), True),
StructField('destwac', StringType(), True),
StructField('crsdeptime', StringType(), True),
StructField('deptime', StringType(), True),
StructField('depdelay', StringType(), True),
StructField('depdelayminutes', StringType(), True),
StructField('depdel15', StringType(), True),
StructField('departuredelaygroups', StringType(), True),
StructField('deptimeblk', StringType(), True),
StructField('taxiout', StringType(), True),
StructField('wheelsoff', StringType(), True),
StructField('wheelson', StringType(), True),
StructField('taxiin', StringType(), True),
StructField('crsarrtime', StringType(), True),
StructField('arrtime', StringType(), True),
StructField('arrdelay', StringType(), True),
StructField('arrdelayminutes', StringType(), True),
StructField('arrdel15', StringType(), True),
StructField('arrivaldelaygroups', StringType(), True),
StructField('arrtimeblk', StringType(), True),
StructField('cancelled', StringType(), True),
StructField('diverted', StringType(), True),
StructField('crselapsedtime', StringType(), True),
StructField('actualelapsedtime', StringType(), True),
StructField('airtime', StringType(), True),
StructField('flights', StringType(), True),
StructField('distance', StringType(), True),
StructField('distancegroup', StringType(), True),
StructField('divairportlandings', StringType(), True),
StructField('rangoatrasohoras', StringType(), True)
])
config_psyco = "host='{0}' dbname='{1}' user='{2}' password='{3}'".format(MY_HOST,MY_DB,MY_USER,MY_PASS)
connection = pg.connect(config_psyco)
pdf = pd.read_sql_query('select * from clean.rita limit 1;',con=connection)
spark = SparkSession.builder.config('spark.driver.extraClassPath', 'postgresql-9.4.1207.jar').getOrCreate()
df = spark.createDataFrame(pdf, schema=clean_rita)
return df
def crear_features_test(base):
from pyspark.sql import functions as f
base = base.withColumn('findesemana', f.when(f.col('dayofweek') == 5, 1).when(f.col('dayofweek') == 6, 1).when(f.col('dayofweek') == 7, 1).otherwise(0))
base = base.withColumn('quincena', f.when(f.col('dayofmonth') == 15, 1).when(f.col('dayofmonth') == 14, 1).when(f.col('dayofmonth') == 16, 1).when(f.col('dayofmonth') == 29, 1).when(f.col('dayofmonth') == 30, 1).when(f.col('dayofmonth') == 31, 1).when(f.col('dayofmonth') == 1, 1).when(f.col('dayofmonth') == 2, 1).when(f.col('dayofmonth') == 3, 1).otherwise(0))
base = base.withColumn('dephour', f.when(f.col('dayofweek') == 5, 1).otherwise(0))
base = base.withColumn('seishoras', f.when(f.col('dephour') == 6, 1).when(f.col('dephour') == 12, 1).when(f.col('dephour') == 18, 1).when(f.col('dephour') == 0, 1).otherwise(0))
return base
| StarcoderdataPython |
11370092 | import tempfile
import cv2, win32gui, win32con, win32api, numpy as np
import time
from typing import Union, List
from pathlib import Path
from PIL import ImageGrab
class Point2D:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
def to_tuple(self) -> tuple:
return (self.x,self.y)
class Region:
def __init__(self, x: int, y: int, x2: int, y2: int):
self.x = x
self.y = y
self.x2 = x2
self.y2 = y2
def to_tuple(self) -> tuple:
return (self.x, self.y, self.x2, self.y2)
class RGB:
def __init__(self, red: int, green: int, blue:int):
self.r = red
self.g = green
self.b = blue
def to_color_ref(self) -> int:
return int('%02x%02x%02x' % (self.b,self.g,self.r), 16)
class QAutoRPAImage():
def __init__(self):
pass
@classmethod
def find_image(cls, path_to_image: Union[Path,str]) -> Union[Region, None]:
"""[summary]
Args:
path_to_image (Union[Path,str]): [description]
Returns:
Union[Region, None]: [description]
"""
outline = None
# Tempfile, koska en keksinyt miten tehdä muokkaukset binäärinä
tmpfile = tempfile.NamedTemporaryFile(suffix='.png')
tmpfile.close()
# Screenshot. Tän voinee tehdä myös binäärinä ilman tallennusta...
scrshot = ImageGrab.grab()
tmp = cv2.cvtColor(np.array(scrshot), cv2.COLOR_RGB2BGR)
cv2.imwrite(tmpfile.name, tmp)
img_rgb = cv2.imread(tmpfile.name, cv2.IMREAD_GRAYSCALE)
template = cv2.imread(path_to_image, cv2.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]
result = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= 0.9)
if len(list(zip(*loc))) <= 0:
return None
# Jos nyt koitetaan eka vaan yhdellä.
for pt in zip(*loc[::-1]):
outline = Region(pt[0], pt[1], pt[0] + w, pt[1] + h)
break
cls.draw_rectangle(outline)
Path(tmpfile.name).unlink()
return outline
@classmethod
def get_center_point(cls, outline: Region) -> Point2D:
"""[summary]
Args:
outline (Region): [description]
Returns:
Point2D: [description]
"""
return Point2D(int((outline.x + outline.x2) / 2), int((outline.y + outline.y2) / 2))
@classmethod
def draw_line(cls, points: List[Point2D], color: RGB = None) -> None:
"""[summary]
Args:
points (List[Point2D]): [description]
color (RGB, optional): [description]. Defaults to None.
"""
if not color:
color = RGB(0, 255, 0)
# GetDC(hwnd), jos haluaa nimenomaan tietylle ikkunalle...
dc = win32gui.GetDC(0)
pen = win32gui.CreatePen(win32con.PS_SOLID, 2, color.to_color_ref())
win32gui.SelectObject(dc, pen)
lista = [p.to_tuple() for p in points]
win32gui.Polyline(dc, lista)
win32gui.DeleteObject(pen)
win32gui.DeleteDC(dc)
@classmethod
def draw_rectangle(cls, region: Region, color: RGB = None) -> None:
"""Draws a colour bordered transparent rectangle around given region
Args:
region (Region): [description]
color (RGB, optional): [description]. Defaults to None.
"""
if not color:
color = RGB(0, 255, 0)
dc = win32gui.GetDC(0)
pen = win32gui.CreatePen(win32con.PS_SOLID, 2, color.to_color_ref())
brush = win32gui.CreateBrushIndirect({'Style': win32con.BS_NULL, 'Color': -1, 'Hatch': win32con.HS_DIAGCROSS})
win32gui.SelectObject(dc, pen)
win32gui.SelectObject(dc, brush)
win32gui.Rectangle(dc, *region.to_tuple())
win32gui.DeleteObject(pen)
win32gui.DeleteObject(brush)
win32gui.DeleteDC(dc)
@classmethod
def draw_focus_rectangle(cls, region: Region) -> None:
"""Draw a highlight region around given region
Args:
region (Region): [description]
"""
dc = win32gui.GetDC(0)
pen = win32gui.CreatePen(win32con.PS_SOLID, 2, 0)
win32gui.DrawFocusRect(dc, region.to_tuple())
win32gui.SelectObject(dc, pen)
win32gui.DeleteDC(dc)
@classmethod
def draw_ellipse(cls, region: Region, color: RGB = None) -> None:
"""Draws a colored ellipse around given region
Args:
region (Region): [description]
color (RGB, optional): [description]. Defaults to None.
"""
if not color:
color = RGB(0, 255, 0)
dc = win32gui.GetDC(0)
pen = win32gui.CreatePen(win32con.PS_SOLID, 2, color.to_color_ref())
brush = win32gui.CreateBrushIndirect({'Style': win32con.BS_NULL, 'Color': -1, 'Hatch': win32con.HS_DIAGCROSS})
win32gui.SelectObject(dc, pen)
win32gui.SelectObject(dc, brush)
win32gui.Ellipse(dc, *region.to_tuple())
win32gui.DeleteObject(pen)
win32gui.DeleteObject(brush)
win32gui.DeleteDC(dc)
@classmethod
def wait_for_image(cls, image, timeout=30):
"""Waits for image and returns center point
Args:
image (path): [description] path to image file
timeout (int, optional): [description]. default timeout 30
"""
for x in range(timeout):
time.sleep(1)
found_image = cls.find_image(image)
if found_image is not None:
return cls.get_center_point(found_image)
raise TimeoutError(f"Image: '{image}' was not found")
@classmethod
def click_image(cls, image, timeout=30):
"""Left clicks at center of image
Args:
image (path): [description] path to image file
timeout (int, optional): [description]. default timeout 30
"""
point = cls.wait_for_image(image, timeout)
x, y = point.x, point.y
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
@classmethod
def double_click_image(cls, image, timeout=30):
"""Double clicks at center of image
Args:
image (path): [description] path to image file
timeout (int, optional): [description]. default timeout 30
"""
point = cls.wait_for_image(image, timeout)
x, y = point.x, point.y
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
@classmethod
def right_click_image(cls, image, timeout=30):
"""Right clicks at center of image
Args:
image (path): [description] path to image file
timeout (int, optional): [description]. default timeout 30
"""
point = cls.wait_for_image(image, timeout)
x, y = point.x, point.y
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, x, y, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, x, y, 0, 0)
| StarcoderdataPython |
88475 | from typing import List
class Solution:
def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:
l = len(multipliers)
def dfs(i, j, idx=0, memo={}):
key = f'{<KEY>
if key in memo:
return memo[key]
if idx >= len(multipliers):
return 0
res = max(nums[i] * multipliers[idx] + dfs(i + 1, j, idx + 1, memo),
nums[j] * multipliers[idx] + dfs(i, j - 1, idx + 1, memo))
memo[key] = res
return res
return dfs(0, len(nums) - 1)
def maximumScore_dp(self, nums: List[int], muls: List[int]) -> int:
l = len(nums)
dp = [[0 for _ in range(l)] for _ in range(l)]
def getScore(i, j, idx=0):
if idx >= len(muls):
return 0
if dp[i][j] == 0:
dp[i][j] = max(nums[i] * muls[idx] + getScore(i + 1, j, idx + 1),
nums[j] * muls[idx] + getScore(i, j - 1, idx + 1))
return dp[i][j]
getScore(0, l - 1)
return dp[0][-1]
nums = [-5, -3, -3, -2, 7, 1]
multipliers = [-10, -5, 3, 4, 6]
s = Solution()
print(s.maximumScore(nums, multipliers))
print(s.maximumScore_dp(nums, multipliers))
| StarcoderdataPython |
6472724 | <filename>tests/test_numpy.py
#!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import climetlab as cml
"""
Test if a numpy array can be plotted using another field as metadata,
i.e. for bounding box, style, etc.
"""
# def test_numpy_grib():
# s = cml.load_source("file", "docs/examples/test.grib")
# x = s.to_xarray()
# cml.plot_map(x.msl.values, metadata=s[1])
# def test_numpy_netcdf():
# s = cml.load_source("file", "docs/examples/test.nc")
# x = s.to_xarray()
# cml.plot_map(x.msl.values, metadata=s[1])
def test_numpy_xarray():
s = cml.load_source("file", "docs/examples/test.nc")
x = s.to_xarray()
cml.plot_map(x.msl.values, metadata=x.msl)
if __name__ == "__main__":
for k, f in sorted(globals().items()):
if k.startswith("test_") and callable(f):
print(k)
f()
| StarcoderdataPython |
5143198 | # -*- coding: utf-8 -*-
"""
Author: <NAME> <<EMAIL>>
License: MIT
"""
import numpy as np
import seaborn as sns
if __name__ == "__main__":
# Parameters
dtype = "float64"
max_iter, n_dim = 10000, 2
models_file = "../output/models_%d_%d.bin" % (max_iter, n_dim)
energy_file = "../output/energy_%d.bin" % max_iter
# Load files
models = np.fromfile(models_file, dtype = dtype).reshape((max_iter, n_dim), order = "F")
energy = np.fromfile(energy_file, dtype = dtype)
# Plot
sns.set_style("ticks")
sns.jointplot(models[:,0], models[:,1], kind = "kde") | StarcoderdataPython |
6431962 | <gh_stars>0
#----------------------------------Import modules------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Input
from keras import optimizers, regularizers
from sklearn.metrics import confusion_matrix,accuracy_score, roc_curve, auc
from sklearn import preprocessing
sns.set_style("whitegrid")
np.random.seed(697)
#Import data
df = pd.read_excel('dataset.xls', header = 1)
df = df.rename(columns = {'default payment next month': 'Default'})
#---------------------------------Pre-processing--------------------------------
#Check for missing values
df.isnull().sum() #No missing values thus no imputations needed
#Drop unneeded variables
df = df.drop(['ID'], axis = 1)
#Encode categorical variables to ONE-HOT
print('Converting categorical variables to numeric...')
categorical_columns = ['SEX', 'EDUCATION', 'MARRIAGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']
df = pd.get_dummies(df, columns = categorical_columns)
#Scale variables to [0,1] range
columns_to_scale = ['LIMIT_BAL', 'AGE', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5'
, 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
df[columns_to_scale]=df[columns_to_scale].apply(lambda x: (x-x.min())/(x.max()-x.min()))
#Split in 75% train and 25% test set
train, test_df = train_test_split(df, test_size = 0.15, random_state= 1984)
train_df, dev_df = train_test_split(train, test_size = 0.15, random_state= 1984)
# Check distribution of labels in train and test set
train_df.Default.sum()/train_df.shape[0] #0.2210
dev_df.Default.sum()/dev_df.shape[0] #0.2269
test_df.Default.sum()/test_df.shape[0] #0.2168
# Define the final train and test sets
train_y = train_df.Default
dev_y = dev_df.Default
test_y = test_df.Default
train_x = train_df.drop(['Default'], axis = 1)
dev_x = dev_df.drop(['Default'], axis = 1)
test_x = test_df.drop(['Default'], axis = 1)
train_x =np.array(train_x)
dev_x =np.array(dev_x)
test_x = np.array(test_x)
train_y = np.array(train_y)
dev_y = np.array(dev_y)
test_y = np.array(test_y)
#------------------------------------Build the AutoEncoder------------------------------------
# Choose size of our encoded representations (we will reduce our initial features to this number)
encoding_dim = 16
# Define input layer
input_data = Input(shape=(train_x.shape[1],))
# Define encoding layer
encoded = Dense(encoding_dim, activation='elu')(input_data)
# Define decoding layer
decoded = Dense(train_x.shape[1], activation='sigmoid')(encoded)
# Create the autoencoder model
autoencoder = Model(input_data, decoded)
#Compile the autoencoder model
autoencoder.compile(optimizer='adam',
loss='binary_crossentropy')
#Fit to train set, validate with dev set and save to hist_auto for plotting purposes
hist_auto = autoencoder.fit(train_x, train_x,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(dev_x, dev_x))
# Summarize history for loss
plt.figure()
plt.plot(hist_auto.history['loss'])
plt.plot(hist_auto.history['val_loss'])
plt.title('Autoencoder model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# Create a separate model (encoder) in order to make encodings (first part of the autoencoder model)
encoder = Model(input_data, encoded)
# Create a placeholder for an encoded input
encoded_input = Input(shape=(encoding_dim,))
# Retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# Create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# Encode and decode our test set (compare them vizually just to get a first insight of the autoencoder's performance)
encoded_x = encoder.predict(test_x)
decoded_output = decoder.predict(encoded_x)
#--------------------------------Build new model using encoded data--------------------------
#Encode data set from above using the encoder
encoded_train_x = encoder.predict(train_x)
encoded_test_x = encoder.predict(test_x)
model = Sequential()
model.add(Dense(16, input_dim=encoded_train_x.shape[1],
kernel_initializer='normal',
#kernel_regularizer=regularizers.l2(0.02),
activation="relu"
)
)
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.compile(loss="binary_crossentropy", optimizer='adam')
history = model.fit(encoded_train_x, train_y, validation_split=0.2, epochs=10, batch_size=64)
# Summarize history for loss
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Encoded model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
#---------------------------------Predictions and visuallizations-----------------------
#Predict on test set
predictions_NN_prob = model.predict(encoded_test_x)
predictions_NN_prob = predictions_NN_prob[:,0]
predictions_NN_01 = np.where(predictions_NN_prob > 0.5, 1, 0) #Turn probability to 0-1 binary output
#Print accuracy
acc_NN = accuracy_score(test_y, predictions_NN_01)
print('Overall accuracy of Neural Network model:', acc_NN)
#Print Area Under Curve
false_positive_rate, recall, thresholds = roc_curve(test_y, predictions_NN_prob)
roc_auc = auc(false_positive_rate, recall)
plt.figure()
plt.title('Receiver Operating Characteristic (ROC)')
plt.plot(false_positive_rate, recall, 'b', label = 'AUC = %0.3f' %roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1], [0,1], 'r--')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out (1-Specificity)')
plt.show()
#Print Confusion Matrix
cm = confusion_matrix(test_y, predictions_NN_01)
labels = ['No Default', 'Default']
plt.figure(figsize=(8,6))
sns.heatmap(cm,xticklabels=labels, yticklabels=labels, annot=True, fmt='d', cmap="Blues", vmin = 0.2);
plt.title('Confusion Matrix')
plt.ylabel('True Class')
plt.xlabel('Predicted Class')
plt.show()
| StarcoderdataPython |
4984243 | <filename>cogs/commands/info/stats.py
import datetime
import os
import platform
import traceback
from math import floor
import discord
import humanize
import psutil
from asyncio import sleep
from discord.ext import commands
class Stats(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.start_time = datetime.datetime.now()
@commands.guild_only()
@commands.command(name="roleinfo")
async def roleinfo(self, ctx: commands.Context, role: discord.Role) -> None:
"""Get number of users of a role
Example usage
-------------
`!roleinfo <@role/ID>`
Parameters
----------
role : discord.Role
Role to get info of
"""
bot_chan = self.bot.settings.guild().channel_botspam
if not self.bot.settings.permissions.hasAtLeast(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
raise commands.BadArgument(
f"Command only allowed in <#{bot_chan}>")
embed = discord.Embed(title="Role Statistics")
embed.description = f"{len(role.members)} members have role {role.mention}"
embed.color = role.color
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.message.reply(embed=embed)
@commands.guild_only()
@commands.command(name="ping")
async def ping(self, ctx: commands.Context) -> None:
"""Pong
Example usage:
`!ping`
"""
bot_chan = self.bot.settings.guild().channel_botspam
if not self.bot.settings.permissions.hasAtLeast(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
raise commands.BadArgument(
f"Command only allowed in <#{bot_chan}>")
b = datetime.datetime.utcnow()
embed = discord.Embed(
title=f"Pong!", color=discord.Color.blurple())
embed.set_thumbnail(url=self.bot.user.avatar_url)
embed.description = "Latency: testing..."
m = await ctx.message.reply(embed=embed)
ping = floor((datetime.datetime.utcnow() - b).total_seconds() * 1000)
await sleep(1)
embed.description = f"Latency: {ping} ms"
await m.edit(embed=embed)
@commands.guild_only()
@commands.command(name="stats")
async def stats(self, ctx: commands.Context) -> None:
"""Statistics about the bot
Example usage:
`!stats`
"""
bot_chan = self.bot.settings.guild().channel_botspam
if not self.bot.settings.permissions.hasAtLeast(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
raise commands.BadArgument(
f"Command only allowed in <#{bot_chan}>")
process = psutil.Process(os.getpid())
diff = datetime.datetime.now() - self.start_time
diff = humanize.naturaldelta(diff)
embed = discord.Embed(
title=f"{self.bot.user.name} Statistics", color=discord.Color.blurple())
embed.set_thumbnail(url=self.bot.user.avatar_url)
embed.add_field(name="Uptime", value=diff)
embed.add_field(name="CPU Usage", value=f"{psutil.cpu_percent()}%")
embed.add_field(name="Memory Usage",
value=f"{floor(process.memory_info().rss/1000/1000)} MB")
embed.add_field(name="Python Version", value=platform.python_version())
await ctx.message.reply(embed=embed)
@commands.guild_only()
@commands.command(name="serverinfo")
async def serverinfo(self, ctx: commands.Context) -> None:
"""Displays info about the server
Example usage:
`!serverinfo`
"""
bot_chan = self.bot.settings.guild().channel_botspam
if not self.bot.settings.permissions.hasAtLeast(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
raise commands.BadArgument(
f"Command only allowed in <#{bot_chan}>")
guild = ctx.guild
embed = discord.Embed(title="Server Information")
embed.color = discord.Color.blurple()
embed.set_thumbnail(url=guild.icon_url)
embed.add_field(name="Region", value=guild.region, inline=True)
embed.add_field(name="Boost Tier",
value=guild.premium_tier, inline=True)
embed.add_field(name="Users", value=guild.member_count, inline=True)
embed.add_field(name="Channels", value=len(
guild.channels) + len(guild.voice_channels), inline=True)
embed.add_field(name="Roles", value=len(guild.roles), inline=True)
embed.add_field(name="Owner", value=guild.owner.mention, inline=True)
embed.add_field(name="Created", value=guild.created_at.strftime(
"%B %d, %Y, %I:%M %p"), inline=True)
embed.set_footer(text=f"Requested by {ctx.author}")
await ctx.message.reply(embed=embed)
@serverinfo.error
@roleinfo.error
@stats.error
@ping.error
async def info_error(self, ctx, error):
await ctx.message.delete(delay=5)
if (isinstance(error, commands.MissingRequiredArgument)
or isinstance(error, commands.BadArgument)
or isinstance(error, commands.BadUnionArgument)
or isinstance(error, commands.MissingPermissions)
or isinstance(error, commands.NoPrivateMessage)):
await self.bot.send_error(ctx, error)
else:
await self.bot.send_error(ctx, "A fatal error occured. Tell <@109705860275539968> about this.")
traceback.print_exc()
def setup(bot):
bot.add_cog(Stats(bot))
| StarcoderdataPython |
9662069 | <reponame>Anphisa/city-scrapers
# THIS IS JUST A MIXIN. IT MAY USE THINGS THAT ARE NOT ACTUALLY USABLE YET,
# BUT IT WILL BE INTEGRATED INTO A REGULAR AGENCY SPIDER.
# -*- coding: utf-8 -*-
from datetime import datetime
from urllib.parse import urljoin
from dateutil.parser import parse as dateparse
from city_scrapers.constants import CANCELED, COMMITTEE
class WayneCommissionMixin:
    """Shared scraping logic for Wayne County commission committee spiders.

    Subclasses are expected to provide ``meeting_name``, ``_generate_id`` and
    ``_generate_status`` (normally inherited from the project's base spider) —
    TODO confirm against the spiders that mix this in.
    """
    timezone = 'America/Detroit'
    allowed_domains = ['www.waynecounty.com']
    classification = COMMITTEE
    # All committee meetings share the same venue.
    location = {
        'name': '7th floor meeting room, Guardian Building',
        'address': '500 Griswold St, Detroit, MI 48226',
        'neighborhood': '',
    }
    description = ''

    def parse(self, response):
        """
        `parse` should always `yield` a dict that follows the Event Schema
        <https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
        Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
        needs.
        """
        for item in self._parse_entries(response):
            data = {
                '_type': 'event',
                'name': self.meeting_name,
                'event_description': self.description,
                'classification': self.classification,
                'start': self._parse_start(item),
                # End time is not published on the calendar page.
                'end': {
                    'date': None,
                    'time': None,
                    'note': ''
                },
                'all_day': False,
                'location': self.location,
                'documents': self._parse_documents(item, response.url),
                'sources': [{
                    'url': response.url,
                    'note': ''
                }]
            }
            data['id'] = self._generate_id(data)
            # Join every cell's text so status keywords anywhere in the row are seen.
            status_str = ' '.join(item.xpath('.//td//text()').extract())
            data['status'] = self._generate_status(data, text=status_str)
            yield data

    def _parse_entries(self, response):
        """Return the calendar table rows that contain at least one non-empty cell."""
        return response.xpath('//tbody/tr[child::td/text()]')

    @staticmethod
    def _parse_documents(item, base_url):
        """Collect document links from a table row as ``{'url', 'note'}`` dicts."""
        documents = []
        for doc_link in item.xpath('td/a'):
            url = doc_link.xpath('@href').extract_first()
            # Resolve relative hrefs against the page URL; anchors without an href are skipped.
            url = urljoin(base_url, url) if url is not None else ''
            if url != '':
                note = doc_link.xpath('text()').extract_first()
                note = note if note is not None else ''
                documents.append({'url': url, 'note': note})
        return documents

    def _parse_start(self, item):
        """
        Parse start date and time.

        Returns a dict with ``date``, ``time`` and ``note`` keys, as expected by
        the event schema's ``start`` field.
        """
        # Calendar shows only meetings in current year.
        yearStr = datetime.now().year
        # Dateparse can't always handle the inconsistent dates, so
        # let's normalize them using scrapy's regular expressions.
        month_str = item.xpath('.//td[2]/text()').re(r'[a-zA-Z]{3}')[0]
        day_str = item.xpath('.//td[2]/text()').re(r'\d+')[0]
        time_str = item.xpath('.//td[3]/text()').extract_first()
        date_str = dateparse('{0} {1} {2} {3}'.format(month_str, day_str, yearStr, time_str))
        return {'date': date_str.date(), 'time': date_str.time(), 'note': ''}

    def _parse_status(self, item, data):
        """
        Parse or generate status of meeting.
        Postponed meetings will be considered cancelled.
        """
        # NOTE(review): extract_first() can return None for an empty 4th column,
        # which would make .lower() raise — confirm the column is always populated.
        # Also, parse() above calls _generate_status directly, so this method may be
        # unused by this mixin; verify before removing.
        status_str = item.xpath('.//td[4]//text()').extract_first()
        # If the agenda column text contains "postpone" or "cancel" we consider it cancelled.
        if ('cancel' in status_str.lower()) or ('postpone' in status_str.lower()):
            return CANCELED
        # If it's not one of the above statuses, use the status logic from spider.py
        else:
            return self._generate_status(data, '')
| StarcoderdataPython |
3494152 | """
This module provides a name_to_constructor dict for all models/estimators in scikit-learn, plus a couple test models and
error handling functions
"""
import warnings
import inspect
import sklearn.base
import sklearn.utils.testing
import joblib
import numpy as np
import os
# Sometimes xgboost is hard to install so make it optional
try:
import xgboost as xgb
except:
pass
import keras
from keras.models import model_from_json
from keras.models import load_model
from keras.models import Sequential
import random
random.seed(0)
import pandas as pd
#from . import keras_models
from mastml import utils
import pickle
from scipy import stats
# Build the master name -> constructor registry from every estimator scikit-learn
# exposes, suppressing DeprecationWarning during the discovery call.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    name_to_constructor = dict(sklearn.utils.testing.all_estimators())
class AlwaysFive(sklearn.base.RegressorMixin):
    """
    Trivial test regressor whose prediction is a fixed constant (5 by default).

    Args:
        constant: (int), the value returned for every sample. Always 5 by default

    Methods:
        fit: no-op that returns self, keeping the scikit-learn estimator shape

        predict: returns one copy of the constant per row of X

            Args:
                X: (numpy array), array of X features

            Returns:
                (numpy array), prediction array where all values are equal to constant
    """
    def __init__(self, constant=5):
        # Stored under the historical attribute name "five".
        self.five = constant

    def fit(self, X, y, groups=None):
        # Nothing to learn.
        return self

    def predict(self, X):
        # One constant prediction per input sample.
        return np.full(len(X), self.five)
class RandomGuesser(sklearn.base.RegressorMixin):
    """
    Trivial test regressor that predicts by sampling (with replacement) from the
    training targets seen during fit.

    Args:
        None

    Methods:
        fit: stores the observed target values

            Args:
                y: (numpy array), array of y data

        predict: draws one stored target value per input row

            Args:
                X: (numpy array), array of X features

            Returns:
                (numpy array), prediction array where all values are random selections of y data
    """
    def __init__(self):
        pass

    def fit(self, X, y, groups=None):
        # Remember the targets; predictions are drawn from these.
        self.possible_answers = y
        return self

    def predict(self, X):
        n_samples = X.shape[0]
        return np.random.choice(self.possible_answers, size=n_samples)
class KerasRegressor():
    """Thin wrapper that builds a Keras ``Sequential`` model from a configuration dict.

    The dict maps section names ('Layer1', 'Layer2', ..., 'FitParams') to dicts of
    string-valued settings. Layer sections add layers to the network; the 'FitParams'
    section sets training hyperparameters on ``self``.
    """
    def __init__(self, conf_dict):
        # conf_dict: parsed configuration; see build_model for the expected keys.
        self.conf_dict = conf_dict
        self.model = self.build_model()

    def build_model(self):
        """Construct (but do not compile) the Sequential model described by the config."""
        model_vals = self.conf_dict
        model = Sequential()
        for layer_dict, layer_val in model_vals.items():
            if (layer_dict != 'FitParams'):
                # Layer section: pull per-layer settings by type.
                layer_type = layer_val.get('layer_type')
                layer_name_asstr = layer_type
                if layer_name_asstr == 'Dense':
                    neuron_num = int(layer_val.get('neuron_num'))
                    if (layer_dict == 'Layer1'):
                        # Only the first layer declares the input dimension.
                        input_dim = int(layer_val.get('input_dim'))
                    kernel_initializer = layer_val.get('kernel_initializer')
                    activation = layer_val.get('activation')
                elif layer_name_asstr == 'Dropout':
                    rate = float(layer_val.get('rate'))
                # NOTE(review): this loop re-assigns layer_type to the same
                # getattr(keras.layers, layer_name_asstr) on every iteration and never
                # uses the loop variables; a single getattr call would likely suffice —
                # confirm before simplifying.
                for layer_name, cls in inspect.getmembers(keras.layers, inspect.isclass):
                    layer_type = getattr(keras.layers, layer_name_asstr) # (neuron_num)
            else:
                # 'FitParams' section: stash training hyperparameters on self,
                # falling back to defaults when a key is absent.
                if layer_val.get('rate'):
                    self.rate = float(layer_val.get('rate'))
                if layer_val.get('epochs'):
                    self.epochs = int(layer_val.get('epochs'))
                else:
                    self.epochs = 1
                if layer_val.get('batch_size'):
                    self.batch_size = int(layer_val.get('batch_size'))
                else:
                    self.batch_size = None
                if layer_val.get('loss'):
                    self.loss = str(layer_val.get('loss'))
                else:
                    self.loss = 'mean_squared_error'
                if layer_val.get('optimizer'):
                    self.optimizer = str(layer_val.get('optimizer'))
                else:
                    self.optimizer = 'adam'
                if layer_val.get('metrics'):
                    self.metrics = layer_val.get('metrics').split(',')
                else:
                    self.metrics = ['mae']
                # NOTE(review): a configured verbose is stored as str while the default
                # is int 0; Keras expects an int — confirm and normalize.
                if layer_val.get('verbose'):
                    self.verbose = str(layer_val.get('verbose'))
                else:
                    self.verbose = 0
                if layer_val.get('shuffle'):
                    self.shuffle = bool(layer_val.get('shuffle'))
                else:
                    self.shuffle = True
                if layer_val.get('validation_split'):
                    self.validation_split = float(layer_val.get('validation_split'))
                else:
                    self.validation_split = 0.0
                # FitParams adds no layers; skip the model.add section below.
                continue
            if (layer_dict == 'Layer1'):
                model.add(layer_type(neuron_num, input_dim=input_dim, kernel_initializer=kernel_initializer,
                                     activation=activation))
            else:
                if layer_name_asstr == 'Dense':
                    model.add(layer_type(neuron_num, kernel_initializer=kernel_initializer, activation=activation))
                if layer_name_asstr == 'Dropout':
                    model.add(layer_type(rate=rate))
        return model

    def fit(self, X, Y):
        """Rebuild, compile and train the network; returns the Keras History object."""
        # Need to rebuild and re-compile model at every fit instance so don't have information of weights from other fits
        self.model = self.build_model()
        self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
        return self.model.fit(X, Y, epochs=self.epochs, batch_size=self.batch_size, verbose=self.verbose,
                              validation_split=self.validation_split, shuffle=self.shuffle)

    def predict(self, X):
        """Delegate prediction to the underlying Keras model."""
        return self.model.predict(X)

    def summary(self):
        """Delegate to Keras' model summary."""
        return self.model.summary()
# ref: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingRegressor.html#sklearn.ensemble.BaggingRegressor
# NOTE: in order to use this, other models for the custom ensemble must be defined
# in the conf file with "_ensemble" somewhere in the name
class EnsembleRegressor():
    """Hand-rolled bagging ensemble: each member is fit on a bootstrap resample
    and predictions are averaged across members."""
    def __init__(self, num_samples, model_list, num_models):
        # NOTE(review): despite the comment below, fit() calls .fit() on these
        # entries, so they must be estimator instances — confirm the intended type.
        self.model_list = model_list # should be list of strings
        self.num_models = num_models # how many of each of the specified models should be included in the ensemble
        self.n_estimators = sum(self.num_models)
        self.num_samples = num_samples   # bootstrap sample size per member
        self.max_samples = num_samples
        self.bootstrapped_datasets = []  # bootstrap X for each member (filled by fit)
        self.bootstrapped_idxs = []      # bootstrap row indices for each member
        self.all_preds = []              # per-sample, per-member predictions (filled by predict)
        self.path = ""                   # output directory, set by setup()
        self.model = self.build_models() # actually a list of models for use as the members in the ensemble
        self.fold = -1
        self.bootstrap = True

    def build_models(self):
        """Expand (model_list, num_models) into the flat member list."""
        model = []
        for i, num_m in enumerate(self.num_models):
            for j in range(num_m):
                model.append(self.model_list[i])
        return model

    def setup(self, path):
        """Reset per-fold bookkeeping and remember the output path."""
        self.fold += 1
        self.bootstrapped_idxs = []
        self.bootstrapped_datasets = []
        self.path = path

    def fit(self, X, Y):
        """Fit every member on its own bootstrap resample of (X, Y)."""
        X = X.values
        Y = Y.values
        idxs = np.arange(len(X))
        # fit each model in the ensemble
        for i in range(self.n_estimators):
            model = self.model[i]
            # do bootstrapping given the validation data
            bootstrap_idxs = random.choices(idxs, k=self.num_samples)
            bootstrap_X = X[bootstrap_idxs]
            bootstrap_Y = Y[bootstrap_idxs]
            # Ensure 2D inputs for estimators that require a feature axis.
            if 1 == len(bootstrap_X.shape):
                bootstrap_X = np.expand_dims(np.asarray(bootstrap_X), -1)
            if 1 == len(bootstrap_Y.shape):
                bootstrap_Y = np.expand_dims(np.asarray(bootstrap_Y), -1)
            self.bootstrapped_idxs.append(bootstrap_idxs)
            self.bootstrapped_datasets.append(bootstrap_X)
            model.fit(bootstrap_X, bootstrap_Y)

    def predict(self, X, return_std=False):
        """Return the per-sample mean of every member's prediction.

        Side effect: stores the raw per-member predictions in self.all_preds
        for later inspection by stats_check_models.
        """
        if isinstance(X, pd.DataFrame):
            X = X.values
        all_preds = []
        means = []
        for x_i in range(len(X)):
            preds = []
            for i in range(self.n_estimators):
                sample_X = X[x_i]
                if 1 == len(sample_X.shape):
                    sample_X = np.expand_dims(np.asarray(sample_X), 0)
                preds.append(self.model[i].predict(sample_X))
            all_preds.append(preds)
            means.append(np.mean(preds))
        # NOTE if manual implementation is desired
        # https://www.jpytr.com/post/random_forests_and_jackknife_variance/
        # https://github.com/scikit-learn-contrib/forest-confidence-interval/tree/master/forestci
        # http://contrib.scikit-learn.org/forest-confidence-interval/reference/forestci.html
        self.all_preds = all_preds
        return np.asarray(means)

    # check for failed fits, warn users, and re-calculate
    def stats_check_models(self, X, Y):
        """Flag members whose MAE is a statistical outlier (two-sided z-test) and
        re-average the predictions. Requires predict() to have been called first."""
        if self.n_estimators > 10:
            maes = []
            for i in range(self.n_estimators):
                abs_errors = np.absolute(np.absolute(np.squeeze(np.asarray(self.all_preds)[:,i])) - Y)
                maes.append(sum(abs_errors) / len(abs_errors))
            alpha = 0.01
            bad_idxs = []
            for i in range(self.n_estimators):
                # Compare each member's MAE to the distribution of the others' MAEs.
                other_maes = np.delete(maes, [i])
                # ref: https://towardsdatascience.com/statistical-significance-hypothesis-testing-the-normal-curve-and-p-values-93274fa32687
                z_score = (maes[i] - np.mean(other_maes)) / np.std(other_maes)
                # ref: https://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in/3508321
                p_val = stats.norm.sf(abs(z_score))*2
                if p_val <= alpha:
                    # TODO ok to print these/how to print/log properly?
                    print("Estimator {} failed under statistical significance threshold {} (p_val {}), relevant dataset output to file with name format \'<fold>_<estimator idx>_bootstrapped_dataset.csv\'".format(i, alpha, p_val))
                    print("bad estimator mae: {}".format(maes[i]))
                    print("mean mae (for ref):")
                    print(np.mean(maes))
                    # NOTE(review): "\\" is a Windows-only separator; os.path.join
                    # would be portable — confirm target platforms before changing.
                    np.savetxt(self.path + "\\{}_{}_bootstrapped_dataset.csv".format(self.fold, i), self.bootstrapped_datasets[i], delimiter=",")
                    bad_idxs.append(i)
            if len(bad_idxs) == self.n_estimators:
                # NOTE(review): unprofessional wording in this user-facing message;
                # consider rewording in a behavior-changing update.
                print("ALL models failed, wtf is your data")
                return
            #self.all_preds = np.delete(self.all_preds, bad_idxs, 1)
        y_preds = []
        for idx, x_i in enumerate(self.all_preds):
            y_preds.append(np.mean(x_i))
        return np.asarray(y_preds)
class ModelImport():
    """
    Wrapper that loads a previously pickled model from disk and exposes the
    minimal estimator interface expected by the rest of the pipeline.

    Args:
        model_path (str): string designating the path to load the saved .pkl model file

    Methods:
        fit: no-op kept for API compatibility; the imported model is already trained

            Args:
                X: Nonetype
                y: Nonetype
                groups: Nonetype

        predict: delegates to the loaded model's own predict method

            Args:
                X: (numpy array), array of X features

            Returns:
                (numpy array), prediction array using imported model
    """
    def __init__(self, model_path):
        self.model_path = model_path
        # Deserialize the trained model once, at construction time.
        self.model = joblib.load(self.model_path)

    def fit(self, X=None, y=None, groups=None):
        """ Only here for compatibility """
        return

    def predict(self, X):
        # Defer entirely to the imported model.
        return self.model.predict(X)
# Optional to have xgboost working
# If the guarded "import xgboost as xgb" at the top of the module failed, the
# reference to xgb below raises NameError and we fall back to the registry
# without the XGB entries.
try:
    custom_models = {
        'AlwaysFive': AlwaysFive,
        'RandomGuesser': RandomGuesser,
        'ModelImport': ModelImport,
        'XGBRegressor': xgb.XGBRegressor,
        'XGBClassifier': xgb.XGBClassifier,
        'KerasRegressor': KerasRegressor,
        'EnsembleRegressor': EnsembleRegressor
        #'DNNClassifier': keras_models.DNNClassifier
    }
except NameError:
    custom_models = {
        'AlwaysFive': AlwaysFive,
        'RandomGuesser': RandomGuesser,
        'ModelImport': ModelImport,
        'KerasRegressor': KerasRegressor,
        'EnsembleRegressor': EnsembleRegressor
        # 'DNNClassifier': keras_models.DNNClassifier
    }
# Merge the custom models into the global scikit-learn registry.
name_to_constructor.update(custom_models)
def find_model(model_name):
    """
    Look up a model/estimator constructor by name in the global registry.

    Args:
        model_name: (str), the name of a model/estimator

    Returns:
        the registered constructor, or raises InvalidModel if the name is unknown
    """
    if model_name in name_to_constructor:
        return name_to_constructor[model_name]
    raise utils.InvalidModel(f"Model '{model_name}' does not exist in scikit-learn.")
def check_models_mixed(model_names):
    """
    Verify that regression and classification models are not mixed together.

    Args:
        model_names: (list), list containing names of models/estimators

    Returns:
        (bool), whether a classifier was found; raises if both regression and
        classification models are present, or a name is neither kind.
    """
    has_classifier = False
    has_regressor = False
    for model_name in model_names:
        # Custom (non-sklearn) models are exempt from this check.
        if model_name in custom_models:
            continue
        constructor = find_model(model_name)
        if issubclass(constructor, sklearn.base.ClassifierMixin):
            has_classifier = True
        elif issubclass(constructor, sklearn.base.RegressorMixin):
            has_regressor = True
        else:
            raise Exception(f"Model '{model_name}' is neither a classifier nor a regressor")
    if has_classifier and has_regressor:
        raise Exception("Both classifiers and regressor models have been included")
    return has_classifier
| StarcoderdataPython |
3316857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the Cartpole robotic platform.
"""
import os
import numpy as np
import pybullet_data
import sympy
import sympy.physics.mechanics as mechanics
from pyrobolearn.robots.robot import Robot
from pyrobolearn.utils.transformation import get_symbolic_matrix_from_axis_angle, get_matrix_from_quaternion
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class CartPole(Robot):
    r"""CartPole robot

    In its original formulation, the Cart-pole robot is a cart mounted by an inverted pendulum.
    The number of links for the pendulum can be specified during runtime.

    References:
        - [1] "Reinforcement Learning: an Introduction", Barto and Sutton, 1998
        - [2] Cartpole bullet environment:
          github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/bullet/cartpole_bullet.py
        - [3] "PyDy Tutorial: Human Standing": https://github.com/pydy/pydy-tutorial-human-standing
        - [4] "Dynamics with Python balancing the five link pendulum": http://www.moorepants.info/blog/npendulum.html
    """

    # NOTE(review): the urdf default argument calls pybullet_data.getDataPath() once,
    # at import time — confirm this is the intended behavior.
    def __init__(self, simulator, position=(0, 0, 0), orientation=(0, 0, 0, 1), scale=1., fixed_base=True,
                 num_links=1, inverted_pole=False, pole_mass=1,
                 urdf=os.path.join(pybullet_data.getDataPath(), "cartpole.urdf")):  # pole_mass=10
        """
        Initialize the Cartpole robot.

        Args:
            simulator (Simulator): simulator instance.
            position (np.array[float[3]]): Cartesian world position.
            orientation (np.array[float[4]]): Cartesian world orientation expressed as a quaternion [x,y,z,w].
            fixed_base (bool): if True, the robot base will be fixed in the world.
            scale (float): scaling factor that is used to scale the robot.
            num_links (int): number of links / poles connected to each other.
            inverted_pole (bool): if we should start with inverted poles, or not.
            pole_mass (float): mass of each link/pole.
            urdf (str): path to the urdf. Do not change it unless you know what you are doing.
        """
        # check parameters
        if position is None:
            position = (0., 0., 0.)
        if len(position) == 2:  # assume x, y are given
            position = tuple(position) + (0.,)
        if orientation is None:
            orientation = (0, 0, 0, 1)
        if fixed_base is None:
            fixed_base = True

        super(CartPole, self).__init__(simulator, urdf, position, orientation, fixed_base, scale)
        self.name = 'cartpole'

        # create dynamically other links if necessary
        # Refs:
        # 1. https://pybullet.org/Bullet/phpBB3/viewtopic.php?t=12345
        # 2. https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_utils/urdfEditor.py
        # if num_links > 1:
        #     # get info about the link (dimensions, mass, etc).
        #     jointInfo = self.get_joint_info(self.joints[-1])
        #     linkInfo = self.getLinkStates(self.joints[-1])
        #     dynamicInfo = self.sim.g
        #
        #     dims =
        #
        #     info =
        #     jointType = info[2]
        #     joint_axis, parent = info[-4]
        #     parentLinkIndex = info
        #
        #     # create visual, collision shapes, and the body
        #     collision_shape = self.sim.create_collision_shape(self.sim.GEOM_BOX, half_extents=dimensions)
        #     visual_shape = self.sim.create_visual_shape(self.sim.GEOM_BOX, half_extents=dimensions, rgba_color=color)
        #
        #     for i in range(num_links - 1):
        #         # create new link and attached it to the previous link
        #         linkId = self.sim.create_body(baseMass=mass,
        #                                       base_collision_shapeIndex=collision_shape,
        #                                       base_visual_shapeIndex=visual_shape,
        #                                       position=position,
        #                                       baseOrientation=orientation,
        #                                       linkParentIndices=[self.joints[-1]],
        #                                       linkJointTypes=[self.sim.JOINT_REVOLUTE])

        # create dynamically the cartpole because currently we can not add a link with a revolute joint in PyBullet;
        # we have to build the whole multibody system
        # The values are from the cartpole URDF: https://github.com/bulletphysics/bullet3/blob/master/data/cartpole.urdf

        # remove body (the one loaded from the URDF above; it is rebuilt below)
        self.sim.remove_body(self.id)

        # create slider
        dims = (15, 0.025, 0.025)
        color = (0, 0.8, 0.8, 1)
        mass = 0
        collision_shape = self.sim.create_collision_shape(self.sim.GEOM_BOX, half_extents=dims)
        visual_shape = self.sim.create_visual_shape(self.sim.GEOM_BOX, half_extents=dims, rgba_color=color)

        # create cart and pole
        cart_dims = (0.25, 0.25, 0.1)
        cart_collision_shape = self.sim.create_collision_shape(self.sim.GEOM_BOX, half_extents=cart_dims)
        cart_visual_shape = self.sim.create_visual_shape(self.sim.GEOM_BOX, half_extents=cart_dims,
                                                         rgba_color=(0, 0, 0.8, 1))
        pole_dims = (0.025, 0.025, 0.5)
        pole_collision_shape = self.sim.create_collision_shape(self.sim.GEOM_BOX, half_extents=pole_dims)
        pole_visual_shape = self.sim.create_visual_shape(self.sim.GEOM_BOX, half_extents=pole_dims,
                                                         rgba_color=(1, 1, 1, 1))
        radius = 0.05
        sphere_collision_shape = self.sim.create_collision_shape(self.sim.GEOM_SPHERE, radius=radius)
        sphere_visual_shape = self.sim.create_visual_shape(self.sim.GEOM_SPHERE, radius=radius,
                                                           rgba_color=(1, 0, 0, 1))

        # Per-link construction arrays; entry 0 describes the cart.
        link_masses = [1]
        link_collision_shape_ids = [cart_collision_shape]
        link_visual_shape_ids = [cart_visual_shape]
        link_positions = [[0, 0, 0]]
        link_orientations = [[0, 0, 0, 1]]
        link_inertial_frame_positions = [[0, 0, 0]]
        link_inertial_frame_orientations = [[0, 0, 0, 1]]
        parent_indices = [0]
        joint_types = [self.sim.JOINT_PRISMATIC]
        joint_axis = [[1, 0, 0]]

        # for each new link: a small sphere (revolute "hinge") followed by a pole (fixed)
        if num_links > 0:
            link_masses += [0.001, pole_mass] * num_links
            link_collision_shape_ids += [sphere_collision_shape, pole_collision_shape] * num_links
            link_visual_shape_ids += [sphere_visual_shape, pole_visual_shape] * num_links
            if inverted_pole:
                link_positions += [[0, 0, 0], [0, 0, -0.5]]
                link_positions += [[0, 0, -0.5], [0, 0, -0.5]] * (num_links - 1)
            else:
                link_positions += [[0, 0, 0], [0, 0, 0.5]]
                link_positions += [[0, 0, 0.5], [0, 0, 0.5]] * (num_links - 1)
            link_orientations += [[0, 0, 0, 1]] * 2 * num_links
            link_inertial_frame_positions += [[0, 0, 0]] * 2 * num_links
            link_inertial_frame_orientations += [[0, 0, 0, 1]] * 2 * num_links
            parent_indices += range(1, 1 + 2 * num_links)
            joint_types += [self.sim.JOINT_REVOLUTE, self.sim.JOINT_FIXED] * num_links
            joint_axis += [[0, 1, 0], [0, 1, 0]] * num_links

        # create the whole body
        self.id = self.sim.create_body(mass=mass, collision_shape_id=collision_shape, visual_shape_id=visual_shape,
                                       position=position, orientation=orientation, baseInertialFramePosition=[0, 0, 0],
                                       baseInertialFrameOrientation=[0, 0, 0, 1], linkMasses=link_masses,
                                       linkCollisionShapeIndices=link_collision_shape_ids,
                                       linkVisualShapeIndices=link_visual_shape_ids,
                                       linkPositions=link_positions, linkOrientations=link_orientations,
                                       linkInertialFramePositions=link_inertial_frame_positions,
                                       linkInertialFrameOrientations=link_inertial_frame_orientations,
                                       linkParentIndices=parent_indices, linkJointTypes=joint_types,
                                       linkJointAxis=joint_axis)

        # useful variables
        self.joints = []  # non-fixed joint/link indices in the simulator
        self.joint_names = {}  # joint name to id in the simulator
        self.link_names = {}  # link name to id in the simulator

        for joint in range(self.num_joints):
            # Get joint info
            jnt = self.sim.get_joint_info(self.id, joint)
            self.joint_names[jnt[1]] = jnt[0]
            self.link_names[jnt[12]] = jnt[0]
            # remember actuated joints
            if jnt[2] != self.sim.JOINT_FIXED:
                self.joints.append(jnt[0])

        # disable the joints for the pole links
        # self.disable_motor(self.joints[1:])
        self.disable_motor(parent_indices[1::2])

    def get_symbolic_equations_of_motion(self, verbose=False):
        """
        This returns the symbolic equation of motions of the robot (using the URDF). Internally, this used the
        `sympy.mechanics` module.

        Returns:
            (M, f): sympy matrices such that M(x, t) * dx = f(x, t), where x = [q, dq].

        Side effect: stores q, dq, the KanesMethod object and the numeric
        parameter substitutions in ``self.symbols`` for use by
        ``linearize_equations_of_motion``.
        """
        # gravity and time
        g, t = sympy.symbols('g t')

        # create the world inertial frame of reference and its origin
        world_frame = mechanics.ReferenceFrame('Fw')
        world_origin = mechanics.Point('Pw')
        world_origin.set_vel(world_frame, mechanics.Vector(0))

        # create the base frame (its position, orientation and velocities) + generalized coordinates and speeds
        base_id = -1

        # Check if the robot has a fixed base and create the generalized coordinates and speeds based on that,
        # as well the base position, orientation and velocities
        if self.has_fixed_base():
            # generalized coordinates q(t) and speeds dq(t)
            q = mechanics.dynamicsymbols('q:{}'.format(len(self.joints)))
            dq = mechanics.dynamicsymbols('dq:{}'.format(len(self.joints)))
            pos, orn = self.get_base_pose()
            lin_vel, ang_vel = [0,0,0], [0,0,0]  # 0 because fixed base
            joint_id = 0
        else:
            # generalized coordinates q(t) and speeds dq(t)
            # Floating base adds 7 coordinates (3 position + 4 quaternion) and 6 speeds.
            q = mechanics.dynamicsymbols('q:{}'.format(7 + len(self.joints)))
            dq = mechanics.dynamicsymbols('dq:{}'.format(6 + len(self.joints)))
            pos, orn = q[:3], q[3:7]
            lin_vel, ang_vel = dq[:3], dq[3:6]
            joint_id = 7

        # set the position, orientation and velocities of the base
        base_frame = world_frame.orientnew('Fb', 'Quaternion', [orn[3], orn[0], orn[1], orn[2]])
        base_frame.set_ang_vel(world_frame, ang_vel[0] * world_frame.x + ang_vel[1] * world_frame.y + ang_vel[2] *
                               world_frame.z)
        base_origin = world_origin.locatenew('Pb', pos[0] * world_frame.x + pos[1] * world_frame.y + pos[2] *
                                             world_frame.z)
        base_origin.set_vel(world_frame, lin_vel[0] * world_frame.x + lin_vel[1] * world_frame.y + lin_vel[2] *
                            world_frame.z)

        # inputs u(t) (applied torques)
        u = mechanics.dynamicsymbols('u:{}'.format(len(self.joints)))
        joint_id_u = 0

        # kinematics differential equations
        # NOTE(review): this covers len(self.joints) coordinates only; for a floating
        # base q/dq have extra base entries — confirm the floating-base path is exercised.
        kd_eqs = [q[i].diff(t) - dq[i] for i in range(len(self.joints))]

        # define useful lists/dicts for later
        bodies, loads = [], []
        frames = {base_id: (base_frame, base_origin)}
        # frames = {base_id: (worldFrame, worldOrigin)}

        # go through each joint/link (each link is associated to a joint)
        for link_id in range(self.num_links):
            # get useful information about joint/link kinematics and dynamics from simulator
            info = self.sim.get_dynamics_info(self.id, link_id)
            mass, local_inertia_diagonal = info[0], info[2]
            info = self.sim.get_link_state(self.id, link_id)
            local_inertial_frame_position, local_inertial_frame_orientation = info[2], info[3]
            # worldLinkFramePosition, worldLinkFrameOrientation = info[4], info[5]
            info = self.sim.get_joint_info(self.id, link_id)
            joint_name, joint_type = info[1:3]
            # jointDamping, jointFriction = info[6:8]
            link_name, joint_axis_in_local_frame, parent_frame_position, parent_frame_orientation, \
                parent_idx = info[-5:]
            xl, yl, zl = joint_axis_in_local_frame

            # get previous references
            parent_frame, parent_point = frames[parent_idx]

            # create a reference frame with its origin for each joint
            # set frame orientation
            if joint_type == self.sim.JOINT_REVOLUTE:
                R = get_matrix_from_quaternion(parent_frame_orientation)
                R1 = get_symbolic_matrix_from_axis_angle(joint_axis_in_local_frame, q[joint_id])
                R = R1.dot(R)
                frame = parent_frame.orientnew('F' + str(link_id), 'DCM', sympy.Matrix(R))
            else:
                x, y, z, w = parent_frame_orientation  # orientation of the joint in parent CoM inertial frame
                frame = parent_frame.orientnew('F' + str(link_id), 'Quaternion', [w, x, y, z])

            # set frame angular velocity
            ang_vel = 0
            if joint_type == self.sim.JOINT_REVOLUTE:
                ang_vel = dq[joint_id] * (xl * frame.x + yl * frame.y + zl * frame.z)
            frame.set_ang_vel(parent_frame, ang_vel)

            # create origin of the reference frame
            # set origin position
            x, y, z = parent_frame_position  # position of the joint in parent CoM inertial frame
            pos = x * parent_frame.x + y * parent_frame.y + z * parent_frame.z
            if joint_type == self.sim.JOINT_PRISMATIC:
                pos += q[joint_id] * (xl * frame.x + yl * frame.y + zl * frame.z)
            origin = parent_point.locatenew('P' + str(link_id), pos)

            # set origin velocity
            if joint_type == self.sim.JOINT_PRISMATIC:
                vel = dq[joint_id] * (xl * frame.x + yl * frame.y + zl * frame.z)
                origin.set_vel(world_frame, vel.express(world_frame))
            else:
                origin.v2pt_theory(parent_point, world_frame, parent_frame)

            # define CoM frame and position (and velocities) wrt the local link frame
            x, y, z, w = local_inertial_frame_orientation
            com_frame = frame.orientnew('Fc' + str(link_id), 'Quaternion', [w, x, y, z])
            com_frame.set_ang_vel(frame, mechanics.Vector(0))
            x, y, z = local_inertial_frame_position
            com = origin.locatenew('C' + str(link_id), x * frame.x + y * frame.y + z * frame.z)
            com.v2pt_theory(origin, world_frame, frame)

            # define com particle
            # com_particle = mechanics.Particle('Pa' + str(linkId), com, mass)
            # bodies.append(com_particle)

            # save
            # frames[linkId] = (frame, origin)
            # frames[linkId] = (frame, origin, com_frame, com)
            frames[link_id] = (com_frame, com)

            # define mass and inertia
            ixx, iyy, izz = local_inertia_diagonal
            inertia = mechanics.inertia(com_frame, ixx, iyy, izz, ixy=0, iyz=0, izx=0)
            inertia = (inertia, com)

            # define rigid body associated to frame
            body = mechanics.RigidBody(link_name, com, frame, mass, inertia)
            bodies.append(body)

            # define dynamical forces/torques acting on the body
            # gravity force applied on the CoM
            force = (com, - mass * g * world_frame.z)
            loads.append(force)

            # if prismatic joint, compute force
            if joint_type == self.sim.JOINT_PRISMATIC:
                force = (origin, u[joint_id_u] * (xl * frame.x + yl * frame.y + zl * frame.z))
                # force = (com, u[jointIdU] * (x * frame.x + y * frame.y + z * frame.z) - mass * g * worldFrame.z)
                loads.append(force)

            # if revolute joint, compute torque
            if joint_type == self.sim.JOINT_REVOLUTE:
                v = (xl * frame.x + yl * frame.y + zl * frame.z)
                # torqueOnPrevBody = (parentFrame, - u[jointIdU] * v)
                # Equal and opposite torques on the two bodies the joint connects.
                torque_on_prev_body = (parent_frame, - u[joint_id_u] * v)
                torque_on_curr_body = (frame, u[joint_id_u] * v)
                loads.append(torque_on_prev_body)
                loads.append(torque_on_curr_body)

            # if joint is not fixed increment the current joint id
            if joint_type != self.sim.JOINT_FIXED:
                joint_id += 1
                joint_id_u += 1

            if verbose:
                print("\nLink name with type: {} - {}".format(link_name, self.get_joint_types(joint_ids=link_id)))
                print("------------------------------------------------------")
                print("Position of joint frame wrt parent frame: {}".format(origin.pos_from(parent_point)))
                print("Orientation of joint frame wrt parent frame: {}".format(frame.dcm(parent_frame)))
                print("Linear velocity of joint frame wrt parent frame: {}".format(origin.vel(world_frame).express(parent_frame)))
                print("Angular velocity of joint frame wrt parent frame: {}".format(frame.ang_vel_in(parent_frame)))
                print("------------------------------------------------------")
                print("Position of joint frame wrt world frame: {}".format(origin.pos_from(world_origin)))
                print("Orientation of joint frame wrt world frame: {}".format(frame.dcm(world_frame).simplify()))
                print("Linear velocity of joint frame wrt world frame: {}".format(origin.vel(world_frame)))
                print("Angular velocity of joint frame wrt parent frame: {}".format(frame.ang_vel_in(world_frame)))
                print("------------------------------------------------------")
                # print("Local position of CoM wrt joint frame: {}".format(com.pos_from(origin)))
                # print("Local linear velocity of CoM wrt joint frame: {}".format(com.vel(worldFrame).express(frame)))
                # print("Local angular velocity of CoM wrt joint frame: {}".format(com_frame.ang_vel_in(frame)))
                # print("------------------------------------------------------")
                if joint_type == self.sim.JOINT_PRISMATIC:
                    print("Input value (force): {}".format(loads[-1]))
                elif joint_type == self.sim.JOINT_REVOLUTE:
                    print("Input value (torque on previous and current bodies): {} and {}".format(loads[-2], loads[-1]))
                print("")

        if verbose:
            print("Summary:")
            print("Generalized coordinates: {}".format(q))
            print("Generalized speeds: {}".format(dq))
            print("Inputs: {}".format(u))
            print("Kinematic differential equations: {}".format(kd_eqs))
            print("Bodies: {}".format(bodies))
            print("Loads: {}".format(loads))
            print("")

        # TODO: 1. account for external forces applied on different rigid-bodies (e.g. contact forces)
        # TODO: 2. account for constraints (e.g. holonomic, non-holonomic, etc.)

        # Get the Equation of Motion (EoM) using Kane's method
        kane = mechanics.KanesMethod(world_frame, q_ind=q, u_ind=dq, kd_eqs=kd_eqs)
        kane.kanes_equations(bodies=bodies, loads=loads)

        # get mass matrix and force vector (after simplifying) such that :math:`M(x,t) \dot{x} = f(x,t)`
        M = sympy.trigsimp(kane.mass_matrix_full)
        f = sympy.trigsimp(kane.forcing_full)
        # mechanics.find_dynamicsymbols(M)
        # mechanics.find_dynamicsymbols(f)

        # save useful info for future use (by other methods)
        constants = [g]
        constant_values = [9.81]
        parameters = (dict(zip(constants, constant_values)))
        self.symbols = {'q': q, 'dq': dq, 'kane': kane, 'parameters': parameters}

        # linearize
        # parameters = dict(zip(constants, constant_values))
        # M_, A_, B_, u_ = kane.linearize()
        # A_ = A_.subs(parameters)
        # B_ = B_.subs(parameters)
        # M_ = kane.mass_matrix_full.subs(parameters)
        # self.symbols = {'A_': A_, 'B_': B_, 'M_': M_}
        # return M_, A_, B_, u_

        return M, f

    def linearize_equations_of_motion(self, point=None, verbose=False):
        r"""
        Linearize the equation of motions around the given point (=state). That is, instead of having
        :math:`\dot{x} = f(x,u)` where :math:`f` is in general a non-linear function, it linearizes it around
        a certain point.

        .. math:: \dot{x} = A x + B u

        where :math:`x` is the state vector, :math:`u` is the control input vector, and :math:`A` and :math:`B` are
        the matrices.
        """
        # NOTE(review): self.symbols is only assigned inside
        # get_symbolic_equations_of_motion(); if the base class does not initialize it
        # to None, this check raises AttributeError on a fresh instance — confirm.
        if self.symbols is None:
            self.get_symbolic_equations_of_motion()

        if point is None:  # take current state
            point = list(self.get_joint_positions()) + list(self.get_joint_velocities())
        # Map each symbolic coordinate/speed to its numeric value at the operating point.
        point = dict(zip(self.symbols['q'] + self.symbols['dq'], point))

        kane, parameters = self.symbols['kane'], self.symbols['parameters']
        # linearizer = self.symbols['kane'].to_linearizer()
        # A, B = linearizer.linearize(op_point=[point, parameters], A_and_B=True)
        M_, A_, B_, u_ = kane.linearize()
        fA = A_.subs(parameters).subs(point)
        fB = B_.subs(parameters).subs(point)
        M = kane.mass_matrix_full.subs(parameters).subs(point)

        # compute A and B (solve out the mass matrix to get the standard state-space form)
        Minv = M.inv()
        A = np.array(Minv * fA).astype(np.float64)
        B = np.array(Minv * fB).astype(np.float64)

        if verbose:
            print("M_ = {}".format(M_))
            print("A_ = {}".format(A_))
            print("B_ = {}".format(B_))
            print("u_ = {}".format(u_))
            print("fA = {}".format(fA))
            print("fB = {}".format(fB))
            print("M = {}".format(M))
            print("inv(M) = {}".format(Minv))

        return A, B
import control
from scipy.linalg import solve_continuous_are
class LQR(object):
    r"""Linear Quadratic Regulator

    Type: Model-based (optimal control)

    LQR assumes that the dynamics are described by a set of linear differential equations, and a quadratic cost.
    That is, the dynamics can written as :math:`\dot{x} = A x + B u`, where :math:`x` is the state vector, and
    :math:`u` is the control vector, and the cost is given by:

    .. math:: J = x(T)^T F(T) x(T) + \int_0^T (x(t)^T Q x(t) + u(t)^T R u(t) + 2 x(t)^T N u(t)) dt

    where :math:`Q` and :math:`R` represents weight matrices which allows to specify the relative importance
    of each state/control variable. These are normally set by the user.

    The goal is to find the feedback control law :math:`u` that minimizes the above cost :math:`J`. Solving it
    gives us :math:`u = -K x`, where :math:`K = R^{-1} (B^T S + N^T)` with :math:`S` is found by solving the
    continuous time Riccati differential equation :math:`S A + A^T S - (S B + N) R^{-1} (B^T S + N^T) + Q = 0`.

    Thus, LQR requires thus the model/dynamics of the system to be given (i.e. :math:`A` and :math:`B`).
    If the dynamical system is described by a set of nonlinear differential equations, we first have to linearize
    them around fixed points.

    Time complexity: O(M^3) where M is the size of the state vector
    Note: A desired state xd can also be given to the system: u = -K (x - xd) (P control)

    See also:
        - `ilqr.py`: iterative LQR
        - `lqg.py`: LQG = LQR + LQE
        - `ilqg.py`: iterative LQG
    """

    def __init__(self, A, B, Q=None, R=None, N=None):
        """
        Initialize the LQR controller.

        Args:
            A (np.array): state dynamics matrix.
            B (np.array): input dynamics matrix.
            Q (np.array, None): state cost weight matrix; identity if None.
            R (np.array, None): control cost weight matrix; identity if None.
            N (np.array, None): state/control cross-term weight matrix, or None.

        Raises:
            ValueError: if the pair (A, B) is not controllable.
        """
        if not self.is_controllable(A, B):
            raise ValueError("The system is not controllable")
        self.A = A
        self.B = B
        if Q is None:
            Q = np.identity(A.shape[1])
        self.Q = Q
        if R is None:
            R = np.identity(B.shape[1])
        self.R = R
        self.N = N
        # Feedback gain; computed lazily on the first call to compute().
        self.K = None

    @staticmethod
    def is_controllable(A, B):
        """Return True if the controllability matrix [B, AB, A^2 B, ...] has full rank."""
        return np.linalg.matrix_rank(control.ctrb(A, B)) == A.shape[0]

    def get_riccati_solution(self):
        """Solve the continuous-time algebraic Riccati equation and return S."""
        S = solve_continuous_are(self.A, self.B, self.Q, self.R, s=self.N)
        return S

    def get_gain_k(self):
        """Return the optimal feedback gain K such that u = -K x minimizes the cost."""
        # Equivalent manual computation from the Riccati solution:
        # S = self.get_riccati_solution()
        # S1 = self.B.T.dot(S)
        # if self.N is not None: S1 += self.N.T
        # K = np.linalg.inv(self.R).dot(S1)
        if self.N is None:
            K, S, E = control.lqr(self.A, self.B, self.Q, self.R)
        else:
            K, S, E = control.lqr(self.A, self.B, self.Q, self.R, self.N)
        return K

    def compute(self, x, xd=None):
        """Return the control input u for the current state.

        Args:
            x (np.array): current state.
            xd (np.array, None): desired state; the origin is assumed if None.

        Returns:
            np.array: control input u = K (xd - x), i.e. u = -K x when xd is None.
        """
        if self.K is None:
            self.K = self.get_gain_k()
        if xd is None:
            # Bug fix: regulate towards the origin with u = -K x. The previous code
            # returned +K x (positive feedback), contradicting both the class
            # docstring and the xd branch below (K.dot(xd - x) gives -K x at xd = 0).
            return -self.K.dot(x)
        else:
            return self.K.dot(xd - x)
# Demo: balance a cart-pole at its upright equilibrium using an LQR gain.
if __name__ == "__main__":
    import numpy as np
    from itertools import count
    from pyrobolearn.simulators import Bullet
    from pyrobolearn.worlds import World

    # create the simulator and the world it hosts
    simulator = Bullet()
    world = World(simulator)

    # create the cart-pole robot and print its description
    num_links = 1
    robot = CartPole(simulator, num_links=num_links)
    robot.print_info()
    robot.get_symbolic_equations_of_motion()

    # linearize the dynamics around the equilibrium state [q, dq] = 0
    equilibrium = np.zeros((num_links + 1) * 2)
    A, B = robot.linearize_equations_of_motion(equilibrium)

    # compute the LQR feedback gain once, outside the control loop
    controller = LQR(A, B)
    gain = controller.get_gain_k()

    for _ in count():
        # feedback control: u = K (xd - x), with xd the equilibrium state
        state = np.concatenate((robot.get_joint_positions(), robot.get_joint_velocities()))
        torque = gain.dot(equilibrium - state)
        robot.set_joint_torques(torque[0], 0)
        print("U[0] = {}".format(torque[0]))
        # advance the simulation by one step
        world.step(sleep_dt=1./240)
| StarcoderdataPython |
5106463 | <filename>palo_alto_pan_os/komand_palo_alto_pan_os/actions/edit/action.py
import komand
from .schema import EditInput, EditOutput
from komand.exceptions import PluginException
# Custom imports below
class Edit(komand.Action):
    """Komand action that edits an existing object in PAN-OS via its XML API."""

    def __init__(self):
        # Bug fix: use an explicit class reference. `super(self.__class__, self)`
        # recurses infinitely as soon as this class is subclassed.
        super(Edit, self).__init__(
            name='edit',
            description='Edit an existing object',
            input=EditInput(),
            output=EditOutput())

    def run(self, params={}):
        """Run the edit action.

        Args:
            params (dict): expects "xpath" (location of the object to edit) and
                "element" (the replacement XML element).

        Returns:
            dict: {"response": <API response body>}.

        Raises:
            PluginException: if the API output lacks the "response" key.
        """
        xpath = params.get("xpath")
        element = params.get("element")
        output = self.connection.request.edit_(xpath=xpath, element=element)
        try:
            return {"response": output['response']}
        except KeyError:
            raise PluginException(cause='The output did not contain expected keys.',
                                  assistance='Contact support for help.',
                                  data=output)
| StarcoderdataPython |
394854 | import os
import json
from tornado import template
GRAPHIQL_VERSION = '0.7.1'
MARKUP_CONTENT_TYPES = [
'text/html', 'application/xhtml+xml', 'application/xml;q=0.9'
]
def request_wants_html(mimetypes):
    """Return True when *mimetypes* contains every markup content type."""
    return set(MARKUP_CONTENT_TYPES) <= set(mimetypes)
def should_render_graphiql(accept_header_mimetypes):
    """Return True if the Accept header asks for HTML, i.e. GraphiQL should be rendered."""
    # `True if x else False` was redundant: request_wants_html already returns a bool.
    return request_wants_html(accept_header_mimetypes)
class GraphiQLRenderer(object):
    """Renders the GraphiQL HTML page from the bundled tornado template."""

    @classmethod
    def render(cls, query, result, variables, operation_name):
        """Return the generated graphiql.html page, with all values JSON-encoded."""
        template_dir = os.path.join(os.path.dirname(__file__), 'templates')
        page = template.Loader(template_dir).load('graphiql.html')
        return page.generate(
            graphiql_version=GRAPHIQL_VERSION,
            query=json.dumps(query),
            result=json.dumps(result),
            variables=json.dumps(variables),
            operation_name=json.dumps(operation_name))
| StarcoderdataPython |
3293705 | <reponame>skostya64/Music_store_test
class CheckBassNamePage:
    """Selenium page object for musicstore.de: navigate to 4-string electric
    basses, filter by the 'Epiphone' brand, and verify each product's brand."""

    def __init__(self, driver):
        # Selenium WebDriver shared by all page interactions.
        self.driver = driver
    def click_menu_bass_guitars(self):
        """Open the top-level 'Bass' category from the main menu."""
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/cat-BASS']").click()
    def click_electric_bass_guitars(self):
        """Open the 'Electric Bass Guitars' sub-category."""
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/-/cat-BASS-BASEBASS']").click()
    def click_four_strings(self):
        """Narrow the listing to 4-string basses."""
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/4-/cat-BASS-BASEB4']").click()
    def select_name_brand(self):
        """Apply the manufacturer filter, selecting 'Epiphone'."""
        self.driver.find_element_by_xpath("//span[text() = 'Производитель']").click()
        self.driver.find_element_by_xpath("//span[@title = 'Epiphone']").click()
        self.driver.find_element_by_xpath("//span[@class = 'apply btn btn-ms-std btn-lg']").click()
    def check_name_brand_in_products(self):
        """Click each product tile and assert its brand label is 'Epiphone'."""
        # NOTE(review): the tile list is re-queried on every iteration, presumably
        # to avoid stale-element references after each click -- confirm before
        # simplifying this into a single find_elements call.
        for i in range(len(self.driver.find_elements_by_xpath("//div[@id = 'tile-product-BAS0008210-000']"))):
            self.driver.find_elements_by_xpath("//div[@id = 'tile-product-BAS0008210-000']")[i].click()
            brand_name = self.driver.find_element_by_xpath("//img[@title = 'Epiphone']").text
            assert brand_name == "Epiphone"
3520602 | <gh_stars>1-10
from typing import Optional, TYPE_CHECKING
from typing_extensions import TypedDict
if TYPE_CHECKING:
from checkov.common.models.enums import CheckResult
class _CheckResult(TypedDict, total=False):
    """Outcome record for a single executed check; all keys optional (total=False)."""
    result: "CheckResult"  # check outcome; forward reference, imported only under TYPE_CHECKING
    suppress_comment: str  # reason supplied when the result was suppressed
class _SkippedCheck(TypedDict, total=False):
    """Record describing a check that was skipped; all keys optional (total=False)."""
    bc_id: Optional[str]  # platform (Bridgecrew) check id, if any
    id: str  # local check id
    suppress_comment: str  # reason the check was skipped
| StarcoderdataPython |
6694976 | import random
def bogo_sort(arr):
    """Bogo Sort

    Repeatedly shuffles *arr* in place until it happens to come out in
    non-decreasing order, then returns it.

    Best Case Complexity: O(n)
    Worst Case Complexity: O(∞)
    Average Case Complexity: O(n(n-1)!)
    """
    def in_order(seq):
        # True when every element is <= its successor.
        return all(a <= b for a, b in zip(seq, seq[1:]))

    while not in_order(arr):
        random.shuffle(arr)
    return arr
| StarcoderdataPython |
6678855 | <filename>pyqtcli/makerc.py
import os
import subprocess
from pyqtcli import verbose as v
# Error message send by pyrcc5 when qrc file doesn't contain resources
NO_QRESOURCE = b"No resources in resource description.\n"
INVALID_QRC = b"pyrcc5 Parse Error:"
def generate_rc(qrc_files, verbose):
    """Generate python module to access qrc resources via pyrcc5 tool.

    Args:
        qrc_files (list or tuple): A tuple containing all paths to qrc files
            to process.
        verbose (bool): True if the user pass '-v' or '--verbose' option
            to see what's happening.

    Examples:
        This example will create two files: res_rc.py and qtc/another_res_rc.py

        >>> generate_rc(["res.qrc", "qrc/another_res.qrc"])
    """
    for qrc_path in qrc_files:
        # Derive the rc module name from the qrc file name.
        rc_path = os.path.splitext(qrc_path)[0] + "_rc.py"

        # Let pyrcc5 compile the qrc file into a python module.
        process = subprocess.run(["pyrcc5", qrc_path, "-o", rc_path],
                                 stderr=subprocess.PIPE)
        error = process.stderr

        # Qrc has no resources left -> pyrcc5 cannot produce an rc file.
        if error == NO_QRESOURCE:
            v.warning(
                ("{} has no more resources and cannot generates its "
                 "corresponding rc file.").format(qrc_path))
            continue

        # Malformed qrc file.
        if error.startswith(INVALID_QRC):
            v.warning("Qrc file: \'{}\' is not valid.".format(qrc_path))
            continue

        # Any other pyrcc5 error: relay it verbatim.
        if error:
            v.warning(error.decode("utf-8"))
            continue

        v.info("Python qrc file '{}' created.".format(rc_path), verbose)
| StarcoderdataPython |
1725367 | <reponame>projectweekend/aa<filename>aa/test_web_api.py
import datetime
import falcon
from falcon import testing
import pytest
from aa.app import create_application
from aa.unit.info import *
@pytest.fixture()
def client():
    """Falcon test client wrapping a fresh application instance."""
    return testing.TestClient(create_application())
def test_get_a_land_battle_template(client):
    """GET /land-battle returns a template whose unit counts are all zero."""
    result = client.simulate_get('/land-battle')
    # list(...) instead of a pass-through `[unit for unit in ...]` comprehension
    # (same elements, clearer intent).
    expected_attacker_keys = list(LAND_UNITS + AIR_UNITS + (CRUISER, BATTLESHIP))
    expected_defender_keys = list(LAND_UNITS + AIR_UNITS)
    for k, v in result.json['attacker'].items():
        assert k in expected_attacker_keys
        assert v == 0
    for k, v in result.json['defender'].items():
        assert k in expected_defender_keys
        assert v == 0
def test_simulate_land_battle(client):
    """POST /land-battle with 5 infantry per side yields win counts and float stats."""
    battle_config = client.simulate_get('/land-battle').json
    battle_config['attacker']['Infantry'] = 5
    battle_config['defender']['Infantry'] = 5
    result = client.simulate_post('/land-battle', json=battle_config)
    assert isinstance(result.json['wins']['attacker'], int)
    assert isinstance(result.json['wins']['defender'], int)
    assert isinstance(result.json['wins']['draw'], int)
    assert isinstance(result.json['stats']['attackers_remaining'], float)
    assert isinstance(result.json['stats']['defenders_remaining'], float)
    assert isinstance(result.json['stats']['attackers_ipc_lost'], float)
    assert isinstance(result.json['stats']['defenders_ipc_lost'], float)
def test_get_unit_info(client):
    """GET /unit-info returns land units matching the UNIT_INFO table."""
    result = client.simulate_get('/unit-info')
    for unit in result.json[LAND]:
        assert unit == UNIT_INFO[unit['name']]
def test_simulate_battle(client):
    """POST / with two valid armies returns integer win counts."""
    battle = {
        'attacker': {
            'Infantry': 5,
            'Tank': 5
        },
        'defender': {
            'Infantry': 10
        }
    }
    result = client.simulate_post('/', json=battle)
    assert isinstance(result.json['wins']['attacker'], int)
    assert isinstance(result.json['wins']['defender'], int)
    assert isinstance(result.json['wins']['draw'], int)
def test_simulate_battle_missing_army(client):
    """POST / without a defender army is rejected with HTTP 400."""
    battle_missing_army = {
        'attacker': {
            'Tank': 5
        }
    }
    result = client.simulate_post('/', json=battle_missing_army)
    assert result.status == falcon.HTTP_400
def test_simulate_battle_army_with_no_units(client):
    """POST / with two empty armies is rejected with HTTP 400."""
    battle_army_with_no_units = {
        'attacker': {},
        'defender': {}
    }
    result = client.simulate_post('/', json=battle_army_with_no_units)
    assert result.status == falcon.HTTP_400
def test_simulate_battle_invalid_unit_name(client):
    """POST / with an unknown unit name ('Paratrooper') is rejected with HTTP 400."""
    battle_invalid_unit_name = {
        'attacker': {
            'Tank': 5
        },
        'defender': {
            'Paratrooper': 5
        }
    }
    result = client.simulate_post('/', json=battle_invalid_unit_name)
    assert result.status == falcon.HTTP_400
def test_simulate_battle_invalid_unit_count(client):
    """POST / with a non-integer unit count is rejected with HTTP 400."""
    battle_invalid_unit_count = {
        'attacker': {
            'Tank': 5
        },
        'defender': {
            'Infantry': 'not int'
        }
    }
    result = client.simulate_post('/', json=battle_invalid_unit_count)
    assert result.status == falcon.HTTP_400
def test_ipctracker_create_valid(client):
    """POST /ipc-tracker creates a tracker and echoes it with ids and timestamp."""
    new_ipc_tracker = {
        'name': 'A&A Game',
        'starting_ipc': 30
    }
    result = client.simulate_post('/ipc-tracker', json=new_ipc_tracker)
    assert result.status == falcon.HTTP_201
    assert result.json['starting_ipc'] == 30
    assert result.json['name'] == 'A&A Game'
    # isinstance instead of `type(x) == T` (exact-type comparison anti-pattern);
    # also consistent with the other tests in this module.
    assert isinstance(result.json['game_id'], str)
    assert isinstance(result.json['created_at'], int)
| StarcoderdataPython |
8063362 | # 13. Recursive Power
# Create a recursive function called recursive_power() which should receive a number and a power.
# Using recursion return the result of number ** power. Submit only the function in the judge system.
def recursive_power(number, power):
    """Recursively compute number ** power.

    Generalized (backward-compatibly) to negative exponents using
    number ** -n == 1 / (number ** n); the original recursed forever
    for power < 0.

    Args:
        number: the base (int or float).
        power (int): the exponent.

    Returns:
        number raised to power.
    """
    if power < 0:
        # Negative exponent -> reciprocal of the positive power.
        return 1 / recursive_power(number, -power)
    if power == 0:
        return 1
    return number * recursive_power(number, power - 1)
# Demo: exercise recursive_power with a small and a large exponent.
print(recursive_power(2, 10))
print(recursive_power(10, 100))
| StarcoderdataPython |
5124480 | <gh_stars>0
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it.

    As the module docstring prescribes, the whole file is read at once and
    split on whitespace (the old per-line split(' ') left newlines and empty
    strings inside tokens), and the empty string '' is used as the word that
    comes before the first word in the file.
    """
    with open(filename, 'r') as f:
        words = f.read().split()

    mimic = {}
    prev = ''  # '' primes the chain: it "precedes" the first word
    for word in words:
        # Record `word` as a follower of the word that preceded it.
        mimic.setdefault(prev, []).append(word)
        # Ensure every word is a key, even the last one (possibly empty list).
        mimic.setdefault(word, [])
        prev = word
    return mimic
def print_mimic(mimic_dict, word):
    """Given mimic dict and start word, prints 200 random words.

    Starting from `word`, repeatedly emit the current word and pick a random
    follower from the dict. Per the module docstring, if we get stuck on a
    word with no known followers we restart from the empty string.
    (The old code crashed on dead ends -- KeyError or randint(0, -1) -- and
    printed 201 words instead of 200.)
    """
    words = []
    for _ in range(200):
        words.append(word)
        followers = mimic_dict.get(word)
        if followers:
            word = random.choice(followers)
        else:
            # Dead end (unknown word, or no recorded followers): restart.
            word = ''
    print(' '.join(words))
# Provided main(), calls mimic_dict() and print_mimic()
def main():
    """Read the file named on the command line and print mimic text."""
    if len(sys.argv) != 2:
        print('usage: python mimic.py file-to-read')
        sys.exit(1)
    # Renamed the local from `dict`, which shadowed the builtin.
    word_dict = mimic_dict(sys.argv[1])
    print_mimic(word_dict, '')
main()
| StarcoderdataPython |
9676875 | <filename>patches/droppedKey.py
from assembler import ASM
def fixDroppedKey(rom):
# Patch the rendering code to use the dropped key rendering code.
rom.patch(0x03, 0x1C99, None, ASM("""
ld a, $04
rst 8
jp $5CA6
"""))
# Patch the key pickup code to use the chest pickup code.
rom.patch(0x03, 0x248F, None, ASM("""
ldh a, [$F6] ; load room nr
cp $7C ; L4 Side-view room where the key drops
jr nz, notSpecialSideView
ld hl, $D969 ; status of the room above the side-view where the key drops in dungeon 4
set 4, [hl]
notSpecialSideView:
call $512A ; mark room as done
; Handle item effect
ld a, $02
rst 8
ldh a, [$F1] ; Load active sprite variant
cp $1A
jr z, isAKey
;Show message (if not a key)
ld a, $03
rst 8
isAKey:
ret
"""))
rom.patch(0x03, 0x24B7, "3E", "3E") # sanity check
# Mark all dropped keys as keys by default.
for n in range(0x316):
rom.banks[0x3E][0x3800 + n] = 0x1A
# Set the proper angler key by default
rom.banks[0x3E][0x3800 + 0x0CE] = 0x12
rom.banks[0x3E][0x3800 + 0x1F8] = 0x12
# Set the proper bird key by default
rom.banks[0x3E][0x3800 + 0x27A] = 0x14
# Set the proper face key by default
rom.banks[0x3E][0x3800 + 0x27F] = 0x13
# Set the proper hookshot key by default
rom.banks[0x3E][0x3800 + 0x180] = 0x03
# Set the proper golden leaves
rom.banks[0x3E][0x3800 + 0x058] = 0x15
rom.banks[0x3E][0x3800 + 0x05a] = 0x15
rom.banks[0x3E][0x3800 + 0x2d2] = 0x15
rom.banks[0x3E][0x3800 + 0x2c5] = 0x15
rom.banks[0x3E][0x3800 + 0x2c6] = 0x15
# Set the slime key drop.
rom.banks[0x3E][0x3800 + 0x0C6] = 0x0F
# Set the heart pieces
rom.banks[0x3E][0x3800 + 0x000] = 0x80
rom.banks[0x3E][0x3800 + 0x2A4] = 0x80
rom.banks[0x3E][0x3800 + 0x2B1] = 0x80 # fishing game, unused
rom.banks[0x3E][0x3800 + 0x044] = 0x80
rom.banks[0x3E][0x3800 + 0x2AB] = 0x80
rom.banks[0x3E][0x3800 + 0x2DF] = 0x80
rom.banks[0x3E][0x3800 + 0x2E5] = 0x80
rom.banks[0x3E][0x3800 + 0x078] = 0x80
rom.banks[0x3E][0x3800 + 0x2E6] = 0x80
rom.banks[0x3E][0x3800 + 0x1E8] = 0x80
rom.banks[0x3E][0x3800 + 0x1F2] = 0x80
rom.banks[0x3E][0x3800 + 0x2BA] = 0x80
# Set the seashells
rom.banks[0x3E][0x3800 + 0x0A3] = 0x20
rom.banks[0x3E][0x3800 + 0x2B2] = 0x20
rom.banks[0x3E][0x3800 + 0x0A5] = 0x20
rom.banks[0x3E][0x3800 + 0x0A6] = 0x20
rom.banks[0x3E][0x3800 + 0x08B] = 0x20
rom.banks[0x3E][0x3800 + 0x074] = 0x20
rom.banks[0x3E][0x3800 + 0x0A4] = 0x20
rom.banks[0x3E][0x3800 + 0x0D2] = 0x20
rom.banks[0x3E][0x3800 + 0x0E9] = 0x20
rom.banks[0x3E][0x3800 + 0x0B9] = 0x20
rom.banks[0x3E][0x3800 + 0x0F8] = 0x20
rom.banks[0x3E][0x3800 + 0x0A8] = 0x20
rom.banks[0x3E][0x3800 + 0x0FF] = 0x20
rom.banks[0x3E][0x3800 + 0x1E3] = 0x20
rom.banks[0x3E][0x3800 + 0x0DA] = 0x20
rom.banks[0x3E][0x3800 + 0x00C] = 0x20
| StarcoderdataPython |
182790 | """Implement of fnn layer"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from mxnet.gluon import nn
from mxnet import nd
import numpy as np
import mxnet as mx
class FeedForwardNetwork(nn.Block):
    """Fully connected feedforward network

    Transformer-style position-wise FFN: Dense(filter_size, relu) ->
    dropout (training only) -> Dense(hidden_size). When a padding mask is
    given, padded positions are removed before the dense layers and
    scattered back afterwards, so no compute is spent on padding.
    """
    def __init__(self, hidden_size, filter_size, relu_dropout, train, **kwargs):
        """
        Args:
            hidden_size: model dimension (input and output width).
            filter_size: width of the inner (ReLU) layer.
            relu_dropout: dropout parameter for the inner layer.
                NOTE(review): the Dropout block is built with rate
                (1.0 - relu_dropout), i.e. relu_dropout is treated as a
                keep-probability -- confirm this matches the caller's intent.
            train: if True, dropout is applied in forward().
        """
        super(FeedForwardNetwork, self).__init__(**kwargs)
        self.hidden_size = hidden_size
        self.filter_size = filter_size
        self.relu_dropout = relu_dropout
        self.train = train
        with self.name_scope():
            self.filter_dense_layer = nn.Dense(self.filter_size, activation='relu', use_bias=True, flatten=False)
            self.output_dense_layer = nn.Dense(self.hidden_size, use_bias=True, flatten=False)
            self.dropout = nn.Dropout(1.0 - self.relu_dropout)
    def forward(self, x, padding=None):
        """Apply the FFN.

        Args:
            x: input tensor, assumed shape (batch_size, length, hidden_size).
            padding: optional (batch_size, length) mask; entries ~0 mark
                real tokens, non-zero entries mark padding to be skipped.

        Returns:
            Tensor of the same shape as x.
        """
        ctx = x.context
        batch_size = x.shape[0]
        length = x.shape[1]
        if padding is not None:
            # Flattten padding to [batch_size * length]
            pad_mask = nd.reshape(padding, (-1))
            # Indices of non-padded positions (mask value ~0).
            nonpad_ids = nd.array(np.where(pad_mask.asnumpy() < 1e-9), ctx=ctx)
            # Reshape x to [batch_size*length, hidden_size] to remove padding
            x = nd.reshape(x, (-1, self.hidden_size))
            x = nd.gather_nd(x, indices=nonpad_ids)
            # Reshape x from 2 dimensions to 3 dimensions
            x = nd.expand_dims(x, axis=0)
        output = self.filter_dense_layer(x)
        if self.train:
            output = self.dropout(output)
        output = self.output_dense_layer(output)
        if padding is not None:
            # Scatter the computed rows back to their original positions;
            # padded positions are left as zeros.
            output = nd.squeeze(output, axis=0)
            output = nd.scatter_nd(data=output, indices=nonpad_ids, shape=(batch_size * length, self.hidden_size))
            output = nd.reshape(output, shape=(batch_size, length, self.hidden_size))
        return output
| StarcoderdataPython |
1702360 | <reponame>conroy-cheers/django-sites-multidb
from django.contrib import admin
from .forms import DBConfigForm
class DBConfigAdmin(admin.ModelAdmin):
    """Django admin configuration using the custom DBConfigForm for editing."""
    form = DBConfigForm
| StarcoderdataPython |
11219435 | <filename>phpide.py
import os
import sublime
import sys
import threading
if os.name == 'nt':
from ctypes import windll, create_unicode_buffer
class Prefs:
    """Static holder for this plugin's settings (PHPIDE.sublime-settings)."""
    @staticmethod
    def load():
        """(Re)load the settings; invoked once at import time below."""
        settings = sublime.load_settings('PHPIDE.sublime-settings')
        Prefs.plugins = settings.get('plugins', [])  # package names to ensure installed
        Prefs.debug = settings.get('debug', 0)       # debug verbosity flag
# Populate Prefs as soon as the module is imported.
Prefs.load()
def add_to_path(path):
    """Append *path* to sys.path, converting to a DOS 8.3 short path on Windows."""
    # Python 2.x on Windows can't properly import from non-ASCII paths, so
    # this code added the DOC 8.3 version of the lib folder to the path in
    # case the user's username includes non-ASCII characters
    if os.name == 'nt':
        buf = create_unicode_buffer(512)
        if windll.kernel32.GetShortPathNameW(path, buf, len(buf)):
            path = buf.value
    if path not in sys.path:
        sys.path.append(path)
# pull Package Control's files into our path
pc_folder = os.path.join(sublime.packages_path(), 'Package Control')
add_to_path(pc_folder)
# now we can load the Package Control code
# NOTE(review): chdir changes the process-wide cwd -- presumably needed for
# this import to resolve; confirm before removing.
os.chdir(pc_folder)
from package_control.package_manager import PackageManager
class PHPIDE(threading.Thread):
    """Background thread that installs any configured plugin packages
    missing from the local Package Control installation."""
    def __init__(self):
        self.manager = PackageManager()
        threading.Thread.__init__(self)
    def run(self):
        # Compare the configured plugin list against what is installed.
        installed_packages = self.manager.list_packages()
        for plugin in Prefs.plugins:
            print "- checking plugin " + plugin
            if not plugin in installed_packages:
                self.manager.install_package(plugin)
sublime.set_timeout(lambda: PHPIDE().start(), 3000)
| StarcoderdataPython |
1605497 | <reponame>chrisliatas/py4e_code
# Use words.txt as the file name
fname = input("Enter file name: ")
try:
    fh = open(fname)
except OSError:
    # Catch only file-opening errors; the previous bare `except:` also
    # swallowed KeyboardInterrupt and genuine bugs.
    print('File cannot be opened:', fname)
    quit()
# `with` guarantees the handle is closed (the original never closed it).
with fh:
    for line in fh:
        print(line.rstrip().upper())
| StarcoderdataPython |
64877 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-16 13:27
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the website_analytics app.

    Creates the analytics models (view/usage counters, plans, and two
    unmanaged database views). Do not hand-edit the operations below;
    create a follow-up migration for any schema change.
    """

    initial = True

    dependencies = [
        ('visualizer', '0011_visualization_data_source'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('service_builder', '0024_auto_20190716_1627'),
        ('dashboard_builder', '0014_auto_20190716_1627'),
        ('aggregator', '0041_auto_20190716_1627'),
    ]

    operations = [
        migrations.CreateModel(
            name='UniqueDashboardViewsView',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dashboard_id', models.IntegerField(default=1)),
                ('count', models.IntegerField(default=1)),
            ],
            options={
                'db_table': 'unique_dashboard_views_view',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='UniqueDatasetPreview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dataset_id', models.IntegerField(default=1)),
                ('count', models.IntegerField(default=1)),
            ],
            options={
                'db_table': 'unique_dataset_preview',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='UniqueServiceUsesView',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('service_id', models.IntegerField(default=1)),
                ('count', models.IntegerField(default=1)),
            ],
            options={
                'db_table': 'unique_service_uses_view',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='BDO_Plan',
            fields=[
                ('plan_name', models.TextField(primary_key=True, serialize=False)),
                ('plan_title', models.TextField(default='Untitled Plan')),
                ('query_limit', models.IntegerField(default=120, null=True)),
                ('price', models.FloatField(default=0, null=True)),
                ('access_to_beta_services', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='DashboardDisplays',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dash_display_count', models.IntegerField(default=1)),
                ('dashboard', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dashboard_displays_dashboard', to='dashboard_builder.Dashboard')),
            ],
        ),
        migrations.CreateModel(
            name='DashboardUniqueViews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dash_display_count', models.IntegerField(default=1)),
                ('dashboard', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dashboard_unique_views_dashboard', to='dashboard_builder.Dashboard')),
                ('dashboard_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dashboard_unique_views_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DatasetCombined',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('combination_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_combined_dataset', to='aggregator.Dataset')),
            ],
        ),
        migrations.CreateModel(
            name='DatasetExplored',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exploration_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_explored_dataset', to='aggregator.Dataset')),
            ],
        ),
        migrations.CreateModel(
            name='DatasetPageViews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('preview_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_page_views_dataset', to='aggregator.Dataset')),
            ],
        ),
        migrations.CreateModel(
            name='DatasetUniqueViews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('preview_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_unique_views_dataset', to='aggregator.Dataset')),
                ('dataset_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_unique_views_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DatasetUseInService',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('use_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_use_in_service_dataset', to='aggregator.Dataset')),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_use_in_service_service', to='service_builder.Service')),
            ],
        ),
        migrations.CreateModel(
            name='DatasetUseInVisualisation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('viz_use_count', models.IntegerField(default=1)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_dataset_use_in_visualisation_dataset', to='aggregator.Dataset')),
            ],
        ),
        migrations.CreateModel(
            name='MareProtectionService',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('scenario', models.IntegerField(default=1)),
                ('simulation_length', models.IntegerField(default=24)),
                ('time_interval', models.IntegerField(default=2)),
                ('ocean_circulation_model', models.CharField(default='Poseidon High Resolution Aegean Model', max_length=100)),
                ('wave_model', models.CharField(default='Poseidon WAM Cycle 4 for the Aegean', max_length=100)),
                ('natura_layer', models.BooleanField(default=False)),
                ('ais_layer', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='ServicePerUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('service_runs', models.IntegerField(default=1)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_per_user_service', to='service_builder.Service')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_per_user_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ServiceUse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('serv_use_count', models.IntegerField(default=1)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_service_use_service', to='service_builder.Service')),
            ],
        ),
        migrations.CreateModel(
            name='ServiceUsers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('serv_use_count', models.IntegerField(default=1)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_service_users_service', to='service_builder.Service')),
                ('service_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_service_users_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserPlans',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_start', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): default frozen at makemigrations time (then "now
                # + 30 days"); harmless inside a migration, but the model itself
                # should use a callable default.
                ('date_end', models.DateTimeField(default=datetime.datetime(2019, 8, 15, 16, 27, 30, 138000))),
                ('active', models.BooleanField(default=True)),
                ('auto_renewal', models.BooleanField(default=True)),
                ('query_count', models.IntegerField(default=0)),
                ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plan_plan', to='website_analytics.BDO_Plan')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plan_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='VisualisationTypeUses',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('viz_use_count', models.IntegerField(default=1)),
                ('visualisation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_visualisation_type_uses_visualisation', to='visualizer.Visualization')),
            ],
        ),
        migrations.CreateModel(
            name='WaveEnergyResourceAssessment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_nester_statistics_dataset', to='aggregator.Dataset')),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='analytics_nester_statistics_service', to='service_builder.Service')),
            ],
        ),
    ]
| StarcoderdataPython |
3312923 | import sys,re,os,urllib,urllib2,base64,syslog,socket
# Author: <NAME>
# Website: www.williamlam.com
# Product: VMware ESXi
# Description: Python script to call into vSphere MOB to add ESXi host to VC
# Reference: http://www.williamlam.com/2011/03/how-to-automatically-add-esxi-host-to.html
# vCenter server
vcenter_server = "vcenter51-1.primp-industries.com"
# vCenter Cluster path
cluster = "datacenter/host/cluster"
# vCenter credentials using encoded base64 password
vc_username = "vcjoin"
vc_encodedpassword = "<PASSWORD>=="
vc_password = <PASSWORD>64.b64decode(vc_encodedpassword)
# ESX(i) credentials using encoded base64 password
host_username = "root"
host_encodedpasssword = "<PASSWORD>"
host_password = base64.b64decode(host_encodedpasssword)
### DO NOT EDIT PAST HERE ###
# vCenter mob URL for findByInventoryPath
url = "https://" + vcenter_server + "/mob/?moid=SearchIndex&method=findByInventoryPath"
# Create global variables
global passman,authhandler,opener,req,page,page_content,nonce,headers,cookie,params,e_params,syslogGhetto,clusterMoRef
# syslog key for eaiser troubleshooting
syslogGhetto = 'GHETTO-JOIN-VC'
syslog.syslog(syslogGhetto + ' Starting joinvCenter process - ' + url)
# Code to build opener with HTTP Basic Authentication
try:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None,url,vc_username,vc_password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
except IOError, e:
opener.close()
syslog.syslog(syslogGhetto + ' Failed HTTP Basic Authentication!')
sys.exit(1)
else:
syslog.syslog(syslogGhetto + ' Succesfully built HTTP Basic Authentication')
# Code to capture required page data and cookie required for post back to meet CSRF requirements
# Thanks to user klich - http://communities.vmware.com/message/1722582#1722582
try:
req = urllib2.Request(url)
page = urllib2.urlopen(req)
page_content= page.read()
except IOError, e:
opener.close()
syslog.syslog(syslogGhetto + ' Failed to retrieve MOB data -> ' + str(e.args))
sys.exit(1)
else:
syslog.syslog(syslogGhetto + ' Succesfully requested MOB data')
# regex to get the vmware-session-nonce value from the hidden form entry
reg = re.compile('name="vmware-session-nonce" type="hidden" value="?([^\s^"]+)"')
nonce = reg.search(page_content).group(1)
# get the page headers to capture the cookie
headers = page.info()
cookie = headers.get("Set-Cookie")
# Code to search for vCenter Cluster
params = {'vmware-session-nonce':nonce,'inventoryPath':cluster}
e_params = urllib.urlencode(params)
req = urllib2.Request(url, e_params, headers={"Cookie":cookie})
page = urllib2.urlopen(req).read()
clusterMoRef = re.search('domain-c[0-9]*',page)
if clusterMoRef:
syslog.syslog(syslogGhetto + ' Succesfully located cluster "' + cluster + '"!')
else:
opener.close()
syslog.syslog(syslogGhetto + ' Failed to find cluster "' + cluster + '"!')
sys.exit(1)
# Code to compute SHA1 hash
cmd = "openssl x509 -sha1 -in /etc/vmware/ssl/rui.crt -noout -fingerprint"
tmp = os.popen(cmd)
tmp_sha1 = tmp.readline()
tmp.close()
s1 = re.split('=',tmp_sha1)
s2 = s1[1]
s3 = re.split('\n', s2)
sha1 = s3[0]
if sha1:
syslog.syslog(syslogGhetto + ' Succesfully computed SHA1 hash: "' + sha1 + '"!')
else:
opener.close()
syslog.syslog(syslogGhetto + ' Failed to compute SHA1 hash!')
sys.exit(1)
# Code to create ConnectHostSpec
xml = '<spec xsi:type="HostConnectSpec"><hostName>%hostname</hostName><sslThumbprint>%sha</sslThumbprint><userName>%user</userName><password><PASSWORD></password><force>1</force></spec>'
# Code to extract IP Address to perform DNS lookup to add FQDN to vCenter
hostip = socket.gethostbyname(socket.gethostname())
if hostip:
syslog.syslog(syslogGhetto + ' Successfully extracted IP Address ' + hostip.strip())
else:
opener.close()
syslog.syslog(syslogGhetto + ' Failed to extract IP Address!')
sys.exit(1)
try:
host = socket.getnameinfo((hostip, 0), 0)[0]
except IOError, e:
syslog.syslog(syslogGhetto + ' Failed to perform DNS lookup for ' + hostipt.strip())
sys.exit(1)
else:
syslog.syslog(syslogGhetto + ' Successfully performed DNS lookup for ' + hostip.strip() + ' is ' + host)
xml = xml.replace("%hostname",host)
xml = xml.replace("%sha",sha1)
xml = xml.replace("%user",host_username)
xml = xml.replace("%pass",host_password)
# Code to join host to vCenter Cluster
try:
url = "https://" + vcenter_server + "/mob/?moid=" + clusterMoRef.group() + "&method=addHost"
params = {'vmware-session-nonce':nonce,'spec':xml,'asConnected':'1','resourcePool':'','license':''}
syslog.syslog(syslogGhetto + ' ' + url)
e_params = urllib.urlencode(params)
req = urllib2.Request(url, e_params, headers={"Cookie":cookie})
page = urllib2.urlopen(req).read()
except IOError, e:
opener.close()
syslog.syslog(syslogGhetto + ' Failed to join vCenter!')
syslog.syslog(syslogGhetto + ' HOSTNAME: ' + host)
syslog.syslog(syslogGhetto + ' USERNAME: ' + host_username)
#syslog.syslog(syslogGhetto + ' PASSWORD: ' + host_password)
sys.exit(1)
else:
syslog.syslog(syslogGhetto + ' Succesfully joined vCenter!')
syslog.syslog(syslogGhetto + ' Logging off vCenter')
url = "https://" + vcenter_server + "/mob/?moid=SessionManager&method=logout"
params = {'vmware-session-nonce':nonce}
e_params = urllib.urlencode(params)
req = urllib2.Request(url, e_params, headers={"Cookie":cookie})
page = urllib2.urlopen(req).read()
sys.exit(0)
| StarcoderdataPython |
133549 | <filename>moabb/pipelines/__init__.py
"""
Pipeline defines all steps required by an algorithm to obtain predictions.
Pipelines are typically a chain of sklearn compatible transformers and end
with an sklearn compatible estimator.
"""
| StarcoderdataPython |
8064049 | <filename>document_similarity.py
from scipy import spatial
from preprocess import pre_process
from inference import get_inference_vector
import re
def get_document_similarity(doc_0, doc_1):
    """Return the cosine similarity (1 - cosine distance) between two
    documents, each embedded via the shared inference model."""

    def vectorize(document):
        # Split into sentences, preprocess each, flatten the token lists and
        # embed the resulting flat token sequence.
        sentences = document.split(".")
        processed = pre_process(sentences)
        tokens = [token for sentence in processed for token in sentence]
        return get_inference_vector(tokens)

    return 1 - spatial.distance.cosine(vectorize(doc_0), vectorize(doc_1))
| StarcoderdataPython |
11213557 | <filename>refinery/units/strings/cupper.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from refinery.units import Unit
class cupper(Unit):
    """
    Transforms the input data to uppercase.
    """

    def process(self, data):
        # bytes.upper() uppercases ASCII letters and leaves every other byte
        # of the buffer untouched.
        uppercased = data.upper()
        return uppercased
| StarcoderdataPython |
202644 | from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.optimizers import SGD
from PIL import Image
from keras.preprocessing.image import img_to_array
import numpy as np
class VGG16FeatureExtractor:
    """Pretrained VGG16 wrapper for top-3 classification and L2-normalized
    feature extraction from image files.

    NOTE(review): with include_top=True the "feature" returned by extract()
    is the 1000-way softmax output — confirm this is intended rather than an
    fc-layer embedding.
    """

    # Overwritten per instance in __init__; kept for interface compatibility.
    model = None

    def __init__(self):
        self.model = VGG16(include_top=True, weights='imagenet')
        self.model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])

    def _prepare_batch(self, filename):
        """Load *filename*, resize it to VGG16's 224x224 input and return a
        preprocessed batch of shape (1, 224, 224, 3)."""
        img = Image.open(filename)
        img = img.resize((224, 224), Image.ANTIALIAS)
        batch = img_to_array(img)
        batch = np.expand_dims(batch, axis=0)
        return preprocess_input(batch)

    def predict(self, filename):
        """Return the top-3 decoded (class_id, name, probability) predictions."""
        # Refactor: the load/resize/preprocess pipeline was duplicated in
        # predict() and extract(); both now share _prepare_batch().
        output = decode_predictions(self.model.predict(self._prepare_batch(filename)), top=3)
        return output[0]

    def extract(self, filename):
        """Return the unit-norm prediction vector for *filename*."""
        feature = self.model.predict(self._prepare_batch(filename))[0]
        return feature / np.linalg.norm(feature)

    def extract_all(self, filenames):
        """Extract a feature vector for every file, preserving input order."""
        return [self.extract(filename) for filename in filenames]

    def run_test(self):
        """Smoke test against a bundled sample image."""
        print(self.extract('./data/images/Pokemon1.jpg'))
def main():
    """Instantiate the extractor and run its smoke test."""
    extractor = VGG16FeatureExtractor()
    extractor.run_test()
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6607254 | <filename>MessageEvents.py
#!/usr/bin/env python3
from enum import Enum
"""
Enumeration that represents the events that the server can receive
"""
class MessageEvents(Enum):
    """Enumeration of the events the chat server can receive."""

    IDENTIFY = 1
    STATUS = 2
    USERS = 3
    MESSAGE = 4
    PUBLICMESSAGE = 5
    CREATEROOM = 6
    INVITE = 7
    JOINROOM = 8
    ROOMESSAGE = 9
    DISCONNECT = 10

    def __str__(self):
        # Every event renders as its own name followed by exactly one
        # trailing space — the same output the original per-event lookup
        # table produced for each member.
        return self.name + " "
"""
Returns a message with the correct syntax for all events
"""
def validList():
stringMessage = '''Sintaxis invalida, la sintaxis de los mensajes es:\n
\t IDENTIFY nuevoNombre\n
\t STATUS status\n
\t USERS\n
\t MESSAGE destinatario mensaje\n
\t PUBLICMESSAGE mensaje\n
\t CREATEROOM nombreSala\n
\t INVITE nombreSala usuario1 usuario2...\n
\t JOINROOM nombreSala\n
\t ROOMESSAGE nombreSala mensaje\n
\t DISCONNECT'''
return stringMessage
| StarcoderdataPython |
8083277 | import re
import numpy as np
import json
from decimal import Decimal
from fuzzywuzzy import fuzz
from flask import Flask, request, jsonify
from weighted_levenshtein import lev, osa, dam_lev
app = Flask(__name__)
@app.route("/get-receipt-info", methods=["GET"])
def get_receipt_info():
receipt = request.data.decode('utf-8')
print (type(receipt))
store = get_store(receipt)
date = get_date(receipt)
items = get_items(receipt)
total = get_total(receipt)
return jsonify(date=date, items=items, store=store, total=total, unknowns=[])
def get_store(receipt):
    """Extract the store address from the receipt HTML.

    Returns the address string, or None when no address line is found or it
    cannot be parsed.
    """
    # The address sits on the first centered Courier paragraph.
    marker = (
        "<p class=\"MsoNormal\" align=\"center\" style=\"text-align:center\">"
        "<span style=\"font-size:10.0pt;font-family:Courier\"><"
    )
    lines = receipt.splitlines()
    curr = 0
    # FIX: bound the scan — the original loop indexed past the end of
    # `lines` and raised IndexError before its own "no address" guard
    # could ever run.
    while curr < len(lines) and marker not in lines[curr]:
        curr += 1
    if curr == len(lines):
        return None
    anchors = re.findall(r'>(?:[A-Za-z]|\d|\s|\.)+</a>', lines[curr])
    if not anchors:
        return None
    # Strip the leading '>' and the trailing '</a>'.
    address = anchors[0][1:-4]
    curr += 1
    # The second address line runs up to the first <u> tag, when present.
    if curr < len(lines):
        rest = re.findall(r'(.*?)<u>', lines[curr])
        if rest:
            address += " " + rest[0]
    return address
def get_date(receipt):
    """Return the first MM/DD/YYYY date appearing in the receipt text."""
    dates = re.findall(r'\d\d\/\d\d\/\d\d\d\d', receipt)
    return dates[0]
def get_total(receipt):
    """Return the first dollar amount of the form $d.dd or $dd.dd."""
    amounts = re.findall(r'\$\d?\d\.\d\d', receipt)
    return amounts[0]
def get_items(receipt):
    """Parse the line items from the receipt HTML.

    Returns a list of item-name strings, or None when no item section is
    found. "Promotion" lines are folded into the previous item's price and
    "You Saved" summary lines are skipped. (Debug prints from the original
    were removed.)
    """
    lines = receipt.splitlines()
    curr = 0
    # FIX: bound the scan — the original loop indexed past the end of
    # `lines` and raised IndexError before its own "no items" guard ran.
    while curr < len(lines) and not lines[curr].startswith("<pre style=\"text-align:center\">"):
        curr += 1
    if curr == len(lines):
        return None
    # Each entry is [name, price]; only names are returned.
    items = []
    while curr < len(lines) and "Order Total" not in lines[curr]:
        line = lines[curr]
        if "Promotion" in line:
            # Subtract the promotion amount from the previous item's price.
            promo_amounts = re.findall(r'(?:\d?\d?\d\.\d\d)', line)
            if promo_amounts and items:
                promotion = Decimal(promo_amounts[0])
                items[-1][1] = str(Decimal(items[-1][1]) - promotion)
        elif "You Saved" not in line:
            item = re.findall(r'(?:[A-Z]|\d)(?:[A-Z]|\s|\d|\.|%)+', line)
            if len(item) == 1 and curr + 1 < len(lines):
                # Name and price sit on separate lines; take the price from
                # the next line.
                curr += 1
                prices = re.findall(r'(?:\d?\d?\d\.\d\d)', lines[curr])
                if prices:
                    item.append(prices[-1])
                    items.append(item)
            elif len(item) >= 2:
                items.append(item[:2])
            # FIX: lines yielding no matches (e.g. the bare <pre> tag) used
            # to be appended as empty lists, crashing the name loop below.
        curr += 1
    return [item[0] for item in items]
# work will be done here
def translate_items(items):
    """Map each raw [name, price] pair to the closest known product name.

    (A weighted-Levenshtein variant exists as find_closest_string_weighted.)
    """
    return [find_closest_string(entry[0]) for entry in items]
def find_closest_string(string):
    """Return the product line (lowercased, newline included) from
    resources/Products that best fuzzy-matches *string* by fuzz.ratio.

    Returns None when the products file is empty.
    """
    closest_ratio = None
    closest_string = None
    # FIX: use a context manager — the original leaked the file handle.
    with open("resources/Products") as products:
        for line in products:
            distance = fuzz.ratio(string, line.lower())
            if closest_ratio is None or distance > closest_ratio:
                closest_ratio = distance
                closest_string = line.lower()
    return closest_string
def find_closest_string_weighted(string):
    """Return the product name minimizing a weighted OSA edit distance to
    *string*, searching only buckets that share its first letter.

    Costs are asymmetric (insert 0.3, transpose 0.7, delete 1.2), presumably
    tuned for OCR artifacts — confirm against the training notes.
    Returns None when no candidate bucket matches.
    """
    with open('../approaches/edit_distance/cleaned_bucket_data.json', encoding="ASCII") as f:
        data = json.load(f)
    # Gather candidate products from buckets keyed by the first letter.
    first_letter = string[0]
    products = []
    for bucket in data:
        if bucket[0][0] == first_letter:
            products += bucket
    # Drop non-ASCII characters: the cost matrices below are indexed by
    # character code and only cover codes 0-127.
    # FIX: the original kept ord == 128, which would overflow the
    # 128-entry cost arrays.
    cleaned_products = []
    for entry in products:
        cleaned_entry = ""
        for character in entry:
            if ord(character) < 128:
                cleaned_entry += character
        cleaned_products.append(cleaned_entry)
    insert_costs = np.full(128, .3, dtype=np.float64)
    transpose_costs = np.full((128, 128), .7, dtype=np.float64)
    delete_costs = np.full(128, 1.2, dtype=np.float64)
    # FIX: the best-so-far distance was initialized to 999999 while the loop
    # tested "is None", leaving the sentinel dead; use None consistently.
    closest_distance = None
    closest_string = None
    for line in cleaned_products:
        distance = osa(string.lower(), line.lower(), insert_costs=insert_costs, transpose_costs=transpose_costs, delete_costs=delete_costs)
        if closest_distance is None or distance < closest_distance:
            closest_distance = distance
            closest_string = line.lower()
    return closest_string
if __name__ == "__main__":
app.run(debug=True, use_reloader=False, port=8080) | StarcoderdataPython |
4994744 | <reponame>donghun2018/adclick-simulator<filename>sim_lib.py
"""
Simulator library
<NAME> 2018
"""
import pickle
import numpy as np
from policy_loader import get_pols, get_puids
def load_auction_p(fname):
    """
    loads auction output from a file
    :param fname: usually, auction_???.p
    :return: auct. This can be fed to simulator through read_in_auction method
    """
    # FIX: use a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open(fname, "rb") as f:
        return pickle.load(f)
def load_policies(all_attrs, possible_bids, max_T):
    """
    Instantiate every policy class discovered by policy_loader.

    :return: (policy object list, policy-unique-id (puid) string list)
    """
    instantiated = [policy_cls(all_attrs, possible_bids, max_T) for policy_cls in get_pols()]
    return instantiated, get_puids()
def get_click_prob(theta, bid):
    """
    Logistic click-probability model.

    :param theta: dict with keys 'a', 'bid', '0', and 'max_click_prob'
    :param bid: converted to float before use
    :return: probability of a click, scaled by theta['max_click_prob']
    """
    # TODO (carried over from the original): reconsider whether the bid term
    # belongs here and whether a more robust link function should be used.
    logit = theta['a'] + theta['bid'] * float(bid) + theta['0']
    return theta['max_click_prob'] / (1 + np.exp(-logit))
def compute_second_price_cost(bids, size=1):
    """Return the second-highest distinct bid, repeated *size* times.

    When all bids are equal there is no distinct runner-up, so the cost is
    that single bid value.
    """
    distinct = sorted(set(bids))
    cost = distinct[-2] if len(distinct) >= 2 else distinct[0]
    return [cost] * size
def _compute_actual_second_price_cost(bid, sorted_unique_bid_list):
"""
:param bid: bid price
:param sorted_unique_bid_list: MUST BE SORTED AND UNIQUE (increasing order)
must have bid as its element
:return: second price cost (if lowest, then itself)
"""
ix = sorted_unique_bid_list.index(bid)
cost_ix = 0 if ix == 0 else ix - 1
return sorted_unique_bid_list[cost_ix]
def max_ix(l):
    """Return every index of *l* whose element equals the maximum value."""
    peak = max(l)
    return [ix for ix, item in enumerate(l) if item == peak]
def top_K_max(l, K=1, prng=None):
    """Return the K largest elements of *l* and their indices.

    :param l: list
    :param K: int
    :param prng: numpy-compatible PRNG (e.g. np.random.RandomState());
        a fresh unseeded one is created when omitted
    :return: (length-K list of values, length-K list of their indices in l);
        ties are broken randomly via prng.shuffle
    """
    if prng is None:
        prng = np.random.RandomState()
    chosen = []
    # Walk the distinct values from largest to smallest, shuffling the tied
    # indices of each value before taking them.
    for value in sorted(set(l), reverse=True):
        tied = [ix for ix, item in enumerate(l) if item == value]
        prng.shuffle(tied)
        chosen.extend(tied)
        if len(chosen) > K:
            break
    top_indices = chosen[:K]
    top_values = [l[i] for i in top_indices]
    return top_values, top_indices
| StarcoderdataPython |
4998771 | <reponame>tsabi/modoboa-stats
"""Modoboa stats forms."""
import rrdtool
from pkg_resources import parse_version
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django import forms
from modoboa.lib import form_utils
from modoboa.parameters import forms as param_forms
class ParametersForm(param_forms.AdminParametersForm):
    """Stats global parameters."""

    # Parameter group this admin form belongs to.
    app = "modoboa_stats"

    # Visual separator rendered above the fields; field declaration order is
    # the display order, so keep it.
    general_sep = form_utils.SeparatorField(label=ugettext_lazy("General"))

    # Mail log file parsed to collect statistics.
    logfile = forms.CharField(
        label=ugettext_lazy("Path to the log file"),
        initial="/var/log/mail.log",
        help_text=ugettext_lazy("Path to log file used to collect statistics"),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )

    # Storage location for the generated RRD files.
    rrd_rootdir = forms.CharField(
        label=ugettext_lazy("Directory to store RRD files"),
        initial="/tmp/modoboa",
        help_text=ugettext_lazy(
            "Path to directory where RRD files are stored"),
        widget=forms.TextInput(attrs={"class": "form-control"})
    )

    # Optional: distinguish greylisting (soft rejects) from hard rejects.
    greylist = form_utils.YesNoField(
        label=ugettext_lazy("Show greylisted messages"),
        initial=False,
        help_text=ugettext_lazy(
            "Differentiate between hard and soft rejects (greylisting)")
    )

    def __init__(self, *args, **kwargs):
        """Check RRDtool version."""
        super(ParametersForm, self).__init__(*args, **kwargs)
        rrd_version = parse_version(rrdtool.lib_version())
        required_version = parse_version("1.6.0")
        test_mode = getattr(settings, "RRDTOOL_TEST_MODE", False)
        # The greylist option needs rrdtool >= 1.6.0; hide it on older
        # installations (unless the test suite forces it on).
        if rrd_version < required_version and not test_mode:
            del self.fields["greylist"]
| StarcoderdataPython |
3392963 | # Generated by Django 3.2.6 on 2021-09-15 13:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: updates Ukrainian verbose names on
    the order models and re-declares the Order.status foreign key and the
    StatusOrder.name choices. Do not hand-edit applied migrations."""

    dependencies = [
        ('order', '0005_auto_20210915_1129'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='order',
            options={'verbose_name': 'Наказ', 'verbose_name_plural': 'Накази'},
        ),
        migrations.AlterModelOptions(
            name='statusorder',
            options={'verbose_name': 'Статус', 'verbose_name_plural': 'Статуси'},
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.statusorder', verbose_name='статус'),
        ),
        migrations.AlterField(
            model_name='statusorder',
            name='name',
            field=models.CharField(choices=[('APP', 'Appointment'), ('MOV', 'Moving'), ('RA', 'Rank')], max_length=512, verbose_name="Ім'я"),
        ),
    ]
| StarcoderdataPython |
3545339 | <reponame>stroxler/bottleneck
from bottleneck.slow.func import *
from bottleneck.slow.move import *
| StarcoderdataPython |
4888643 | <reponame>bryanforbes/poetry-core
# -*- coding: utf-8 -*-
import ast
import shutil
import tarfile
from email.parser import Parser
import pytest
from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.package_include import PackageInclude
from poetry.core.packages import Package
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.vcs_dependency import VCSDependency
from poetry.core.utils._compat import Path
from poetry.core.utils._compat import encode
from poetry.core.utils._compat import to_str
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(autouse=True)
def setup():
    """Remove fixture dist/ directories before and after every test."""
    clear_samples_dist()

    yield

    clear_samples_dist()
def clear_samples_dist():
    """Delete every dist/ directory created under the fixture projects."""
    for dist in fixtures_dir.glob("**/dist"):
        if dist.is_dir():
            shutil.rmtree(str(dist))
def project(name):
    """Return the path of the named fixture project."""
    return fixtures_dir / name
def test_convert_dependencies():
    """convert_dependencies should render plain, extra and python-version
    constrained dependencies into (main, extras) requirement lists."""
    package = Package("foo", "1.2.3")
    result = SdistBuilder.convert_dependencies(
        package,
        [
            Dependency("A", "^1.0"),
            Dependency("B", "~1.0"),
            Dependency("C", "1.2.3"),
            VCSDependency("D", "git", "https://github.com/sdispater/d.git"),
        ],
    )
    main = [
        "A>=1.0,<2.0",
        "B>=1.0,<1.1",
        "C==1.2.3",
        "D @ git+https://github.com/sdispater/d.git@master",
    ]
    extras = {}

    assert result == (main, extras)

    # Optional dependencies listed in package.extras land in extras.
    package = Package("foo", "1.2.3")
    package.extras = {"bar": [Dependency("A", "*")]}

    result = SdistBuilder.convert_dependencies(
        package,
        [
            Dependency("A", ">=1.2", optional=True),
            Dependency("B", "~1.0"),
            Dependency("C", "1.2.3"),
        ],
    )
    main = ["B>=1.0,<1.1", "C==1.2.3"]
    extras = {"bar": ["A>=1.2"]}

    assert result == (main, extras)

    # Python-version restricted dependencies become marker-keyed extras.
    c = Dependency("C", "1.2.3")
    c.python_versions = "~2.7 || ^3.6"
    d = Dependency("D", "3.4.5", optional=True)
    d.python_versions = "~2.7 || ^3.4"

    package.extras = {"baz": [Dependency("D", "*")]}

    result = SdistBuilder.convert_dependencies(
        package,
        [Dependency("A", ">=1.2", optional=True), Dependency("B", "~1.0"), c, d],
    )
    main = ["B>=1.0,<1.1"]

    extra_python = (
        ':python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.6" and python_version < "4.0"'
    )
    extra_d_dependency = (
        'baz:python_version >= "2.7" and python_version < "2.8" '
        'or python_version >= "3.4" and python_version < "4.0"'
    )
    extras = {extra_python: ["C==1.2.3"], extra_d_dependency: ["D==3.4.5"]}

    assert result == (main, extras)
def test_make_setup():
    """The generated setup.py for the "complete" fixture exposes the expected
    packages, requirements, entry points and extras."""
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    # Keep only the top-level assignments and execute them so the values
    # passed to setup() can be inspected without importing anything.
    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["packages"] == [
        "my_package",
        "my_package.sub_pkg1",
        "my_package.sub_pkg2",
        "my_package.sub_pkg3",
    ]
    assert ns["install_requires"] == ["cachy[msgpack]>=0.2.0,<0.3.0", "cleo>=0.6,<0.7"]
    assert ns["entry_points"] == {
        "console_scripts": [
            "extra-script = my_package.extra:main[time]",
            "my-2nd-script = my_package:main2",
            "my-script = my_package:main",
        ]
    }
    assert ns["extras_require"] == {
        'time:python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"': [
            "pendulum>=1.4,<2.0"
        ]
    }
def test_make_pkg_info(mocker):
    """build_pkg_info delegates the metadata body to Builder.get_metadata_content."""
    get_metadata_content = mocker.patch(
        "poetry.core.masonry.builders.builder.Builder.get_metadata_content"
    )
    poetry = Factory().create_poetry(project("complete"))
    builder = SdistBuilder(poetry)
    builder.build_pkg_info()

    assert get_metadata_content.called
def test_make_pkg_info_any_python():
    """A project with no python constraint must not emit Requires-Python."""
    poetry = Factory().create_poetry(project("module1"))
    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(to_str(pkg_info))

    assert "Requires-Python" not in parsed
def test_find_files_to_add():
    """find_files_to_add collects sources, package data and metadata files."""
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)
    result = builder.find_files_to_add()

    assert sorted(result) == sorted(
        [
            Path("LICENSE"),
            Path("README.rst"),
            Path("my_package/__init__.py"),
            Path("my_package/data1/test.json"),
            Path("my_package/sub_pkg1/__init__.py"),
            Path("my_package/sub_pkg2/__init__.py"),
            Path("my_package/sub_pkg2/data2/data.json"),
            Path("my_package/sub_pkg3/foo.py"),
            Path("pyproject.toml"),
        ]
    )
def test_make_pkg_info_multi_constraints_dependency():
    """A dependency with per-python-version constraints yields one
    Requires-Dist entry per constraint, each with its environment marker."""
    poetry = Factory().create_poetry(
        Path(__file__).parent.parent.parent
        / "fixtures"
        / "project_with_multi_constraints_dependency"
    )
    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(to_str(pkg_info))

    requires = parsed.get_all("Requires-Dist")
    assert requires == [
        'pendulum (>=1.5,<2.0); python_version < "3.4"',
        'pendulum (>=2.0,<3.0); python_version >= "3.4" and python_version < "4.0"',
    ]
def test_find_packages():
    """find_packages reports (package_dir, packages, package_data) for both a
    flat layout and an src/ layout."""
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)

    base = project("complete")
    include = PackageInclude(base, "my_package")
    pkg_dir, packages, pkg_data = builder.find_packages(include)

    # Flat layout: no package_dir remapping.
    assert pkg_dir is None
    assert packages == [
        "my_package",
        "my_package.sub_pkg1",
        "my_package.sub_pkg2",
        "my_package.sub_pkg3",
    ]
    assert pkg_data == {
        "": ["*"],
        "my_package": ["data1/*"],
        "my_package.sub_pkg2": ["data2/*"],
    }

    poetry = Factory().create_poetry(project("source_package"))
    builder = SdistBuilder(poetry)
    base = project("source_package")
    include = PackageInclude(base, "package_src", source="src")

    pkg_dir, packages, pkg_data = builder.find_packages(include)

    # src layout: packages are remapped onto the src directory.
    assert pkg_dir == str(base / "src")
    assert packages == ["package_src"]
    assert pkg_data == {"": ["*"]}
def test_package():
    """Building the "complete" fixture produces an sdist containing LICENSE."""
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "complete" / "dist" / "my-package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "my-package-1.2.3/LICENSE" in tar.getnames()
def test_setup_py_context():
    """builder.setup_py() materializes setup.py for the duration of the
    context and removes it on exit."""
    poetry = Factory().create_poetry(project("complete"))

    builder = SdistBuilder(poetry)

    project_setup_py = poetry.file.parent / "setup.py"

    assert not project_setup_py.exists()

    try:
        with builder.setup_py() as setup:
            assert setup.exists()
            assert project_setup_py == setup

            with open(str(setup), "rb") as f:
                # we convert to string and replace line endings here for compatibility
                data = to_str(encode(f.read())).replace("\r\n", "\n")
                assert data == to_str(builder.build_setup())

        assert not project_setup_py.exists()
    finally:
        # Never leave a stray setup.py behind for other tests.
        if project_setup_py.exists():
            project_setup_py.unlink()
def test_module():
    """A single-module project packs its module file into the sdist."""
    poetry = Factory().create_poetry(project("module1"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module1-0.1/module1.py" in tar.getnames()
def test_prelease():
    """Pre-release versions are normalized (0.1b1) in the sdist file name."""
    poetry = Factory().create_poetry(project("prerelease"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "prerelease" / "dist" / "prerelease-0.1b1.tar.gz"

    assert sdist.exists()
def test_with_c_extensions():
    """Projects with a build script include build.py and the C sources."""
    poetry = Factory().create_poetry(project("extended"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/extended/extended.c" in tar.getnames()
def test_with_c_extensions_src_layout():
    """Same as test_with_c_extensions, but for an src/ layout project."""
    poetry = Factory().create_poetry(project("src_extended"))

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = fixtures_dir / "src_extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/src/extended/extended.c" in tar.getnames()
def test_with_src_module_file():
    """A single module under src/ maps package_dir to src and ships the file."""
    poetry = Factory().create_poetry(project("source_file"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["package_dir"] == {"": "src"}
    assert ns["modules"] == ["module_src"]

    builder.build()

    sdist = fixtures_dir / "source_file" / "dist" / "module-src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module-src-0.1/src/module_src.py" in tar.getnames()
def test_with_src_module_dir():
    """A package under src/ maps package_dir to src and ships its modules."""
    poetry = Factory().create_poetry(project("source_package"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert ns["package_dir"] == {"": "src"}
    assert ns["packages"] == ["package_src"]

    builder.build()

    sdist = fixtures_dir / "source_package" / "dist" / "package-src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "package-src-0.1/src/package_src/__init__.py" in tar.getnames()
        assert "package-src-0.1/src/package_src/module.py" in tar.getnames()
def test_default_with_excluded_data(mocker):
    """Files reported as git-ignored are excluded from package_data and from
    the built sdist; remaining data files are included exactly once."""
    # Patch git module to return specific excluded files
    p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files")
    p.return_value = [
        (
            (
                Path(__file__).parent
                / "fixtures"
                / "default_with_excluded_data"
                / "my_package"
                / "data"
                / "sub_data"
                / "data2.txt"
            )
            .relative_to(project("default_with_excluded_data"))
            .as_posix()
        )
    ]
    poetry = Factory().create_poetry(project("default_with_excluded_data"))

    builder = SdistBuilder(poetry)

    # Check setup.py
    setup = builder.build_setup()
    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
    assert "package_dir" not in ns
    assert ns["packages"] == ["my_package"]
    assert ns["package_data"] == {
        "": ["*"],
        "my_package": ["data/*", "data/sub_data/data3.txt"],
    }

    builder.build()

    sdist = (
        fixtures_dir / "default_with_excluded_data" / "dist" / "my-package-1.2.3.tar.gz"
    )

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        # No duplicate archive members.
        assert len(names) == len(set(names))
        assert "my-package-1.2.3/LICENSE" in names
        assert "my-package-1.2.3/README.rst" in names
        assert "my-package-1.2.3/my_package/__init__.py" in names
        assert "my-package-1.2.3/my_package/data/data1.txt" in names
        assert "my-package-1.2.3/pyproject.toml" in names
        assert "my-package-1.2.3/setup.py" in names
        assert "my-package-1.2.3/PKG-INFO" in names
        # all last modified times should be set to a valid timestamp
        for tarinfo in tar.getmembers():
            assert 0 < tarinfo.mtime
def test_src_excluded_nested_data():
    """Exclude patterns from pyproject.toml drop nested data files (and whole
    subtrees) while keeping explicitly included siblings."""
    module_path = fixtures_dir / "exclude_nested_data_toml"
    poetry = Factory().create_poetry(module_path)

    builder = SdistBuilder(poetry)
    builder.build()

    sdist = module_path / "dist" / "my-package-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        # No duplicate archive members.
        assert len(names) == len(set(names))
        assert "my-package-1.2.3/LICENSE" in names
        assert "my-package-1.2.3/README.rst" in names
        assert "my-package-1.2.3/pyproject.toml" in names
        assert "my-package-1.2.3/setup.py" in names
        assert "my-package-1.2.3/PKG-INFO" in names
        assert "my-package-1.2.3/my_package/__init__.py" in names
        assert "my-package-1.2.3/my_package/data/sub_data/data2.txt" not in names
        assert "my-package-1.2.3/my_package/data/sub_data/data3.txt" not in names
        assert "my-package-1.2.3/my_package/data/data1.txt" not in names
        assert "my-package-1.2.3/my_package/data/data2.txt" in names
        assert "my-package-1.2.3/my_package/puplic/publicdata.txt" in names
        assert "my-package-1.2.3/my_package/public/item1/itemdata1.txt" not in names
        assert (
            "my-package-1.2.3/my_package/public/item1/subitem/subitemdata.txt"
            not in names
        )
        assert "my-package-1.2.3/my_package/public/item2/itemdata2.txt" not in names
def test_proper_python_requires_if_two_digits_precision_version_specified():
    """A two-digit python version ("3.6") yields a >=,< Requires-Python range."""
    poetry = Factory().create_poetry(project("simple_version"))

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(to_str(pkg_info))

    assert parsed["Requires-Python"] == ">=3.6,<3.7"
def test_proper_python_requires_if_three_digits_precision_version_specified():
    """A three-digit python version ("2.7.15") pins Requires-Python exactly."""
    poetry = Factory().create_poetry(project("single_python"))

    builder = SdistBuilder(poetry)
    pkg_info = builder.build_pkg_info()
    p = Parser()
    parsed = p.parsestr(to_str(pkg_info))

    assert parsed["Requires-Python"] == "==2.7.15"
def test_excluded_subpackage():
    """An excluded subpackage is omitted from the generated packages list."""
    poetry = Factory().create_poetry(project("excluded_subpackage"))

    builder = SdistBuilder(poetry)
    setup = builder.build_setup()

    setup_ast = ast.parse(setup)

    setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
    ns = {}
    exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)

    assert ns["packages"] == ["example"]
| StarcoderdataPython |
1660828 | import numpy as np
from vg.compat import v2 as vg
# Public API of this module; _maybe_flatten stays private.
__all__ = [
    "rectangular_prism",
    "cube",
    "triangular_prism",
]
def _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces):
if ret_unique_vertices_and_faces:
return vertices, faces
else:
return vertices[faces]
def rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
    """
    Tesselate an axis-aligned rectangular prism. One vertex is `origin`. The
    diametrically opposite vertex is `origin + size`.

    Args:
        origin (np.ndarray): A 3D point vector containing the point on the
            prism with the minimum x, y, and z coords.
        size (np.ndarray): A 3D vector specifying the prism's length, width,
            and height, which should be positive.
        ret_unique_vertices_and_faces (bool): When `True` return a vertex
            array containing the unique vertices and an array of faces (i.e.
            vertex indices). When `False`, return a flattened array of
            triangle coordinates.

    Returns:
        object:

        - With `ret_unique_vertices_and_faces=True`: a tuple containing
          an `8x3` array of vertices and a `12x3` array of triangle faces.
        - With `ret_unique_vertices_and_faces=False`: a `12x3x3` matrix of
          flattened triangle coordinates.
    """
    # Imported locally — presumably to avoid a circular import; confirm.
    from ..tri import quads_to_tris

    vg.shape.check(locals(), "origin", (3,))
    vg.shape.check(locals(), "size", (3,))

    # Four corners of the -y face, wound in the x-z plane; the +y face is the
    # same quad offset by the prism's height along y.
    lower_base_plane = np.array(
        [
            # Lower base plane
            origin,
            origin + np.array([size[0], 0, 0]),
            origin + np.array([size[0], 0, size[2]]),
            origin + np.array([0, 0, size[2]]),
        ]
    )
    upper_base_plane = lower_base_plane + np.array([0, size[1], 0])

    vertices = np.vstack([lower_base_plane, upper_base_plane])

    # Quad winding order is significant: it fixes each face's normal
    # direction before triangulation.
    faces = np.array(
        quads_to_tris(
            np.array(
                [
                    [0, 1, 2, 3],  # lower base (-y)
                    [7, 6, 5, 4],  # upper base (+y)
                    [4, 5, 1, 0],  # -z face
                    [5, 6, 2, 1],  # +x face
                    [6, 7, 3, 2],  # +z face
                    [3, 7, 4, 0],  # -x face
                ],
            )
        ),
    )

    return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
def cube(origin, size, ret_unique_vertices_and_faces=False):
    """
    Tesselate an axis-aligned cube. One vertex is `origin`. The diametrically
    opposite vertex is `size` units along `+x`, `+y`, and `+z`.

    Args:
        origin (np.ndarray): A 3D point vector containing the point on the
            prism with the minimum x, y, and z coords.
        size (float): The length, width, and height of the cube, which should
            be positive.
        ret_unique_vertices_and_faces (bool): When `True` return a vertex
            array containing the unique vertices and an array of faces (i.e.
            vertex indices). When `False`, return a flattened array of
            triangle coordinates.

    Returns:
        object:

        - With `ret_unique_vertices_and_faces=True`: a tuple containing
          an `8x3` array of vertices and a `12x3` array of triangle faces.
        - With `ret_unique_vertices_and_faces=False`: a `12x3x3` matrix of
          flattened triangle coordinates.
    """
    vg.shape.check(locals(), "origin", (3,))
    # FIX: accept any real number — the error message already promised
    # "a number" but ints were rejected. bool is excluded explicitly since it
    # is an int subclass and almost certainly a caller mistake here.
    if isinstance(size, bool) or not isinstance(size, (int, float)):
        raise ValueError("`size` should be a number")

    return rectangular_prism(
        origin,
        np.repeat(size, 3),
        ret_unique_vertices_and_faces=ret_unique_vertices_and_faces,
    )
def triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
    """
    Tesselate a triangular prism whose base is the triangle `p1`, `p2`, `p3`.
    If the vertices are oriented in a counterclockwise direction, the prism
    extends from behind them.

    Args:
        p1 (np.ndarray): A 3D point on the base of the prism.
        p2 (np.ndarray): A 3D point on the base of the prism.
        p3 (np.ndarray): A 3D point on the base of the prism.
        height (float): The height of the prism, which should be positive.
        ret_unique_vertices_and_faces (bool): When `True` return a vertex
            array containing the unique vertices and an array of faces (i.e.
            vertex indices). When `False`, return a flattened array of
            triangle coordinates.

    Returns:
        object:

        - With `ret_unique_vertices_and_faces=True`: a tuple containing
          an `6x3` array of vertices and a `8x3` array of triangle faces.
        - With `ret_unique_vertices_and_faces=False`: a `8x3x3` matrix of
          flattened triangle coordinates.
    """
    # Imported locally — presumably to avoid a circular import; confirm.
    from .. import Plane

    vg.shape.check(locals(), "p1", (3,))
    vg.shape.check(locals(), "p2", (3,))
    vg.shape.check(locals(), "p3", (3,))
    # FIX: accept any real number, matching the error message; ints were
    # previously rejected. bool is excluded as an almost-certain mistake.
    if isinstance(height, bool) or not isinstance(height, (int, float)):
        raise ValueError("`height` should be a number")

    base_plane = Plane.from_points(p1, p2, p3)
    # The prism extends against the base normal, i.e. behind a CCW triangle.
    lower_base_to_upper_base = height * -base_plane.normal
    vertices = np.vstack(([p1, p2, p3], [p1, p2, p3] + lower_base_to_upper_base))

    faces = np.array(
        [
            [0, 1, 2],  # base
            [0, 3, 4],
            [0, 4, 1],  # side 0, 3, 4, 1
            [1, 4, 5],
            [1, 5, 2],  # side 1, 4, 5, 2
            [2, 5, 3],
            [2, 3, 0],  # side 2, 5, 3, 0
            [5, 4, 3],  # base
        ],
    )

    return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
| StarcoderdataPython |
4976002 | <gh_stars>0
import pytest
import json
from datetime import date
from core.models import Event
from api.serializers import EventSerializer
@pytest.mark.django_db()
class TestsEventSerialization:
    """Checks that EventSerializer flattens an Event model instance into the
    expected primitive dict."""

    @pytest.fixture()
    def event(self):
        # Persisted Event row used by the test; `date` is left to whatever
        # the model defaults it to.
        instance = Event.objects.create(
            event='some',
            count=2,
        )
        yield instance

    def test_serializing_an_event_object(self, event):
        """The serialized payload mirrors the model fields, with the date
        rendered in ISO-8601 form."""
        spected = {
            'date': event.date.isoformat(),
            'event': event.event,
            'count': event.count,
        }
        actual = EventSerializer(event, many=False).data
        assert actual == spected
| StarcoderdataPython |
1712002 | <reponame>Ally-s-Lab/miRmedon
import os
def alignment_to_emiRbase(fastq_file, path_to_star, threads, star_ref_dir, path_to_samtools):
    """Align a FASTQ to the STAR miRNA reference, keeping forward-strand hits.

    Shells out to STAR with short-read, multimap-tolerant settings, renames
    the raw BAM, then uses samtools to drop reverse-strand alignments
    (SAM flag 16).  All outputs are written with a "_" prefix into the
    current working directory.
    """
    # STAR options tuned for very short miRNA reads: matches as short as
    # 12 nt, up to 200 multimapping loci, at most 1 mismatch; unmapped reads
    # are kept as FASTA/FASTQ for later processing.
    params = '''--runThreadN {}
    --alignIntronMin 1
    --outFilterMultimapNmax 200
    --outFilterMatchNmin 12
    --outFilterMatchNminOverLread 0.66
    --outFilterMismatchNoverLmax 0.08
    --seedSearchStartLmax 6
    --winAnchorMultimapNmax 2000
    --outFilterMultimapScoreRange 0
    --outSAMtype BAM Unsorted
    --outReadsUnmapped Fastx
    --outFilterMismatchNmax 1
    --outSAMprimaryFlag AllBestScore
    --outWigType None
    --outSAMattributes NH AS NM MD'''.format(threads)
    # Newlines inside `params` are flattened to spaces to build one shell line.
    os.system('{} --genomeDir {} --readFilesIn {} --outFileNamePrefix {} {}'.format(path_to_star, star_ref_dir, fastq_file, '_', params.replace('\n', ' ')))
    os.system('mv _Aligned.out.bam _Aligned.out.FCRC.bam')
    # -F 16: exclude reads aligned to the reverse strand.
    os.system('{} view -b -h -F 16 _Aligned.out.FCRC.bam > _Aligned.out.bam'.format(path_to_samtools))
| StarcoderdataPython |
11243221 | from . import outputter
import smbus
import time
# I2C / LCD configuration (HD44780-style controller behind an I2C backpack).
I2C_ADDR = 0x27  # I2C address of the LCD backpack
LCD_WIDTH = 16  # maximum number of characters per line
LCD_CHR = 1  # mode flag: sending character data
LCD_CMD = 0  # mode flag: sending a command
LCD_LINE_1 = 0x80  # write address for the start of line 1
LCD_LINE_2 = 0xC0  # write address for the start of line 2
LCD_BACKLIGHT = 0x08  # backlight control bit (original note said "OFF", but this bit is OR-ed into every write — verify polarity)
bus = smbus.SMBus(1)  # number of the I2C bus the display is attached to
def init_display():
    """Reset the LCD controller: 4-bit bus, 2 lines, display on, screen cleared."""
    send_byte_to_data_pin(0x33,LCD_CMD)  # init sequence, part 1
    send_byte_to_data_pin(0x32,LCD_CMD)  # init sequence, part 2 (switch to 4-bit mode)
    send_byte_to_data_pin(0x06,LCD_CMD)  # entry mode: advance cursor, no display shift
    send_byte_to_data_pin(0x0C,LCD_CMD)  # display on, cursor off, blink off
    send_byte_to_data_pin(0x28,LCD_CMD)  # function set: 4-bit, 2 lines, 5x8 font
    send_byte_to_data_pin(0x01,LCD_CMD)  # clear display
    time.sleep(0.0005)  # give the (slow) clear command time to complete
def send_byte_to_data_pin(bits, mode):
    """Send one byte to the LCD as two 4-bit nibbles (high nibble first).

    `mode` is LCD_CMD (0) for commands or LCD_CHR (1) for character data.
    The backlight bit is OR-ed into every transfer.
    """
    upper_bits = mode | (bits & 0xF0) | LCD_BACKLIGHT
    lower_bits = mode | ((bits<<4) & 0xF0) | LCD_BACKLIGHT
    bus.write_byte(I2C_ADDR, upper_bits)
    enable_toggle_button(upper_bits)  # latch the high nibble
    bus.write_byte(I2C_ADDR, lower_bits)
    enable_toggle_button(lower_bits)  # latch the low nibble
def enable_toggle_button(bits):
    """Pulse bit 0b100 (the enable line) high then low so the nibble is latched."""
    time.sleep(0.0005)
    bus.write_byte(I2C_ADDR, (bits | 0b00000100))  # enable high
    time.sleep(0.0005)
    bus.write_byte(I2C_ADDR,(bits & ~0b00000100))  # enable low
    time.sleep(0.0005)
def send_string_to_display(message, line):
    """Write up to LCD_WIDTH characters of `message` starting at `line`."""
    # Pad short messages with spaces so stale characters get overwritten.
    padded = message.ljust(LCD_WIDTH, " ")
    # Position the write cursor at the start of the requested line.
    send_byte_to_data_pin(line, LCD_CMD)
    for ch in padded[:LCD_WIDTH]:
        send_byte_to_data_pin(ord(ch), LCD_CHR)
class CsvOutputter(outputter.OutPut):
    """Outputter that writes the latest GPS/CO2 reading to the I2C LCD.

    NOTE(review): despite the name "Csv", this class drives the LCD —
    consider renaming once all call sites can be updated.
    """

    def __init__(self):
        # Reset the LCD so the display starts from a known state.
        init_display()

    def put_data(self, data):
        """Show coordinates on line 1 and the CO2 label on line 2."""
        init_display()
        # Bug fix: longitude previously displayed data["lat"] twice.
        send_string_to_display(f'lat:{data["lat"]},lon{data["lon"]}', LCD_LINE_1)  # line 1
        send_string_to_display(f"co2:", LCD_LINE_2)  # line 2 (value never appended — TODO confirm intent)

    def finish(self):
        # Nothing to release for the LCD.
        pass
| StarcoderdataPython |
8545 | <filename>tests/__init__.py
# -*- coding: utf-8 -*-
"""Tests go in this directory."""
| StarcoderdataPython |
1855269 | <reponame>OnionPiece/networking-ipvs
#!/usr/bin/python2.7
import os
from neutron.agent.linux import ip_lib
from networking_ipvs.common import template
from networking_ipvs.common import constants as const
class FakeConf(object):
    """Static test double mimicking the agent's oslo-style configuration."""

    class _KeepalivedOpts(object):
        """Options under the [keepalived] section."""

        @property
        def keepalived_conf_path(self):
            return "/etc/keepalived/keepalived.conf"

        @property
        def virtualserver_conf_path(self):
            return "/etc/keepalived/networking_ipvs"

        @property
        def notify_emails(self):
            return ["<EMAIL>", "<EMAIL>"]

        @property
        def notify_from(self):
            return "lvs_cluster@cn"

        @property
        def smtp_server(self):
            return "192.168.0.1"

        @property
        def smtp_timeout(self):
            return 30

    class _IpvsOpts(object):
        """Options under the [ipvs] section."""

        @property
        def enable_ipvs_fullnat(self):
            return False

        @property
        def ipvs_vip_nic_mapping(self):
            return '*:eth0'

        @property
        def ipvs_sync_daemon_nic(self):
            return ''

        @property
        def ipvs_sync_daemon_ids(self):
            return ''

    class _RevisionOpts(object):
        """Options under the [revision] section."""

        @property
        def revision_path(self):
            return '/var/lib/neutron/networking_ipvs_revision'

    @property
    def keepalived(self):
        return self._KeepalivedOpts()

    @property
    def ipvs(self):
        return self._IpvsOpts()

    @property
    def revision(self):
        return self._RevisionOpts()
class FakeRPC(object):
    """In-process substitute for the RPC client: forwards calls to the plugin."""

    def __init__(self, context, plugin):
        self.context = context
        self.plugin = plugin

    def _call(self, name, *args):
        # Every RPC method maps to plugin.<name>(context, *args).
        return getattr(self.plugin, name)(self.context, *args)

    def get_revisions(self, start=None, end=None):
        return self._call('get_revisions', start, end)

    def get_ipvs_realservers(self, filters=None):
        return self._call('get_ipvs_realservers', filters)

    def get_ipvs_virtualservers(self, filters=None):
        return self._call('get_ipvs_virtualservers', filters)
# Shared fixtures: fake agent config and the template driver under test.
conf = FakeConf()
template_driver = template.KeepalivedTemplate(conf)
def cleanup():
    """Return the node to a clean state before/after a test run.

    Removes all generated virtualserver config files, strips every /32 VIP
    from eth0, and restarts keepalived.  Destructive: touches real system
    state, so only run on a disposable test host.
    """
    for f in os.listdir(conf.keepalived.virtualserver_conf_path):
        os.remove(os.path.join(conf.keepalived.virtualserver_conf_path, f))
    eth0 = ip_lib.IPDevice('eth0')
    for addr in eth0.addr.list():
        # Only /32 addresses are VIPs added by the agent; leave the rest.
        if addr['cidr'].endswith('/32'):
            eth0.addr.delete(addr['cidr'])
    os.system('service keepalived restart')
# TODO: update for md5 check
def _md5_check_failed(*args, **kwargs):
return False
fake_md5_check = _md5_check_failed
def assert_vip(ip, exists=True):
    """Assert that <ip>/32 is (or, with exists=False, is not) configured on eth0."""
    target = ip + '/32'
    device = ip_lib.IPDevice('eth0')
    configured = [entry['cidr'] for entry in device.addr.list()]
    if exists:
        assert target in configured
    else:
        assert target not in configured
def assert_vs_file(vs_info, all_rs):
    """Assert the on-disk keepalived virtualserver config matches the template.

    With realservers present, the file must exist and equal what the template
    driver renders; with none, the file must have been removed.
    """
    listen_ip = vs_info[const.LISTEN_IP]
    listen_port = vs_info[const.LISTEN_PORT]
    file_path = os.path.join(conf.keepalived.virtualserver_conf_path,
                             '%s_%s' % (listen_ip, listen_port))
    up = vs_info.get(const.ADMIN_STATE_UP, True)
    if not up:
        # Administratively-down virtualservers carry the const.DOWN suffix.
        file_path += const.DOWN
    if len(all_rs):
        file_content = None
        temp_content = None
        try:
            assert os.path.isfile(file_path)
            file_content = open(file_path).read()
            temp_content = template_driver.get_virtualserver_conf(
                vs_info, all_rs)
            assert file_content == temp_content
        except AssertionError as e:
            # Re-raise with both texts so a mismatch is easy to diagnose.
            raise AssertionError(
                'file_path: %s\nfile_content:\n%s\ntemp_content:\n%s' % (
                    file_path, file_content, temp_content))
    else:
        # No realservers: the config file must not exist at all.
        assert os.path.exists(file_path) is False
def common_assert(vs_info, all_rs, task_msg, vip_exists=True):
    # Run both checks for a virtualserver, printing pass/fail per check and
    # exiting the process with status 1 on the first failure.
    # NOTE: Python 2 print statements — this script targets python2.7.
    try:
        assert_vip(vs_info[const.LISTEN_IP], vip_exists)
    except AssertionError:
        print task_msg + ".assert_vip....failed"
        os.sys.exit(1)
    else:
        print task_msg + ".assert_vip....passed"
    try:
        assert_vs_file(vs_info, all_rs)
    except AssertionError as e:
        print task_msg + ".assert_vs_file....failed"
        print e.message
        os.sys.exit(1)
    else:
        print task_msg + ".assert_vs_file....passed"
| StarcoderdataPython |
11356690 | # Generated by Django 3.0.2 on 2020-02-12 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make MortgagePrograms.programs_target optional (adds blank=True)."""

    dependencies = [
        ('mortgages', '0007_auto_20200212_2101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mortgageprograms',
            name='programs_target',
            field=models.ManyToManyField(blank=True, to='mortgages.TargetCredits', verbose_name='Цель программы'),
        ),
    ]
| StarcoderdataPython |
1924325 | import unittest
import numpy as np
from nuplan.common.actor_state.agent import Agent, PredictedTrajectory
from nuplan.common.actor_state.scene_object import SceneObjectMetadata
from nuplan.common.actor_state.state_representation import StateSE2, StateVector2D, TimePoint
from nuplan.common.actor_state.test.test_utils import get_sample_agent, get_sample_oriented_box
from nuplan.common.actor_state.tracked_objects_types import TrackedObjectType
from nuplan.common.actor_state.waypoint import Waypoint
class TestAgent(unittest.TestCase):
    """Test suite for the Agent class"""

    def setUp(self) -> None:
        """Setup parameters for tests"""
        self.sample_token = 'abc123'
        self.track_token = 'abc123'
        self.timestamp = 123
        self.agent_type = TrackedObjectType.VEHICLE
        self.sample_pose = StateSE2(1.0, 2.0, np.pi / 2.0)
        self.wlh = (2.0, 4.0, 1.5)  # width, length, height
        self.velocity = StateVector2D(1.0, 2.2)

    def test_agent_types(self) -> None:
        """Test that enum works for both existing and missing keys"""
        self.assertEqual(TrackedObjectType(0), TrackedObjectType.VEHICLE)
        self.assertEqual(TrackedObjectType.VEHICLE.fullname, "vehicle")
        # Unknown names must raise rather than silently create members.
        with self.assertRaises(ValueError):
            TrackedObjectType('missing_key')

    def test_construction(self) -> None:
        """Test that agents can be constructed correctly."""
        oriented_box = get_sample_oriented_box()
        agent = Agent(
            metadata=SceneObjectMetadata(
                token=self.sample_token, track_token=self.track_token, timestamp_us=self.timestamp, track_id=None
            ),
            tracked_object_type=self.agent_type,
            oriented_box=oriented_box,
            velocity=self.velocity,
        )
        # Angular velocity was not supplied, so it should remain unset (None).
        self.assertTrue(agent.angular_velocity is None)

    def test_set_predictions(self) -> None:
        """Tests assignment of predictions to agents, and that this fails if the probabilities don't sum to one."""
        agent = get_sample_agent()
        waypoints = [Waypoint(TimePoint(t), get_sample_oriented_box(), StateVector2D(0.0, 0.0)) for t in range(5)]
        predictions = [
            PredictedTrajectory(0.3, waypoints),
            PredictedTrajectory(0.7, waypoints),
        ]
        agent.predictions = predictions
        self.assertEqual(len(agent.predictions), 2)
        self.assertEqual(0.3, agent.predictions[0].probability)
        self.assertEqual(0.7, agent.predictions[1].probability)
        # Check that we fail to assign the predictions if the sum of probabilities is not one
        predictions += predictions  # probabilities now sum to 2.0
        with self.assertRaises(ValueError):
            agent.predictions = predictions

    def test_set_past_trajectory(self) -> None:
        """Tests assignment of past trajectory to agents."""
        agent = get_sample_agent()
        # One waypoint per microsecond up to (and including) the agent's time.
        waypoints = [
            Waypoint(TimePoint(t), get_sample_oriented_box(), StateVector2D(0.0, 0.0))
            for t in range(agent.metadata.timestamp_us + 1)
        ]
        agent.past_trajectory = PredictedTrajectory(1, waypoints)
        self.assertEqual(len(agent.past_trajectory.waypoints), 11)
        with self.assertRaises(ValueError):
            # Fail because the final state does not land at current ego's position
            agent.past_trajectory = PredictedTrajectory(
                1, [Waypoint(TimePoint(t), get_sample_oriented_box(), StateVector2D(0.0, 0.0)) for t in range(3)]
            )
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11348656 |
class sd:
    """Represents an SD card block device (e.g. /dev/mmcblk0) to be backed up."""

    def __init__(self, device):
        self.device = device

    def get_stream(self):
        """Return a command producing the card's raw byte stream.

        Raises:
            NotImplementedError: always; concrete subclasses must override.
        """
        # Use the built-in NotImplementedError (still a subclass of Exception,
        # so existing broad handlers keep working) instead of the Java-style
        # Exception("NotImplementedException").
        raise NotImplementedError("get_stream must be implemented by a subclass")
        # return ["dd", "..."]
# Create a filename with datestamp for our current backup (without .img suffix)
#ofile="/mnt/usb/backup_$(date +%d-%b-%y_%T)"
# Create final filename, with suffix
#ofilefinal=$ofile.img
# Begin the backup process, should take about 1 hour from 8Gb SD card to HDD
#sudo dd if="/dev/mmcblk0" of=$ofile bs=1M
# Collect result of backup procedure
#result=$?
# If command has completed successfully, delete previous backups and exit
#if [ $result=0 ]; then rm -f /mnt/usb/backup_*.img; mv $ofile $ofilefinal; exit 0;fi
#If command has failed, then delete partial backup file
#if [ $result=1 ]; then rm -f $ofile; exit 1;fi | StarcoderdataPython |
3586701 | # coding=utf-8
"""Overrides for Discord.py classes"""
import contextlib
import inspect
import io
import itertools
import re
import discord
from discord.ext.commands import HelpFormatter as HelpF, Paginator, Command
from bot.utils import polr, privatebin
from bot.utils.args import ArgParseConverter as ArgPC
def create_help(cmd, parser):
    """Creates an updated usage for the help command"""
    default = cmd.params['args'].default
    # Only rewrite usage when the signature ends in the argparse-backed
    # "args" parameter.
    # NOTE(review): this is an unparenthesized conditional expression — when
    # `default` is falsy it evaluates to the truthy string "args]" and the
    # branch is ALWAYS taken; likely missing parentheses around the
    # comparison. Confirm intent before changing.
    if cmd.signature.split("[")[-1] == f"args={default}]" if default else "args]":
        sio = io.StringIO()
        with contextlib.redirect_stdout(sio):
            parser.print_help()
        sio.seek(0)
        s = sio.read()
        # Strip the filename and trailing newline from help text
        # (assumes argparse output starts with "usage: <prog> " — the slice
        # skips "usage: " plus the program name; TODO confirm).
        arg_part = s[(len(str(s[7:]).split()[0]) + 8):-1]
        k = cmd.qualified_name
        spt = len(k.split())
        # Remove a duplicate command name + leading arguments
        split_sig = cmd.signature.split()[spt:]
        return "[".join((" ".join(split_sig)).split("[")[:-1]) + arg_part
    return cmd.usage
class HelpFormatter(HelpF):
    """Custom override for the default help command"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fresh paginator is created per format() call; initialised lazily.
        self._paginator = None

    async def format(self):
        """Handles the actual behaviour involved with formatting.
        To change the behaviour, this method should be overridden.
        Returns
        --------
        list
            A paginated output of the help command.
        """
        self._paginator = Paginator()
        # we need a padding of ~80 or so
        description = self.command.description if not self.is_cog() else inspect.getdoc(self.command)
        if description:
            # <description> portion
            self._paginator.add_line(description, empty=True)
        if isinstance(self.command, Command):
            # <signature portion>
            # For commands with an argparse-backed "args" parameter, build a
            # richer usage string from the parser's own help output.
            if self.command.params.get("args", None) and type(self.command.params['args'].annotation) == ArgPC:
                self.command.usage = create_help(self.command, self.command.params['args'].annotation.parser)
            signature = self.get_command_signature()
            self._paginator.add_line(signature, empty=True)
            # <long doc> section
            if self.command.help:
                self._paginator.add_line(self.command.help, empty=True)
            # end it here if it's just a regular command
            if not self.has_subcommands():
                self._paginator.close_page()
                return self._paginator.pages
        max_width = self.max_name_size

        def category(tup):
            """Splits the help command into categories for easier readability"""
            cog = tup[1].cog_name
            # we insert the zero width space there to give it approximate
            # last place sorting position.
            return cog + ':' if cog is not None else '\u200bNo Category:'

        filtered = await self.filter_command_list()
        if self.is_bot():
            # Bot-wide help: group commands by cog name.
            data = sorted(filtered, key=category)
            for category, commands in itertools.groupby(data, key=category):
                # there simply is no prettier way of doing this.
                commands = sorted(commands)
                if len(commands) > 0:
                    self._paginator.add_line(category)
                self._add_subcommands_to_page(max_width, commands)
        else:
            # Cog or group help: flat, sorted command list.
            filtered = sorted(filtered)
            if filtered:
                self._paginator.add_line('Commands:')
                self._add_subcommands_to_page(max_width, filtered)
        # add the ending note
        self._paginator.add_line()
        ending_note = self.get_ending_note()
        self._paginator.add_line(ending_note)
        return self._paginator.pages
# Zero-width-space substitutions that neutralise mass mentions in help text.
_mentions_transforms = {
    '@everyone': '@\u200beveryone',
    '@here': '@\u200bhere'
}

# Single regex matching any of the mention strings above.
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
async def _default_help_command(ctx, *commands: str):
    """Shows this message."""
    bot = ctx.bot
    # Honour the bot's pm_help setting: DM the author or reply in-channel.
    destination = ctx.message.author if bot.pm_help else ctx.message.channel

    def repl(obj):
        # Replace any matched mass-mention with its zero-width-space variant.
        return _mentions_transforms.get(obj.group(0), '')

    # help by itself just lists our own commands.
    if len(commands) == 0:
        pages = await bot.formatter.format_help_for(ctx, bot)
    elif len(commands) == 1:
        # try to see if it is a cog name
        name = _mention_pattern.sub(repl, commands[0])
        command = None
        if name in bot.cogs:
            command = bot.cogs[name]
        else:
            command = bot.all_commands.get(name)
            if command is None:
                await destination.send(bot.command_not_found.format(name))
                return
        pages = await bot.formatter.format_help_for(ctx, command)
    else:
        # Multiple names: walk down the subcommand chain one key at a time.
        name = _mention_pattern.sub(repl, commands[0])
        command = bot.all_commands.get(name)
        if command is None:
            await destination.send(bot.command_not_found.format(name))
            return
        for key in commands[1:]:
            try:
                key = _mention_pattern.sub(repl, key)
                command = command.all_commands.get(key)
                if command is None:
                    await destination.send(bot.command_not_found.format(key))
                    return
            except AttributeError:
                # Current command has no `all_commands`, i.e. no subcommands.
                await destination.send(bot.command_has_no_subcommands.format(command, key))
                return
        pages = await bot.formatter.format_help_for(ctx, command)
    if bot.pm_help is None:
        characters = sum(map(len, pages))
        # modify destination based on length of pages.
        if characters > 1000:
            destination = ctx.message.author
    for page in pages:
        try:
            await destination.send(page)
        except discord.Forbidden:
            # DMs blocked: fall back to the channel the command came from.
            destination = ctx.message.channel
            await destination.send(page)
# Keep a reference to the original method so the override can delegate to it.
old_send = discord.abc.Messageable.send


async def send(self, content=None, **kwargs):
    """Overrides default send method in order to create a paste if the response is more than 2000 characters"""
    # Defuse mass mentions by substituting a Cyrillic 'е' (U+0435) look-alike.
    if content is not None and any(x in str(content) for x in ["@everyone", "@here"]):
        content = content.replace("@everyone", "@\u0435veryone").replace("@here", "@h\u0435re")
    if content is not None and len(str(content)) > 2000:
        if content.startswith("```py"):
            # Drop the surrounding code fence before uploading.
            content = "\n".join(content.split("\n")[1:-1])
        # NOTE(review): relies on a `bot` attribute being present on the
        # Messageable instance (set elsewhere in the project) — confirm.
        paste = await privatebin.upload(content, expires="15min", server=self.bot.priv)
        if self.bot.polr:
            # Optionally shorten the paste URL via the configured Polr server.
            paste = await polr.shorten(paste, **self.bot.polr)
        return await old_send(self, f"Hey, I couldn't handle all the text I was gonna send you, so I put it in a paste!"
                                    f"\nThe link is **{paste}**, but it expires in 15 minutes, so get it quick!",
                              **kwargs)
    else:
        return await old_send(self, content, **kwargs)
| StarcoderdataPython |
1709001 | <filename>tests/test_theme.py
from typing import Any, Dict
import pytest
from grgr.ggplot2.theme import Theme, ThemeElement
@pytest.mark.parametrize("kwargs, answer", [
    ({
        "foo": '"bar"'
    }, 'test(foo="bar")'),
    ({
        "foo_bar": '"bar"'
    }, 'test(foo.bar="bar")'),
    ({
        "foo": '"bar"',
        "foo_bar": '"bar"'
    }, 'test(foo="bar",foo.bar="bar")'),
    ({}, "test()"),
])
def test_theme(kwargs: Dict[str, Any], answer: str):
    """Theme.tor() renders kwargs as R arguments, mapping '_' to '.' in names."""
    assert Theme("test", **kwargs).tor() == answer
@pytest.mark.parametrize("kwargs, answer", [
    ({
        "foo": '"bar"'
    }, 'test(foo="bar")'),
    ({
        "foo_bar": '"bar"'
    }, 'test(foo_bar="bar")'),
    ({
        "foo": '"bar"',
        "foo_bar": '"bar"'
    }, 'test(foo="bar",foo_bar="bar")'),
    ({}, "test()"),
])
def test_theme_element(kwargs: Dict[str, Any], answer: str):
    """ThemeElement.tor() keeps argument names verbatim (no '_' to '.' mapping)."""
    assert ThemeElement("test", **kwargs).tor() == answer
| StarcoderdataPython |
194137 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from cloudinary.models import CloudinaryField
from django.db.models.fields import EmailField
# Create your models here.
# user
#neighbourhood
#profile
#Business
class CustomUser(AbstractUser):
    """Project user model extending Django's AbstractUser."""

    # Override name fields to allow longer values than the defaults.
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)

    def __str__(self):
        return self.username
# Choices for Neighbourhood.location: (stored value, human-readable label).
locations = [
    ('juja', 'juja'),
    ('highpoint', 'highpoint'),
    ('gach', 'gatch'),  # NOTE(review): value/label spelling mismatch — confirm intended spelling
    ('kroad', 'kroad'),
    ('toll', 'toll')
]
class Neighbourhood(models.Model):
    """A neighbourhood managed by an admin user."""

    name = models.CharField(max_length=200)
    location = models.CharField(max_length=200, choices=locations)
    description = models.TextField()
    residents = models.IntegerField(blank=True, null=True)  # number of residents
    admin = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    police = models.IntegerField(blank=True)  # presumably a police contact number — TODO confirm
    health = models.IntegerField(blank=True)  # presumably a health-services contact number — TODO confirm

    class Meta:
        # Newest neighbourhoods first.
        ordering = ['-pk']

    def create_neighbourhood(self):
        self.save()

    def delete_neighbourhood(self):
        self.delete()

    # @classmethod
    # def find_neighbourhood(cls, id):
    #     return cls.objects.get(id = id)

    @classmethod
    def update_hood(cls, id, update_des):
        # QuerySet.update returns the number of rows affected.
        updated_hood = cls.objects.filter(id=id).update(description=update_des)
        return updated_hood

    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile linking a user to a neighbourhood."""

    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    email = models.CharField(max_length=200)
    bio = models.TextField()
    hood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, blank=True, null=True)
    pp = CloudinaryField('image')  # profile picture

    def save_profile(self):
        self.save()

    def delete_profile(self):
        self.delete()

    # Bug fix: was misspelled `_str__`, so str(profile) never used it.
    def __str__(self):
        return self.user.username
class Business(models.Model):
    """A business registered in a neighbourhood by a resident."""

    name = models.CharField(max_length=200)
    bist_image = CloudinaryField('bist_image')
    # Bug fix: was `models,EmailField(...)` (comma instead of dot), which
    # assigned a (module, field) tuple rather than a model field.
    email = models.EmailField(max_length=100, blank=True)
    mobile = models.CharField(max_length=15, blank=True)
    hood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
    personel = models.ForeignKey(Profile, on_delete=models.CASCADE)

    def create_bist(self):
        self.save()

    def delete_bist(self):
        self.delete()

    @classmethod
    def search_bist(cls, bist_name):
        # Case-insensitive substring search on the business name.
        return cls.objects.filter(name__icontains=bist_name).all()

    # Decorator added for consistency with Neighbourhood.update_hood;
    # the body already used `cls`.
    @classmethod
    def update_bist(cls, bist_id, update_des):
        # NOTE(review): Business has no `description` field — this update
        # will fail at runtime; confirm the intended target field.
        bist = cls.objects.filter(id=bist_id).update(description=update_des)
        return bist

    def __str__(self):
        return self.name
class Posts(models.Model):
    """A user post shared within a neighbourhood."""

    image = CloudinaryField('post_image')
    title = models.CharField(max_length=200)
    caption = models.TextField()
    date_posted = models.DateTimeField(auto_now_add=True)
    posted_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    hood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)

    def save_post(self):
        self.save()

    def __str__(self):
        return self.title

    class Meta:
        # Newest posts first.
        ordering = ['-date_posted']
164888 | from django.contrib.contenttypes.fields import GenericRelation
from django.conf import settings
from django.db import models
from .custom_field import CustomField
from .model_mixins import SoftDeleteModel
class Organisation(SoftDeleteModel):
    """An organisation with an owner, members, and user-defined custom fields."""

    name = models.CharField(max_length=255)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL,
                              on_delete=models.CASCADE)
    members = models.ManyToManyField(
        settings.AUTH_USER_MODEL, blank=True, related_name='+')
    # Generic relation: CustomField rows attach here via contenttypes.
    custom_fields = GenericRelation(CustomField)

    def __str__(self):
        return self.name
| StarcoderdataPython |
3556345 | <filename>h2o-py/tests/testdir_algos/gbm/pyunit_mnist_manyCols_gbm_large.py
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def mnist_many_cols_gbm_large():
    """Smoke test: train a tiny GBM (1 tree, depth 1) on the wide MNIST set."""
    train = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
    train.tail()
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    gbm_mnist = H2OGradientBoostingEstimator(ntrees=1,
                                             max_depth=1,
                                             min_rows=10,
                                             learn_rate=0.01)
    # Columns 0-783 are pixel features; column 784 is the digit label.
    gbm_mnist.train(x=range(784), y=784, training_frame=train)
    gbm_mnist.show()
if __name__ == "__main__":
pyunit_utils.standalone_test(mnist_many_cols_gbm_large)
else:
mnist_many_cols_gbm_large()
| StarcoderdataPython |
1847317 | <reponame>fossabot/hotpot
import gzip
import ahocorasick
import pandas as pd
import ujson
from tqdm import tqdm
# ============================================
# Script purpose:
# Find words that contain a word within CEDICT
# ============================================
# speed up text search by using the Aho-Corasick algorithm
def characters_to_words(words):
    """Map each word to the list of words (from the same list) that contain it.

    Builds an Aho-Corasick automaton so all substring matches are found in a
    single pass per word rather than by O(n^2) pairwise scanning.
    see https://stackoverflow.com/questions/34816775/python-optimal-search-for-substring-in-list-of-strings
    """
    mapping = {}
    auto = ahocorasick.Automaton()
    for word in words:
        auto.add_word(word, word)
    auto.make_automaton()
    for word in tqdm(words):
        for _end_index, found in auto.iter(word):
            # setdefault replaces the membership-test-then-append pattern,
            # avoiding a double dict lookup per match.
            mapping.setdefault(found, []).append(word)
    return mapping
# Load CEDICT, compute the containing-words mapping for both simplified and
# traditional scripts, and persist each mapping as gzipped JSON.
cedict = pd.read_csv(f"./data/intermediate/cedict.txt", sep="\t", index_col=0)
simplified_words = list(cedict["simplified"])
traditional_words = list(cedict["traditional"])

simplified_char_to_word = characters_to_words(simplified_words)
traditional_char_to_word = characters_to_words(traditional_words)

with gzip.open(
    "./data/intermediate/simplified_containing_words.json.zip", "wt", encoding="utf-8"
) as f:
    ujson.dump(simplified_char_to_word, f)

with gzip.open(
    "./data/intermediate/traditional_containing_words.json.zip", "wt", encoding="utf-8"
) as f:
    ujson.dump(traditional_char_to_word, f)
| StarcoderdataPython |
1980619 | <reponame>jatinarora2409/scout-scripts<gh_stars>0
import getpass
import os
import time
import platform
import json
from timeit import default_timer
import datetime
import click
import executor
from executor import execute
from executor.ssh.client import RemoteCommand
from scoutcli.utils import helper
from scoutcli.utils import aws as aws_helper
from scoutcli import myhadoop
from scoutcli.myhibench import HiBenchClusterProfiler
@click.group()
@click.option('--sparkperf_dir', default="/opt/spark-perf", type=click.Path(exists=True, resolve_path=True))
@click.option('--hadoop_dir', default="/opt/hadoop", type=click.Path(exists=True, resolve_path=True))
@click.option('--spark_dir', default="/opt/spark-1.5.2", type=click.Path(exists=True, resolve_path=True))
@click.option('--monitoring', type=click.Path(exists=False, resolve_path=True))
@click.option('--interval', type=int, default=5)
@click.pass_context
def cli(ctx, **kwargs):
    """This is a command line tool to benchmark a machine
    """
    # Stash the shared options on the click context for subcommands to read.
    ctx.obj = kwargs
@click.option('--master')
@click.option('--slaves')
@click.pass_context
def auto_configure(ctx, master, slaves):
# step
# 0. common setting
slave_list = list(sorted(slaves.split(' ')))
print(master)
print(slave_list)
hostname = aws_helper.Instance.get_private_ip()
instance_type = aws_helper.Instance.get_instance_type()
print("Instance Type:", instance_type)
num_cores = aws_helper.Instance.get_num_of_cores()
memory_size = ctx.invoke(get_memory, instance=instance_type)
# 1. configure Hadoop, HDFS and Yarn
am_cores = 1 # arbitrary
task_cores = 1 # map/reduce task use one core per task
memory_size = ctx.invoke(get_memory, instance=instance_type)
ctx.invoke(myhadoop.configure,
replicas=1,
hostname=hostname,
cores=num_cores,
memory=memory_size,
am_cores=am_cores,
task_cores=task_cores,
master=master,
slaves=slaves
)
@cli.command()
@click.option('--slaves')
@click.option('--mode')
@click.pass_context
def get_spark_env(ctx, slaves, mode):
    """Derive Spark-on-YARN sizing for this instance type and slave set.

    Scheme: 1-core executors, one equal memory share per core, with a
    512 MB (containers <= 4 GB) or 1 GB overhead carved out per container.
    mode == 'n+1' gives the driver a full node's memory; otherwise one
    core's share.
    """
    instance_type = aws_helper.Instance.get_instance_type()
    num_cores = aws_helper.Instance.get_num_of_cores()
    memory_size = ctx.invoke(get_memory, instance=instance_type)
    slave_list = list(sorted(slaves.split(' ')))
    # AppMaster
    am_cores = 1
    memory_per_core = int(memory_size / num_cores)
    am_memory_overhead = 512 if (am_cores * memory_per_core) <= 4096 else 1024
    # am_memory_overhead = 1024 # should be fine for 2G to 8GB memory per core
    am_memory = int(am_cores * memory_per_core - am_memory_overhead)
    if mode == 'n+1':
        driver_local_memory = memory_size
    else:
        driver_local_memory = memory_per_core
    executor_cores = 1
    # One executor per remaining core across all slaves (the AM takes am_cores).
    executor_num = int((len(slave_list) * num_cores - am_cores) / executor_cores)
    executor_memory_overhead = 512 if (executor_cores * memory_per_core) <= 4096 else 1024
    # NOTE(review): overhead is subtracted twice here — confirm whether that is intentional.
    executor_memory = int(memory_per_core*executor_cores - executor_memory_overhead - executor_memory_overhead)
    # NOTE(review): these two values are computed but never used below.
    map_parallelism = num_cores * len(slave_list) * 2
    reduce_parallelism = num_cores * len(slave_list) * 2
    env_settings = {
        #'spark.driver.memory': '{}m'.format(driver_memory),
        'spark.executor.instances': executor_num,
        'spark.executor.cores': executor_cores,
        'spark.executor.memory': '{}m'.format(executor_memory),
        'spark.driver.memory': '{}m'.format(driver_local_memory),
        'spark.yarn.driver.memoryOverhead': am_memory_overhead,
        'spark.yarn.executor.memoryOverhead': executor_memory_overhead,  # no unit, different from Spark 2.1
        'spark.yarn.am.cores': am_cores,
        'spark.yarn.am.memory': '{}m'.format(am_memory),
        'spark.yarn.am.memoryOverhead': am_memory_overhead,  # no unit, different from Spark 2.1
        'spark.storage.memoryFraction': 0.66,
        'spark.serializer': 'org.apache.spark.serializer.JavaSerializer',
        'spark.shuffle.manager': 'SORT',
        'spark.yarn.maxAppAttempts': 1,
        'spark.task.maxFailures': 1,  # for fair comparison among workloads
        'sparkperf.executor.num': executor_num,  # piggyback for num-partitions in spark-perf
    }
    return env_settings
@cli.command()
@click.pass_context
def start(ctx):
    """Start the Hadoop/YARN services on this node."""
    ctx.invoke(myhadoop.start)
@cli.command()
@click.pass_context
def stop(ctx):
    """Stop the Hadoop/YARN services on this node."""
    ctx.invoke(myhadoop.stop)
@cli.command()
@click.pass_context
def init(ctx):
    """Stop any running services, then (re)initialise HDFS/YARN state."""
    ctx.invoke(myhadoop.stop)
    ctx.invoke(myhadoop.init)
@cli.command()
@click.pass_context
def clean(ctx):
    """Placeholder: no cleanup is currently performed."""
    pass
@cli.command()
@click.option('--instance', default='c4.large')
@click.pass_context
def get_config_profile(ctx, instance):
    """Return the memory-sizing tuple for the given EC2 instance type (MB)."""
    configure_profiles = {
        # the last one filed is for the Hadoop application. Not used in spark-perf
        # 'type.size': (am_driver_mem, am_driver_mem_overhead, executor_mem_overhead, hadoop_am_mem)
        'c3.large': (2048, 512, 512, 1024),
        'c3.xlarge': (2048, 512, 512, 1024),
        'c3.2xlarge': (2048, 512, 512, 1024),
        'c4.large': (2048, 512, 512, 1024),
        'c4.xlarge': (2048, 512, 512, 1024),
        'c4.2xlarge': (2048, 512, 512, 1024),
        'm3.large': (4096, 512, 512, 1024),
        'm3.xlarge': (4096, 512, 512, 1024),
        'm3.2xlarge': (4096, 512, 512, 1024),
        'm4.large': (4096, 512, 512, 1024),
        'm4.xlarge': (4096, 512, 512, 1024),
        'm4.2xlarge': (4096, 512, 512, 1024),
        'r3.large': (8192, 512, 512, 1024),
        'r3.xlarge': (8192, 512, 512, 1024),
        'r3.2xlarge': (8192, 512, 512, 1024),
        'r4.large': (8192, 512, 512, 1024),
        'r4.xlarge': (8192, 512, 512, 1024),
        'r4.2xlarge': (8192, 512, 512, 1024),
        't2.large': (4096, 512, 512, 1024),
        't2.xlarge': (4096, 512, 512, 1024),
        't2.2xlarge': (4096, 512, 512, 1024),
    }
    return configure_profiles[instance]
@cli.command()
@click.option('--instance', default='c4.large')
@click.pass_context
def get_memory(ctx, instance):
    """Return the usable memory (MB) assumed for the given EC2 instance type."""
    # not accurate memory but used for HiBench
    memory_sizes = {
        'c3.large': 4096,
        'c3.xlarge': 8192,
        'c3.2xlarge': 16384,
        'c4.large': 4096,
        'c4.xlarge': 8192,
        'c4.2xlarge': 16384,
        'm3.large': 8192,
        'm3.xlarge': 16384,
        'm3.2xlarge': 32768,
        'm4.large': 8192,
        'm4.xlarge': 16384,
        'm4.2xlarge': 32768,
        'r3.large': 16384,
        'r3.xlarge': 32768,
        'r3.2xlarge': 65536,
        'r4.large': 16384,
        'r4.xlarge': 32768,
        'r4.2xlarge': 65536,
        't2.large': 8192,
        't2.xlarge': 16384,
        't2.2xlarge': 32768,
    }
    return memory_sizes[instance]
@cli.command()
@click.option('--workload', default='regression')
@click.option('--datasize', default='size1')
@click.option('--num_partitions', type=int, default=1)
@click.option('--output_dir', default=None, type=click.Path(exists=False, resolve_path=True))
@click.pass_context
def generate_command(ctx, workload, datasize, num_partitions, output_dir):
    """Build the spark-submit shell command for one spark-perf MLlib workload.

    Merges the generic workload settings, the per-workload defaults and the
    datasize-specific generator parameters, then assembles a single shell
    string that runs ``mllib.perf.TestRunner`` with stdout/stderr appended
    to log files in *output_dir*.  The command is returned, not executed.
    """
    # @TODO: configure num.partitions
    default_app_settings = {
        'master': 'yarn',
        # NOTE(review): 'driver-memory' is defined but currently not used when
        # assembling the command below -- confirm whether it should be passed.
        'driver-memory': '1024m',
    }
    default_workload_settings = {
        'num-trials': 1,
        'inter-trial-wait': 1,
        'random-seed': 5,
        'num-partitions': num_partitions,
    }
    # Per-workload default hyper-parameters; 'class' selects the TestRunner
    # benchmark and is popped out before the remaining keys become CLI flags.
    default_workload_table = {
        'regression': {
            'class': 'glm-regression',
            'num-iterations': 20,
            'feature-noise': 1.0,
            'step-size': 0.001,
            'reg-type': 'l2',
            'reg-param': 0.1,
            'elastic-net-param': 0.0,
            'optimizer': 'sgd', # auto
            'intercept': 0.0,
            'label-noise': 0.1,
            'loss': 'l2',
        },
        'classification': {
            'class': 'glm-classification',
            'num-iterations': 20,
            'feature-noise': 1.0,
            'step-size': 0.001,
            'reg-type': 'l2',
            'reg-param': 0.1,
            'elastic-net-param': 0.0,
            'optimizer': 'sgd', # l-bfgs
            'loss': 'logistic',
            'per-negative': 0.3,
        },
        'naive-bayes': {
            'class': 'naive-bayes',
            'feature-noise': 1,
            'per-negative': 0.3,
            'nb-lambda': 1,
            'model-type': 'multinomial',
        },
        'decision-tree': {
            'class': 'decision-tree',
            'label-type': 0, # 2
            'frac-categorical-features': 0.5,
            'frac-binary-features': 0.5,
            'max-bins': 32,
            'ensemble-type': 'RandomForest', # GradientBoostedTrees | ml.GradientBoostedTree
            'training-data': '',
            'test-data': '',
            'test-data-fraction': 0.2,
            'tree-depth': 10, # 10,
            'num-trees': 1, # 10,
            'feature-subset-strategy': 'auto'
        },
        'random-forest': {
            'class': 'decision-tree',
            'label-type': 0, # 2
            'frac-categorical-features': 0.5,
            'frac-binary-features': 0.5,
            'max-bins': 32,
            'ensemble-type': 'RandomForest', # GradientBoostedTrees | ml.GradientBoostedTree
            'training-data': '',
            'test-data': '',
            'test-data-fraction': 0.2,
            'tree-depth': 10, # 10,
            'num-trees': 10, # 10,
            'feature-subset-strategy': 'auto'
        },
        'gradient-boosted-tree': {
            'class': 'decision-tree',
            'label-type': 0, # 2
            'frac-categorical-features': 0.5,
            'frac-binary-features': 0.5,
            'max-bins': 32,
            'ensemble-type': 'GradientBoostedTrees', # GradientBoostedTrees | ml.GradientBoostedTree
            'training-data': '',
            'test-data': '',
            'test-data-fraction': 0.2,
            'tree-depth': 5, # 10,
            'num-trees': 10, # 10,
            'feature-subset-strategy': 'auto'
        },
        'als': {
            'class': 'als',
            'num-iterations': 10,
            'reg-param': 0.1,
            'rank': 10
        },
        'kmeans': {
            'class': 'kmeans',
            'num-iterations': 20,
            'num-centers': 20,
        },
        'gmm': {
            'class': 'gmm',
            'num-iterations': 20,
            'num-centers': 20,
        },
        'lda': {
            'class': 'lda',
            'optimizer': 'em',
            'num-iterations': 40,
            'num-topics': 20,
            'document-length': 100,
        },
        'pic': {
            'class': 'pic',
            'num-iterations': 20,
            'num-centers': 40,
            'node-degree': 20,
        },
        'svd': {
            'class': 'svd',
            'rank': 10,
        },
        'pca': {
            'class': 'pca',
            'rank': 50,
        },
        'summary-statistics': {
            'class': 'summary-statistics',
            'rank': 50,
        },
        'block-matrix-mult': {
            'class': 'block-matrix-mult',
            'block-size': 1024
        },
        'pearson': {
            'class': 'pearson',
        },
        'spearman': {
            'class': 'spearman',
        },
        'chi-sq-feature': {
            'class': 'chi-sq-feature',
        },
        'chi-sq-gof': {
            'class': 'chi-sq-gof',
        },
        'chi-sq-mat': {
            'class': 'chi-sq-mat',
        },
        'word2vec': {
            'class': 'word2vec',
            'num-iterations': 10,
            'vector-size': 100,
            'min-count': 5,
        },
        'fp-growth': {
            'class': 'fp-growth',
            'min-support': 0.01,
        },
        'prefix-span': {
            'class': 'prefix-span',
            'min-support': 0.5,
            'max-local-proj-db-size': 32000000,
            'avg-sequence-size': 50,
            'avg-itemset-size': 50,
            'max-pattern-len': 10,
        }
    }
    # Data-generator sizes per workload; size0 for warming up the system
    workload_table = {
        'regression': {
            'warmup': {
                'num-examples': 1000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 50000,
                'num-features': 10000,
            },
            'medium': {
                'num-examples': 100000,
                'num-features': 10000,
            },
            'large': {
                'num-examples': 100000,
                'num-features': 10000,
            },
            'huge': {
                'num-examples': 500000,
                'num-features': 25000,
            },
            'bigdata': {
                'num-examples': 1000000,
                'num-features': 50000,
            },
        },
        'classification': {
            'warmup': {
                'num-examples': 1000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 20000,
                'num-features': 10000,
            },
            'medium': {
                'num-examples': 40000,
                'num-features': 10000,
            },
            'large': {
                'num-examples': 80000,
                'num-features': 10000,
            },
        },
        'naive-bayes': {
            'warmup': {
                'num-examples': 10000,
                'num-features': 1000,
            },
            'small': {
                'num-examples': 100000,
                'num-features': 10000,
            },
            'medium': {
                'num-examples': 200000,
                'num-features': 10000,
            },
            'large': {
                'num-examples': 400000,
                'num-features': 10000,
            },
            'huge': {
                'num-examples': 1000000,
                'num-features': 25000,
            },
            'bigdata': {
                'num-examples': 1000000,
                'num-features': 50000,
            },
        },
        'decision-tree': {
            'warmup': {
                'num-examples': 100000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 200000,
                'num-features': 200,
            },
            'medium': {
                'num-examples': 400000,
                'num-features': 200,
            },
            'large': {
                'num-examples': 800000,
                'num-features': 200,
            },
        },
        'random-forest': {
            'warmup': {
                'num-examples': 100000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 400000,
                'num-features': 100,
            },
            'medium': {
                'num-examples': 600000,
                'num-features': 100,
            },
            'large': {
                'num-examples': 800000,
                'num-features': 100,
            },
        },
        'gradient-boosted-tree': {
            'warmup': {
                'num-examples': 10000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 10000,
                'num-features': 100,
            },
            'medium': {
                'num-examples': 20000,
                'num-features': 100,
            },
            'large': {
                'num-examples': 800000,
                'num-features': 100,
            },
        },
        'als': {
            'warmup': {
                'num-users': 10000,
                'num-products': 10000,
                'num-rating': 10000,
            },
            'small': {
                'num-users': 100000,
                'num-products': 500000,
                'num-rating': 1000000,
            },
            'medium': {
                'num-users': 100000,
                'num-products': 500000,
                'num-rating': 1500000,
            },
            'large': {
                'num-users': 200000,
                'num-products': 500000,
                'num-rating': 2000000,
            },
        },
        'kmeans': {
            'warmup': {
                'num-examples': 1000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 10000,
                'num-features': 10000,
            },
            'medium': {
                'num-examples': 25000,
                'num-features': 10000,
            },
            'large': {
                'num-examples': 50000,
                'num-features': 10000,
            },
            'huge': {
                'num-examples': 1000000,
                'num-features': 10000,
            },
            'bigdata': {
                'num-examples': 2000000,
                'num-features': 10000,
            },
        },
        'gmm': {
            'warmup': {
                'num-examples': 50000,
                'num-features': 100,
            },
            'small': {
                'num-examples': 100000,
                'num-features': 100,
            },
            'medium': {
                'num-examples': 200000,
                'num-features': 100,
            },
            'large': {
                'num-examples': 200000,
                'num-features': 200,
            },
        },
        'lda': {
            'warmup': {
                'num-documents': 2500,
                'num-vocab': 1000,
            },
            'small': {
                'num-documents': 10000,
                'num-vocab': 10000,
            },
            'medium': {
                'num-documents': 15000,
                'num-vocab': 10000,
            },
            'large': {
                'num-documents': 20000,
                'num-vocab': 10000,
            },
        },
        'pic': {
            'warmup': {
                'num-examples': 1000,
            },
            'small': {
                'num-examples': 50000,
            },
            'medium': {
                'num-examples': 100000,
            },
            'large': {
                'num-examples': 150000,
            },
        },
        'svd': {
            'warmup': {
                'num-rows': 50000,
                'num-cols': 500,
            },
            'small': {
                'num-rows': 100000,
                'num-cols': 500,
            },
            'medium': {
                'num-rows': 200000,
                'num-cols': 500,
            },
            'large': {
                'num-rows': 400000,
                'num-cols': 500,
            },
        },
        'pca': {
            'warmup': {
                'num-rows': 50000,
                'num-cols': 1000,
            },
            'small': {
                'num-rows': 100000,
                'num-cols': 1000,
            },
            'medium': {
                'num-rows': 200000,
                'num-cols': 1000,
            },
            'large': {
                'num-rows': 400000,
                'num-cols': 1000,
            },
        },
        'summary-statistics': {
            'warmup': {
                'num-rows': 50000,
                'num-cols': 500,
            },
            'small': {
                'num-rows': 100000,
                'num-cols': 1000,
            },
            'medium': {
                'num-rows': 200000,
                'num-cols': 2000,
            },
            'large': {
                'num-rows': 500000,
                'num-cols': 5000,
            },
        },
        # not working for some parameters
        'block-matrix-mult': {
            'warmup': {
                'm': 1000,
                'k': 10000,
                'n': 10000,
            },
            'small': {
                'm': 4000,
                'k': 10000,
                'n': 4000,
            },
            'medium': {
                'm': 4000,
                'k': 10000,
                'n': 10000,
            },
            'large': {
                'm': 5000,
                'k': 5000,
                'n': 5000,
            },
        },
        'pearson': {
            'warmup': {
                'num-rows': 50000,
                'num-cols': 500,
            },
            'small': {
                'num-rows': 100000,
                'num-cols': 1000,
            },
            'medium': {
                'num-rows': 200000,
                'num-cols': 1500,
            },
            'large': {
                'num-rows': 200000,
                'num-cols': 2000,
            },
        },
        'spearman': {
            'warmup': {
                'num-rows': 50000,
                'num-cols': 100,
            },
            'small': {
                'num-rows': 40000,
                'num-cols': 200,
            },
            'medium': {
                'num-rows': 80000,
                'num-cols': 200,
            },
            'large': {
                'num-rows': 60000,
                'num-cols': 200,
            }
        },
        'chi-sq-feature': {
            'warmup': {
                'num-rows': 100000,
                'num-cols': 500,
            },
            'small': {
                'num-rows': 500000,
                'num-cols': 1000,
            },
            'medium': {
                'num-rows': 1000000,
                'num-cols': 1000,
            },
            'large': {
                'num-rows': 2000000,
                'num-cols': 1000,
            },
            'huge': {
                'num-rows': 10000000,
                'num-cols': 1000,
            },
        },
        # not usable due to Java out of Heap space
        'chi-sq-gof': {
            'warmup': {
                'num-rows': 2500000,
                'num-cols': 0,
            },
            'small': {
                'num-rows': 5000000,
                'num-cols': 0,
            },
            'medium': {
                'num-rows': 1000000,
                'num-cols': 0,
            },
            'large': {
                'num-rows': 10000000,
                'num-cols': 0,
            },
        },
        # not usable due to Java out of Heap space
        'chi-sq-mat': {
            'warmup': {
                'num-rows': 1000,
                'num-cols': 10,
            },
            'small': {
                'num-rows': 100000,
                'num-cols': 0,
            },
            'medium': {
                'num-rows': 200000,
                'num-cols': 0,
            },
            'large': {
                'num-rows': 5000,
                'num-cols': 0,
            },
        },
        'word2vec': {
            'warmup': {
                'num-sentences': 50000,
                'num-words': 10000,
            },
            'small': {
                'num-sentences': 100000,
                'num-words': 10000,
            },
            'medium': {
                'num-sentences': 200000,
                'num-words': 10000,
            },
            'large': {
                'num-sentences': 400000,
                'num-words': 10000,
            },
            'huge': {
                'num-iterations': 10,
                'vector-size': 100,
                'min-count': 5,
                'num-sentences': 1000000,
                'num-words': 100000,
            }
        },
        'fp-growth': {
            'warmup': {
                'num-baskets': 1000,
                'num-items': 100,
                'avg-basket-size': 10,
            },
            'small': {
                'num-baskets': 40000,
                'num-items': 1000,
                'avg-basket-size': 10,
            },
            'medium': {
                'num-baskets': 60000,
                'num-items': 1000,
                'avg-basket-size': 10,
            },
            'large': {
                'num-baskets': 80000,
                'num-items': 1000,
                'avg-basket-size': 10,
            },
        },
        'prefix-span': {
            'warmup': {
                'num-items': 500,
                'num-sequences': 250000,
            },
            'small': {
                'num-items': 10000,
                'num-sequences': 500000,
            },
            'medium': {
                'num-items': 20000,
                'num-sequences': 500000,
            },
            'large': {
                'num-items': 40000,
                'num-sequences': 500000,
            },
        }
    }
    # Later updates override earlier ones: datasize-specific generator
    # parameters win over the per-workload defaults, which win over the
    # generic trial settings.
    workload_setting = {}
    workload_setting.update(default_workload_settings)
    workload_setting.update(default_workload_table[workload])
    workload_setting.update(workload_table[workload][datasize])
    cmd_spark = "{}/bin/spark-submit --class mllib.perf.TestRunner".format(ctx.obj['spark_dir'])
    # Fix: '--class mllib.perf.TestRunner' was duplicated here -- it already
    # appears in cmd_spark above, so only --master belongs in the extra params.
    cmd_spark_params = "--master {master}".format(**default_app_settings)
    spark_program = '{}/mllib-tests/target/mllib-perf-tests-assembly.jar'.format(ctx.obj['sparkperf_dir'])
    # 'class' names the TestRunner benchmark and is a positional argument,
    # not a --flag, so pop it before flattening the settings.
    workload_class = workload_setting.pop('class')
    cmd_workload_params = " ".join('--{}={}'.format(k, v) for k, v in workload_setting.items())
    # NOTE(review): output_dir defaults to None, which would make
    # os.path.join raise TypeError -- callers appear to always pass it
    # (see run()); confirm before relying on the default.
    stdout_file = os.path.join(output_dir, 'log.out')
    stderr_file = os.path.join(output_dir, 'log.err')
    cmd = "{} {} {} {} {} 1>> {} 2>>{}".format(
        cmd_spark, cmd_spark_params, spark_program, workload_class, cmd_workload_params,
        stdout_file, stderr_file)
    return cmd
@cli.command()
@click.option('-w', '--workload', help="workload.framework, e.g., wordcount.spark")
@click.option('--datasize', default='size1')
@click.option('--output_dir', default=None, type=click.Path(exists=False, resolve_path=True))
@click.option('--monitoring/--no-monitoring', default=True)
@click.option('--interval', type=int, default=5)
@click.option('--timeout', type=int, default=60*60*24)
@click.option('--slaves')
@click.option('--mode')
@click.pass_context
def run(ctx, workload, datasize, output_dir, monitoring, interval, timeout, slaves, mode):
    """Run one spark-perf workload end to end.

    Builds the spark-submit command via generate_command, executes it under
    a hard timeout (optionally profiling every slave with sar), then writes
    a JSON report with the elapsed time into output_dir.  Returns the
    `successful` flag produced by execute().
    """
    # Recreate a clean output directory for logs and the report.
    execute("rm -rf {}; mkdir -p {}".format(output_dir, output_dir))
    # 2. prepare env setting?
    # @TODO: num-partitions? x cores?
    spark_env = ctx.invoke(get_spark_env, slaves=slaves, mode=mode)
    # Spark picks the -D system properties up through SPARK_SUBMIT_OPTS.
    env_settings = {
        'HADOOP_CONF_DIR': "{}/etc/hadoop".format(ctx.obj['hadoop_dir']),
        'SPARK_SUBMIT_OPTS': " ".join(["-D{}={}".format(k, spark_env[k]) for k in spark_env.keys()])
    }
    # Echo the exported variables so they show up in the run logs.
    execute("export | grep HADOOP_CONF_DIR", environment=env_settings)
    execute("export | grep SPARK_SUBMIT_OPTS", environment=env_settings)
    # 1. generate commands
    cmd = ctx.invoke(generate_command, workload=workload, datasize=datasize, num_partitions=spark_env['sparkperf.executor.num'], output_dir=output_dir)
    # Guard against hung jobs with coreutils `timeout`.
    cmd = "timeout {}s {}".format(timeout, cmd)
    print(cmd)
    timestampt = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    if monitoring:
        # Profile every slave with sar for the duration of the workload.
        monitoring_output = os.path.join(output_dir, 'sar.csv')
        slave_list = list(sorted(slaves.split(' ')))  # NOTE(review): assumes --slaves is space-separated; confirm callers
        with HiBenchClusterProfiler(slave_list, monitoring_output, interval) as app_profiler:
            with helper.Timer() as timer:
                successful = execute(cmd, environment=env_settings, check=False)
    else:
        with helper.Timer() as timer:
            successful = execute(cmd, environment=env_settings, check=False)
    # `timer` remains accessible after the with-block (Python scoping);
    # -1 marks fields this runner does not measure.
    report = {
        'workload': workload,
        'framework': 'spark1.5',
        'datasize': datasize,
        'completed': successful,
        'program': workload,
        'timestamp': timestampt,
        'input_size': -1,
        'elapsed_time': timer.elapsed_secs,
        'throughput_cluster': -1,
        'throughput_node': -1
    }
    report_json = os.path.join(output_dir, 'report.json')
    with open(report_json, 'w') as f:
        json.dump(report, f, indent=4, sort_keys=True)
    return successful
def _clear_fs_cache():
    """Flush dirty pages and drop the OS page/dentry/inode caches.

    Requires passwordless sudo; used to get cold-cache benchmark runs.
    """
    drop_cmd = "sudo bash -c 'sync; echo 3 > /proc/sys/vm/drop_caches'"
    execute(drop_cmd)
200156 | <gh_stars>100-1000
import sys
import json
import goramldir
# NOTE(review): this binds the goramldir.User *module* (not a class) to the
# name User, yet main() calls User(...) directly -- confirm the module object
# is callable or whether `from goramldir import User` was intended.
import goramldir.User as User
# Module-level goramldir API client, shared by main().
client = goramldir.Client()
def main(app_id, app_secret):
    """Demonstrate authenticated goramldir API usage.

    Exchanges the application credentials for an itsyou.online JWT,
    performs a GET on the users endpoint, and shows JSON encoding and
    decoding of a User object.
    """
    # Exchange the application id/secret for a JWT access token.
    token = client.oauth2_client_itsyouonline.get_access_token(app_id, app_secret).text

    # All subsequent requests carry the token as a Bearer header.
    client.oauth2_client_itsyouonline.set_auth_header("Bearer " + token)

    # Simple authenticated GET call against the goramldir server.
    users_resp = client.users.users_get()
    print("resp body =", users_resp.text)

    # Object -> JSON round-trip demo.
    demo_user = User(name="iwan", username="ibk")
    encoded = demo_user.as_json()
    print(encoded)

    # JSON -> object.
    decoded_user = User(json=json.loads(encoded))
    print(decoded_user.as_json())
if __name__ == "__main__":
    '''
    usage : python3 main.py application_id application_secret
    '''
    # argv[1] = application id, argv[2] = application secret (itsyou.online)
    main(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
44142 | <reponame>freedmanlab/Context-Dependent-Gating<filename>plot_results.py
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import matplotlib
from itertools import product
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Embed fonts as TrueType (Type 42) in PDF/PS output so the text in the
# saved figures stays editable in vector-graphics editors.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rcParams["font.family"] = "arial"
def plot_fig2_new():
    """Plot permuted-MNIST accuracy curves for Figure 2.

    Panel 1 (Figure2.pdf): Base / EWC / SI / XdG and the stabilization+XdG
    combinations.  Panels 2-3 (Figure2_control.pdf): rule-cue and split-
    network controls.  Final accuracies are printed at the end.  Reads
    pre-computed results from hard-coded local directories.
    """
    #savedir = './savedir/mnist/'
    savedir = '/media/masse/MySSDataStor1/Context-Dependent Gating/Final MNIST/'
    savedir1 = '/media/masse/MySSDataStor1/Context-Dependent Gating/Final MNIST/'
    all_same_scale = True
    f, ax1 = plt.subplots(1,1,figsize=(4,2.5))
    accuracy = {}
    ylim_min = 0.6
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    base = 'mnist_no_stabilization'
    EWC = 'mnist_EWC'
    SI = 'mnist_SI'
    XdG = 'mnist_nostab'
    #SI= 'neurons_InpDO_mnist_SI'
    # plot_best_result draws the curve and returns the final mean accuracy.
    b1 = plot_best_result(ax1, savedir + 'Baseline/', base, col=[0,0,0], label='Base')
    b2 = plot_best_result(ax1, savedir + 'EWC Fixed/', EWC, col=[0,1,0], label='EWC')
    b3 = plot_best_result(ax1, savedir + 'SI/', SI, col=[1,0,1], label='SI')
    b4 = plot_best_result(ax1, savedir + 'XdG/', XdG, col=[0,0,0], label='XdG',linestyle='--')
    SI_TD = 'neurons_InpDO_mnist_SI'
    EWC_TD = 'neurons_InpDO_mnist_EWC'
    b5 = plot_best_result(ax1, savedir + 'SI XdG/', SI_TD, col=[1,0,1], label='SI+XdG',linestyle='--')
    b6 = plot_best_result(ax1, savedir + 'EWC XdG/', EWC_TD, col=[0,1,0], label='EWC+XdG',linestyle='--')
    accuracy['base'] = b1
    accuracy['EWC'] = b2
    accuracy['SI'] = b3
    accuracy['XdG'] = b4
    accuracy['SI_XdG'] = b5
    accuracy['EWC_XdG'] = b6
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,100)
    add_subplot_details(ax1, [ylim_min,1],[0,100],[])
    plt.tight_layout()
    plt.savefig('Figure2.pdf', format='pdf')
    plt.show()
    # Control figure: rule-cue and split-network variants, with the XdG
    # combinations re-plotted for comparison.
    f, (ax1, ax2) = plt.subplots(2,1,figsize=(4,6))
    ylim_min = 0.85
    b5 = plot_best_result(ax1, savedir + 'SI XdG/', SI_TD, col=[1,0,1], label='SI+XdG',linestyle='--')
    b6 = plot_best_result(ax1, savedir + 'EWC XdG/', EWC_TD, col=[0,1,0], label='EWC+XdG',linestyle='--')
    SI_TDP = 'mnist_SI_rule'
    EWC_TDP = 'mnist_EWC_rule'
    b1 = plot_best_result(ax1, savedir + 'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial')
    b3 = plot_best_result(ax1, savedir + 'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partial')
    accuracy['SI_Rule'] = b1
    accuracy['EWC_Rule'] = b3
    SI_TDPS = 'mnist_SI_split_rule'
    EWC_TDPS = 'mnist_EWC_rule_split'
    b2 = plot_best_result(ax2, savedir + 'SI Rule Split/', SI_TDPS, col=[1,0,1], label='Split SIRule')
    b4 = plot_best_result(ax2, savedir + 'EWC Rule Split/', EWC_TDPS, col=[0,1,0],label='Split EWC+Rule')
    b5 = plot_best_result(ax2, savedir + 'SI XdG/', SI_TD, col=[1,0,1], label='SI+XdG',linestyle='--')
    b6 = plot_best_result(ax2, savedir + 'EWC XdG/', EWC_TD, col=[0,1,0], label='EWC+XdG',linestyle='--')
    # NOTE(review): b1 here still holds the SI+Rule value from the first
    # panel; the split result is b2 -- confirm whether b2 was intended.
    accuracy['SI_Rule_Split'] = b1
    accuracy['EWC_Rule_Split'] = b4
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,100)
    add_subplot_details(ax1, [ylim_min,1],[0,100],[])
    ax2.legend(ncol=3, fontsize=9)
    ax2.grid(True)
    ax2.set_xlim(0,100)
    add_subplot_details(ax2, [ylim_min,1],[0,100],[])
    plt.tight_layout()
    plt.savefig('Figure2_control.pdf', format='pdf')
    plt.show()
    # Dead code kept verbatim below (old panel layout for Figure 2B-D).
    """
    # Figure 2B
    # Plotting: SI+TD Partial, SI, EWC+TD Partial, EWC
    # No dropout on input layer
    b2 = plot_best_result(ax2, savedir + 'SI/', SI, col=[1,0,1], label='SI')
    b4 = plot_best_result(ax2, savedir + 'EWC Fixed/', EWC, col=[0,1,0], label='EWC')
    b1 = plot_best_result(ax2, savedir + 'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial',linestyle='--')
    b3 = plot_best_result(ax2, savedir + 'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partial',linestyle='--')
    accuracy['SI_Partial'] = b1
    accuracy['EWC_Partial'] = b3
    ax2.legend(ncol=2, fontsize=9)
    ax2.grid(True)
    ax2.set_xlim(0,100)
    add_subplot_details(ax2, [ylim_min,1],[0,100],[])
    # Figure 2C
    # Plotting: SI+TD Partial Split, SI+TD Partial, EWC+TD Partial, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TDPS = 'mnist_SI_split_rule'
    EWC_TDPS = 'split_InpDO_mnist_EWC'
    b1 = plot_best_result(ax3, savedir + 'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial',linestyle='--')
    b3 = plot_best_result(ax3, savedir + 'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partial',linestyle='--')
    b2 = plot_best_result(ax3, savedir + 'SI Rule Split/', SI_TDPS, col=[1,0,1], label='Split SI+Partial')
    b4 = plot_best_result(ax3, savedir + 'EWC Split/', EWC_TDPS, col=[0,1,0],label='Split EWC+Partial')
    accuracy['SI_Split'] = b2
    accuracy['EWC_Split'] = b4
    ax3.legend(ncol=2, fontsize=9)
    ax3.grid(True)
    ax3.set_xlim(0,100)
    add_subplot_details(ax3, [ylim_min,1],[0,100],[])
    # Figure 2D
    # Plotting: SI+TD Partial Split, SI+TD Full, EWC+TD Full, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TD = 'neurons_InpDO_mnist_SI'
    EWC_TD = 'neurons_InpDO_mnist_EWC'
    b2 = plot_best_result(ax4, savedir + 'SI Rule Split/', SI_TDPS, col=[1,0,1], label='Split SI++Partial')
    b4 = plot_best_result(ax4, savedir + 'EWC Split/', EWC_TDPS, col=[0,1,0],label='Split EWC++Partial')
    b1 = plot_best_result(ax4, savedir + 'SI XdG/', SI_TD, col=[1,0,1], label='SI+XdG',linestyle='--')
    b3 = plot_best_result(ax4, savedir + 'EWC XdG/', EWC_TD, col=[0,1,0], label='EWC+XdG',linestyle='--')
    accuracy['SI_XdG'] = b1
    accuracy['EWC_XdG'] = b3
    ax4.grid(True)
    ax4.set_xlim(0,100)
    add_subplot_details(ax4, [ylim_min,1],[0,100],[])
    ax4.legend(ncol=2, fontsize=9)
    """
    for k, v in accuracy.items():
        print(k, ' = ', v)
def plot_RNN_fig():
    """Plot RNN continual-learning accuracy (SI vs SI with 75% gating).

    Reads results from a hard-coded local directory and saves
    Figure_ONR.pdf.
    """
    savedir = '/home/masse/Short-term-plasticity-RNN/savedir/CL3/'
    all_same_scale = True
    f, ax1 = plt.subplots(1,1,figsize=(4,3))
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    SI = 'RNN_CL_SI_short_h500_gating0'
    SI_TD = 'RNN_CL_SI_short_h500_gating75'
    b0 = plot_best_result(ax1, savedir, SI, col=[0,0,1], label='SI')
    b1 = plot_best_result(ax1, savedir, SI_TD, col=[0,1,0], label='SI+TD Full')
    print('A: ', b0 , b1)
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,17)
    add_subplot_details(ax1, [0.5,1],[0,25],[])
    ax1.set_xticks([4,8,12,16,20,24])
    plt.tight_layout()
    plt.savefig('Figure_ONR.pdf', format='pdf')
    plt.show()
def plot_ONR_fig1():
    """Plot SI/EWC with and without gating on MNIST (top) and CIFAR (bottom).

    Saves the two-panel figure as Figure_ONR.pdf.
    """
    savedir = './savedir/mnist/'
    all_same_scale = True
    f, (ax1,ax2) = plt.subplots(2,1,figsize=(3,6))
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    #base = 'mnist_no_stabilization'
    EWC = 'mnist_EWC'
    SI = 'mnist_SI'
    SI_TD = 'neurons_InpDO_mnist_SI'
    EWC_TD = 'neurons_InpDO_mnist_EWC'
    b3 = plot_best_result(ax1, savedir, SI, col=[0,0,1], label='SI')
    b2 = plot_best_result(ax1, savedir, EWC, col=[1,0,0], label='EWC')
    b5 = plot_best_result(ax1, savedir, EWC_TD, col=[1,0,1], label='EWC+TD Full',linestyle='--')
    b4 = plot_best_result(ax1, savedir, SI_TD, col=[0,1,0], label='SI+TD Full')
    print('A: ', b2, b3, b4, b5)
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,100)
    add_subplot_details(ax1, [0.0,1],[0,100],[])
    # Bottom panel: same comparison on split-CIFAR results.
    savedir = './savedir/cifar/'
    #base = 'cifar_no_stabilization'
    EWC = 'cifar_EWC'
    SI = 'cifar_SI'
    SI_TD = 'neurons_cifar_SI'
    EWC_TD = 'neurons_cifar_EWC'
    #b1 = plot_best_result(ax2, savedir, base, col=[0,0,0], label='base')
    b2 = plot_best_result(ax2, savedir, EWC, col=[1,0,0], label='EWC')
    b3 = plot_best_result(ax2, savedir, SI, col=[0,0,1], label='SI')
    b4 = plot_best_result(ax2, savedir, SI_TD, col=[0,1,0], label='SI+TD Full')
    b5 = plot_best_result(ax2, savedir, EWC_TD, col=[1,0,1], label='EWC+TD Full',linestyle='--')
    print('B: ', b2, b3, b4, b5)
    ax2.legend(ncol=3, fontsize=9)
    ax2.grid(True)
    add_subplot_details(ax2, ylim = [0,1], xlim = [0,20])
    plt.tight_layout()
    plt.savefig('Figure_ONR.pdf', format='pdf')
    plt.show()
def plot_fig3():
    """Plot SI+XdG accuracy as a function of the gating fraction.

    Compares gating 1-of-2 through 1-of-6 active modules; saves Figure3.pdf
    and prints the final accuracy for each setting.
    """
    savedir = './savedir/mnist/'
    all_same_scale = True
    f, ax1 = plt.subplots(1,1,figsize=(4,3))
    accuracy = {}
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    SI_5 = 'neurons_InpDO_mnist_SI'
    SI_2 = 'neurons_InpDO_mnist_SI_1of2'
    SI_3 = 'neurons_InpDO_mnist_SI_1of3'
    SI_4 = 'neurons_InpDO_mnist_SI_1of4'
    SI_6 = 'neurons_InpDO_mnist_SI_1of6'
    #SI= 'neurons_InpDO_mnist_SI'
    b1 = plot_best_result(ax1, savedir, SI_2, col=[1,0,0], label='SI_2')
    b2 = plot_best_result(ax1, savedir, SI_3, col=[0,0,1], label='SI_3')
    b3 = plot_best_result(ax1, savedir, SI_4, col=[0,1,0], label='SI_4')
    b4 = plot_best_result(ax1, savedir, SI_5, col=[0,0,0], label='SI_5')
    b5 = plot_best_result(ax1, savedir, SI_6, col=[1,0,1], label='SI_6')
    accuracy['SI_2'] = b1
    accuracy['SI_3'] = b2
    accuracy['SI_4'] = b3
    accuracy['SI_5'] = b4
    accuracy['SI_6'] = b5
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,500)
    add_subplot_details(ax1, [0.9,1],[0,100],[])
    plt.tight_layout()
    plt.savefig('Figure3.pdf', format='pdf')
    plt.show()
    for k, v in accuracy.items():
        print(k , ' = ', v)
def plot_fig4():
    """Plot SI vs SI+XdG on the 500-permutation MNIST benchmark.

    Saves Figure4_v3.pdf and prints final accuracies.
    """
    savedir = '/home/masse/Spin-TD-Network/savedir/mnist/500perms/'
    all_same_scale = True
    f, ax1 = plt.subplots(1,1,figsize=(4,3))
    accuracy = {}
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    SI = 'mnist_SI_500perms'
    SI_XCG = 'neurons_InpDO_mnist_SI_500perms'
    #SI= 'neurons_InpDO_mnist_SI'
    b1 = plot_best_result(ax1, savedir, SI, col=[0,0,1], label='SI')
    b2 = plot_best_result(ax1, savedir, SI_XCG, col=[1,0,0], label='SI_XCG')
    accuracy['SI'] = b1
    accuracy['SI XdG'] = b2
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,500)
    add_subplot_details(ax1, [0.5,1],[0,500],[])
    plt.tight_layout()
    plt.savefig('Figure4_v3.pdf', format='pdf')
    plt.show()
    for k, v in accuracy.items():
        print(k , ' = ', v)
def plot_fig2():
    """Plot the four-panel permuted-MNIST comparison (older Figure 2 layout).

    Panels: (A) Base/EWC/SI, (B) rule-cue variants, (C) split-network
    variants, (D) XdG combinations.  Saves Figure2.pdf and prints the
    final accuracy of each condition.
    """
    #savedir = './savedir/mnist/'
    savedir = '/media/masse/MySSDataStor1/Context-Dependent Gating/Final MNIST/'
    savedir1 = '/media/masse/MySSDataStor1/Context-Dependent Gating/Final MNIST/'
    all_same_scale = True
    f, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(8,6))
    accuracy = {}
    ylim_min = 0.6
    # Figure 2A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    base = 'mnist_no_stabilization'
    EWC = 'mnist_EWC'
    SI = 'mnist_SI'
    #SI= 'neurons_InpDO_mnist_SI'
    b3 = plot_best_result(ax1, savedir + 'SI/', SI, col=[1,0,1], label='SI')
    b1 = plot_best_result(ax1, savedir + 'Baseline/', base, col=[0,0,0], label='Base')
    b2 = plot_best_result(ax1, savedir + 'EWC Fixed/', EWC, col=[0,1,0], label='EWC')
    accuracy['base'] = b1
    accuracy['EWC'] = b2
    accuracy['SI'] = b3
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    ax1.set_xlim(0,100)
    add_subplot_details(ax1, [ylim_min,1],[0,100],[])
    # Figure 2B
    # Plotting: SI+TD Partial, SI, EWC+TD Partial, EWC
    # No dropout on input layer
    SI_TDP = 'mnist_SI_rule'
    EWC_TDP = 'mnist_EWC_rule'
    b2 = plot_best_result(ax2, savedir + 'SI/', SI, col=[1,0,1], label='SI')
    b4 = plot_best_result(ax2, savedir + 'EWC Fixed/', EWC, col=[0,1,0], label='EWC')
    b1 = plot_best_result(ax2, savedir + 'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial',linestyle='--')
    b3 = plot_best_result(ax2, savedir + 'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partial',linestyle='--')
    accuracy['SI_Partial'] = b1
    accuracy['EWC_Partial'] = b3
    ax2.legend(ncol=2, fontsize=9)
    ax2.grid(True)
    ax2.set_xlim(0,100)
    add_subplot_details(ax2, [ylim_min,1],[0,100],[])
    # Figure 2C
    # Plotting: SI+TD Partial Split, SI+TD Partial, EWC+TD Partial, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TDPS = 'mnist_SI_split_rule'
    EWC_TDPS = 'split_InpDO_mnist_EWC'
    b1 = plot_best_result(ax3, savedir + 'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial',linestyle='--')
    b3 = plot_best_result(ax3, savedir + 'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partial',linestyle='--')
    b2 = plot_best_result(ax3, savedir + 'SI Rule Split/', SI_TDPS, col=[1,0,1], label='Split SI+Partial')
    b4 = plot_best_result(ax3, savedir + 'EWC Split/', EWC_TDPS, col=[0,1,0],label='Split EWC+Partial')
    accuracy['SI_Split'] = b2
    accuracy['EWC_Split'] = b4
    ax3.legend(ncol=2, fontsize=9)
    ax3.grid(True)
    ax3.set_xlim(0,100)
    add_subplot_details(ax3, [ylim_min,1],[0,100],[])
    # Figure 2D
    # Plotting: SI+TD Partial Split, SI+TD Full, EWC+TD Full, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TD = 'neurons_InpDO_mnist_SI'
    EWC_TD = 'neurons_InpDO_mnist_EWC'
    XdG = 'mnist_nostab'
    #b2 = plot_best_result(ax4, savedir + 'SI Rule Split/', SI_TDPS, col=[1,0,1], label='Split SI++Partial')
    #b4 = plot_best_result(ax4, savedir + 'EWC Split/', EWC_TDPS, col=[0,1,0],label='Split EWC++Partial')
    # NOTE(review): b3 is assigned twice below (SI then EWC+XdG); the
    # accuracy['SI_XdG'] entry uses b1, so only verify the b3 reuse is
    # intentional.
    b3 = plot_best_result(ax4, savedir + 'SI/', SI, col=[1,0,1], label='SI')
    b2 = plot_best_result(ax4, savedir + 'EWC Fixed/', EWC, col=[0,1,0], label='EWC')
    b1 = plot_best_result(ax4, savedir + 'SI XdG/', SI_TD, col=[1,0,1], label='SI+XdG',linestyle='--')
    b3 = plot_best_result(ax4, savedir + 'EWC XdG/', EWC_TD, col=[0,1,0], label='EWC+XdG',linestyle='--')
    b5 = plot_best_result(ax4, savedir + 'XdG/', XdG, col=[0,0,0], label='XdG',linestyle='--')
    accuracy['SI_XdG'] = b1
    accuracy['EWC_XdG'] = b3
    accuracy['XdG'] = b5
    ax4.grid(True)
    ax4.set_xlim(0,100)
    add_subplot_details(ax4, [ylim_min,1],[0,100],[])
    ax4.legend(ncol=2, fontsize=9)
    plt.tight_layout()
    plt.savefig('Figure2.pdf', format='pdf')
    plt.show()
    for k, v in accuracy.items():
        print(k, ' = ', v)
def mnist_table():
    """Write a TSV summary table of permuted-MNIST accuracies.

    For each method, looks up the best hyper-parameter setting via
    retrieve_best_result and records accuracy after tasks 1/10/20/50/100
    into mnist_table_data.tsv.  Results live in numbered archive dirs
    (archive0/1/2), selected per-method by `archs`.
    """
    savedir = './savedir/perm_mnist/archive'
    base = 'mnist_n2000_no_stabilization' # archive1
    SI = 'perm_mnist_n2000_d1_no_topdown' # archive0
    EWC = 'mnist_n2000_EWC' # archive1
    SI_TDP = 'perm_mnist_n2000_d1_bias' # archive0
    EWC_TDP = 'perm_mnist_n2000_d1_EWC_bias' # archive1
    SI_TDPS = 'mnist_n2000_pathint_split_oc' # archive2
    EWC_TDPS = 'mnist_n2000_EWC_split_oc' # archive2
    SI_TDF = 'perm_mnist_n2000_d1_1of5' # archive0
    EWC_TDF = 'perm_mnist_n2000_d1_EWC_1of5' # archive1
    # archs[i] picks the archive directory for locs[i]/names[i].
    archs = [1,0,1,0,1,2,2,0,1]
    names = ['Base', 'SI', 'EWC', 'SI + Partial', 'EWC + Partial', \
        'SI + Partial + Split', 'EWC + Partial + Split',\
        'SI + Full', 'EWC + Full']
    locs = [base, SI, EWC, SI_TDP, EWC_TDP, SI_TDPS, EWC_TDPS, SI_TDF, EWC_TDF]
    with open('mnist_table_data.tsv', 'w') as f:
        f.write('Name\tC\tT1\tT10\tT20\tT50\tT100\n')
        for a, s, n in zip(archs, locs, names):
            # c_opt is the best stabilization constant; acc is per-task accuracy.
            c_opt, acc = retrieve_best_result(savedir+str(a)+'/', s)
            f.write(n + '\t' + str(c_opt) + '\t' + str(acc[0])
                + '\t' + str(acc[9])
                + '\t' + str(acc[19])
                + '\t' + str(acc[49])
                + '\t' + str(acc[99]) + '\n')
def cifar_table():
    """Write a TSV summary table of split-CIFAR accuracies.

    For each method, looks up the best hyper-parameter setting via
    retrieve_best_result and records accuracy after tasks 1/10/20 into
    cifar_table_data.tsv.
    """
    savedir = './savedir/cifar_no_multihead/'
    base = 'cifar_n1000_no_stabilization'
    SI = 'cifar_n1000_pathint'
    EWC = 'cifar_n1000_EWC'
    SI_TDP = 'cifar_n1000_partial_pathint'
    EWC_TDP = 'cifar_n1000_partial_EWC'
    SI_TDPS = 'cifar_n1164_split_pathint'
    EWC_TDPS = 'cifar_n1164_split_EWC'
    SI_TDF = 'cifar_n1000_full_pathint'
    EWC_TDF = 'cifar_n1000_full_EWC'
    names = ['Base', 'SI', 'EWC', 'SI + Partial', 'EWC + Partial', \
        'SI + Partial + Split', 'EWC + Partial + Split',\
        'SI + Full', 'EWC + Full']
    locs = [base, SI, EWC, SI_TDP, EWC_TDP, SI_TDPS, EWC_TDPS, SI_TDF, EWC_TDF]
    with open('cifar_table_data.tsv', 'w') as f:
        f.write('Name\tC\tT1\tT10\tT20\n')
        for s, n in zip(locs, names):
            # c_opt is the best stabilization constant; acc is per-task accuracy.
            c_opt, acc = retrieve_best_result(savedir, s)
            f.write(n + '\t' + str(c_opt) + '\t' + str(acc[0])
                + '\t' + str(acc[9])
                + '\t' + str(acc[19]) + '\n')
def fig2_inset():
    """Plot the zoomed inset for Figure 2 (split vs full-gating variants).

    Shows the figure interactively; nothing is saved to disk.
    """
    f, ax = plt.subplots(1,1)
    # Figure 2D
    # Plotting: SI+TD Partial Split, SI+TD Full, EWC+TD Full, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TD = 'perm_mnist_n2000_d1_1of5'
    SI_TDPS = 'mnist_n2000_pathint_split_oc'
    EWC_TD = 'perm_mnist_n2000_d1_EWC_1of5'
    EWC_TDPS = 'mnist_n2000_EWC_split_oc'
    b2 = plot_best_result(ax, './savedir/perm_mnist/archive2/', SI_TDPS, col=[0.7,0.7,0], label='Split SI+TD Par.')
    b4 = plot_best_result(ax, './savedir/perm_mnist/archive2/', EWC_TDPS, col=[0.7,0.7,0.7],label='Split EWC+TD Par.')
    b1 = plot_best_result(ax, './savedir/perm_mnist/archive0/', SI_TD, col=[0,0,1], label='SI+TD Full')
    b3 = plot_best_result(ax, './savedir/perm_mnist/archive1/', EWC_TD, col=[0,1,0], label='EWC+TD Full')
    ax.grid(True)
    ax.set_yticks([0.85,0.90,0.95,1.0])
    ax.set_xlim(0,100)
    add_subplot_details(ax, [0.85,1],[])
    plt.tight_layout()
    plt.show()
def plot_fig5():
    """Plot the four-panel ImageNet comparison.

    Panels: (A) Base/EWC/SI with and without multi-head (MH) output,
    (B) rule-cue variants, (C) split-network variants, (D) XdG
    combinations.  Saves Figure3.pdf and prints final accuracies.
    """
    savedir = '/home/masse/Context-Dependent-Gating/savedir/ImageNet/'
    savedir1 = '/media/masse/MySSDataStor1/Context-Dependent Gating/Final ImageNet/'
    f, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(8,6))
    accuracy = {}
    # Figure 5A
    # Plotting: SI, EWC, base
    # No dropout on input layer
    base = 'imagenet_base_omega'
    EWC = 'ImageNet_EWC_omega'
    SI = 'imagenet_SI_omega'
    base_MH = 'imagenet_baseMH_'
    EWC_MH = 'ImageNet_EWC_MH_omega'
    SI_MH = 'imagenet_SI_MH_'
    b1 = plot_best_result(ax1, savedir, base, col=[0,0,0], label='base')
    b2 = plot_best_result(ax1, savedir1+'EWC/', EWC, col=[0,1,0], label='EWC')
    b3 = plot_best_result(ax1, savedir, SI, col=[1,0,1], label='SI')
    b4 = plot_best_result(ax1, savedir, base_MH, col=[0,0,0], label='base MH', linestyle='--')
    b5 = plot_best_result(ax1, savedir1+'EWC MH/', EWC_MH, col=[0,1,0], label='EWC MH', linestyle='--')
    b6 = plot_best_result(ax1, savedir, SI_MH, col=[1,0,1], label='SI MH', linestyle='--')
    accuracy['base'] = b1
    accuracy['EWC'] = b2
    accuracy['SI'] = b3
    accuracy['base_MH'] = b4
    accuracy['EWC_MH'] = b5
    accuracy['SI_MH'] = b6
    print('A ', b1,b2,b3,b4,b5,b6)
    ax1.legend(ncol=3, fontsize=9)
    ax1.grid(True)
    add_subplot_details(ax1, ylim = [0,1], xlim = [0,100])
    # Figure 3B2
    # Plotting: SI+TD Partial, SI, EWC+TD Partial, EWC
    # No dropout on input layer
    SI_TDP = 'ImageNet_SI_rule_'
    EWC_TDP = 'ImageNet_EWC_rule_' # InpDO was a typo, not actually using drop out on iputs
    b2 = plot_best_result(ax2, savedir, SI, col=[1,0,1], label='SI')
    b4 = plot_best_result(ax2, savedir1+'EWC/', EWC, col=[0,1,0], label='EWC')
    b1 = plot_best_result(ax2, savedir1+'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial', linestyle='--')
    b3 = plot_best_result(ax2, savedir1+'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partal', linestyle='--')
    accuracy['SI Partial'] = b1
    accuracy['EWC Partial'] = b3
    ax2.set_xlim(0,100)
    ax2.legend(ncol=2, fontsize=9)
    ax2.grid(True)
    add_subplot_details(ax2, ylim = [0,1], xlim = [0,100])
    # Figure 3C
    # Plotting: SI+TD Partial Split, SI+TD Partial, EWC+TD Partial, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TDPS = 'ImageNet_SI_rule_split_'
    EWC_TDPS = 'ImageNet_EWC_rule_split_'
    # NOTE(review): b1 below is read from 'SI Rule Split/' but passes the
    # non-split model name SI_TDP -- confirm SI_TDPS was intended.
    b1 = plot_best_result(ax3, savedir1+'SI Rule Split/', SI_TDP, col=[1,0,1], label='Split SI+TD Partial')
    b3 = plot_best_result(ax3, savedir1+'EWC Rule/', EWC_TDP, col=[0,1,0], label='EWC+Partal', linestyle='--')
    b2 = plot_best_result(ax3, savedir1+'SI Rule/', SI_TDP, col=[1,0,1], label='SI+Partial', linestyle='--')
    b4 = plot_best_result(ax3, savedir1+'EWC Rule Split/', EWC_TDPS, col=[0,1,0],label='Split EWC+Partial')
    accuracy['SI Split'] = b1
    accuracy['EWC Split'] = b4
    ax3.set_xlim(0,100)
    ax3.legend(ncol=2, fontsize=9)
    ax3.grid(True)
    add_subplot_details(ax3, ylim = [0,1], xlim = [0,100])
    # Figure 3D
    # Plotting: SI+TD Partial Split, SI+TD Full, EWC+TD Full, EWC+TD Partial Split
    # Dropout irrelevant?
    SI_TD = 'ImageNet_SI_XdG_'
    EWC_TD = 'ImageNet_EWC_XdG_'
    XdG = 'imagenet_nostab'
    #b2 = plot_best_result(ax4, savedir1+'SI Rule Split/', SI_TDP, col=[1,0,1], label='Split SI+TD Partial')
    #b4 = plot_best_result(ax4, savedir1+'EWC Rule Split/', EWC_TDPS, col=[0,1,0],label='Split EWC+Partial')
    b2 = plot_best_result(ax4, savedir1+'EWC/', EWC, col=[0,1,0], label='EWC')
    b3 = plot_best_result(ax4, savedir, SI, col=[1,0,1], label='SI')
    b1 = plot_best_result(ax4, savedir1+'EWC XdG/', EWC_TD, col=[0,1,0],label='EWC+XdG', linestyle='--')
    b3 = plot_best_result(ax4, savedir1+'SI XdG/', SI_TD, col=[1,0,1],label='SI+XdG', linestyle='--')
    b5 = plot_best_result(ax4, savedir1+'XdG/', XdG, col=[0,0,0],label='SI+XdG', linestyle='--')
    accuracy['SI XdG'] = b3
    accuracy['EWC XdG'] = b1
    accuracy['XdG'] = b5
    ax4.set_xlim(0,100)
    ax4.legend(ncol=2, fontsize=9)
    ax4.grid(True)
    add_subplot_details(ax4, ylim = [0,1], xlim = [0,100])
    plt.tight_layout()
    plt.savefig('Figure3.pdf', format='pdf')
    plt.show()
    for k, v in accuracy.items():
        print(k , ' = ', v)
def plot_fig5B():
    """Plot Figure 5B: ImageNet accuracy for split vs. XdG variants of SI/EWC.

    Draws the four best-result curves on one axis, saves the figure and shows
    it.  Relies on the module-level helpers `plot_best_result` and
    `add_subplot_details`.
    """
    savedir = '/home/masse/Context-Dependent-Gating/savedir/ImageNet/'
    f, ax4 = plt.subplots(1, 1, figsize=(8, 6))
    accuracy = {}
    # File-name prefixes of the saved results for each method.
    SI_TDPS = 'imagenet_SI_split_'
    EWC_TDPS = 'imagenet_EWC_split_'
    SI_TD = 'imagenet_SI_XdG_'
    EWC_TD = 'imagenet_EWC_XdG_'
    b2 = plot_best_result(ax4, savedir, SI_TDPS, col=[1, 0, 1], label='Split SI+Partial')
    b4 = plot_best_result(ax4, savedir, EWC_TDPS, col=[0, 1, 0], label='Split EWC+Partial')
    b1 = plot_best_result(ax4, savedir, EWC_TD, col=[0, 1, 0], label='EWC+XdG', linestyle='--')
    b3 = plot_best_result(ax4, savedir, SI_TD, col=[1, 0, 1], label='SI+XdG', linestyle='--')
    # BUG FIX: b1 holds the EWC+XdG result and b3 the SI+XdG result; the
    # original stored them under each other's keys.
    accuracy['SI XdG'] = b3
    accuracy['EWC XdG'] = b1
    ax4.set_xlim(0, 100)
    ax4.legend(ncol=2, fontsize=9)
    ax4.grid(True)
    add_subplot_details(ax4, ylim=[0, 1], xlim=[0, 100])
    plt.tight_layout()
    # NOTE(review): 'Figure3.pdf' looks like a copy-paste from plot_fig3 and
    # will overwrite that figure -- confirm the intended file name.
    plt.savefig('Figure3.pdf', format='pdf')
    plt.show()
def plot_ARL_fig():
    """Plot the ARL figure: SI alone versus SI combined with XdG on ImageNet."""
    results_dir = '/home/masse/Context-Dependent-Gating/savedir/ImageNet/'
    fig, axis = plt.subplots(1, 1, figsize=(8, 6))
    # (accuracy-dict key, results file prefix, RGB colour, legend text),
    # in drawing order.
    curves = [
        ('SI', 'imagenet_SI_omega', [0, 0, 1], 'SI'),
        ('SI_XdG', 'imagenet_SI_XdG_', [1, 0, 0], 'SI+XdG'),
    ]
    accuracy = {}
    for key, prefix, colour, legend in curves:
        accuracy[key] = plot_best_result(axis, results_dir, prefix,
                                         col=colour, label=legend)
    axis.set_xlim(0, 100)
    axis.legend(ncol=2, fontsize=9)
    axis.grid(True)
    add_subplot_details(axis, ylim=[0, 0.7], xlim=[0, 100])
    plt.tight_layout()
    plt.savefig('Figure_ARL.pdf', format='pdf')
    plt.show()
def plot_mnist_figure():
    """Plot Figure 1: permuted-MNIST accuracy for the SI variants.

    Left panel: SI alone, SI + top-down, SI + split-in-5, SI + inhibitory
    top-down (1 of 5).  Right panel: SI + inhibitory top-down selecting
    1 of 2..6 units.  Saves the result to Fig1.pdf.  Uses the module-level
    `data_dir` (defined elsewhere in this file) as the results directory.
    """
    f = plt.figure(figsize=(6,2.5))
    # SI only, no top-down
    SI_fn = 'perm_mnist_n2000_d1_no_topdown_omega'
    # SI only + top-down
    SI_td_fn = 'perm_mnist_n2000_d1_bias_omega'
    # SI only + split in 5 + top-down
    SI_split_fn = 'perm_mnist_n735_d1_bias_20tasks'
    # SI + INH TD, selecting one out of 4
    SI_inh_td4_fn = 'perm_mnist_n2000_d1_1of4_omega'
    # SI + INH TD, selecting one out of 5
    SI_inh_td5_fn = 'perm_mnist_n2000_d1_1of5_omega'
    # SI + INH TD, selecting one out of 6
    SI_inh_td6_fn = 'perm_mnist_n2000_d1_1of6_omega'
    # SI + INH TD, selecting one out of 3
    SI_inh_td3_fn = 'perm_mnist_n2000_d1_1of3_omega'
    # Left panel: main comparison of SI variants.
    ax = f.add_subplot(1, 2, 1)
    plot_best_result(ax, data_dir, SI_fn, col = [0,0,1], description = 'SI only')
    plot_best_result(ax, data_dir, SI_td_fn, col = [1,0,0])
    plot_best_result(ax, data_dir, SI_split_fn, col = [0,1,0], split = 5)
    plot_best_result(ax, data_dir, SI_inh_td5_fn, col = [1,0,1], description = 'SI + TD date 80%')
    add_subplot_details(ax, [0.8, 1],[0.85, 0.9,0.95])
    # Right panel: sweep over the number of selected units (1-of-N).
    ax = f.add_subplot(1, 2, 2)
    plot_best_result(ax, data_dir, SI_inh_td5_fn, col = [1,0,1])
    plot_best_result(ax, data_dir, SI_inh_td4_fn, col = [0,1,0])
    plot_best_result(ax, data_dir, SI_inh_td6_fn, col = [0,0,1], description = 'SI + TD date 86.67%')
    plot_best_result(ax, data_dir, SI_inh_td3_fn, col = [0,1,1])
    plot_best_result(ax, data_dir, SI_inh_td2_fn, col = [0,0,0])
    add_subplot_details(ax, [0.9, 1], [0.95])
    plt.tight_layout()
    plt.savefig('Fig1.pdf', format='pdf')
    plt.show()
def add_subplot_details(ax, ylim=(0, 1), xlim=(0, 100), yminor=()):
    """Apply the standard axis styling used by every figure in this module.

    Hides the top/right spines, sets limits and axis labels, and draws a
    dashed horizontal guide line at each y-value in `yminor`.

    ax     -- matplotlib Axes to style.
    ylim   -- (low, high) y-axis limits.
    xlim   -- (low, high) x-axis limits.
    yminor -- iterable of y-values for dashed guide lines.
    """
    # Tuples replace the original list defaults, avoiding the shared
    # mutable-default-argument pitfall; the unused local `d` was removed.
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_ylim([ylim[0], ylim[1]])
    for level in yminor:
        # Dashed guide line across the full task range.
        ax.plot([0, 100], [level, level], 'k--')
    ax.set_xlim([xlim[0], xlim[1]])
    ax.set_ylabel('Mean task accuracy')
    ax.set_xlabel('Task number')
def plot_best_result(ax, data_dir, prefix, col = [0,0,1], split = 1, description = [], label=None, linestyle = '-'):
    """Find the best hyper-parameter setting for `prefix` and plot its curve.

    Scans `data_dir` for pickled result files whose names start with `prefix`,
    averages the final accuracy over repeats (the `_v<n>` suffix) for every
    (omega_c, omega_xi) combination, picks the best combination, plots its
    mean per-task accuracy on `ax`, and returns the accuracies after task 10
    and after the final task.

    File names encode hyper-parameters positionally, e.g.
    ``<prefix>...omega<c>_xi<x>_v<v>.pkl``; the single-character slicing
    below (f[-12], f[-8], f[-5]) depends on that exact layout --
    NOTE(review): confirm against the actual file-naming scheme.
    """
    # Collect (file name, final accuracy, omega_c) for every matching file.
    name_and_data = []
    for full_fn in os.listdir(data_dir):
        if full_fn.startswith(prefix):
            with open(data_dir + full_fn, 'rb') as fh:
                x = pickle.load(fh)
            name_and_data.append((full_fn, x['accuracy_full'][-1], x['par']['omega_c']))
    # Debug output for one particular experiment, kept from the original.
    if prefix == 'mnist_SI_rule':
        print('mnist_STI_rule')
        print(name_and_data)
        print(os.listdir(data_dir))
        print(data_dir)
        print('XXXX')
    # Discover the distinct omega_c (cids), omega_xi (xids) and repeat (vids)
    # identifiers present in the file names.
    cids = []
    vids = []
    xids = []
    om_c = []
    for (f, _, oc) in name_and_data:
        if 'xi' in f:
            if f[-12] not in cids:
                cids.append(f[-12])
            if f[-8] not in xids:
                xids.append(f[-8])
            if f[-5] not in vids:
                vids.append(f[-5])
        else:
            # Without an xi token, omega_c may be one or two characters wide.
            if f[-9].isdigit():
                c = f[-9:-7]
            else:
                c = f[-8]
            if c not in cids:
                cids.append(c)
            if f[-5] not in vids:
                vids.append(f[-5])
            xids = [0]
        om_c.append(oc)
    # Mean final accuracy and sample count per (xi, c) cell.
    accuracies = np.zeros((len(xids), len(cids)))
    count = np.zeros((len(xids), len(cids)))
    cids = sorted(cids)
    vids = sorted(vids)
    xids = sorted(xids)
    print(prefix, cids, vids, xids)
    for i, c_id in enumerate(cids):
        for v_id in vids:
            for j, x_id in enumerate(xids):
                text_c = 'omega' + str(c_id)
                text_v = '_v' + str(v_id)
                text_x = '_xi' + str(x_id)
                for full_fn in os.listdir(data_dir):
                    if full_fn.startswith(prefix) and 'xi' in full_fn and text_c in full_fn and text_v in full_fn and text_x in full_fn:
                        with open(data_dir + full_fn, 'rb') as fh:
                            x = pickle.load(fh)
                        accuracies[j, i] += x['accuracy_full'][-1]
                        count[j, i] += 1
                    elif full_fn.startswith(prefix) and not 'xi' in full_fn and text_c in full_fn and text_v in full_fn:
                        with open(data_dir + full_fn, 'rb') as fh:
                            x = pickle.load(fh)
                        accuracies[j, i] += x['accuracy_full'][-1]
                        count[j, i] += 1
    # Average; the epsilon avoids division by zero for empty cells.
    accuracies /= (1e-16 + count)
    accuracies = np.reshape(accuracies, (1, -1))
    print(prefix)
    print(accuracies)
    # Flat index of the best cell; row-major, so % gives c and // gives xi.
    ind_best = np.argsort(accuracies)[-1]
    best_c = int(ind_best[-1] % len(cids))
    best_xi = ind_best[-1] // len(cids)
    # Reload every repeat of the winning combination and average the curves.
    task_accuracy = []
    for v_id in vids:
        text_c = 'omega' + str(cids[best_c])
        # BUG FIX: the original built `text_xi` here but then tested the stale
        # `text_x` left over from the search loop above, so the wrong xi file
        # could be selected.  Build and use the same '_xi<val>' token.
        text_xi = '_xi' + str(xids[best_xi])
        text_v = '_v' + str(v_id)
        print(prefix, text_c, text_xi, text_v)
        for full_fn in os.listdir(data_dir):
            if full_fn.startswith(prefix) and 'xi' in full_fn and text_c in full_fn and text_v in full_fn and text_xi in full_fn:
                with open(data_dir + full_fn, 'rb') as fh:
                    x = pickle.load(fh)
                task_accuracy.append(x['accuracy_full'])
                print(prefix, ' ', full_fn, ' ', x['par']['stabilization'], ' omega C ', x['par']['omega_c'])
            elif full_fn.startswith(prefix) and not 'xi' in full_fn and text_c in full_fn and text_v in full_fn:
                with open(data_dir + full_fn, 'rb') as fh:
                    x = pickle.load(fh)
                task_accuracy.append(x['accuracy_full'])
                print(prefix, ' ', full_fn, ' ', x['par']['stabilization'], x['par']['gating_type'], x['par']['multihead'], ' omega C ', x['par']['omega_c'], x['par']['omega_xi'])
    task_accuracy = np.mean(np.stack(task_accuracy), axis=0)
    # For split runs, repeat each entry `split` times so the x-axis stays in
    # units of individual tasks.
    if split > 1:
        task_accuracy = np.array(task_accuracy)
        task_accuracy = np.tile(np.reshape(task_accuracy, (-1, 1)), (1, split))
        task_accuracy = np.reshape(task_accuracy, (1, -1))[0, :]
    if not description == []:
        print(description, ' ACC after 10 trials = ', task_accuracy[9], ' after 30 trials = ', task_accuracy[29], \
              ' after 100 trials = ', task_accuracy[99])
    ax.plot(np.arange(1, np.shape(task_accuracy)[0] + 1), task_accuracy, color=col, linestyle=linestyle, label=label)
    # Accuracy after task 10 and after the final task.
    return task_accuracy[[9, -1]]
def retrieve_best_result(data_dir, fn):
    """Return (omega_c, accuracy curve) of the best run matching prefix `fn`.

    Scans `data_dir` for pickled result dictionaries whose file names start
    with `fn` and picks the run with the highest final 'accuracy_full' entry.
    Returns (0.0, None) when no file matches (the original raised
    UnboundLocalError in that case).  `data_dir` is concatenated directly
    with the file name, so it must end with a path separator.
    """
    best_accuracy = -1
    val_c = 0.
    task_accuracy = None  # BUG FIX: was unbound when no file matched
    for f in os.listdir(data_dir):
        if f.startswith(fn):
            # Each result file is a pickled dict holding the accuracy history
            # and the hyper-parameters of the run.  `with` fixes the original
            # file-handle leak.
            with open(data_dir + f, 'rb') as handle:
                x = pickle.load(handle)
            if x['accuracy_full'][-1] > best_accuracy:
                best_accuracy = x['accuracy_full'][-1]
                task_accuracy = x['accuracy_full']
                val_c = x['par']['omega_c']
    return val_c, task_accuracy
| StarcoderdataPython |
6529815 | <reponame>manju-rn/synapse
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.rest.client import login, room
from synapse.storage.engines import PostgresEngine
from tests.unittest import HomeserverTestCase, skip_unless
from tests.utils import USE_POSTGRES_FOR_TESTS
class EventSearchInsertionTest(HomeserverTestCase):
    """Tests for how events are inserted into the `event_search` table.

    Uses `assertEqual` throughout: `assertEquals` is a deprecated alias that
    was removed in Python 3.12.
    """

    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        room.register_servlets,
    ]

    def test_null_byte(self):
        """
        Postgres/SQLite don't like null bytes going into the search tables. Internally
        we replace those with a space.

        Ensure this doesn't break anything.
        """

        # Register a user and create a room, create some messages
        self.register_user("alice", "password")
        access_token = self.login("alice", "password")
        room_id = self.helper.create_room_as("alice", tok=access_token)

        # Send messages and ensure they don't cause an internal server
        # error
        for body in ["hi\u0000bob", "another message", "hi alice"]:
            response = self.helper.send(room_id, body, tok=access_token)
            self.assertIn("event_id", response)

        # Check that search works for the message where the null byte was replaced
        store = self.hs.get_datastores().main
        result = self.get_success(
            store.search_msgs([room_id], "hi bob", ["content.body"])
        )
        self.assertEqual(result.get("count"), 1)
        if isinstance(store.database_engine, PostgresEngine):
            self.assertIn("hi", result.get("highlights"))
            self.assertIn("bob", result.get("highlights"))

        # Check that search works for an unrelated message
        result = self.get_success(
            store.search_msgs([room_id], "another", ["content.body"])
        )
        self.assertEqual(result.get("count"), 1)
        if isinstance(store.database_engine, PostgresEngine):
            self.assertIn("another", result.get("highlights"))

        # Check that search works for a search term that overlaps with the message
        # containing a null byte and an unrelated message.
        result = self.get_success(store.search_msgs([room_id], "hi", ["content.body"]))
        self.assertEqual(result.get("count"), 2)
        result = self.get_success(
            store.search_msgs([room_id], "hi alice", ["content.body"])
        )
        if isinstance(store.database_engine, PostgresEngine):
            self.assertIn("alice", result.get("highlights"))

    def test_non_string(self):
        """Test that non-string `value`s are not inserted into `event_search`.

        This is particularly important when using sqlite, since a sqlite column can hold
        both strings and integers. When using Postgres, integers are automatically
        converted to strings.

        Regression test for #11918.
        """
        store = self.hs.get_datastores().main

        # Register a user and create a room
        user_id = self.register_user("alice", "password")
        access_token = self.login("alice", "password")
        room_id = self.helper.create_room_as("alice", tok=access_token)
        room_version = self.get_success(store.get_room_version(room_id))

        # Construct a message with a numeric body to be received over federation
        # The message can't be sent using the client API, since Synapse's event
        # validation will reject it.
        prev_event_ids = self.get_success(store.get_prev_events_for_room(room_id))
        prev_event = self.get_success(store.get_event(prev_event_ids[0]))
        prev_state_map = self.get_success(
            self.hs.get_storage().state.get_state_ids_for_event(prev_event_ids[0])
        )

        event_dict = {
            "type": EventTypes.Message,
            "content": {"msgtype": "m.text", "body": 2},
            "room_id": room_id,
            "sender": user_id,
            "depth": prev_event.depth + 1,
            "prev_events": prev_event_ids,
            "origin_server_ts": self.clock.time_msec(),
        }
        builder = self.hs.get_event_builder_factory().for_room_version(
            room_version, event_dict
        )
        event = self.get_success(
            builder.build(
                prev_event_ids=prev_event_ids,
                auth_event_ids=self.hs.get_event_auth_handler().compute_auth_events(
                    builder,
                    prev_state_map,
                    for_verification=False,
                ),
                depth=event_dict["depth"],
            )
        )

        # Receive the event
        self.get_success(
            self.hs.get_federation_event_handler().on_receive_pdu(
                self.hs.hostname, event
            )
        )

        # The event should not have an entry in the `event_search` table
        f = self.get_failure(
            store.db_pool.simple_select_one_onecol(
                "event_search",
                {"room_id": room_id, "event_id": event.event_id},
                "event_id",
            ),
            StoreError,
        )
        self.assertEqual(f.value.code, 404)

    @skip_unless(not USE_POSTGRES_FOR_TESTS, "requires sqlite")
    def test_sqlite_non_string_deletion_background_update(self):
        """Test the background update to delete bad rows from `event_search`."""
        store = self.hs.get_datastores().main

        # Populate `event_search` with dummy data
        self.get_success(
            store.db_pool.simple_insert_many(
                "event_search",
                keys=["event_id", "room_id", "key", "value"],
                values=[
                    ("event1", "room_id", "content.body", "hi"),
                    ("event2", "room_id", "content.body", "2"),
                    ("event3", "room_id", "content.body", 3),
                ],
                desc="populate_event_search",
            )
        )

        # Run the background update
        store.db_pool.updates._all_done = False
        self.get_success(
            store.db_pool.simple_insert(
                "background_updates",
                {
                    "update_name": "event_search_sqlite_delete_non_strings",
                    "progress_json": "{}",
                },
            )
        )
        self.wait_for_background_updates()

        # The non-string `value`s ought to be gone now.
        values = self.get_success(
            store.db_pool.simple_select_onecol(
                "event_search",
                {"room_id": "room_id"},
                "value",
            ),
        )
        self.assertCountEqual(values, ["hi", "2"])
| StarcoderdataPython |
6635101 | <reponame>DowneyTung/saleor
import graphene
from ..translations.mutations import ShopSettingsTranslate
from .mutations import (
AuthorizationKeyAdd,
AuthorizationKeyDelete,
HomepageCollectionUpdate,
ShopAddressUpdate,
ShopDomainUpdate,
ShopFetchTaxRates,
ShopSettingsUpdate,
)
from .types import Shop
class ShopQueries(graphene.ObjectType):
    """Root GraphQL queries exposing shop-wide information."""

    shop = graphene.Field(Shop, description="Return information about the shop.")

    def resolve_shop(self, _info):
        # The Shop type resolves all of its own fields; no arguments needed.
        return Shop()
class ShopMutations(graphene.ObjectType):
    """Root GraphQL mutations for shop configuration and authorization keys."""

    authorization_key_add = AuthorizationKeyAdd.Field()
    authorization_key_delete = AuthorizationKeyDelete.Field()
    homepage_collection_update = HomepageCollectionUpdate.Field()
    shop_domain_update = ShopDomainUpdate.Field()
    shop_settings_update = ShopSettingsUpdate.Field()
    shop_fetch_tax_rates = ShopFetchTaxRates.Field()
    shop_settings_translate = ShopSettingsTranslate.Field()
    shop_address_update = ShopAddressUpdate.Field()
| StarcoderdataPython |
6431567 | <gh_stars>0
import utils
import random
import math
class Entity(object):
    """Base class for everything that can exist in the game world."""

    def __init__(self):
        # Kind tag; subclasses overwrite this with their own identifier.
        self.type = "entity"
class Allie(Entity):
    """A friendly entity that wanders the map, regenerating health and ammo.

    NOTE(review): utils.perpetualTimer appears to invoke its callback
    repeatedly on a background thread at the given interval; stop_all() must
    be called before discarding the instance -- confirm against utils.
    """

    def __init__(self, position):
        super().__init__()
        self.type = "allie"
        self.position = [position[0], position[1]]
        # Heading in degrees; update() moves along this direction.
        self.orientation = 0
        self.pv_max = 10.0
        # Current health ("pv" -- presumably French "points de vie").
        self.pv = self.pv_max
        # Regenerate 0.1 health every second in the background.
        self.regen_health = utils.perpetualTimer(1, self.regenerate_health)
        self.regen_health.start()
        self.bullets_max = 5
        self.bullets = self.bullets_max
        # Regenerate one bullet every second.
        self.regen_bullets = utils.perpetualTimer(1, self.regenerate_bullets)
        self.regen_bullets.start()
        # Randomly steer every 2 seconds to produce a wandering walk.
        self.random_walk = utils.perpetualTimer(2, self.randomly_change_direction)
        self.random_walk.start()
        # Movement speed in map units per second.
        self.speed = 10.0

    def update(self, delta, limits_x, limits_y):
        """Advance the position by `delta` seconds, staying inside the bounds."""
        # Move
        new_x = self.position[0] + self.speed * delta * math.cos(math.radians(self.orientation))
        new_y = self.position[1] + self.speed * delta * math.sin(math.radians(self.orientation))
        # If the step would leave the map, keep picking new random headings
        # until one stays in bounds (assumes the map is large enough for this
        # to terminate -- TODO confirm).
        while new_x > limits_x[1] or new_x < limits_x[0] or new_y > limits_y[1] or new_y < limits_y[0]:
            self.randomly_change_direction()
            new_x = self.position[0] + self.speed * delta * math.cos(math.radians(self.orientation))
            new_y = self.position[1] + self.speed * delta * math.sin(math.radians(self.orientation))
        self.position = [new_x, new_y]

    def randomly_change_direction(self):
        # Turn by a random amount in [-35, +35) degrees, wrapped to [0, 360).
        self.orientation = (self.orientation + (random.random()*70)-35)%360

    def regenerate_bullets(self):
        # Timer callback: +1 bullet per tick, capped at the maximum.
        self.bullets = min(self.bullets_max, self.bullets + 1)

    def regenerate_health(self):
        # Timer callback: +0.1 health per tick, capped at the maximum.
        self.pv = min(self.pv_max, self.pv + 0.1)

    def stop_all(self):
        # Cancel all background timers; call before discarding the entity.
        self.regen_bullets.cancel()
        self.regen_health.cancel()
        self.random_walk.cancel()
1900092 | <gh_stars>1-10
"""Limited subset of azavea/django-sld's generator module
This module preserves that module's structure for extensibility,
but only borrows the as_quantiles method.
"""
import numpy as np
from django.contrib.gis.db.models import fields
from pysal.esda.mapclassify import Quantiles
from sld import (Filter, LineSymbolizer, PointSymbolizer, PolygonSymbolizer,
PropertyCriterion, StyledLayerDescriptor)
def as_quantiles(*args, **kwargs):
    """
    Generate Quantile classes from the provided queryset. If the queryset
    is empty, no class breaks are returned. For more information on the Quantile
    classifier, please visit:
    U{http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Quantiles}

    @type queryset: QuerySet
    @param queryset: The query set that contains the entire distribution of
        data values.
    @type field: string
    @param field: The name of the field on the model in the queryset that
        contains the data values.
    @type nclasses: integer
    @param nclasses: The number of class breaks desired.
    @type geofield: string
    @param geofield: The name of the geometry field. Defaults to 'geom'.
    @rtype: L{sld.StyledLayerDescriptor}
    @returns: An SLD object that represents the class breaks.
    """
    # Thin wrapper: all positional/keyword arguments are forwarded unchanged
    # to the generic classification driver with the pysal Quantiles class.
    return _as_classification(Quantiles, *args, **kwargs)
def _as_classification(classification,
                       queryset,
                       field,
                       nclasses,
                       geofield='geom',
                       propertyname=None,
                       userstyletitle=None,
                       featuretypestylename=None,
                       colorbrewername='',
                       invertgradient=False,
                       **kwargs):
    """
    Accept a queryset of objects, and return the values of the class breaks
    on the data distribution. If the queryset is empty, no class breaks are
    computed.

    @type classification: pysal classifier
    @param classification: A classification class defined in
        pysal.esda.mapclassify (e.g. Quantiles, Natural_Breaks, ...).
    @type queryset: QuerySet
    @param queryset: The query set that contains the entire distribution of data values.
    @type field: string
    @param field: The name of the field on the model in the queryset that contains the data
        values.
    @type nclasses: integer
    @param nclasses: The number of class breaks desired.
    @type geofield: string
    @keyword geofield: The name of the geography column on the model. Defaults to 'geom'
    @type propertyname: string
    @keyword propertyname: The name of the filter property name, if different from the model field.
    @type userstyletitle: string
    @keyword userstyletitle: The title of the UserStyle element.
    @type featuretypestylename: string
    @keyword featuretypestylename: The name of the FeatureTypeStyle element.
    @type colorbrewername: string
    @keyword colorbrewername: The name of a colorbrewer ramp name. Must have the same # of
        corresponding classes as nclasses.
    @type invertgradient: boolean
    @keyword invertgradient: Should the resulting SLD have colors from high to low, instead of low
        to high?
    @type kwargs: keywords
    @param kwargs: Additional keyword arguments for the classifier.
    @rtype: L{sld.StyledLayerDescriptor}
    @returns: An SLD class object that represents the classification scheme
        and filters.
    """
    thesld = StyledLayerDescriptor()

    # Pick the symbolizer that matches the model's geometry column type.
    ftype = queryset.model._meta.get_field(geofield)
    if isinstance(ftype, fields.LineStringField) or isinstance(
            ftype, fields.MultiLineStringField):
        symbolizer = LineSymbolizer
    elif isinstance(ftype, fields.PolygonField) or isinstance(
            ftype, fields.MultiPolygonField):
        symbolizer = PolygonSymbolizer
    else:
        # PointField, MultiPointField, GeometryField, or GeometryCollectionField
        symbolizer = PointSymbolizer

    if propertyname is None:
        propertyname = field

    nl = thesld.create_namedlayer('%d breaks on "%s" as %s' %
                                  (nclasses, field, classification.__name__))
    us = nl.create_userstyle()
    if userstyletitle is not None:
        us.Title = str(userstyletitle)
    fts = us.create_featuretypestyle()
    if featuretypestylename is not None:
        fts.Name = str(featuretypestylename)

    # with just one class, make a single static style with no filters
    if nclasses == 1:
        rule = fts.create_rule(propertyname, symbolizer=symbolizer)
        shade = 0 if invertgradient else 255
        shade = '#%02x%02x%02x' % (
            shade,
            shade,
            shade,
        )
        # no filters for one class
        if symbolizer == PointSymbolizer:
            rule.PointSymbolizer.Graphic.Mark.Fill.CssParameters[
                0].Value = shade
        elif symbolizer == LineSymbolizer:
            rule.LineSymbolizer.Stroke.CssParameters[0].Value = shade
        elif symbolizer == PolygonSymbolizer:
            rule.PolygonSymbolizer.Stroke.CssParameters[0].Value = '#000000'
            rule.PolygonSymbolizer.Fill.CssParameters[0].Value = shade

        thesld.normalize()
        return thesld

    # with more than one class, perform classification
    datavalues = np.array(
        queryset.order_by(field).values_list(field, flat=True))
    q = classification(datavalues, nclasses, **kwargs)

    # Optional colorbrewer ramp; only usable when the classifier produced
    # exactly nclasses bins.
    shades = None
    if q.k == nclasses and colorbrewername and not colorbrewername == '':
        try:
            import colorbrewer
            shades = getattr(colorbrewer, colorbrewername)[nclasses]
            if invertgradient:
                shades.reverse()
        except (ImportError, KeyError):
            # could not import colorbrewer, or nclasses unavailable
            pass

    for i, qbin in enumerate(q.bins):
        if type(qbin) == np.ndarray:
            qbin = qbin[0]
        title = '<= %s' % qbin
        rule = fts.create_rule(title, symbolizer=symbolizer)
        if shades:
            shade = '#%02x%02x%02x' % shades[i]
        else:
            # BUG FIX: '%x' formatting requires an integer; the original fed
            # it a float here, which raises TypeError on Python 3.
            grey = int(round((float(q.k - i) / q.k) * 255))
            if invertgradient:
                grey = 255 - grey
            shade = '#%02x%02x%02x' % (
                grey,
                grey,
                grey,
            )
        if symbolizer == PointSymbolizer:
            rule.PointSymbolizer.Graphic.Mark.Fill.CssParameters[
                0].Value = shade
        elif symbolizer == LineSymbolizer:
            rule.LineSymbolizer.Stroke.CssParameters[0].Value = shade
        elif symbolizer == PolygonSymbolizer:
            rule.PolygonSymbolizer.Stroke.CssParameters[0].Value = '#000000'
            rule.PolygonSymbolizer.Fill.CssParameters[0].Value = shade

        # now add the filters: lower bound (exclusive) except for the first
        # class, upper bound (inclusive) for every class.
        if i > 0:
            f_low = Filter(rule)
            f_low.PropertyIsGreaterThan = PropertyCriterion(
                f_low, 'PropertyIsGreaterThan')
            f_low.PropertyIsGreaterThan.PropertyName = propertyname
            f_low.PropertyIsGreaterThan.Literal = str(q.bins[i - 1])

        f_high = Filter(rule)
        f_high.PropertyIsLessThanOrEqualTo = PropertyCriterion(
            f_high, 'PropertyIsLessThanOrEqualTo')
        f_high.PropertyIsLessThanOrEqualTo.PropertyName = propertyname
        f_high.PropertyIsLessThanOrEqualTo.Literal = str(qbin)

        if i > 0:
            rule.Filter = f_low + f_high
        else:
            rule.Filter = f_high

    thesld.normalize()
    return thesld
3310266 | import pytest, os,vcr
from igdb_api_python.igdb import igdb as igdb
igdb = igdb(os.environ['api_key'])
@vcr.use_cassette('tests/vcr_cassettes/platforms/single_platform.yml', filter_headers=['user-key'])
def test_single_platform():
    """Fetching a single platform by id returns that platform."""
    result = igdb.platforms(11)
    assert result.body != []
    assert result.body[0]['id'] == 11
@vcr.use_cassette('tests/vcr_cassettes/platforms/multiple_platform.yml', filter_headers=['user-key'])
def test_multiple_platform():
    """Fetching several platforms by id preserves the requested order."""
    result = igdb.platforms({
        'ids':[13,5,3]
    })
    assert result.body != []
    assert result.body[0]['id'] == 13
    assert result.body[1]['id'] == 5
    assert result.body[2]['id'] == 3
# CONSISTENCY FIX: filter_headers added so the API key is scrubbed from the
# recorded cassette, matching the other tests in this module.
@vcr.use_cassette('tests/vcr_cassettes/platforms/search.yml', filter_headers=['user-key'])
def test_game_search_multi_and_single():
    """Searching platforms by name returns matching dict records."""
    result = igdb.platforms({
        'search': "xbox one",
        'fields' : 'name'
    })
    print(result.body)
    assert result.body != []
    assert result.body[0]['id'] == 49
    # isinstance instead of `type(...) == dict` (idiomatic type check).
    assert isinstance(result.body[0], dict)
| StarcoderdataPython |
1857525 | """
Raspberry Pi package.
"""
import os
import json
from .constants import ASSETS_DIR
# Load package metadata (title, version, author info, ...) from the bundled
# assets file once at import time.  Explicit encoding: JSON is defined as
# UTF-8, and the platform default encoding is not guaranteed to be UTF-8.
with open(os.path.join(ASSETS_DIR, "metadata.json"), encoding="utf-8") as f:
    metadata = json.load(f)

# Re-export the standard dunder metadata attributes for the package.
__title__ = metadata["__title__"]
__description__ = metadata["__description__"]
__version__ = metadata["__version__"]
__lead__ = metadata["__lead__"]
__email__ = metadata["__email__"]
__url__ = metadata["__url__"]

# Public API of this module.
__all__ = [
    "__title__",
    "__description__",
    "__version__",
    "__lead__",
    "__email__",
    "__url__",
]
| StarcoderdataPython |
4912068 | <filename>core/tests/test_curriculo.py<gh_stars>0
import django
django.setup()
from core.bo.curriculo import get_curriculo_by_cc
from core.models import ComponenteCurricular
from core.tests.povoar_testes import criar_dados, remover_dados
from django.test import TestCase
class CurriculoTests(TestCase):
    """Integration tests for core.bo.curriculo.get_curriculo_by_cc.

    Populates the test database once for the whole class via criar_dados()
    and cleans it up via remover_dados().
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        print('\nCurriculoTests')
        criar_dados()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        remover_dados()

    def test_get_curriculo_by_cc(self):
        # Fixture component 99999 is expected to map to exactly one curriculum.
        componente = ComponenteCurricular.objects.get(id_componente=99999)
        curriculos = get_curriculo_by_cc(componente.id_componente)
        self.assertIsNotNone(curriculos, 'Curriculos 1 não é None?')
        self.assertEqual(1, len(curriculos), 'Testando quantidade de currículos 1.')

        # Fixture component 99998 is expected to map to two curricula.
        componente = ComponenteCurricular.objects.get(id_componente=99998)
        curriculos = get_curriculo_by_cc(componente.id_componente)
        self.assertIsNotNone(curriculos, 'Curriculos 2 não é None?')
        self.assertEqual(2, len(curriculos), 'Testando quantidade de currículos 2.')
3428329 | # -*- coding: utf-8 -*-
"""
Created on Mar 11, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import imghdr
import io
import xml.etree.cElementTree as ET
from os import urandom
from uuid import uuid4
from collections import OrderedDict
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.types import Integer, Unicode, String, Boolean, Enum
from models import dbsession
from models.BaseModels import DatabaseObject
from models.Relationships import team_to_box
from models.IpAddress import IpAddress
from models.GameLevel import GameLevel
from models.Corporation import Corporation
from models.Category import Category
from models.SourceCode import SourceCode
from tornado.options import options
from libs.XSSImageCheck import is_xss_image, get_new_avatar
from libs.ValidationError import ValidationError
from libs.StringCoding import encode
from PIL import Image
from resizeimage import resizeimage
import enum
class FlagsSubmissionType(str, enum.Enum):
    """How flags may be submitted for a box.

    Subclasses str so members compare/serialize as plain strings.
    """

    CLASSIC = "CLASSIC"
    SINGLE_SUBMISSION_BOX = "SINGLE_SUBMISSION_BOX"
from builtins import ( # noqa: E402
str,
) # TODO Python2/3 compatibility issue if imported before FlagSubmissionType
class Box(DatabaseObject):
""" Box definition """
uuid = Column(String(36), unique=True, nullable=False, default=lambda: str(uuid4()))
corporation_id = Column(Integer, ForeignKey("corporation.id"), nullable=False)
category_id = Column(Integer, ForeignKey("category.id"), nullable=True)
_name = Column(Unicode(32), unique=True, nullable=False)
_operating_system = Column(Unicode(16))
_description = Column(Unicode(1024))
_capture_message = Column(Unicode(1024))
_difficulty = Column(Unicode(16))
game_level_id = Column(Integer, ForeignKey("game_level.id"), nullable=False)
_avatar = Column(String(64))
_value = Column(Integer, nullable=True)
_locked = Column(Boolean, default=False, nullable=False)
garbage = Column(
String(32),
unique=True,
nullable=False,
default=lambda: encode(urandom(16), "hex"),
)
teams = relationship(
"Team", secondary=team_to_box, back_populates="boxes", lazy="select"
)
hints = relationship(
"Hint",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
)
flags = relationship(
"Flag",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
order_by="desc(-Flag._order)",
)
flag_submission_type = Column(
Enum(FlagsSubmissionType), default=FlagsSubmissionType.CLASSIC
)
ip_addresses = relationship(
"IpAddress",
backref=backref("box", lazy="select"),
cascade="all,delete,delete-orphan",
)
@classmethod
def all(cls):
    """ Returns a list of all objects in the database """
    return dbsession.query(cls).all()

@classmethod
def by_id(cls, _id):
    """ Returns the object with id of _id, or None if not found """
    return dbsession.query(cls).filter_by(id=_id).first()

@classmethod
def by_uuid(cls, _uuid):
    """ Return an object based on a uuid, or None if not found """
    return dbsession.query(cls).filter_by(uuid=str(_uuid)).first()

@classmethod
def by_name(cls, name):
    """ Return the box object whose name is "name", or None if not found """
    return dbsession.query(cls).filter_by(_name=str(name)).first()

@classmethod
def by_category(cls, _cat_id):
    """ Return all box objects whose category id is "_cat_id" """
    return dbsession.query(cls).filter_by(category_id=int(_cat_id)).all()

@classmethod
def by_garbage(cls, _garbage):
    """ Return the box whose garbage token matches _garbage, or None """
    return dbsession.query(cls).filter_by(garbage=_garbage).first()

@classmethod
def by_ip_address(cls, ip_addr):
    """
    Returns a box object based on an ip address, supports both ipv4
    and ipv6
    """
    ip = IpAddress.by_address(ip_addr)
    # An IP row links back to its box; unknown addresses yield None.
    return ip.box if ip is not None else None
@classmethod
def flaglist(cls, box_id=None):
    """Return an OrderedDict mapping flag uuid -> flag name for the given box.

    CONSISTENCY FIX: first parameter renamed from the misleading `self` to
    `cls`, matching the other classmethods on this class.
    """
    flags = cls.by_id(box_id).flags
    flaglist = OrderedDict()
    for flag in flags:
        flaglist[flag.uuid] = flag.name
    return flaglist
@property
def name(self):
    # Display name of the box; validated to 3-32 chars by the setter.
    return self._name

@name.setter
def name(self, value):
    """Validate and store the box name (3-32 characters, fits Unicode(32))."""
    if not 3 <= len(str(value)) <= 32:
        raise ValidationError("Name must be 3 - 32 characters")
    self._name = str(value)
@property
def operating_system(self):
    # Fall back to '?' when unset or empty.
    return self._operating_system if self._operating_system else "?"

@operating_system.setter
def operating_system(self, value):
    # Stored as-is; NOTE(review): column is Unicode(16) but no length
    # validation is performed here -- confirm whether that is intended.
    self._operating_system = str(value)
@property
def description(self):
    """Rendered description text plus reported difficulty, for display.

    Lazily initialises _description to '' on first access.
    """
    if self._description is None:
        self._description = ""
    ls = []
    if 0 < len(self._description):
        # Normalise Windows line endings and trim surrounding whitespace.
        text = self._description.replace("\r\n", "\n").strip()
        ls.append("%s" % text)
    else:
        ls.append("No information on file.")
    if self.difficulty != "Unknown":
        ls.append("Reported Difficulty: %s" % self.difficulty)
    # Ensure the rendered block ends with a newline.
    if not encode(ls[-1], "utf-8").endswith(b"\n"):
        ls[-1] = ls[-1] + "\n"
    return str("\n\n".join(ls))
@description.setter
def description(self, value):
    """Validate and store the description (max 1024 chars, the column size).

    None is treated as "leave unchanged".
    """
    if value is None:
        # A setter's return value is ignored; the original returned "".
        return
    # BUG FIX: was `1025 < len(value)`, which let a 1025-character string
    # through even though the column is Unicode(1024) and the error message
    # says 1024.
    if 1024 < len(value):
        raise ValidationError("Description cannot be greater than 1024 characters")
    self._description = str(value)
@property
def difficulty(self):
    # Reported difficulty string; 'Unknown' when unset or empty.
    return (
        self._difficulty
        if self._difficulty and len(self._difficulty)
        else "Unknown"
    )
@difficulty.setter
def difficulty(self, value):
    """Validate and store difficulty (max 16 chars, the column size); None is a no-op."""
    if value is None:
        return
    # BUG FIX: was `17 < len(value)`, permitting a 17-character string even
    # though the column is Unicode(16) and the error message says 16.
    if 16 < len(value):
        raise ValidationError("Difficulty cannot be greater than 16 characters")
    self._difficulty = str(value)
@property
def capture_message(self):
    # Message shown to players when the box is captured; '' when unset.
    return self._capture_message if self._capture_message else ""

@capture_message.setter
def capture_message(self, value):
    # Stored unvalidated; NOTE(review): column is Unicode(1024) -- confirm
    # whether a length check was intended here.
    self._capture_message = str(value)
@property
def value(self):
    """Point reward for the box; 0 when no value has been set."""
    if not self._value:
        return 0
    return self._value
@value.setter
def value(self, value):
    """Validate and store the reward value as a non-negative integer."""
    try:
        self._value = abs(int(value))
    except (ValueError, TypeError):
        # TypeError added: int(None) and similar previously escaped as a raw
        # TypeError instead of the ValidationError callers expect.
        raise ValidationError("Reward value must be an integer")
@property
def locked(self):
    """True when an admin has locked this box."""
    return self._locked
@locked.setter
def locked(self, value):
    """Set the admin lock flag; the value must be a bool."""
    # Raise instead of `assert`: assertions are stripped under `python -O`,
    # which would silently allow non-bool values into the column.
    if not isinstance(value, bool):
        raise ValidationError("Locked must be a boolean")
    self._locked = value
@property
def avatar(self):
    """Relative path of the box's avatar image.

    Side effect: when unset, picks a new avatar via get_new_avatar('box')
    and, if it is not one of the bundled defaults, persists the choice to
    the database (add + commit) from inside this getter.
    """
    if self._avatar is not None:
        return self._avatar
    else:
        avatar = get_new_avatar("box")
        if not avatar.startswith("default_"):
            self._avatar = avatar
            dbsession.add(self)
            dbsession.commit()
        return avatar
@avatar.setter
def avatar(self, image_data):
    """Validate and store uploaded avatar image bytes.

    Accepts png/jpeg/gif/bmp under 1 MiB, rejects images that look like
    XSS payloads, deletes any previous upload, crop-resizes to 500x250
    and writes the file under options.avatar_dir/upload/.

    Raises:
        ValidationError: on oversized input, bad format, or image errors.
    """
    if self.uuid is None:
        # The stored file name is derived from the uuid, so ensure one exists.
        self.uuid = str(uuid4())
    if len(image_data) < (1024 * 1024):
        # Sniff the real format from the bytes; never trust a filename.
        ext = imghdr.what("", h=image_data)
        if ext in ["png", "jpeg", "gif", "bmp"] and not is_xss_image(image_data):
            try:
                # Remove the previously uploaded file, if any, so stale
                # avatars don't accumulate on disk.
                # NOTE(review): self._avatar set by this setter already
                # starts with "upload/", making this path
                # ".../upload/upload/..." — confirm intended.
                if self._avatar is not None and os.path.exists(
                    options.avatar_dir + "/upload/" + self._avatar
                ):
                    os.unlink(options.avatar_dir + "/upload/" + self._avatar)
                file_path = str(
                    options.avatar_dir + "/upload/" + self.uuid + "." + ext
                )
                image = Image.open(io.BytesIO(image_data))
                # Crop-resize to the fixed banner dimensions used by the UI.
                cover = resizeimage.resize_cover(image, [500, 250])
                cover.save(file_path, image.format)
                self._avatar = "upload/" + self.uuid + "." + ext
            except Exception as e:
                raise ValidationError(e)
        else:
            raise ValidationError(
                "Invalid image format, avatar must be: .png .jpeg .gif or .bmp"
            )
    else:
        raise ValidationError("The image is too large")
@property
def ipv4s(self):
    """Return a list of all ipv4 addresses."""
    return [ip for ip in self.ip_addresses if ip.version == 4]

@property
def ipv6s(self):
    """Return a list of all ipv6 addresses."""
    return [ip for ip in self.ip_addresses if ip.version == 6]

@property
def visable_ips(self):
    # IPs flagged as visible to players ("visable" spelling kept: the
    # attribute name matches the model column).
    return [ip for ip in self.ip_addresses if ip.visable is True]
@property
def source_code(self):
    # Source-code attachment associated with this box, if any.
    return SourceCode.by_box_id(self.id)
def get_garbage_cfg(self):
    """Render the [Bot] config stanza with the box name hex-encoded."""
    try:
        # Python 3 path: encode() yields bytes, which expose .hex().
        hex_name = encode(self.name).hex()
    except AttributeError:
        # Python 2 fallback: the "hex" codec returns the hex string directly.
        hex_name = encode(self.name, "hex")
    return "[Bot]\nname = %s\ngarbage = %s\n" % (hex_name, self.garbage)
def is_complete(self, user):
    """Return True when the user's team has captured every flag on this box."""
    if not user.team:
        # A user without a team can never be missing any flags.
        return True
    captured = user.team.flags
    return all(flag in captured for flag in self.flags)
def to_xml(self, parent):
    """Convert object to XML, appended as a <box> child of `parent`."""
    box_elem = ET.SubElement(parent, "box")
    box_elem.set("gamelevel", "%s" % str(self.game_level.number))
    ET.SubElement(box_elem, "name").text = self.name
    # Export the raw underscore attributes so unset values round-trip as
    # empty/None instead of the display fallbacks ("?", "Unknown").
    ET.SubElement(box_elem, "operatingsystem").text = self._operating_system
    ET.SubElement(box_elem, "description").text = self._description
    ET.SubElement(box_elem, "capture_message").text = self.capture_message
    ET.SubElement(box_elem, "value").text = str(self.value)
    ET.SubElement(box_elem, "flag_submission_type").text = FlagsSubmissionType(
        self.flag_submission_type
    ).name
    ET.SubElement(box_elem, "difficulty").text = self._difficulty
    ET.SubElement(box_elem, "garbage").text = str(self.garbage)
    if self.category_id:
        ET.SubElement(box_elem, "category").text = Category.by_id(
            self.category_id
        ).category
    flags_elem = ET.SubElement(box_elem, "flags")
    flags_elem.set("count", "%s" % str(len(self.flags)))
    for flag in self.flags:
        flag.to_xml(flags_elem)
    hints_elem = ET.SubElement(box_elem, "hints")
    count = 0
    # Only box-level hints are exported here; hints bound to a flag are
    # exported by that flag instead.
    for hint in self.hints:
        if hint.flag_id is None:
            hint.to_xml(hints_elem)
            count += 1
    hints_elem.set("count", "%s" % str(count))
    ips_elem = ET.SubElement(box_elem, "ipaddresses")
    ips_elem.set("count", "%s" % str(len(self.ip_addresses)))
    for ip in self.ip_addresses:
        ip.to_xml(ips_elem)
    # Inline the avatar image as base64 so the export is self-contained.
    avatarfile = os.path.join(options.avatar_dir, self.avatar)
    if self.avatar and os.path.isfile(avatarfile):
        with open(avatarfile, mode="rb") as _avatar:
            data = _avatar.read()
        ET.SubElement(box_elem, "avatar").text = encode(data, "base64")
    else:
        ET.SubElement(box_elem, "avatar").text = "none"
def to_dict(self):
    """Returns editable data as a dictionary."""
    # Foreign keys are exposed as UUIDs so the admin UI never sees raw ids.
    corp = Corporation.by_id(self.corporation_id)
    game_level = GameLevel.by_id(self.game_level_id)
    cat = Category.by_id(self.category_id)
    if cat:
        category = cat.uuid
    else:
        category = ""
    return {
        "name": self.name,
        "uuid": self.uuid,
        "corporation": corp.uuid,
        "category": category,
        "operating_system": self.operating_system,
        # Raw value (may be None) rather than the rendered property.
        "description": self._description,
        "capture_message": self.capture_message,
        "difficulty": self.difficulty,
        "game_level": game_level.uuid,
        "flag_submission_type": self.flag_submission_type,
        "flaglist": self.flaglist(self.id),
        "value": self.value,
    }
def __repr__(self):
    """Debug representation including the box name."""
    return "<Box - name: {0}>".format(self.name)

def __str__(self):
    """Boxes render as their plain name."""
    return self.name
| StarcoderdataPython |
1799470 | from textwrap import dedent
from typing import Any, Dict, List
from shared.di import service_as_factory
from shared.postgresql_backend import ConnectionHandler
from shared.util import ModelLocked, collectionfield_and_fqid_from_fqfield
# FQID LOCKING
# positions: <1> <2> <3> <4> <5>
# a/1 modified: X X
# Lock a/1 with pos 4, 5: OK
# Lock a/1 with pos 3, 2, ..: not OK
# Lock a/1 with pos P: Exists an event with pos>P -> not OK
@service_as_factory
class SqlOccLockerBackendService:
    """Optimistic-concurrency lock checker backed by the events tables.

    Each assert_* method raises ModelLocked as soon as any watched key has
    been modified by an event with a position newer than the one given.
    """

    connection: ConnectionHandler

    def assert_fqid_positions(self, fqids: Dict[str, int]) -> None:
        # A fqid is locked iff any event for it has position > the given one.
        if not fqids:
            return
        query_arguments: List[Any] = []
        filter_parts = []
        for fqid, position in fqids.items():
            query_arguments.extend((fqid, position,))
            filter_parts.append("(fqid=%s and position>%s)")
        query = (
            "select fqid from events where " + " or ".join(filter_parts) + " limit 1"
        )
        self.raise_model_locked_if_match(query, query_arguments)

    def assert_fqfield_positions(self, fqfields: Dict[str, int]) -> None:
        if not fqfields:
            return
        # Two filter sets are combined: the inner subquery narrows events
        # to newer positions per fqid; the outer join then requires that
        # such an event touched the watched collectionfield.
        event_query_arguments: List[Any] = []
        event_filter_parts = []
        collectionfield_query_arguments: List[str] = []
        collectionfield_filter_parts = []
        for fqfield, position in fqfields.items():
            collectionfield, fqid = collectionfield_and_fqid_from_fqfield(fqfield)
            event_query_arguments.extend((fqid, position,))
            event_filter_parts.append("(fqid=%s and position>%s)")
            # Escape literal underscores for SQL LIKE, then let a template
            # field's "$" placeholder match any concrete suffix.
            collectionfield = collectionfield.replace("_", r"\_")
            collectionfield = collectionfield.replace("$", "_%")
            collectionfield_query_arguments.extend((fqid, collectionfield,))
            collectionfield_filter_parts.append(
                "(e.fqid=%s and cf.collectionfield LIKE %s)"
            )
        event_filter = " or ".join(event_filter_parts)
        collectionfield_filter = " or ".join(collectionfield_filter_parts)
        query = dedent(
            f"""\
            select e.fqid from (
                select id, fqid from events where {event_filter}
            ) e
            inner join events_to_collectionfields ecf on e.id=ecf.event_id
            inner join collectionfields cf on ecf.collectionfield_id=cf.id
            where {collectionfield_filter} limit 1"""
        )
        # Argument order must match placeholder order: events first, then
        # the collectionfield conditions of the outer where clause.
        query_arguments = event_query_arguments + collectionfield_query_arguments
        self.raise_model_locked_if_match(query, query_arguments)

    def assert_collectionfield_positions(
        self, collectionfields: Dict[str, int]
    ) -> None:
        if not collectionfields:
            return
        query_arguments: List[Any] = []
        filter_parts = []
        for collectionfield, position in collectionfields.items():
            query_arguments.extend((collectionfield, position,))
            filter_parts.append("(collectionfield=%s and position>%s)")
        query = (
            "select collectionfield from collectionfields where "
            + " or ".join(filter_parts)
            + " limit 1"
        )
        self.raise_model_locked_if_match(query, query_arguments)

    def raise_model_locked_if_match(self, query, arguments):
        """Run the query and raise ModelLocked carrying the first matching
        key; a query yielding no row means nothing is locked."""
        locked_key = self.connection.query_single_value(query, arguments)
        if locked_key is not None:
            raise ModelLocked(locked_key)
| StarcoderdataPython |
5073248 | import json
from discord.ext import commands, tasks
from app.classes.bot import Bot
class StatsEvents(commands.Cog):
    """Cog that periodically pushes cluster statistics over the IPC websocket."""

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        # Start the background loop immediately; it waits for readiness itself.
        self.broadcast_stats.start()

    @tasks.loop(minutes=1)
    async def broadcast_stats(self) -> None:
        await self.bot.wait_until_ready()
        # Sum member counts defensively: guild.member_count can be falsy
        # for guilds whose member data is not available yet.
        member_count = 0
        for g in self.bot.guilds:
            if g.member_count:
                member_count += g.member_count
        await self.bot.websocket.send(
            json.dumps(
                {
                    "command": "set_stats",
                    "guild_count": len(self.bot.guilds),
                    "member_count": member_count,
                    "cluster": self.bot.cluster_name,
                }
            ).encode("utf-8")
        )
def setup(bot: Bot) -> None:
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(StatsEvents(bot))
| StarcoderdataPython |
11285987 | <filename>interface.py<gh_stars>0
import algorithm
import variable
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class Window(QMainWindow):
    """Main GUI for training and testing the alphabet-recognition network.

    Bug fixed vs. the original: the "Number of TDS" and "Learning Rate"
    buttons had their click handlers swapped, so each button applied the
    other parameter's textbox. They are now wired to the matching handlers.
    The ten near-identical letter branches and seven parameter slots are
    deduplicated into shared helpers.
    """

    # Letters the network is trained on; H-J can be drawn but are reported
    # as unrecognizable by recog_alpha.
    TRAINED_LETTERS = "ABCDEFG"

    def __init__(self):
        super(Window, self).__init__()
        self.initUI()

    def initUI(self):
        self.setGeometry(600, 300, 700, 500)
        self.setWindowTitle("Alphabet Recognition")
        self.home()

    def _add_param_row(self, y, initial_value, button_label, slot, button_width=100):
        """Create one textbox + apply-button pair at vertical offset y."""
        textbox = QLineEdit(self)
        textbox.move(5, y)
        textbox.resize(110, 30)
        textbox.setText(str(initial_value))
        button = QPushButton(button_label, self)
        button.clicked.connect(slot)
        button.resize(button_width, 30)
        button.move(120, y)
        return textbox, button

    def home(self):
        """Build all widgets of the single-window UI."""
        # Parameter rows (textbox + button), top to bottom. Each button is
        # connected to the handler that reads its own textbox.
        self.tdstextbox, self.tdsbtn = self._add_param_row(
            5, variable.NUM_IMAGE_IN_DATA_SET, "Number of TDS",
            self.getNumOfTDSText)
        self.netextbox, self.nebtn = self._add_param_row(
            45, variable.NUM_EPOCHS, "Number Epochs", self.getNumEpochsText)
        self.h1textbox, self.h1btn = self._add_param_row(
            85, variable.NUM_NODE_HIDDEN1, "Hidden 1 nodes",
            self.getHiddenOneText)
        self.h2textbox, self.h2btn = self._add_param_row(
            125, variable.NUM_NODE_HIDDEN2, "Hidden 2 nodes",
            self.getHiddenTwoText)
        self.thtextbox, self.thbtn = self._add_param_row(
            165, variable.THRESHOLD, "Threshold", self.getThresholdText)
        self.lrtextbox, self.lrbtn = self._add_param_row(
            205, variable.ALPHA, "Learning Rate", self.getLearningRateText)
        self.intextbox, self.inbtn = self._add_param_row(
            245, variable.RATE, "Image Scramble Rate", self.getRateText,
            button_width=120)

        self.gentdsbtn = QPushButton("Generate A-G Training data set", self)
        self.gentdsbtn.clicked.connect(self.generateAlphbetTDS)
        self.gentdsbtn.resize(200, 30)
        self.gentdsbtn.move(5, 285)

        self.trainbtn = QPushButton("Initial Weights and Train NN", self)
        self.trainbtn.clicked.connect(self.trainNN)
        self.trainbtn.resize(200, 30)
        self.trainbtn.move(5, 325)
        self.trainbtn.setDisabled(True)  # enabled once a data set exists

        self.instruction = QTextEdit(self)
        self.instruction.move(5, 365)
        self.instruction.resize(200, 100)
        self.instruction.setDisabled(True)
        self.instruction.setText(
            "Change and set above value by clicking the button next to it")

        self.comboxBox = QComboBox(self)
        for letter in "ABCDEFGHIJ":
            self.comboxBox.addItem(letter)
        self.comboxBox.move(300, 5)

        self.genalphabetbtn = QPushButton(
            "Generate image & Recognize using NN", self)
        self.genalphabetbtn.clicked.connect(self.genAlphabetForTesting)
        self.genalphabetbtn.resize(200, 30)
        self.genalphabetbtn.move(410, 5)
        self.genalphabetbtn.setDisabled(True)  # enabled after training

        # 10x8 pixel grid used to display the (scrambled) letter image.
        self.table = QTableWidget(10, 8, self)
        self._draw_image([[0] * 8 for _ in range(10)])
        self.table.resizeColumnsToContents()
        self.table.resizeRowsToContents()
        self.table.resize(305, 255)
        self.table.move(300, 55)

        self.outputtextbox = QTextEdit(self)
        self.outputtextbox.move(300, 320)
        self.outputtextbox.resize(250, 150)
        self.outputtextbox.setDisabled(True)
        self.outputtextbox.setText("Output Value???")

        self.show()

    def _set_param(self, textbox, caster, attr_name, label):
        """Parse the textbox with `caster` and store the value on `variable`."""
        try:
            setattr(variable, attr_name, caster(textbox.text()))
            print(label + " => " + str(getattr(variable, attr_name)))
        except ValueError:
            print("Please enter numbers only, not text!!")

    @pyqtSlot()
    def getLearningRateText(self):
        self._set_param(self.lrtextbox, float, "ALPHA", "learning rate")

    def getNumEpochsText(self):
        self._set_param(self.netextbox, int, "NUM_EPOCHS", "Number of epochs")

    def getHiddenOneText(self):
        self._set_param(self.h1textbox, int, "NUM_NODE_HIDDEN1",
                        "Hidden one nodes")

    def getHiddenTwoText(self):
        self._set_param(self.h2textbox, int, "NUM_NODE_HIDDEN2",
                        "Hidden two nodes")

    def getThresholdText(self):
        self._set_param(self.thtextbox, float, "THRESHOLD", "Threshold")

    def getNumOfTDSText(self):
        self._set_param(self.tdstextbox, int, "NUM_IMAGE_IN_DATA_SET",
                        "Number of Training Data Set")

    def getRateText(self):
        self._set_param(self.intextbox, float, "RATE",
                        "Number of Image scramble rate")

    def generateAlphbetTDS(self):
        # (method name typo kept: it is part of the public slot interface)
        for letter in self.TRAINED_LETTERS:
            algorithm.create_training_data_set(
                getattr(variable, "image_" + letter),
                getattr(variable, "Train_data_set_" + letter))
        msg = ("Done generate %d images for each alphabet"
               % variable.NUM_IMAGE_IN_DATA_SET)
        print(msg)
        self.instruction.setText(msg)
        self.trainbtn.setDisabled(False)

    def _param_buttons(self):
        """Buttons locked while training is running."""
        return [self.lrbtn, self.nebtn, self.h1btn, self.h2btn,
                self.thbtn, self.tdsbtn]

    def trainNN(self):
        print("Start Training")
        self.instruction.setText("Start Training, process could take very long")
        for button in self._param_buttons():
            button.setDisabled(True)
        algorithm.init_weight()
        # The target index of training_nn equals the letter index (A=0 ... G=6).
        training_sets = [getattr(variable, "Train_data_set_" + letter)
                         for letter in self.TRAINED_LETTERS]
        for epoch in range(variable.NUM_EPOCHS):
            print(epoch)
            for sample in range(variable.NUM_IMAGE_IN_DATA_SET):
                for target, data_set in enumerate(training_sets):
                    algorithm.training_nn(target, data_set, sample)
        print("Training Complete")
        print(variable.Weight_input_hidden1)
        print(variable.Weight_hidden1_hidden2)
        print(variable.Weight_hidden2_output)
        self.instruction.setText("Training Complete")
        for button in self._param_buttons():
            button.setDisabled(False)
        self.genalphabetbtn.setDisabled(False)

    def _draw_image(self, image):
        """Fill the table grid, painting black every cell whose pixel is 1."""
        for row in range(10):
            for col in range(8):
                item = QTableWidgetItem()
                item.setData(Qt.DisplayRole, "")
                self.table.setItem(row, col, item)
                if image[row][col] == 1:
                    self.table.item(row, col).setBackground(QColor(0, 0, 0))

    def genAlphabetForTesting(self):
        """Scramble the selected letter image, display it and classify it."""
        letter = str(self.comboxBox.currentText())
        source_image = getattr(variable, "image_" + letter)
        image = algorithm.scramble(source_image, variable.RATE)
        self._draw_image(image)
        self.showNN(image)

    def showNN(self, image):
        """Run the forward pass on `image` and report the network outputs."""
        algorithm.o_input(image)
        algorithm.o_hidden1()
        algorithm.o_hidden2()
        algorithm.o_output()
        self.outputtextbox.setText(
            "The outputs are \n=> %f\n=> %f\n=> %f\n=> %f\n=> %f\n=> %f" %
            (variable.Output_output[0], variable.Output_output[1],
             variable.Output_output[2], variable.Output_output[3],
             variable.Output_output[4], variable.Output_output[5]))
        self.outputtextbox.append(self.recog_alpha(variable.Output_output))

    def recog_alpha(self, output):
        """Map the thresholded network output to a letter name.

        Returns a "This image is X" message for A-G, or a failure notice
        when no trained pattern matches.
        """
        result = [1 if item > 0.99 else 0 for item in output]
        for index, expected in enumerate(variable.expect_output):
            if expected == result and index < len(self.TRAINED_LETTERS):
                return "This image is " + self.TRAINED_LETTERS[index]
        return "Cannot recognize this image: This image is not between A - G"
# Launch the Qt application and enter the event loop.
# NOTE(review): this runs on import as well; consider wrapping it in an
# `if __name__ == "__main__":` guard.
app = QApplication(sys.argv)
GUI = Window()
sys.exit(app.exec_())
| StarcoderdataPython |
8039189 | """
用python编程:有一个4*4的二维矩阵,请按如下图顺序输出里面的值。
"""
import numpy as np
input = np.array((1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16))
input = input.reshape(4,4)
def output_edge(input, output, direction=0):
    """Append the outer edge of `input` to `output`, spiralling clockwise.

    direction selects which edge to peel next: 0 = top row, 1 = right
    column, 2 = bottom row (reversed), 3 = left column (reversed). Each
    call removes one edge and recurses on the remaining sub-matrix until
    it is empty.
    """
    if input.size == 0:
        return
    if direction == 0:
        output.append(input[0, :].tolist())
        output_edge(input[1:, :], output, 1)
    elif direction == 1:
        output.append(input[:, -1].tolist())
        output_edge(input[:, :-1], output, 2)
    elif direction == 2:
        output.append(input[-1, ::-1].tolist())
        output_edge(input[:-1, :], output, 3)
    elif direction == 3:
        output.append(input[::-1, 0].tolist())
        output_edge(input[:, 1:], output, 0)
# Collect the spiral traversal (one sub-list per peeled edge) and print it.
output = []
output_edge(input, output)
print(output)
| StarcoderdataPython |
3253499 | #!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit.auto_suggest import AutoSuggest, Suggestion
from .utils import json_fixer, extract_tokens, yaml_fixer
__author__ = "<NAME>"
__copyright__ = "Copyright 2017-2021, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "PerpetualBeta"
class AutosuggestFromDocumentData(AutoSuggest):
    """prompt_toolkit auto-suggester that completes the word under the
    cursor using keys extracted from the (JSON or YAML) buffer itself."""

    def __init__(self, bottom_toolbar_attributes=None, mode='json', **kwargs):
        super(AutosuggestFromDocumentData, self).__init__()
        self._token_list = []
        # 'json' selects json_fixer; any other value falls back to yaml_fixer.
        self._mode = mode
        self._bottom_toolbar_attributes = bottom_toolbar_attributes

    def _tokenize(self, dict_data):
        # Flatten all keys of the (possibly nested) document into a
        # sorted, de-duplicated token list.
        self._token_list = []
        extract_tokens(dict_data, self._token_list)
        return sorted(list(set(self._token_list)))

    def get_suggestion(self, buffer, document):
        # Best-effort parse of the current buffer; fall back to an empty
        # document when it is not (yet) valid JSON/YAML.
        try:
            if self._mode == 'json':
                dict_data = json_fixer(buffer.text)
            else:
                dict_data = yaml_fixer(buffer.text)
        except Exception:
            dict_data = {}
        self._token_list = self._tokenize(dict_data)
        if self._bottom_toolbar_attributes.get('debug'):
            self._bottom_toolbar_attributes['debug'] = 'tokens: {}'.format(self._token_list)
        # Quotes are stripped so a partially typed JSON key still matches.
        last_word = document.get_word_before_cursor().replace('"', '')
        if len(last_word) < 1:
            return
        for t in self._token_list:
            if t.startswith(last_word):
                # NOTE(review): .decode('utf-8') implies tokens are byte
                # strings (Python 2); under Python 3 str has no .decode —
                # confirm which runtime this module targets.
                return Suggestion(t[len(last_word):].decode('utf-8'))
| StarcoderdataPython |
3536286 | import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import pybullet as p
from lark import Token
from matplotlib import cm
from sim.grasp.lib import PandaGraspEnv, update_ANGLES, min_ray, max_ray
from spatial.automaton_planning import AutomatonPlanner
from spatial.geometry import Circle, Polygon, PolygonCollection, StaticObject
from spatial.logic import Spatial
class GraspableObject:
    """Pose and 2D footprint of a simulated object the robot can grasp."""

    def __init__(self, object_id, name, position, orientation, shape_info):
        self.id = object_id
        self.name = name
        self.pos = position        # world position (x, y, z)
        self.ori = orientation     # quaternion as produced by pybullet
        # ("circle", radius) or ("rect", (length1, length2))
        self.shape_info = shape_info
        self.angle = 0             # yaw in radians; refreshed by update_angles

    def get_shape(self):
        """Return the object's 2D footprint at its current pose."""
        if self.shape_info[0] == "circle":
            return Circle(self.pos, self.shape_info[1])
        if self.shape_info[0] == "rect":
            # Rectangles are built axis-aligned, then rotated by the yaw.
            return Polygon(rectangle_around_center(self.pos[:2], self.shape_info[1][0], self.shape_info[1][1])).rotate(self.angle, use_radians=True)
        raise ValueError("Unexpected shape info in graspable object!")

    def get_static_shape(self):
        # Wrap the footprint in the form expected by the spatial interpreter.
        return StaticObject(PolygonCollection({self.get_shape()}))
def get_positions(obj_list):
    """Collect the position of every graspable object, preserving order."""
    return [obj.pos for obj in obj_list]
def get_orientation(obj_list):
    """Collect the orientation of every graspable object, preserving order."""
    return [obj.ori for obj in obj_list]
def get_angles(obj_list):
    """Collect the yaw angle of every graspable object, preserving order.

    Fixed: the original read `go.angles`, but GraspableObject stores the
    attribute as `angle` (set in __init__ and by update_angles), so this
    function always raised AttributeError.
    """
    return [obj.angle for obj in obj_list]
def update_angles(obj_list):
    """Recompute each object's yaw from its pose via the sim helper."""
    pos_arr = get_positions(obj_list)
    ori_arr = get_orientation(obj_list)
    # update_ANGLES (sim.grasp.lib) maps poses/quaternions to yaw angles,
    # one per object, in input order.
    ang_arr = update_ANGLES(pos_arr, ori_arr)
    for i, angle in enumerate(ang_arr):
        obj_list[i].angle = angle
def update_objects(grasp_obj_list, pos_arr, ori_arr, angles_arr):
    """Write fresh poses/angles from the simulator back onto the objects."""
    for index, obj in enumerate(grasp_obj_list):
        obj.pos = pos_arr[index]
        obj.ori = ori_arr[index]
        obj.angle = angles_arr[index]
def rectangle_around_center(center: np.ndarray, box_length1: float, box_length2: float) -> np.ndarray:
    """Corner points (counter-clockwise, starting bottom-left) of an
    axis-aligned rectangle centered at `center` with the given side lengths."""
    half1 = box_length1 / 2
    half2 = box_length2 / 2
    offsets = [(-half1, -half2), (half1, -half2), (half1, half2), (-half1, half2)]
    return np.array([center + offset for offset in offsets])
def observation(spatial_interpreter, spatial_vars, ap_list):
    """Evaluate each atomic proposition in `ap_list` and return a bit
    string: '1' where the (quantitative) score is positive, else '0'."""
    bits = []
    for name in ap_list:
        score = spatial_interpreter.interpret(spatial_vars[name])
        bits.append('1' if score > 0 else '0')
    return ''.join(bits)
def gradient_map(spatial_interpreter, spatial_tree, graspable_object):
    """Evaluate `spatial_tree` at every position of the module-level gx/gy
    mesh, as if `graspable_object` were moved there; return the flat list
    of scores in mesh order.

    The interpreter's binding for the object is restored to the real pose
    before returning.

    Fixed: the per-position evaluation called the module-global `spatial`
    interpreter instead of the `spatial_interpreter` argument, so passing
    a different interpreter instance was silently ignored.
    """
    positions = np.c_[gx.ravel(), gy.ravel()]
    virtual_object = copy.deepcopy(graspable_object)
    grad_values = []
    # compute values
    for position in positions:
        # move the virtual_object and rebind it in the interpreter
        virtual_object.pos = position
        spatial_interpreter.reset_spatial_dict()
        spatial_interpreter.assign_variable(virtual_object.name, virtual_object.get_static_shape())
        grad_values.append(spatial_interpreter.interpret(spatial_tree))
    # reset the object position
    spatial_interpreter.reset_spatial_dict()
    spatial_interpreter.assign_variable(graspable_object.name, graspable_object.get_static_shape())
    return grad_values
def get_composite_constraint_map(spatial_interpreter, spat_var_dict, dfa_ap, object_to_move, constraints):
    """Combine the gradient maps of all constraints by element-wise minimum.

    Since we don't want to satisfy any constraint, keeping the minimum
    per position tracks the "least satisfied" constraint there.
    """
    composite = []
    for constraint in constraints:
        constraint_map = gradient_map_from_guard(
            spatial_interpreter, spat_var_dict, dfa_ap, object_to_move,
            guard=constraint)
        # Merge this constraint map into the composite map.
        if len(composite) > 0:
            composite = np.minimum(constraint_map, composite)
        else:
            composite = constraint_map
    return composite
def gradient_map_from_guard(spatial_interpreter, spat_var_dict, dfa_ap, object_to_move, guard):
    """Gradient map for a DFA guard (a string of '0'/'1'/'X' per AP).

    Maps of all non-don't-care APs are conjoined by element-wise minimum;
    a '0' literal contributes the negated map of that AP.
    """
    result = []
    for i, guard_val in enumerate(guard):
        # skip don't care variables
        if guard_val == 'X':
            continue
        tree = spat_var_dict[dfa_ap[i]]
        gradient_values = gradient_map(spatial_interpreter, tree, object_to_move)
        # if the guard has the variable as negative, flip the gradient map
        if guard_val == '0':
            gradient_values = [-1 * x for x in gradient_values]
        # merge results into the constraint_map (by logical conjunction)
        if len(result) > 0:
            result = np.minimum(result, gradient_values)
        else:
            result = gradient_values
    return result
def remove_obstacle_from_gradient(gradient, obj):
    """Mask (set to NaN) every position of the module-level gx/gy mesh that
    lies inside `obj`, so it can never be chosen as a placement target."""
    positions = np.c_[gx.ravel(), gy.ravel()]
    for i, position in enumerate(positions):
        if obj.contains_point(position):
            gradient[i] = np.nan
    return gradient
def find_best_point(map_2d, threshold):
    """Return the best reachable placement point, or None.

    A cell is admissible when its value exceeds `threshold`, it is not
    NaN-masked (constrained), and its distance from the arm base lies
    within [min_ray, max_ray]. Among admissible cells, the one with the
    maximum value is returned as an (x, y, 0) point using the module-level
    rx/ry mesh coordinates.
    """
    boolean_table = map_2d > threshold
    # forbid all positions that are constrained
    for i in range(map_2d.shape[0]):
        for j in range(map_2d.shape[1]):
            if np.isnan(map_2d[i][j]):
                boolean_table[i, j] = False
    # forbid all positions that are to close or too far from the grasping arm
    for i in range(map_2d.shape[0]):
        for j in range(map_2d.shape[1]):
            dist_from_origin = rx[j] ** 2 + ry[i] ** 2
            if dist_from_origin < min_ray ** 2 or dist_from_origin > max_ray ** 2:
                boolean_table[i, j] = False
    # copy the gradient, mask the values
    masked_map_2d = np.array(map_2d, copy=True)
    for i in range(masked_map_2d.shape[0]):
        for j in range(masked_map_2d.shape[1]):
            if not boolean_table[i, j]:
                masked_map_2d[i, j] = np.nan
    # No admissible positive cell remains: signal failure to the caller.
    if not np.any(masked_map_2d > 0):
        return None
    result = np.where(masked_map_2d == np.nanmax(masked_map_2d))
    # zip the 2 arrays to get the exact coordinates
    list_of_coordinates = list(zip(result[0], result[1]))
    # Ties are broken arbitrarily by taking the first maximum found.
    id_x = list_of_coordinates[0][0]
    id_y = list_of_coordinates[0][1]
    return np.array([rx[id_y], ry[id_x], 0])
def get_relevant_objects(targets, dfa_ap, spat_vars):
    """Names of pickable objects mentioned by any non-don't-care AP of the
    given DFA target guards."""
    mentioned = set()
    for target in targets:
        for index, bit in enumerate(target):
            # don't-care bits reference no objects
            if bit == 'X':
                continue
            subtree = spat_vars[dfa_ap[index]]
            # collect every NAME token (object identifier) in the subtree
            for token in subtree.scan_values(lambda leaf: isinstance(leaf, Token)):
                if token.type == 'NAME':
                    mentioned.add(token.value)
    # only these objects can be picked up
    pickable = ["bottle", "banana", "mug", "gelatin", "sugarbox", "can", "crackerbox", "kanelbulle"]
    return mentioned.intersection(pickable)
if __name__ == '__main__':
    # spatial interpreter (quantitative mode yields real-valued scores)
    spatial = Spatial(quantitative=True)
    # automaton-based planner
    planner = AutomatonPlanner()
    # pybullet grasping simulation
    sim = PandaGraspEnv(1)
    # specification: eventually arrange all items around/inside the plate
    spec = "(F(kanelbulle enclosedin plate))"
    spec += "& (F((banana dist plate <= 0.3) & (banana dist plate >= 0.1) & (banana leftof plate) & (banana below plate)))"
    spec += "& (F((mug dist plate <= 0.3) & (mug dist plate >= 0.1) & (mug leftof plate) & (mug above plate)))"
    spec += "& (F((bottle dist plate <= 0.3) & (bottle dist plate >= 0.1) & (bottle leftof plate) & (bottle above plate)))"
    spec += "& (F((sugarbox dist plate >= 0.4) & (sugarbox dist crackerbox <= 0.2)))"
    spec_tree = spatial.parse(spec)  # build the spatial tree
    planner.tree_to_dfa(spec_tree)  # transform the tree into an automaton
    # print the corresponding LTLf formula
    print("\nTemporal Structure:", planner.temporal_formula)
    print("Planner DFA Size:", len(planner.dfa.nodes), len(planner.dfa.edges), "\n")
    # parameters
    step_size = 0.1
    x_range = [-max_ray, max_ray]
    y_range = [-max_ray, max_ray]
    # gradient grid mesh (rx/ry/gx/gy are read by the gradient helpers above)
    rx, ry = np.arange(x_range[0], x_range[1], step_size), np.arange(y_range[0], y_range[1], step_size)
    gx, gy = np.meshgrid(rx, ry)
    # statistics
    # NOTE(review): these counters are never updated below.
    counter_fallen = 0  # number of times an object has fallen
    counter_out = 0  # number of times an object is out of the reachable region
    action_fail = 0  # number of fails in reaching the target position
    counter_actions = 0  # number of actions performed
    # object initialization
    h = sim.get_table_height() + 0.08
    graspable_objects = [
        GraspableObject(object_id=0,
                        name='bottle',
                        position=np.array([-0.15, 0.5, h]),
                        orientation=p.getQuaternionFromEuler([math.pi / 2, 0, 0]),
                        shape_info=['circle', 0.024]),
        GraspableObject(object_id=1,
                        name='banana',
                        position=np.array([0.45, 0.23, h]),
                        orientation=p.getQuaternionFromEuler([0, 0, 0]),
                        shape_info=['rect', (0.05, 0.015)]),
        GraspableObject(object_id=2,
                        name='mug',
                        position=np.array([0.45, -0.2, h]),
                        orientation=p.getQuaternionFromEuler([0, 0, 0]),
                        shape_info=['circle', 0.03]),
        GraspableObject(object_id=3,
                        name='gelatin',
                        position=np.array([0.05, -0.55, h - 0.05]),
                        orientation=p.getQuaternionFromEuler([math.pi / 2, +math.pi / 2 + math.pi / 14, math.pi / 2]),
                        shape_info=['rect', (0.05, 0.025)]),
        GraspableObject(object_id=4,
                        name='sugarbox',
                        position=np.array([0.3, 0.4, h]),
                        orientation=p.getQuaternionFromEuler([0, 0, math.pi / 2]),
                        shape_info=['rect', (0.05, 0.025)]),
        GraspableObject(object_id=5,
                        name='can',
                        position=np.array([-0.1, -0.45, h]),
                        orientation=p.getQuaternionFromEuler([0, 0, 0]),
                        shape_info=['circle', 0.03]),
        GraspableObject(object_id=6,
                        name='crackerbox',
                        position=np.array([0., -0.45, h]),
                        orientation=p.getQuaternionFromEuler([0, 0, math.pi / 2]),
                        shape_info=['rect', (0.05, 0.025)]),
        GraspableObject(object_id=7,
                        name='kanelbulle',
                        position=np.array([0.2, 0.55, h - .01]),
                        orientation=p.getQuaternionFromEuler([0, 0, 0]),
                        shape_info=['circle', 0.03]),
        GraspableObject(object_id=8,
                        name='plate',
                        position=np.array([0.4, 0., h]),
                        orientation=p.getQuaternionFromEuler([0, 0, 0]),
                        shape_info=['circle', 0.05])
    ]
    # object initialization - angles
    update_angles(graspable_objects)
    # object initialization - spatial variables
    for grasp_obj in graspable_objects:
        spatial.assign_variable(grasp_obj.name, grasp_obj.get_static_shape())
    # this dictionary contains a variable name to spatial tree mapping
    spatial_variables = planner.get_variable_to_tree_dict()
    # you have to define in which order you pass variable assignments
    trace_ap = list(spatial_variables.keys())
    # resets the automaton current state to the initial state (doesn't do anything here)
    planner.reset_state()
    # before you ask anything from the automaton, provide a initial observation of each spatial sub-formula
    init_obs = observation(spatial, spatial_variables, trace_ap)
    planner.dfa_step(init_obs, trace_ap)
    # load robot and object in the chosen pose
    print("Initializing Simulation... \n")
    sim.reset(get_positions(graspable_objects), get_orientation(graspable_objects), "../urdf")
    # planning loop: repeat until the DFA reaches an accepting state
    while not planner.currently_accepting():
        target_set, constraint_set, edge = planner.plan_step()
        print("Considering", target_set, "with constraints", constraint_set, "...")
        # no path to accepting state exists, we're doomed
        if not target_set:
            print("It is impossible to satisfy the specification. Exiting planning loop...\n")
            break
        target_obj_id = None
        target_point = None
        # get all objects relevant to the current targets
        relevant_objects = get_relevant_objects(target_set, planner.get_dfa_ap(), spatial_variables)
        for obj_name in relevant_objects:
            print("Considering", obj_name, "...")
            # we already found a candidate and can directly execute
            if target_obj_id is not None:
                break
            # obtain the object information from the name
            relevant_obj = next(x for x in graspable_objects if x.name == obj_name)
            # compute composite constraint map
            composite_constraint_map = []
            if constraint_set:
                composite_constraint_map = get_composite_constraint_map(spatial_interpreter=spatial,
                                                                        spat_var_dict=spatial_variables,
                                                                        dfa_ap=planner.get_dfa_ap(),
                                                                        object_to_move=relevant_obj,
                                                                        constraints=constraint_set)
            # try out all target options
            for target in target_set:
                # get target map
                target_map = gradient_map_from_guard(spatial_interpreter=spatial,
                                                     spat_var_dict=spatial_variables,
                                                     dfa_ap=planner.get_dfa_ap(),
                                                     object_to_move=relevant_obj,
                                                     guard=target)
                # remove the composite constraint from the map
                if constraint_set:
                    assert len(target_map) == len(composite_constraint_map)
                    for v in range(len(target_map)):
                        if composite_constraint_map[v] > 0:
                            target_map[v] = np.nan
                # remove the objects from the map, but skip the plate (so we can put sth into the plate)
                # and skip the object we are moving
                inflated_shape = Circle(relevant_obj.pos, 0.08)
                for grasp_obj in graspable_objects:
                    if grasp_obj.name != "plate" or grasp_obj.name == relevant_obj.name:
                        target_map = remove_obstacle_from_gradient(target_map, grasp_obj.get_shape() - inflated_shape)
                # find the best point for the object
                target_point = find_best_point(np.array(target_map).reshape(gx.shape), threshold=0)
                # check if any positive values exist
                if target_point is not None:
                    # this object does it
                    target_obj_id = relevant_obj.id
                    # plot gradient values
                    fig = plt.figure()
                    ax = fig.add_subplot(111)
                    values_2d = np.array(target_map).reshape(gx.shape)
                    granularity = 0.05
                    con = ax.contourf(gx, gy, values_2d,
                                      levels=np.arange(np.nanmin(values_2d) - granularity, np.nanmax(values_2d) + granularity, granularity),
                                      cmap=cm.coolwarm,
                                      alpha=0.3,
                                      antialiased=False)
                    # plot objects
                    for grasp_obj in graspable_objects:
                        grasp_obj.get_shape().plot(ax, label=False, color='r')
                    # plot target point
                    plt.plot(target_point[0], target_point[1], "ok")
                    plt.autoscale()
                    plt.colorbar(con)
                    plt.show()
                    break
        # this edge is completely impossible in this framework, we prune the edge from the automaton
        if target_obj_id is None:
            print("Chosen edge turned out to be impossible. Pruning the edge...\n")
            planner.dfa.remove_edge(edge[0], edge[1])
        else:
            # chose one of the target points and execute the grasp
            pos, ori, ang = sim.grasp(target_obj_id, target_point)
            # update position, orientation and angles
            update_objects(graspable_objects, pos, ori, ang)
            # update spatial variables
            for grasp_obj in graspable_objects:
                spatial.assign_variable(grasp_obj.name, grasp_obj.get_static_shape())
            # update automaton
            init_obs = observation(spatial, spatial_variables, trace_ap)
            planner.dfa_step(init_obs, trace_ap)
    print("Algorithm terminated. Specification satisfied:", planner.currently_accepting())
    # we are done, close the simulator
    sim.close()
| StarcoderdataPython |
1725747 | <reponame>hyperpower/CarpioPlus<filename>examples/4-4-1-reconstruct_eclipse/plot.py
import matplotlib
import matplotlib.pyplot as plt
import os, sys
import numpy as np
import string
import math
import operator
import multiprocessing
from multiprocessing import Pool
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.size'] = 12
PATH_CASES = os.path.abspath(os.path.join(__file__, "../.."))
PATH_THIS = os.path.abspath(os.path.join(__file__, "../"))
PATH_DATA = os.path.abspath(os.path.join(PATH_THIS, "data"))
PATH_FIG = os.path.abspath(os.path.join(PATH_THIS, "fig"))
PATH_PROJECT = os.path.abspath(os.path.join(PATH_CASES, "../"))
PATH_PYTOOLS = os.path.abspath(os.path.join(PATH_CASES, "pytools"))
sys.path.append(PATH_PYTOOLS)
import filetool as FT
def file_name(namedir, namevar):
    """Return the '_'-split name parts of every regular file in *namedir*
    whose name starts with the component *namevar* (text before the first '_').

    Each match is returned as the full list produced by ``str.split('_')``,
    e.g. ``phi_4_0.04.txt`` -> ``['phi', '4', '0.04.txt']``.
    """
    matches = []
    for entry in os.listdir(namedir):
        # only consider regular files, never sub-directories
        if not os.path.isfile(os.path.join(namedir, entry)):
            continue
        parts = entry.split("_")
        if parts[0] == namevar:
            matches.append(parts)
    return matches
def split(seq, num):
    """Partition *seq* into *num* consecutive slices of (nearly) equal size.

    Uses a floating-point cursor so leftover elements are spread across the
    chunks instead of all landing in the last one.
    """
    chunk = len(seq) / float(num)
    total = len(seq)
    pieces = []
    cursor = 0.0
    while cursor < total:
        pieces.append(seq[int(cursor):int(cursor + chunk)])
        cursor += chunk
    return pieces
def plot_one(strstep, strtime):
    """Plot the solution profile from data/phi_<step>_<time>.txt and save
    it to fig/comp_<step>.png (step zero-padded to six digits).

    *strstep* and *strtime* are strings as split out of the data file name.
    """
    print("Draw : ", strstep, " ", strtime)
    plt.figure(figsize=(6, 4))
    """
    Set labels
    """
    plt.xlabel(r'x')
    plt.ylabel(r'$\phi$')
    """
    Set range
    """
    x_st = -10
    x_ed = 100
    y_st = -0.5
    y_ed = 1.5
    plt.xlim([x_st, x_ed])
    plt.ylim([y_st, y_ed])
    #plt.xscale('log')
    #plt.yscale('log')
    """
    Data part
    """
    # FT.PointData parses the x-coordinate / value columns of the data file
    fne = PATH_DATA + "/phi_" + strstep + "_" + strtime + ".txt"
    pe = FT.PointData(fne)
    arrx = pe.get_coo_x()
    arre = pe.get_arr_val()
    arrv = []  # NOTE(review): unused — candidate for removal
    plt.plot(arrx, arre)
    plt.text(10, 1.25, "Time = "+ "%.2f" % float(strtime))
    plt.text(10, 1.00, "Step = "+ "%04d" % float(strstep))
    # plt.legend(llg, scheme, loc= 'upper right')
    plt.grid(True)
    #plt.axes().set_aspect('equal')
    plt.tight_layout()
    plt.savefig(PATH_FIG + "/comp_" + "%06d" % int(strstep) +".png")
    plt.close()
    # plt.show()
def read_error_file(scheme):
    """Parse the error history file ``error_<scheme>``.

    Each line has the layout ``label value label value ...`` so the numeric
    values sit at the odd indexes 1, 3, 5, 7, 9.

    Returns a list of ``[step, t, e1, e2, ei]`` rows (all floats).

    NOTE(review): ``PATH_RESULT`` is not defined anywhere in this module
    (only PATH_DATA / PATH_FIG / ... are) — confirm where it should come from.
    """
    filename = PATH_RESULT + "/error_" + scheme
    content = []
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(filename, 'r') as f:
        for line in f:
            fields = line.strip().split()
            step = float(fields[1])
            t = float(fields[3])
            e1 = float(fields[5])
            e2 = float(fields[7])
            ei = float(fields[9])
            content.append([step, t, e1, e2, ei])
    return content
def plot_error1():
    """Plot the L1-error history of each advection scheme into error_1.png.

    NOTE(review): depends on a helper ``_col`` and (via read_error_file) on
    ``PATH_RESULT``; neither is defined in this file — confirm their origin
    before running.
    """
    plt.figure(figsize=(4, 4))
    """
    Set labels
    """
    plt.ylabel(r'Error$_1$')
    plt.xlabel(r'Step')
    """
    Set range
    """
    x_st = 0
    x_ed = 200
    y_st = -0.5
    y_ed = 1.5
    #plt.xlim([x_st, x_ed])
    #plt.ylim([y_st, y_ed])
    #plt.xscale('log')
    #plt.yscale('log')
    """
    Data part
    """
    scheme = ["upwind1", "center", "center4"]
    mat = []
    for s in scheme:
        m = read_error_file(s)
        mat.append(m)
    llg = []
    for i, v in enumerate(scheme):
        # columns: 0=step, 2=L1, 3=L2, 4=Linf; only L1 is drawn here
        arrx = _col(mat[i], 0)
        arre1 = _col(mat[i], 2)
        arre2 = _col(mat[i], 3)
        arrei = _col(mat[i], 4)
        lg, = plt.plot(arrx, arre1)
        llg.append(lg)
    #plt.text(10, 1.25, "Time = "+ "%.2f" % float(strtime))
    #plt.text(10, 1.00, "Step = "+ "%04d" % float(strstep))
    plt.legend(llg, scheme, loc= 'upper left')
    plt.grid(True)
    #plt.axes().set_aspect('equal')
    plt.tight_layout()
    plt.savefig("error_1.png")
    plt.close()
    # plt.show()
def plot_error2():
    """Plot the L2-error history of each advection scheme into error_2.png.

    NOTE(review): depends on undefined helpers ``_col`` and ``PATH_RESULT``
    (see plot_error1).
    """
    plt.figure(figsize=(4, 4))
    """
    Set labels
    """
    plt.ylabel(r'Error$_2$')
    plt.xlabel(r'Step')
    """
    Set range
    """
    x_st = 0
    x_ed = 200
    y_st = -0.5
    y_ed = 1.5
    #plt.xlim([x_st, x_ed])
    #plt.ylim([y_st, y_ed])
    #plt.xscale('log')
    #plt.yscale('log')
    """
    Data part
    """
    scheme = ["upwind1", "center", "center4"]
    mat = []
    for s in scheme:
        m = read_error_file(s)
        mat.append(m)
    llg = []
    for i, v in enumerate(scheme):
        arrx = _col(mat[i], 0)
        arre1 = _col(mat[i], 2)
        arre2 = _col(mat[i], 3)
        arrei = _col(mat[i], 4)
        lg, = plt.plot(arrx, arre2)
        llg.append(lg)
    #plt.text(10, 1.25, "Time = "+ "%.2f" % float(strtime))
    #plt.text(10, 1.00, "Step = "+ "%04d" % float(strstep))
    plt.legend(llg, scheme, loc= 'upper left')
    plt.grid(True)
    #plt.axes().set_aspect('equal')
    plt.tight_layout()
    plt.savefig("error_2.png")
    plt.close()
def plot_errori():
    """Plot the L-infinity error history of each scheme into error_i.png.

    NOTE(review): depends on undefined helpers ``_col`` and ``PATH_RESULT``
    (see plot_error1).
    """
    plt.figure(figsize=(4, 4))
    """
    Set labels
    """
    plt.ylabel(r'Error$_{\inf}$')
    plt.xlabel(r'Step')
    """
    Set range
    """
    x_st = 0
    x_ed = 200
    y_st = -0.5
    y_ed = 1.5
    #plt.xlim([x_st, x_ed])
    #plt.ylim([y_st, y_ed])
    #plt.xscale('log')
    #plt.yscale('log')
    """
    Data part
    """
    scheme = ["upwind1", "center", "center4"]
    mat = []
    for s in scheme:
        m = read_error_file(s)
        mat.append(m)
    llg = []
    for i, v in enumerate(scheme):
        arrx = _col(mat[i], 0)
        arre1 = _col(mat[i], 2)
        arre2 = _col(mat[i], 3)
        arrei = _col(mat[i], 4)
        lg, = plt.plot(arrx, arrei)
        llg.append(lg)
    #plt.text(10, 1.25, "Time = "+ "%.2f" % float(strtime))
    #plt.text(10, 1.00, "Step = "+ "%04d" % float(strstep))
    plt.legend(llg, scheme, loc= 'upper left')
    plt.grid(True)
    #plt.axes().set_aspect('equal')
    plt.tight_layout()
    plt.savefig("error_i.png")
    plt.close()
def plot_all():
    """Render one frame per 'exact_*' result file in parallel, then glue the
    frames into comp.gif with ImageMagick's ``convert``.

    Fixes vs. the original: integer floor division for the worker count
    (``/`` yields a float on Python 3 and broke the chunk loop), a floor of
    one chunk on single-core machines, and the Python-2-only ``xrange`` is
    gone — all chunks are simply iterated directly.

    NOTE(review): ``PATH_RESULT`` is not defined in this module; confirm
    where it should come from.
    """
    matfu = file_name(PATH_RESULT, "exact")
    print(len(matfu))
    matfc = list(matfu)
    #multiprocessing.freeze_support()
    pool = multiprocessing.Pool()
    # use half the cores, but never fewer than one chunk
    cpus = max(1, multiprocessing.cpu_count() // 2)
    results = []
    cmatfs = split(matfc, cpus)
    print("Thread num : ", len(cmatfs))
    for mat in cmatfs:
        for one in mat:
            # one == [varname, step, time.txt] as produced by file_name()
            results.append(pool.apply_async(plot_one, args=(one[1], one[2],)))
    pool.close()
    pool.join()
    os.system("convert -delay 5 -loop 0 ./fig/comp_*.png comp.gif")
def mp():
    """Minimal multiprocessing.Pool usage example (not called from main()).

    NOTE(review): submits a function ``test`` that is not defined in this
    module; the failure is swallowed because the async results are ignored.
    """
    lists=[1,2,3]
    pool=Pool(processes=10) # the max processes number
    for i in range(0, 50):
        pool.apply_async(test,args=(i,))
    pool.close()
    pool.join()
def plot_illustration_fig():
    """Draw the schematic advection figure (initial square pulse and the
    same pulse advected by u=1 for one time unit) into fig/illustration.png."""
    plt.figure(figsize=(6, 4))
    """
    Set labels
    """
    plt.xlabel(r'x')
    plt.ylabel(r'$\phi$')
    """
    Set range
    """
    x_st = 0
    x_ed = 2
    y_st = -0.25
    y_ed = 1.25
    plt.xlim([x_st, x_ed])
    plt.ylim([y_st, y_ed])
    #plt.xscale('log')
    #plt.yscale('log')
    """
    Data part
    """
    # square pulse between 0.25 and 0.75, shifted by +1.0 after advection
    arrx_start = [x_st, 0.25, 0.25, 0.75, 0.75, x_ed]
    arr_v = [0.0, 0.0, 1.0, 1.0 , 0.0, 0.0]
    arrx_end = [x_st, 1.25, 1.25, 1.75, 1.75, x_ed]
    plt.plot(arrx_end , arr_v, linewidth=2.0)
    plt.plot(arrx_start, arr_v, linewidth=3.0)
    plt.text(0.25, 1.05, "Time = "+ "%.2f" % float(0.0), fontsize = 'large')
    plt.text(1.25, 1.05, "Time = "+ "%.2f" % float(1.0), fontsize = 'large')
    plt.annotate('Initial profile',
                 xy=(0.6, 0.75), xytext=(0.85, 0.85), fontsize = 'large',
                 arrowprops = dict(facecolor='black', shrink = 0.05),
                 )
    plt.annotate('Advected profile',
                 xy=(1.75, 0.35), xytext=(0.9, 0.2), fontsize = 'large',
                 arrowprops = dict(facecolor='black', shrink = 0.05),
                 )
    plt.arrow(0.4, 0.5, 0.2, 0.0, width=0.01, color = "k")
    plt.text(0.42, 0.55, "u = 1.0", fontsize = 'large')
    # plt.legend(llg, scheme, loc= 'upper right')
    plt.grid(True)
    #plt.axes().set_aspect('equal')
    plt.tight_layout()
    plt.savefig(PATH_FIG + "/illustration.png")
    plt.close()
    # plt.show()
def main():
    """Entry point: currently only renders the illustration figure; the
    per-step / error plots are kept below for manual use (commented out)."""
    plot_illustration_fig()
    # stri = "4"
    # strt = "4.0000e-02"
    # plot_one(stri, strt)
    #plot_all()
    #plot_error1()
    #plot_error2()
    #plot_errori()
if __name__ == '__main__':
main()
| StarcoderdataPython |
121117 | <reponame>guci314/Chatbot_CN<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
"""
Get the table of actor_to_movie and movie_to_genre.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import pymysql
from pymysql import connections
import numpy as np
import re
class connec_mysql(object):
    """Wrapper around a local MySQL connection that fills the
    ``actor_to_movie`` and ``movie_to_genre`` join tables of the
    hudong_baike database from the free-text columns of ``actor``/``movie``.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded; '<PASSWORD>' is a
        # placeholder that must be replaced before running.
        self.conn = pymysql.connect(
            host='localhost',
            user='root',
            passwd='<PASSWORD>',
            db='hudong_baike',
            charset='utf8mb4',
            use_unicode=True
        )
        self.cursor = self.conn.cursor()
    def process_act_movie(self):
        """Link each actor to the movies listed in actor_repworks.

        Representative works are split on comma/slash/enumeration-comma/
        space, stripped of book-title marks, and matched against
        movie.movie_chName; each hit inserts a row into actor_to_movie.
        """
        actor_movie_id = 0
        self.cursor.execute("SELECT MAX(actor_id) FROM actor")
        max_actor_id = self.cursor.fetchall()[0][0]
        assert isinstance(max_actor_id, int)
        # actor_id values are assumed to be contiguous 1..max
        for actor_id in range(1, max_actor_id + 1):
            self.cursor.execute("SELECT actor_repworks FROM actor WHERE actor_id = {};".format(actor_id))
            result = self.cursor.fetchall()
            assert np.shape(result) == (1, 1) # if didn't exist, return (0, )
            repworks = re.split(u"[,/、 ]", result[0][0] )
            try:
                assert len(repworks) > 0
                for repwork in repworks:
                    repwork = repwork.strip(u" 《》")
                    self.cursor.execute("SELECT movie_id FROM movie WHERE movie_chName = %s", repwork)
                    check_movie_id = self.cursor.fetchall()
                    if len(check_movie_id) != 0:
                        self.cursor.execute("INSERT INTO actor_to_movie (actor_movie_id, actor_id, movie_id) VALUES (%s, %s, %s)", (actor_movie_id, actor_id, check_movie_id[0][0]) )
                        self.conn.commit()
                        actor_movie_id += 1
            except Exception as e:
                # broad catch keeps the batch running over bad rows
                print("Get a error with ", e, "Maybe this actor has no represent works")
                continue
    def process_movie_gen(self):
        """Link each movie to its genres parsed from movie.movie_genre,
        inserting one movie_to_genre row per (movie, known genre) pair."""
        movie_gen_id = 0
        self.cursor.execute("SELECT MAX(movie_id) FROM movie")
        max_movie_id = self.cursor.fetchall()[0][0]
        assert isinstance(max_movie_id, int)
        for movie_id in range(1, max_movie_id + 1):
        # for movie_id in range(1, 1 + 10):
            self.cursor.execute("SELECT movie_genre FROM movie WHERE movie_id = {};".format(movie_id))
            result = self.cursor.fetchall()
            # skip movies without a (single) genre string
            if np.shape(result) != (1, 1):
                continue
            movie_genres = re.split(u"[,/、 ]", result[0][0] )
            # print("movie_genres: ", movie_genres)
            try:
                assert len(movie_genres) > 0
                for movie_genre in movie_genres:
                    self.cursor.execute("SELECT genre_id FROM genre WHERE genre_name = %s", movie_genre)
                    check_genre_id = self.cursor.fetchall()
                    if len(check_genre_id) != 0:
                        self.cursor.execute("INSERT INTO movie_to_genre (movie_genre_id, movie_id, genre_id) VALUES (%s, %s, %s)", (movie_gen_id, movie_id, check_genre_id[0][0]) )
                        self.conn.commit()
                        movie_gen_id += 1
            except Exception as e:
                print("Get a error with ", e)
                continue
if __name__ == '__main__':
connec = connec_mysql()
# connec.process_act_movie()
connec.process_movie_gen()
| StarcoderdataPython |
1788531 | <filename>p811-subdomain-visit-count.py<gh_stars>0
# A website domain like "discuss.leetcode.com" consists of various subdomains.
# At the top level, we have "com", at the next level, we have "leetcode.com",
# and at the lowest level, "discuss.leetcode.com". When we visit a domain like
# "discuss.leetcode.com", we will also visit the parent domains "leetcode.com"
# and "com" implicitly.
# Now, call a "count-paired domain" to be a count (representing the number of
# visits this domain received), followed by a space, followed by the address.
# An example of a count-paired domain might be "9001 discuss.leetcode.com".
# We are given a list cpdomains of count-paired domains. We would like a list
# of count-paired domains, (in the same format as the input, and in any order),
# that explicitly counts the number of visits to each subdomain.
class Solution:
    def subdomainVisits(self, cpdomains):
        """Expand count-paired domains into per-subdomain visit totals.

        :type cpdomains: List[str]   # e.g. ["9001 discuss.leetcode.com"]
        :rtype: List[str]            # e.g. ["9001 discuss.leetcode.com", ...]
        """
        from collections import Counter
        totals = Counter()
        for entry in cpdomains:
            count_str, domain = entry.split()
            count = int(count_str)
            labels = domain.split(".")
            # every suffix of the dotted name is an implicitly-visited domain
            for start in range(len(labels)):
                totals[".".join(labels[start:])] += count
        return ["{} {}".format(total, name) for name, total in totals.items()]
sol = Solution().subdomainVisits
print(sol(["9001 discuss.leetcode.com"]))
print(sol(["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]))
| StarcoderdataPython |
3424944 | <gh_stars>1-10
from django.db import models
from .models import *
from django import forms
class setPriceCurrencyForm(forms.ModelForm):
    """ModelForm for creating/updating a Price record (kWh price plus
    currency name and abbreviation), with placeholder text on each input."""
    kwh_price = forms.FloatField(widget=forms.NumberInput(attrs={'placeholder': 'Insert price'}))
    currency_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Insert currency name'}))
    currency_abbr = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Insert abbreviation or symbol'}))

    class Meta:
        model = Price
        fields = ("kwh_price", "currency_name", "currency_abbr" )

    def __init__(self, *args, **kwargs):
        # NOTE(review): this override only delegates to super(); it can be
        # removed unless extra initialisation is planned here.
        super(setPriceCurrencyForm, self).__init__(*args, **kwargs)
| StarcoderdataPython |
3272554 | #!/usr/bin/python
# Gets stuff from Pitchfork's awesome 'Best New Music' section
# Note: this is just regexing the HTML... so may stop working if page structure changes
# This is not written by, or affiliated with Pitchfork at all
import urllib2
import re
def GetPitchforkData():
    """Fetch Pitchfork's 'Best New Music' page and return the HTML chunk
    between the two 'bnm-hub-features-1' markers (first row of entries).

    NOTE(review): regex-scraping raw HTML is fragile; the ``[0]`` lookup
    raises IndexError if the page layout changes.
    """
    # grab all the data...
    web_page = urllib2.urlopen('http://www.pitchfork.com/best')
    all_the_html = web_page.read()
    # parse for Best New stuff...
    demarcator = 'bnm-hub-features-1' # surrounds first row of best new things
    relevant_block = re.findall(demarcator+'(.*?)'+demarcator, all_the_html)[0]
    return relevant_block
def BestNewArtist():
    """Return the current 'Best New Music' artist (first <h1> in the block)."""
    block = GetPitchforkData()
    matches = re.findall('h1>'+'(.*?)'+'</h1', block)
    return matches[0]
def BestNewAlbum():
    """Return the current 'Best New Music' album (first <h2> in the block)."""
    block = GetPitchforkData()
    matches = re.findall('h2>'+'(.*?)'+'</h2', block)
    return matches[0]
# Use this to test; it should print artist and album on command line
def main():
    """Smoke test: print the current Best New artist and album to stdout.

    NOTE(review): Python 2 print statements (consistent with the urllib2
    import at the top of this file).
    """
    test_artist = BestNewArtist()
    test_album = BestNewAlbum()
    print '\n>>>>>>>>>>>>>>>>>\n'
    print test_artist
    print test_album
    print '\n>>>>>>>>>>>>>>>>>\n'
if __name__ == '__main__':
main()
| StarcoderdataPython |
1720496 | <filename>test/test_run.py<gh_stars>1-10
from copy2hash import copy2hash
from pathlib import Path
# Baseline mapping mirroring copy2hash's default CLI options; the tests
# below feed it to get_args/command_line_runner and compare the result.
__refargs__ = {
    "infile": [],
    "report": ["json"],
    "report_name": "copy_report",
    "sha": ["sha256"],
    "directory": None,
    "move": False,
    "file_extension": False,
    "file_suffix": False,
    "no_file_extension": False,
    "verbose": False,
    "version": False,
}
class TestParser(object):
    """copy2hash.get_args should echo back the option mapping it was given."""

    def test_parser(self):
        assert __refargs__ == copy2hash.get_args(opt=__refargs__)

    def test_parser_txt(self):
        fnames = Path("test").glob("*.txt")
        # NOTE(review): 'args' aliases (and mutates) the shared __refargs__
        # dict, leaking state into later tests; consider dict(__refargs__).
        args = __refargs__
        args["infile"] = fnames
        assert args == copy2hash.get_args(opt=args)

    def test_parser_all(self):
        fnames = Path("test").glob("*")
        args = __refargs__
        args["infile"] = fnames
        assert args == copy2hash.get_args(opt=args)

    def test_parser_version(self):
        # NOTE(review): identical to test_parser_all despite the name —
        # presumably it was meant to exercise a --version flag.
        fnames = Path("test").glob("*")
        args = __refargs__
        args["infile"] = fnames
        assert args == copy2hash.get_args(opt=args)
class TestCommandLine(object):
    """Smoke tests: command_line_runner should not raise for these inputs
    (the trailing ``assert 1`` only asserts the call returned)."""

    def test_nofiles(self):
        # NOTE(review): mutates the shared __refargs__ dict in place.
        args = __refargs__
        args["infile"] = []
        copy2hash.command_line_runner(opt=args)
        assert 1

    def test_local_directory(self):
        args = __refargs__
        args["infile"] = ["."]
        copy2hash.command_line_runner(opt=args)
        assert 1
6576706 | <filename>backend/app/routing.py
# notify/routing.py
from django.urls import re_path
from . import consumers
# Route WebSocket connections at notify/<username>/ to the NotifyConsumer
# ASGI application; <username> is captured as a URL kwarg for the consumer.
websocket_urlpatterns = [
    re_path(r'notify/(?P<username>\w+)/$', consumers.NotifyConsumer.as_asgi()),
]
| StarcoderdataPython |
6535149 | from . import dataset
from decoder import int_type
import os.path
import re
class _CsvDataIterCallable:
def __init__(self, file_path, converters):
self._file_path = file_path
self._converters = converters
def __call__(self):
return _CsvDataIterator(self._file_path, self._converters)
class _CsvDataIterator:
    """Row iterator over a CSV data file.

    Skips the two header lines (field names, field types) and yields each
    subsequent row with every field run through its converter.

    NOTE(review): the file handle is only released when the iterator is
    garbage-collected; there is no explicit close().
    """

    def __init__(self, file_path, converters):
        self._fp = open(file_path)
        next(self._fp)  # skip the field-name header line
        next(self._fp)  # skip the field-type header line
        self._converters = converters

    def __iter__(self):
        return self

    def __next__(self):
        # plain split(",") — quoted fields containing commas are not handled;
        # trailing newlines are tolerated by the numeric converters
        raw_fields = next(self._fp).split(",")
        return [converter(x) for converter, x in zip(self._converters, raw_fields)]
def _all_csv_files():
    """Collect every .csv file beneath this module's directory.

    Returns a sorted list of (path relative to this module, absolute path)
    tuples.
    """
    base = os.path.dirname(__file__)
    found = []
    for dirpath, dirnames, filenames in os.walk(base):
        rel_dir = os.path.relpath(dirpath, base)
        for name in filenames:
            if name.endswith(".csv"):
                found.append((os.path.join(rel_dir, name),
                              os.path.join(dirpath, name)))
    found.sort()
    return found
def _parse_type(t):
    """Parse a type token like 'u8', 's16' or 'f100(s16)'.

    Returns a (converter, IntType) pair; the optional ``f<multiplier>(...)``
    wrapper makes the converter scale a float by <multiplier> and round it.
    """
    match = re.match(r"(?:f(\d+)\()?([us]\d\d?)\)?", t)
    if not match:
        raise ValueError("Type " + t + " doesn't match the regex")
    base_type = int_type.IntType.from_string(match[2])
    if match[1] is None:
        # plain integer field
        return int, base_type
    scale = float(match[1])
    def scaled(s):
        return round(float(s) * scale)
    return scaled, base_type
def _open_dataset(csv_path, csv_name):
    """Build a Dataset from a CSV whose first line holds the field names
    and whose second line holds the field type tokens (see _parse_type).

    The returned Dataset iterates lazily via _CsvDataIterCallable, so the
    row count is unknown (None).
    """
    with open(csv_path, "r") as fp:
        field_names = [x.strip() for x in next(fp).split(",")]
        field_types = []
        converters = []
        for t in [x.strip() for x in next(fp).split(",")]:
            converter, converted_t = _parse_type(t)
            field_types.append(converted_t)
            converters.append(converter)
    iter_callable = _CsvDataIterCallable(csv_path, converters)
    return dataset.Dataset(
        csv_name,
        field_names,
        field_types,
        iter_callable,
        None, # Unknown length
    )
def all_datasets():
    """Yield a Dataset for every CSV file found under this package."""
    for rel_name, abs_path in _all_csv_files():
        yield _open_dataset(abs_path, rel_name)
if __name__ == "__main__":
print("All datasets")
dataset.show_content(all_datasets())
| StarcoderdataPython |
6645812 | # coding=utf-8
# given a wav and start and end, shwo the spectrum graph
import sys,os
from ChunWai import *
#from audiolab import wavread
#from audiolab import wavwrite
#from scikits.samplerate import resample # we are not using this
import numpy as np
import matplotlib
matplotlib.use('agg')
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
def usage():
    """Print the command-line options and terminate via sys.exit().

    NOTE(review): Python 2 print statement, consistent with the rest of
    this script.
    """
    print """
    -wav
    -start
    -end
    -pic : if set , will save pic
    """
    sys.exit()
# Script body: load a 16 kHz mono wav, compute an STFT power envelope,
# detect gunshot peaks between the optional -start/-end seconds, and plot
# (or save with -pic) the annotated power curve and spectrogram.
# NOTE(review): Python 2 syntax (print statements) throughout.
if __name__ == "__main__":
    wav,start,end,pic = resolveParam(['-wav','-start','-end','-pic'])
    if(cp([wav])):
        usage()
    mindist = int(0.2/0.008) # shortest second a gun fire in
    #s,fs,enc = wavread(wav)
    #print np.max(s),np.min(s),fs
    fs,s = wavfile.read(wav)
    # normalise 16-bit PCM samples into [-1, 1)
    s = s/(2.**15)
    #print np.max(s),np.min(s),fs
    #sys.exit()
    #s = resample(s,16000.0/fs,'sinc_best') # no this, sox in.wav -r 16000 out.wav first
    # or use ffmpeg -ar 16000 directly
    if(fs != 16000):
        error("wav sample rate is not 16k")
    if(len(s.shape)>1):#already mono?
        s = np.mean(s,axis=1)#into mono
    spectrum = stft(s,16000,0.016,0.008) # 0.010 hop is no good
    # NOTE(review): the astype() result below is discarded — this line is a
    # no-op; it was presumably meant to be spectrum = spectrum.astype(...)
    spectrum.astype('complex64')
    spectrum = np.absolute(spectrum)
    print spectrum.shape
    # convert second offsets into frame indexes (0.008 s hop)
    if(start != ""):
        start = float(start)
        startIndex = int(round(start/0.008))
    else:
        startIndex = 0
    if(end != ""):
        end = float(end)
        endIndex = int(round(end/0.008))
    else:
        endIndex = -1
    #plt.matshow(spectrum[startIndex:endIndex,:].T,origin="lower")
    #plt.colorbar()
    #plt.show()
    powerM = np.sum(spectrum[:,50:],axis=1) # empircally filtered out lower frequency power
    # leave the low freq and the high freq
    #powerM = np.sum(np.hstack((spectrum[:,0:0],spectrum[:,60:])),axis=1)
    print powerM.shape
    #plt.plot(powerM[startIndex:endIndex])
    #plt.show()
    f,axarr = plt.subplots(2,sharex=True)
    from countGunshot import countGunshot,countGunshot2
    indexes = countGunshot(powerM[startIndex:endIndex], thres=0.6, min_dist=mindist)
    #print indexes #[110 356 470 554 616 661 703 730]
    #indexes = countGunshot2(powerM[startIndex:endIndex])
    # find peaks for 1-D power array
    # useless
    #indexes = signal.find_peaks_cwt(
    #   powerM[startIndex:endIndex],
    #   np.arange(1,10)
    #)
    # top panel: power envelope with detected peaks; bottom: spectrogram
    axarr[0].scatter(indexes,powerM[startIndex:endIndex][indexes],marker='o',color="red")
    axarr[0].plot(powerM[startIndex:endIndex])
    axarr[1].matshow(spectrum[startIndex:endIndex,:].T,origin="lower")
    #plt.xlabel("Predicted %s gunshots"%(len(indexes)))
    #plt.ylabel("Sound Power Wave")
    plt.title("Predicted %s gunshots"%(len(indexes)))
    #plt.xlim(xmin=0,xmax=powerM[startIndex:endIndex].shape[0])
    plt.tick_params(labeltop="off",labelbottom="off",labelleft="off",axis="both")
    if(pic != ""):
        plt.savefig(pic,bbox_inches="tight")
    else:
        plt.show()
| StarcoderdataPython |
12808612 | from functools import partial
from typing import NamedTuple, List
from unittest.mock import patch
import pytest
from pkgdev.scripts import run
from snakeoil.contexts import chdir, os_environ
class Profile(NamedTuple):
    """Profile record used to create profiles in a repository."""
    path: str                   # profile path, e.g. 'default/linux/amd64'
    arch: str                   # arch/keyword the profile targets
    status: str = 'stable'      # profiles.desc status: stable/dev/exp
    deprecated: bool = False    # whether the profile is marked deprecated
    defaults: List[str] = None  # extra lines written to the profile
    eapi: str = '5'             # EAPI recorded for the profile
class TestPkgdevShowkwParseArgs:
    """Argument-parsing error cases for ``pkgdev showkw``."""

    def test_missing_target(self, capsys, tool):
        # no target argument and cwd is not a supported repo -> usage error
        with pytest.raises(SystemExit):
            tool.parse_args(['showkw'])
        captured = capsys.readouterr()
        assert captured.err.strip() == (
            'pkgdev showkw: error: missing target argument and not in a supported repo')

    def test_unknown_arches(self, capsys, tool, make_repo):
        # an arch outside the repo's arch list is rejected, listing choices
        repo = make_repo(arches=['amd64'])
        with pytest.raises(SystemExit):
            tool.parse_args(['showkw', '-a', 'unknown', '-r', repo.location])
        captured = capsys.readouterr()
        assert captured.err.strip() == (
            "pkgdev showkw: error: unknown arch: 'unknown' (choices: amd64)")
class TestPkgdevShowkw:
    """End-to-end tests of ``pkgdev showkw`` output against throwaway repos."""

    script = partial(run, 'pkgdev')

    def _create_repo(self, make_repo):
        """Make a repo with stable amd64/x86, dev ia64 and exp mips profiles."""
        repo = make_repo(arches=['amd64', 'ia64', 'mips', 'x86'])
        repo.create_profiles([
            Profile('default/linux/amd64', 'amd64'),
            Profile('default/linux/x86', 'x86'),
            Profile('default/linux/ia64', 'ia64', 'dev'),
            Profile('default/linux/mips', 'mips', 'exp'),
        ])
        return repo

    def _run_and_parse(self, capsys, *args):
        """Run showkw with the 'presto' table format and parse the table into
        {version: {column_name: cell_value}}."""
        with patch('sys.argv', ['pkgdev', 'showkw', "--format", "presto", *args]), \
                pytest.raises(SystemExit) as excinfo:
            self.script()
        assert excinfo.value.code == None
        out, err = capsys.readouterr()
        assert not err
        lines = out.split('\n')
        # line 1 holds the column headers; data rows start at line 3
        table_columns = [s.strip() for s in lines[1].split('|')][1:]
        return {
            ver: dict(zip(table_columns, values))
            for ver, *values in map(lambda s: map(str.strip, s.split('|')), lines[3:-1])
        }

    def test_match(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0')
        with patch('sys.argv', ['pkgdev', 'showkw', '-r', repo.location, 'foo/bar']), \
                pytest.raises(SystemExit) as excinfo:
            self.script()
        assert excinfo.value.code == None
        out, err = capsys.readouterr()
        assert not err
        assert out.split('\n')[0] == "keywords for foo/bar:"

    def test_match_short_name(self, capsys, make_repo):
        # a bare package name should resolve to the full category/package
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0')
        with patch('sys.argv', ['pkgdev', 'showkw', '-r', repo.location, 'bar']), \
                pytest.raises(SystemExit) as excinfo:
            self.script()
        assert excinfo.value.code == None
        out, err = capsys.readouterr()
        assert not err
        assert out.split('\n')[0] == "keywords for foo/bar:"

    def test_match_cwd_repo(self, capsys, make_repo):
        # the repo argument may be implied by running from inside the repo
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0')
        with patch('sys.argv', ['pkgdev', 'showkw', 'foo/bar']), \
                pytest.raises(SystemExit) as excinfo, \
                chdir(repo.location):
            self.script()
        assert excinfo.value.code == None
        out, err = capsys.readouterr()
        assert not err
        assert out.split('\n')[0] == "keywords for foo/bar:"

    def test_match_cwd_pkg(self, capsys, make_repo):
        # both repo and target may be implied by running from the pkg dir
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0')
        with patch('sys.argv', ['pkgdev', 'showkw']), \
                pytest.raises(SystemExit) as excinfo, \
                chdir(repo.location + '/foo/bar'):
            self.script()
        assert excinfo.value.code == None
        _, err = capsys.readouterr()
        assert not err

    def test_no_matches(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        with patch('sys.argv', ['pkgdev', 'showkw', '-r', repo.location, 'foo/bar']), \
                pytest.raises(SystemExit) as excinfo:
            self.script()
        assert excinfo.value.code == 1
        out, err = capsys.readouterr()
        assert not out
        assert err.strip() == "pkgdev showkw: no matches for 'foo/bar'"

    def test_match_stable(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('~amd64', '~ia64', '~mips', 'x86'))
        res = self._run_and_parse(capsys, '-r', repo.location, 'foo/bar', '--stable')
        assert set(res.keys()) == {'0'}
        assert {'amd64', 'ia64', 'mips', 'x86'} & res['0'].keys() == {'amd64', 'x86'}

    def test_match_unstable(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('~amd64', '~ia64', '~mips', 'x86'))
        res = self._run_and_parse(capsys, '-r', repo.location, 'foo/bar', '--unstable')
        assert set(res.keys()) == {'0'}
        assert {'amd64', 'ia64', 'mips', 'x86'} <= res['0'].keys()

    def test_match_specific_arch(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('~amd64', '~ia64', '~mips', 'x86'))
        res = self._run_and_parse(capsys, '-r', repo.location, 'foo/bar', '--arch', 'amd64')
        assert set(res.keys()) == {'0'}
        assert {'amd64', 'ia64', 'mips', 'x86'} & res['0'].keys() == {'amd64'}

    def test_match_specific_multiple_arch(self, capsys, make_repo):
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('~amd64', '~ia64', '~mips', 'x86'))
        res = self._run_and_parse(capsys, '-r', repo.location, 'foo/bar', '--arch', 'amd64,mips')
        assert set(res.keys()) == {'0'}
        assert {'amd64', 'ia64', 'mips', 'x86'} & res['0'].keys() == {'amd64', 'mips'}

    def test_correct_keywords_status(self, capsys, make_repo):
        # cell markers: '+' stable, '~' unstable, '-' masked, '*' -*, 'o' absent
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('amd64', '~ia64', '~mips', 'x86'))
        repo.create_ebuild('foo/bar-1', keywords=('~amd64', '-mips', '~x86'))
        repo.create_ebuild('foo/bar-2', keywords=('-*', 'amd64', '-x86'), eapi=8, slot=2)
        res = self._run_and_parse(capsys, '-r', repo.location, 'foo/bar')
        assert set(res.keys()) == {'0', '1', '2'}
        assert dict(amd64='+', ia64='~', mips='~', x86='+', slot='0').items() <= res['0'].items()
        assert dict(amd64='~', ia64='o', mips='-', x86='~', slot='0').items() <= res['1'].items()
        assert dict(amd64='+', ia64='*', mips='*', x86='-', slot='2', eapi='8').items() <= res['2'].items()

    @pytest.mark.parametrize(('arg', 'expected'),
        (
            ('--stable', {'amd64', 'x86'}),
            ('--unstable', {'amd64', 'ia64', 'mips', 'x86'}),
            ('--only-unstable', {'ia64', 'mips'}),
        )
    )
    def test_collapse(self, capsys, make_repo, arg, expected):
        # --collapse prints a single line listing the matching arches
        repo = self._create_repo(make_repo)
        repo.create_ebuild('foo/bar-0', keywords=('amd64', '~ia64', '~mips', '~x86'))
        repo.create_ebuild('foo/bar-1', keywords=('~amd64', '~ia64', '~mips', 'x86'))
        with patch('sys.argv', ['pkgdev', 'showkw', '-r', repo.location, 'foo/bar', "--collapse", arg]), \
                pytest.raises(SystemExit) as excinfo:
            self.script()
        out, err = capsys.readouterr()
        assert excinfo.value.code == None
        assert not err
        arches = set(out.split('\n')[0].split())
        assert arches == expected
8164813 | ##############################################################################
import pandas as pd
class Handler:
    """Handle missing data in a dataframe and remove uninformative
    columns/rows.

    All public entry points mutate the given ``dataframe`` in place; the
    private ``_impute_*`` helpers take a Series and return a filled copy
    (assigning the result back avoids pandas' deprecated inplace-fillna-
    on-a-column-slice pattern and its SettingWithCopyWarning).
    """

    @classmethod
    def _identify_imputation_method(cls, method):
        """Map 'mean'/'mode'/'median' to the matching helper; any other
        value (including None) falls back to forward fill."""
        dispatch = {
            'mean': cls._impute_mean_value,
            'median': cls._impute_median_value,
            'mode': cls._impute_mode_value,
        }
        return dispatch.get(method, cls._impute_previous_value)

    @staticmethod
    def _impute_mean_value(column):
        """Return *column* with missing entries replaced by its mean."""
        return column.fillna(value=column.mean())

    @staticmethod
    def _impute_median_value(column):
        """Return *column* with missing entries replaced by its median."""
        return column.fillna(value=column.median())

    @staticmethod
    def _impute_mode_value(column):
        """Return *column* with missing entries replaced by its mode.

        An all-NaN column has no mode; it is returned unchanged instead of
        raising (the naive ``mode()[0]`` lookup would fail).
        """
        modes = column.mode()
        if modes.empty:
            return column
        return column.fillna(value=modes.iloc[0])

    @staticmethod
    def _impute_previous_value(column):
        """Return *column* with missing entries forward-filled from the
        previous valid value (``ffill`` replaces the deprecated
        ``fillna(method='pad')``)."""
        return column.ffill()

    @classmethod
    def impute_missing_values(cls, dataframe, headers, method = None):
        """Impute data for the missing values in the specified columns with
        the given method ('mean', 'median', 'mode'; default: forward fill).
        Mutates *dataframe* in place.
        """
        _impute_function = cls._identify_imputation_method(method)
        for header in headers:
            dataframe[header] = _impute_function(dataframe[header])

    @staticmethod
    def remove_columns(dataframe, headers):
        """Removes unwanted columns in place based on the given list of
        headers.
        """
        dataframe.drop(headers, inplace=True, axis=1)

    @staticmethod
    def remove_rows(dataframe, headers, how):
        """Removes rows which have invalid values for all ('all') or any
        ('any') of the columns in the given header list, in place.
        """
        dataframe.dropna(how=how, subset=headers, inplace=True)
57910 | import pytest
from faker import Faker
from django.db.utils import IntegrityError
from django.urls import reverse
from .factory import AccountSubTypeFactory
from ..models import AccountSubType, Account
from ..choices import AccountType
fake = Faker()
class TestAccountSubType:
    """Model-level tests for AccountSubType: field metadata, NULL
    constraints, CRUD behaviour and helper properties."""

    def test_name_field(self, db):
        sub_type = AccountSubType()
        field = sub_type._meta.get_field('name')
        assert field.verbose_name == 'name'
        assert field.max_length == 64
        assert field.editable
        assert not field.blank
        assert not field.null
        assert not field.has_default()
        assert not field.hidden
        assert not field.unique
    def test_type_field(self, db):
        sub_type = AccountSubType()
        field = sub_type._meta.get_field('type')
        assert field.verbose_name == 'type'
        assert field.choices == AccountType.choices
        assert field.default == AccountType.Asset
        assert field.has_default()
        assert field.editable
        assert not field.blank
        assert not field.null
        assert not field.hidden
        assert not field.unique
    def test_order_field(self, db):
        sub_type = AccountSubType()
        field = sub_type._meta.get_field('order')
        assert field.verbose_name == 'order'
        assert field.default == 0
        assert field.has_default()
        assert field.editable
        assert field.blank
        assert field.null
        assert not field.hidden
        assert not field.unique
    def test_name_cannot_be_null(self, user):
        # NOTE(review): the captured 'error' is never inspected here or in
        # the other *_cannot_be_null tests.
        with pytest.raises(IntegrityError) as error:
            AccountSubType.objects.create(created_by=user, name=None, type=AccountType.Asset, order=0)
    def test_type_cannot_be_null(self, user):
        with pytest.raises(IntegrityError) as error:
            AccountSubType.objects.create(created_by=user, name=fake.name(), type=None, order=0)
    def test_order_can_be_null(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=None)
        sub_type.save()
        query = AccountSubType.objects.all()
        saved_obj = query.first()
        assert query.count() == 1
        assert saved_obj.name == sub_type.name
        assert saved_obj.type == sub_type.type
        assert not saved_obj.order
    def test_create(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
        sub_type.save()
        query = AccountSubType.objects.all()
        saved_obj = query.first()
        assert query.count() == 1
        assert saved_obj.name == sub_type.name
        assert saved_obj.type == sub_type.type
        assert saved_obj.order == sub_type.order
    def test_count(self, user):
        AccountSubTypeFactory(created_by=user)
        AccountSubTypeFactory(created_by=user)
        query = AccountSubType.objects.all()
        assert query.count() == 2
    def test_edit(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
        sub_type.save()
        new_name = 'new name'
        sub_type.name = new_name
        sub_type.order = 1
        sub_type.save()
        saved_obj = AccountSubType.objects.first()
        assert saved_obj.name == new_name
        assert saved_obj.order == 1
    def test_str(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
        assert str(sub_type) == f'{sub_type.name}#{sub_type.get_type_display()}#{sub_type.order}'
    def test_type_text(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
        assert sub_type.type_text == sub_type.get_type_display()
    def test_get_absolute_url(self, user):
        sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
        assert sub_type.get_absolute_url() == reverse('account:subtype:detail-update', args=[sub_type.pk])
class TestAccount:
    """Model tests for Account: field metadata, NOT NULL constraints, defaults
    and instance behaviour (__str__, type_text, CRUD, depth handling)."""

    def test_name_field(self, db):
        """'name' is a required, non-unique CharField(max_length=64)."""
        field = Account()._meta.get_field('name')
        assert field.__class__.__name__ == 'CharField'
        assert field.verbose_name == 'name'
        assert field.max_length == 64
        assert field.editable
        assert not field.blank
        assert not field.null
        assert not field.has_default()
        assert not field.hidden
        assert not field.unique

    def test_code_field(self, db):
        """'code' is a required, non-unique CharField(max_length=64)."""
        field = Account()._meta.get_field('code')
        assert field.__class__.__name__ == 'CharField'
        assert field.verbose_name == 'code'
        assert field.max_length == 64
        assert field.editable
        assert not field.blank
        assert not field.null
        assert not field.has_default()
        assert not field.hidden
        assert not field.unique

    def test_type_field(self, db):
        """'type' is an IntegerField with AccountType choices defaulting to Asset."""
        field = Account()._meta.get_field('type')
        assert field.__class__.__name__ == 'IntegerField'
        assert field.verbose_name == 'type'
        assert field.editable
        assert field.blank
        assert field.has_default()
        assert field.choices == AccountType.choices
        assert field.default == AccountType.Asset
        assert not field.null
        assert not field.hidden
        assert not field.unique

    def test_sub_type_field(self, db):
        """'sub_type' is a required ForeignKey with no default."""
        field = Account()._meta.get_field('sub_type')
        assert field.__class__.__name__ == 'ForeignKey'
        assert field.verbose_name == 'sub type'
        assert field.editable
        assert not field.blank
        assert not field.has_default()
        assert field.default.__name__ == 'NOT_PROVIDED'
        assert not field.null
        assert not field.hidden
        assert not field.unique

    def test_depth_field(self, db):
        """'depth' is an optional IntegerField defaulting to 0."""
        field = Account()._meta.get_field('depth')
        assert field.__class__.__name__ == 'IntegerField'
        assert field.verbose_name == 'depth'
        assert field.default == 0
        assert field.has_default()
        assert field.editable
        assert field.blank
        assert field.null
        assert not field.hidden
        assert not field.unique

    def test_entry_date_field(self):
        """'entry_date' is an optional DateField with a default value."""
        field = Account()._meta.get_field('entry_date')
        assert field.__class__.__name__ == 'DateField'
        assert field.name == 'entry_date'
        assert field.verbose_name == 'entry date'
        assert field.blank
        assert field.null
        assert field.default
        assert field.has_default()
        assert not field.unique

    def test_description_field(self, db):
        """'description' is an optional TextField with a falsy default."""
        field = Account()._meta.get_field('description')
        assert field.__class__.__name__ == 'TextField'
        assert field.verbose_name == 'description'
        assert not field.max_length
        assert field.editable
        assert field.blank
        assert field.null
        assert not field.default
        assert field.has_default()
        assert not field.hidden
        assert not field.unique

    def test_name_cannot_be_null(self, user, sub_type):
        """Database rejects a NULL name."""
        with pytest.raises(IntegrityError):
            Account.objects.create(name=None, code=fake.random_int(0, 100), type=AccountType.Asset, sub_type=sub_type, created_by=user)

    def test_code_cannot_be_null(self, user, sub_type):
        """Database rejects a NULL code."""
        with pytest.raises(IntegrityError):
            Account.objects.create(name=fake.name(), code=None, type=AccountType.Asset, sub_type=sub_type, created_by=user)

    def test_type_cannot_be_null(self, user, sub_type):
        """Database rejects a NULL type."""
        with pytest.raises(IntegrityError):
            Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), type=None, sub_type=sub_type, created_by=user)

    def test_sub_type_cannot_be_null(self, user):
        """Database rejects a NULL sub_type foreign key."""
        with pytest.raises(IntegrityError):
            Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), type=AccountType.Liability, sub_type=None, created_by=user)

    def test_type_default_asset(self, user, sub_type):
        """Omitting 'type' falls back to AccountType.Asset."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert account.type == AccountType.Asset

    def test_depth_default_0(self, user, sub_type):
        """Omitting 'depth' falls back to 0."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert account.depth == 0

    def test_str(self, user, sub_type):
        """__str__ renders as ``name:code`` for a root account."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert str(account) == f'{account.name}:{account.code}'

    def test_str_with_parent(self, user, sub_type):
        """__str__ appends ``#parent_name:parent_code`` for a child account."""
        parent = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        account = Account.objects.create(parent=parent, name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert str(account) == f'{account.name}:{account.code}#{parent.name}:{parent.code}'

    def test_type_text(self, user, sub_type):
        """type_text mirrors get_type_display()."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert account.type_text == account.get_type_display()

    def test_sub_type_text(self, user, sub_type):
        """sub_type_text exposes the related sub_type's name."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert account.sub_type_text == account.sub_type.name

    def test_create(self, user, sub_type):
        """A created account can be fetched back by primary key."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        instance = Account.objects.get(pk=account.pk)
        assert account.pk == instance.pk

    def test_count(self, user, sub_type):
        """Three created accounts are all persisted."""
        Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        count = Account.objects.all()
        assert count.count() == 3

    def test_edit(self, user, sub_type):
        """Saving a modified code round-trips through the database."""
        account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        account.code = str(1234)
        account.save()
        instance = Account.objects.get(pk=account.pk)
        assert instance.code == str(1234)

    def test_set_depth(self, user, sub_type):
        """A child account gets depth = parent depth + 1."""
        parent = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        account = Account.objects.create(parent=parent, name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
        assert parent.depth == 0
        assert account.depth == 1
| StarcoderdataPython |
6674121 | <filename>main.py
from typing import List
from fastapi import Depends, FastAPI, HTTPException
import time
import sys
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from starlette.requests import Request
from typing import TypeVar, Generic, Type, Any
from xml.etree.ElementTree import fromstring
import xml.etree.cElementTree as ET
from WXBizMsgCrypt import WXBizMsgCrypt
from config import sCorpID,sEncodingAESKey,sToken
import func
# Create the FastAPI application instance.
app = FastAPI()
# Allow cross-origin requests from any origin, with credentials and all
# methods/headers (callbacks come from WeChat Work servers).
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# WeChat Work message-crypto helper used to verify, decrypt and encrypt
# callback payloads; credentials come from config.py.
wxcpt=WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)
# 以下为接受XML格式数据部分
# Generic type variable bound to pydantic models; used by XmlBody below to
# parse an XML (or JSON) request body into an arbitrary model class.
T = TypeVar("T", bound=BaseModel)
class Item(BaseModel):
    # Encrypted WeChat Work callback envelope: the three fields of the outer
    # XML document (recipient corp id, agent id, base64 ciphertext).
    ToUserName: str
    AgentID: str
    Encrypt: str
class XmlBody(Generic[T]):
    """FastAPI dependency that parses the request body into a pydantic model.

    Accepts an XML body (the normal WeChat Work case) or a JSON body, so the
    endpoint remains usable from ordinary HTTP clients.
    """

    def __init__(self, model_class: Type[T]):
        self.model_class = model_class

    async def __call__(self, request: Request) -> T:
        # Sniff the content type; default to "" so a missing header falls
        # through to the JSON branch instead of raising.
        if '/xml' in request.headers.get("Content-Type", ""):
            body = await request.body()
            doc = fromstring(body)
            # Element.getchildren() was removed in Python 3.9; iterating the
            # element directly yields the same direct children.
            dict_data = {node.tag: node.text for node in doc}
        else:
            dict_data = await request.json()
        return self.model_class.parse_obj(dict_data)
# Template for re-assembling the received (encrypted) message envelope so it
# can be handed to WXBizMsgCrypt.DecryptMsg. (Identifier typo "Recived" kept
# for compatibility with existing references.)
Recived_Temp = """<xml>
<ToUserName><![CDATA[%(ToUserName)s]]></ToUserName>
<AgentID><![CDATA[%(AgentID)s]]></AgentID>
<Encrypt><![CDATA[%(Encrypt)s]]></Encrypt>
</xml>"""
# Template for the plaintext reply message that gets encrypted before being
# returned to WeChat Work.
Send_Temp = """<xml>
<ToUserName>%(ToUserName)s</ToUserName>
<FromUserName>%(FromUserName)s</FromUserName>
<CreateTime>%(timestamp)s</CreateTime>
<MsgType>text</MsgType>
<Content>%(content)s</Content>
</xml>"""
# Callback URL verification: WeChat Work sends a GET with a signed echostr;
# we must decrypt it and echo it back to prove ownership of the endpoint.
@app.get("/")
async def Verify(msg_signature: str, timestamp: str, nonce: str, echostr: str):
    """Verify the callback URL by decrypting and returning the echo string."""
    sVerifyMsgSig = msg_signature
    sVerifyTimeStamp = timestamp
    sVerifyNonce = nonce
    sVerifyEchoStr = echostr
    ret, sReplyEchoStr = wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp,sVerifyNonce,sVerifyEchoStr)
    if( ret!=0 ):
        # NOTE(review): sys.exit inside an async handler raises SystemExit in
        # the worker rather than returning an HTTP error — consider
        # HTTPException(500) instead; confirm before changing.
        print("ERR: DecryptMsg ret: " + str(ret))
        sys.exit(1)
    # NOTE(review): assumes the decrypted echostr is numeric — TODO confirm;
    # WeChat expects the echo string returned verbatim.
    return int(sReplyEchoStr)
# Message receiving endpoint: decrypt the incoming message, dispatch it to
# func.handle_msg, then encrypt and return the reply.
@app.post("/")
async def main(msg_signature: str, timestamp: str, nonce: str, q: str = None, item: Item = Depends(XmlBody(Item))):
    """Handle an encrypted WeChat Work message and return an encrypted reply."""
    # Rebuild the encrypted envelope XML expected by DecryptMsg.
    Recived_dict = {
        'ToUserName': item.ToUserName,
        'AgentID': item.AgentID,
        'Encrypt': item.Encrypt,
    }
    ReqData = Recived_Temp % Recived_dict
    ret,sMsg=wxcpt.DecryptMsg(sPostData=ReqData, sMsgSignature=msg_signature, sTimeStamp=timestamp, sNonce=nonce)
    if( ret!=0 ):
        # NOTE(review): sys.exit kills the worker instead of returning an
        # HTTP error — confirm whether this is intentional.
        print("ERR: DecryptMsg ret: " + str(ret))
        sys.exit(1)
    # Pull the plaintext fields out of the decrypted XML.
    xml_tree = ET.fromstring(sMsg)
    content_recived = xml_tree.find("Content").text
    FromUserName = xml_tree.find("FromUserName").text
    ToUserName = xml_tree.find("ToUserName").text
    # Message handling: delegate the business logic to func.handle_msg.
    content_send = func.handle_msg(to_user_id = FromUserName, recived_msg = content_recived)
    # Build the plaintext reply, then encrypt it for WeChat Work.
    Send_dict = {
        "ToUserName": ToUserName,
        "FromUserName": FromUserName,
        "timestamp": timestamp,
        "content": content_send,
    }
    sRespData = Send_Temp % Send_dict
    ret,sEncryptMsg=wxcpt.EncryptMsg(sReplyMsg = sRespData, sNonce = nonce, timestamp = timestamp)
    return sEncryptMsg
# Start the service with uvicorn when run directly (listens on all
# interfaces, port 8181).
if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8181)
5059222 | <filename>diary/models.py
from django.db import models
# Create your models here.
class React(models.Model):
    """Diary entry model storing a name and a free-text experience."""
    # Short display name of the entry's author/subject.
    name=models.CharField(max_length=50)
    # Free-text body, capped at 500 characters.
    experience=models.CharField(max_length=500)
| StarcoderdataPython |
4939477 | <reponame>Tellvinch/illusion
import urllib.request, json
from .models import Quote
def get_quote():
    """Fetch one random quote from the Storm Consultancy API.

    Returns a Quote(author, quote) object, or None when the API responds
    with an empty payload.
    """
    with urllib.request.urlopen('http://quotes.stormconsultancy.co.uk/random.json') as response:
        payload = json.loads(response.read())
        if not payload:
            return None
        return Quote(payload.get('author'), payload.get('quote'))
#Getting the base url
# base_url = None
# def configure_request(app):
# global base_url #,api_key
# base_url = app.config['QUOTES_API_BASE_URL']
# #api_key = app.config['BLOG_API_KEY']
# def get_quote():
# with urllib.request.urlopen('http://quotes.stormconsultancy.co.uk/random.json') as url:
# quote_details_data = url.read()
# quote_details_response = json.loads(quote_details_data)
# quote_object = None
# if quote_details_response:
# author = quote_details_response.get('author')
# quote = quote_details_response.get('quote')
# quote_object = Quote(author, quote)
# return quote_object
# def process_results(quote_list):
# '''
# function that processes results of the quote and returns a list of quotes
# '''
# quote_results = []
# for quote_item in quote_list:
# id = quote_item.get('id')
# quote = quote_item.get('quote')
# author = quote_item.get('author')
# quote_object = Quote(id,quote,author)
# quote_results.append(quote_object)
# return quote_results | StarcoderdataPython |
6446573 | <filename>pcc/AST/functions/function_definition.py
from pcc.AST.ast_node import push_variable_on_stack
from pcc.AST.compiled_object import CompiledObjectType, CompiledObject
from pcc.AST.functions.function_argument import FunctionArgument
from pcc.AST.functions.function_declaration import FunctionDeclaration
from pcc.AST.statement import Statement
from pcc.AST.variables.variable_declaration import VariableDeclaration
from pcc.compiler.assembler import ProcessorRegister
class FunctionDefinition(Statement):
    """AST node representing a C function definition.

    statement_sequence[0] is the FunctionDeclaration; the remaining entries
    are the body statements. compile() emits the prologue, stack setup,
    argument spill and the compiled body.
    """

    def __init__(self, depth):
        super(FunctionDefinition, self).__init__(depth)
        # Stack variables discovered while compiling; filled in compile().
        self.stack_variable_list = []

    def __str__(self):
        string = self._depth * ' ' + 'FuncDef: \n'
        for arg in self.statement_sequence:
            string += str(arg)
        return string

    def get_function_definition_node(self):
        """Get the function definition if found.

        Returns:
            FunctionDefinition: self
        """
        return self

    def add_stack_variable(self, current_list):
        """Add all stack variables of the body to the list.

        Args:
            current_list (list[StackVariable]): the current list
        """
        for statement in self.statement_sequence:
            statement.add_stack_variable(current_list)

    def get_return_type(self):
        """Get the return type.

        Returns:
            str: the return type
        """
        return self.statement_sequence[0].return_type.name

    def get_stack_variable(self, variable_name):
        """Get the stack variable by name.

        Args:
            variable_name (str): the name of the variable

        Returns:
            StackVariable: the stack variable if found, else None
        """
        stack_variable = None
        for var in self.stack_variable_list:
            if var.name == variable_name:
                stack_variable = var
        return stack_variable

    def _copy_argmuments_to_stack(self, assembler):
        """Copy all the arguments of this function to their stack variables.

        (Name typo "argmuments" kept: renaming a method is an interface
        change for any code that references it.)

        Args:
            assembler (Assembler): the assembler to use

        Returns:
            bytearray: the compiled machine code
        """
        compiled_code = bytearray()
        function_definition: FunctionDeclaration = self.statement_sequence[0]
        # System V AMD64 order of integer argument registers.
        available_integer_registers = [
            ProcessorRegister.integer_argument_0,
            ProcessorRegister.integer_argument_1,
            ProcessorRegister.integer_argument_2,
            ProcessorRegister.integer_argument_3,
            ProcessorRegister.integer_argument_4,
            ProcessorRegister.integer_argument_5]
        for argument in function_definition.argument_list:
            # Reset per iteration: previously a stale stack_var from an
            # earlier argument could leak through when neither isinstance
            # branch matched (and the very first mismatch raised NameError).
            stack_var = None
            if isinstance(argument, FunctionArgument):
                stack_var = self.get_stack_variable(argument.identifier)
            elif isinstance(argument, VariableDeclaration):
                stack_var = self.get_stack_variable(argument.name)
            # A lone "void" parameter declares an empty argument list.
            if not stack_var and argument.identifier == 'void':
                continue
            stack_offset = stack_var.stack_offset
            if stack_var.type_name not in ['float', 'double']:
                register = available_integer_registers.pop(0)
                compiled_code += \
                    assembler.copy_reg_to_stack(register=register,
                                                stack_offset=stack_offset)
        return compiled_code

    def compile(self, assembler):
        """Compile this statement.

        Args:
            assembler (Assembler): the assembler to use

        Returns:
            CompiledObject: the compiled version of this statement
        """
        value = bytearray()

        # Prologue: save the frame pointer on the stack.
        ret = assembler.push_to_stack(ProcessorRegister.base_pointer)
        value.extend(ret)

        # Set the stack pointer as the new base pointer.
        dest = ProcessorRegister.base_pointer
        src = ProcessorRegister.frame_pointer
        ret = assembler.copy_from_reg_to_reg(destination=dest,
                                             source=src)
        value.extend(ret)

        current_list = []
        self.add_stack_variable(current_list)

        # First the frame pointer has been saved to stack.
        stack_offset = 0
        for stack_var in current_list:
            stack_var.stack_start = stack_offset
            value_array = stack_var.initializer_byte_array
            value, stack_offset = push_variable_on_stack(assembler,
                                                         stack_offset,
                                                         value,
                                                         value_array)
            stack_var.stack_offset = stack_offset
        self.stack_variable_list = current_list

        reg = ProcessorRegister.counter
        # Align the stack to a multiple of 16: stack_offset is a negative
        # number rounded to a multiple of 16, e.g. stack_offset=12 ->
        # aligned_stack_size=16.
        aligned_stack_size = 16*((-stack_offset)//16 + 1)
        value += assembler.copy_value_to_reg(imm_value=aligned_stack_size,
                                             destination=reg)
        value += assembler.sub(source=reg,
                               destination=ProcessorRegister.frame_pointer)

        # Add a nop.
        ret = assembler.nop()
        value.extend(ret)

        # Spill the register-passed arguments into their stack slots.
        value += self._copy_argmuments_to_stack(assembler)

        relocation_objects = []
        for statement in self.statement_sequence:
            compiled_object = statement.compile(assembler)
            if compiled_object is None:
                continue
            reloc_objects = compiled_object.relocation_objects
            for relocation_object in reloc_objects:
                # Relocations are relative to the statement; rebase them to
                # this function's code offset.
                additional_offset = len(value)
                relocation_object.offset += additional_offset
                relocation_objects.append(relocation_object)
            value += compiled_object.value

        size = len(value)
        compiled_object = CompiledObject(self.statement_sequence[0].name, size,
                                         value, CompiledObjectType.code,
                                         relocation_objects)
        return compiled_object
| StarcoderdataPython |
4842347 | import sys
from nameko.cli.main import main
# Invoke the nameko CLI entry point (this file mirrors `python -m nameko`).
main()
6622874 | from django.db import models
class Coordinate(models.Model):
    """Geocoded address record with creation timestamp.

    NOTE(review): field names are PascalCase rather than the conventional
    snake_case; renaming would require a migration and break existing
    queries, so they are documented as-is.
    """
    # External identifier for this record (optional).
    Unique_ID = models.CharField(max_length=128,null=True, blank=True)
    # Free-form address string that was geocoded (optional).
    Address = models.CharField(max_length=500,null=True, blank=True)
    # Latitude/longitude stored as strings (optional); presumably decimal
    # degrees — TODO confirm against the geocoding caller.
    Latitude = models.CharField(max_length=32,null=True, blank=True)
    Longitude = models.CharField(max_length=32,null=True, blank=True)
    # Set automatically when the row is first created.
    Creation = models.DateTimeField(auto_now_add=True, blank=True)
| StarcoderdataPython |
79430 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import datetime
import logging
import os
from time import sleep
import boto3
from retrying import RetryError, retry
from time_utils import seconds
METRIC_WIDGET_TEMPLATE = """
{{
"metrics": [
[ "ParallelCluster/benchmarking/{cluster_name}", "ComputeNodesCount", {{ "stat": "Maximum", "label": \
"ComputeNodesCount Max" }} ],
[ "...", {{ "stat": "Minimum", "label": "ComputeNodesCount Min" }} ],
[ "AWS/AutoScaling", "GroupDesiredCapacity", "AutoScalingGroupName", "{asg_name}", {{ "stat": "Maximum", \
"label": "GroupDesiredCapacity" }} ],
[ ".", "GroupInServiceInstances", ".", ".", {{ "stat": "Maximum", "label": "GroupInServiceInstances" }} ]
],
"view": "timeSeries",
"stacked": false,
"stat": "Maximum",
"period": 1,
"title": "{title}",
"width": 1400,
"height": 700,
"start": "{graph_start_time}",
"end": "{graph_end_time}",
"annotations": {{
"horizontal": [
{{
"label": "Scaling Target",
"value": {scaling_target}
}}
],
"vertical": [
{{
"label": "Start Time",
"value": "{start_time}"
}},
{{
"label": "End Time",
"value": "{end_time}"
}}
]
}},
"yAxis": {{
"left": {{
"showUnits": false,
"label": "Count"
}},
"right": {{
"showUnits": true
}}
}}
}}"""
def publish_compute_nodes_metric(scheduler_commands, max_monitoring_time, region, cluster_name):
    """Poll the scheduler's compute-node count, publish it to CloudWatch, and
    record the time series of transitions.

    Args:
        scheduler_commands: object exposing compute_nodes_count()
        max_monitoring_time: max polling duration (ms, per retrying's stop_max_delay)
        region: AWS region for the CloudWatch client
        cluster_name: used to namespace the published metric

    Returns:
        tuple: (compute_nodes_time_series, timestamps, end_time)
    """
    logging.info("Monitoring scheduler status and publishing metrics")
    cw_client = boto3.client("cloudwatch", region_name=region)
    # Seeded with 0 so the retry predicate below can always look at [-1].
    compute_nodes_time_series = [0]
    timestamps = [datetime.datetime.utcnow()]

    # Keep polling every 20s until the node count returns to 0 after having
    # changed at least once, or until max_monitoring_time elapses.
    @retry(
        retry_on_result=lambda _: len(compute_nodes_time_series) == 1 or compute_nodes_time_series[-1] != 0,
        wait_fixed=seconds(20),
        stop_max_delay=max_monitoring_time,
    )
    def _watch_compute_nodes_allocation():
        try:
            compute_nodes = scheduler_commands.compute_nodes_count()
            logging.info("Publishing metric: count={0}".format(compute_nodes))
            cw_client.put_metric_data(
                Namespace="ParallelCluster/benchmarking/{cluster_name}".format(cluster_name=cluster_name),
                MetricData=[{"MetricName": "ComputeNodesCount", "Value": compute_nodes, "Unit": "Count"}],
            )
            # add values only if there is a transition.
            if compute_nodes_time_series[-1] != compute_nodes:
                compute_nodes_time_series.append(compute_nodes)
                timestamps.append(datetime.datetime.utcnow())
        except Exception as e:
            # Log and re-raise so the retry decorator sees the failure.
            logging.warning("Failed while watching nodes allocation with exception: %s", e)
            raise

    try:
        _watch_compute_nodes_allocation()
    except RetryError:
        # ignoring this error in order to perform assertions on the collected data.
        pass
    end_time = datetime.datetime.utcnow()
    logging.info(
        "Monitoring completed: compute_nodes_time_series [ %s ], timestamps [ %s ]",
        " ".join(map(str, compute_nodes_time_series)),
        " ".join(map(str, timestamps)),
    )
    logging.info("Sleeping for 3 minutes to wait for the metrics to propagate...")
    sleep(180)
    return compute_nodes_time_series, timestamps, end_time
def enable_asg_metrics(region, cluster):
    """Enable 1-minute group metrics collection on the cluster's ASG."""
    logging.info("Enabling ASG metrics for %s", cluster.asg)
    autoscaling_client = boto3.client("autoscaling", region_name=region)
    monitored_metrics = [
        "GroupDesiredCapacity",
        "GroupInServiceInstances",
        "GroupTerminatingInstances",
    ]
    autoscaling_client.enable_metrics_collection(
        AutoScalingGroupName=cluster.asg,
        Metrics=monitored_metrics,
        Granularity="1Minute",
    )
def _publish_metric(region, instance, os, scheduler, state, count):
    """Publish a ComputeNodesCount datapoint, dimensioned by node state,
    under a namespace keyed on region/instance/os/scheduler."""
    cloudwatch = boto3.client("cloudwatch", region_name=region)
    logging.info("Publishing metric: state={0} count={1}".format(state, count))
    namespace = "parallelcluster/benchmarking/test_scaling_speed/{region}/{instance}/{os}/{scheduler}".format(
        region=region, instance=instance, os=os, scheduler=scheduler
    )
    datapoint = {
        "MetricName": "ComputeNodesCount",
        "Dimensions": [{"Name": "state", "Value": state}],
        "Value": count,
        "Unit": "Count",
    }
    cloudwatch.put_metric_data(Namespace=namespace, MetricData=[datapoint])
def produce_benchmark_metrics_report(
    benchmark_params, region, cluster_name, asg_name, start_time, end_time, scaling_target, request
):
    """Render a CloudWatch metric-widget PNG for the benchmark run and save it
    under the pytest output directory.

    Args:
        benchmark_params (dict): shown in the graph title as key=value pairs
        region: AWS region for the CloudWatch client
        cluster_name / asg_name: identify the metrics to plot
        start_time / end_time (str): run boundaries, parseable by _to_datetime
        scaling_target (int): drawn as a horizontal annotation
        request: pytest request fixture, used to locate the output dir
    """
    title = ", ".join("{0}={1}".format(key, val) for (key, val) in benchmark_params.items())
    # Pad the graph window by 2 minutes on each side so the start/end
    # annotations are not drawn on the plot edge.
    graph_start_time = _to_datetime(start_time) - datetime.timedelta(minutes=2)
    graph_end_time = _to_datetime(end_time) + datetime.timedelta(minutes=2)
    widget_metric = METRIC_WIDGET_TEMPLATE.format(
        cluster_name=cluster_name,
        asg_name=asg_name,
        start_time=start_time,
        end_time=end_time,
        title=title,
        graph_start_time=graph_start_time,
        graph_end_time=graph_end_time,
        scaling_target=scaling_target,
    )
    logging.info(widget_metric)
    cw_client = boto3.client("cloudwatch", region_name=region)
    response = cw_client.get_metric_widget_image(MetricWidget=widget_metric)
    _write_results_to_outdir(request, response["MetricWidgetImage"])
def _to_datetime(timestamp):
return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z")
def _write_results_to_outdir(request, image_bytes):
out_dir = request.config.getoption("output_dir")
os.makedirs("{out_dir}/benchmarks".format(out_dir=out_dir), exist_ok=True)
graph_dst = "{out_dir}/benchmarks/{test_name}.png".format(
out_dir=out_dir, test_name=request.node.nodeid.replace("::", "-")
)
with open(graph_dst, "wb") as image:
image.write(image_bytes)
| StarcoderdataPython |
248633 | """
Python 3 implementation of Qiagen Clinical Insight's API.
Installation:
- Pipenv:
pipenv install qci
- Virtualenv:
pip install qci
- Pip:
pip install qci
Usage:
- Refer to the examples/ directory for examples
Notes:
- Python 2 compatible
TODO:
- multi-threading
- check response codes
- exception catching
- DataPackage Class
"""
import requests
import os
import xmltodict
import tempfile
import sys
from datetime import datetime
from urllib.parse import urljoin
from multiprocessing.pool import ThreadPool
from qci.classes import DataPackage
BASE_URL = 'https://api.ingenuity.com/'
def get_access_token(client_id, client_secret):
    """Exchange a QCI API key id/secret pair for an OAuth access token.

    :param client_id: QCI API key ID, can be found in https://apps.ingenuity.com/qcibridge/apiexplorer/
    :param client_secret: QCI API key secret, can be found in https://apps.ingenuity.com/qcibridge/apiexplorer/
    :return: access token str()
    """
    api_url = urljoin(BASE_URL, '/v1/oauth/access_token')
    credentials = {
        'grant_type': 'client_credentials',
        'client_id': client_id,
        'client_secret': client_secret,
    }
    response = requests.get(api_url, params=credentials)
    return response.json()['access_token']
def validate_datapackage(datapackage):
    """Raise ValueError unless *datapackage* is a qci.classes.DataPackage."""
    if isinstance(datapackage, DataPackage):
        return
    raise ValueError('DataPackage failed validation (not a DataPackage): {}'.format(datapackage))
def upload_datapackage(datapackage):
    """Upload a single DataPackage to QCI.

    :param datapackage: qci.classes.DataPackage object
    :return: dict parsed from the QCI JSON response (see API docs for shape)
    """
    api_url = urljoin(BASE_URL, '/v1/datapackages')
    # Validate the datapackage
    validate_datapackage(datapackage)
    # Securely generate the datapackage XML and write it to a file.
    # Fix: tempfile.mkstemp returns (os-level fd INT, path) — the original
    # called .write()/.read() on the int. Wrap the fd with os.fdopen, and
    # seek back to the start before reading, otherwise the read is empty.
    fd, datapackage_file_path = tempfile.mkstemp(prefix='QCI_DP_', suffix='.zip')
    payload = datapackage.to_xml()
    # to_xml() may return str or bytes; pick a matching file mode.
    mode = 'w+b' if isinstance(payload, bytes) else 'w+'
    with os.fdopen(fd, mode) as datapackage_file:
        datapackage_file.write(payload)
        datapackage_file.seek(0)
        # POST the datapackage to QCI
        headers = {
            'Authorization': 'Bearer {}'.format(datapackage.access_token)
        }
        files = {'file': datapackage_file.read()}
        resp = requests.post(api_url, headers=headers, files=files)
    """Example Response:
    {
     "method": "partner integration",
     "creator": "<EMAIL>",
     "users": ["<EMAIL>"],
     "title": "DM-121212 Cancer Hotspot Panel",
     "analysis-name": "DM-121212",
     "status": "PREPROCESSING",
     "stage": "Validating",
     "results-url": "https://api.ingenuity.com/datastream/analysisStatus.jsp?packageId=DP_727658804867835145738",
     "status-url": "https://api.ingenuity.com/v1/datapackages/DP_727658804867835145738",
     "pipeline-name": "QCI Somatic Cancer Pipeline",
     "percentage-complete": 20
    }
    """
    return resp.json()
def upload_datapackages(datapackages, debug=True):
    """Upload several DataPackages concurrently via a thread pool.

    :param datapackages: list( DataPackage ), example XML: https://developers.ingenuity.com/doc/clinical/Example__Somatic_Cancer_Diagnostic_Test.jsp#Example%3A_Somatic_Cancer_Metadata_Input_XML_File
    :param debug: set to True to receive debugging messages (tracebacks)
    """
    if not debug:
        # Only show tracebacks when debugging (process-wide setting).
        sys.tracebacklimit = 0
    pool = ThreadPool()
    pool.map(upload_datapackage, datapackages)
    pool.close()
    pool.join()
def check_submission_status(access_token, qci_id):
    """Fetch the processing status of a submitted datapackage.

    :param access_token: access token from get_access_token()
    :param qci_id: Either the datapackage ID or the accession-id of the sample
    :return: dict() containing the information on a submission
    """
    # Fix: the original passed access_token as a stray second .format()
    # argument with no matching placeholder; it was silently ignored.
    api_url = urljoin(BASE_URL, '/v1/datapackages/{}'.format(qci_id))
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    resp = requests.get(api_url, headers=headers)
    """Example Response:
    {
     "method":"partner integration",
     "creator":" <EMAIL> ",
     "users":[" <EMAIL> "],
     "title":"DM-121212 Cancer Hotspot Panel",
     "analysis-name":"DM121212",
     "status":"DONE",
     "stage":"Pipeline successfully completed",
     "results-url":"https://api.ingenuity.com/datastream/analysisStatus.jsp?packageId=DP_727658804867835145738",
     "status-url":"https://api.ingenuity.com/v1/datapackages/DP_727658804867835145738",
     "pipeline-name":"QCI Somatic Cancer Pipeline",
     "percentage-complete":100,"results-id":"491081",
     "results-redirect-url":"https://variants.ingenuity.com/vcs/?a=491081",
     "export-url":"https://api.ingenuity.com/v1/export/DP_727658804867835145738"
    }
    """
    return resp.json()
def get_report_pdf(access_token, qci_id, output_pdf_filename=''):
    """Download the clinical report PDF for a submission.

    :param access_token: access token from get_access_token()
    :param qci_id: Either the datapackage ID or the accession-id of the sample
    :param output_pdf_filename: output name of the report .pdf, defaults to accession_id_date.pdf
    :return: output .pdf file path
    """
    # qci_id may be either the datapackage ID or the accession-id.
    api_url = urljoin(
        BASE_URL,
        '/v1/export/{}?view={}&access_token={}'.format(qci_id, 'pdf', access_token),
    )
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    response = requests.get(api_url, headers=headers)
    # Default to "<qci_id>_<today>.pdf" when no name was given.
    if not output_pdf_filename:
        output_pdf_filename = '{}_{}.pdf'.format(qci_id, datetime.now().strftime('%Y-%m-%d'))
    # Write the binary response body to disk.
    with open(output_pdf_filename, 'wb') as report_pdf:
        report_pdf.write(response.content)
    return os.path.abspath(output_pdf_filename)
def get_test_result_xml(access_token, qci_id):
    """Download the report XML for a submission and parse it into a dict.

    :param access_token: access token from get_access_token()
    :param qci_id: Either the datapackage ID or the accession-id of the sample
    :return: dict() containing the result values for the test

    The response XML is a <report> document with accession/patient/specimen
    metadata, an <interpretation>, and one or more <variant> elements
    (chromosome, position, reference, alternate, genotype, assessment,
    phenotype, allelefraction, gene, ...).
    """
    # qci_id may be either the datapackage ID or the accession-id.
    api_url = urljoin(
        BASE_URL,
        '/v1/export/{}?view={}&access_token={}'.format(qci_id, 'reportXml', access_token),
    )
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    response = requests.get(api_url, headers=headers)
    return xmltodict.parse(response.content)
def list_tests(access_token, state='', start_date='', end_date='', sort_by=''):
    """List tests matching the given filters.

    :param access_token: access token from get_access_token()
    :param state: state of the tests to list, choices: ('pending', 'in_review', 'needs_review', 'final')
    :param start_date: start date, inclusive, in yyyy-mm-dd format
    :param end_date: end date, inclusive, in yyyy-mm-dd format
    :param sort_by: how to sort the results, choices: ('receivedDateAsc', 'receivedDateDesc')
    :return: list( dict() ) of matching tests, each with dataPackageID,
        accessionID, applicationUrl, exportUrl, state and receivedDate
    """
    # Assemble the query string; 'state' is always present (possibly empty),
    # the optional filters are appended only when supplied.
    query_parts = ['state={}'.format(state)]
    if start_date:
        query_parts.append('startReceivedDate=' + start_date)
    if end_date:
        query_parts.append('endReceivedDate=' + end_date)
    if sort_by:
        query_parts.append('sort=' + sort_by)
    api_url = urljoin(BASE_URL, '/v1/clinical?' + '&'.join(query_parts))
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    response = requests.get(api_url, headers=headers)
    return response.json()
def share_test(access_token, qci_id, user_dict_list):
    """
    :param access_token: access token from get_access_token()
    :param qci_id: Either the datapackage ID or the accession-id of the sample
    :param user_dict_list: list( dict() ) of users to share a test with,
        example: [ {'email': '<EMAIL>'} ]
        docs: https://developers.ingenuity.com/doc/clinical/API_Endpoint_Reference_and_Examples.jsp#Share_Test_with_Others_API
    :return: HTTP response content
    """
    api_url = urljoin(BASE_URL, '/v1/datapackages/{}/users'.format(qci_id))  # either datapackage ID or accession-id
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    # NOTE(review): `data=` form-encodes the payload; a LIST of dicts does
    # not form-encode meaningfully — this endpoint likely expects a JSON
    # body (`json=user_dict_list`). Confirm against the QCI API docs above
    # before changing.
    resp = requests.post(api_url, headers=headers, data=user_dict_list)
    return resp.content
def get_test_product_profiles(access_token):
    """
    Retrieve the test product profiles visible to this API key.

    :param access_token: access token from get_access_token()
    :return: parsed JSON response (list/dict of test product profiles)
    """
    api_url = urljoin(BASE_URL, '/v1/testProductProfiles')
    headers = {
        'Authorization': 'Bearer {}'.format(access_token)
    }
    resp = requests.get(api_url, headers=headers)
    return resp.json()
if __name__ == '__main__':
    # TODO: move these to examples/
    import json

    # Fix: json.load (not loads) reads from a file object; also close the
    # handle deterministically with a context manager.
    with open('qci_credentials.json', 'r') as cred_file:
        qci_creds = json.load(cred_file)
    example_accession_id = '1807190065-COtA2477'
    auth_token = get_access_token(client_id=qci_creds['client_id'], client_secret=qci_creds['client_secret'])
    print('Got access token.')
    print(list_tests(auth_token))
    # You would normally pull data from your database here
    example_data_package = DataPackage(
        access_token=auth_token,
        primary_id='COtGx1234'
    )
    upload_datapackage(example_data_package)
| StarcoderdataPython |
5122172 |
import sys
from PySide2.QtWidgets import *
from PySide2 import QtGui
from PySide2.QtCore import Qt
from layouts.welcome import Welcome
from layouts.data_select import DataSelect
from layouts.bulk_analysis import BulkAnalysis
from layouts.imaging import Imaging
from layouts.show_data import ShowData
from layouts.error_log import ErrorLog
from style import IlapsStyle
class MainWindow(QMainWindow):
    def __init__(self, *args, **kwargs):
        """Set up window geometry, title and icon, build the UI, apply styling."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setGeometry(50, 50, 1500, 1000)
        self.setWindowTitle("Ilaps")
        self.setWindowIcon(QtGui.QIcon('./imgs/ilaps.png'))
        # Builds all pages, the stacked widget and the toolbar.
        self.init_gui()
        self.setStyleSheet(IlapsStyle)
def init_gui(self):
self.Data = None
self.Iolite = None
self.logger = None
self.mainWidget = QWidget()
self.setCentralWidget(self.mainWidget)
self.mainLayout = QVBoxLayout(self.mainWidget)
# initialize all pages
self.welcome = Welcome(self)
self.data_select = DataSelect(self)
self.bulk_analysis = BulkAnalysis(self)
self.imaging = Imaging(self)
self.show_data = ShowData(self)
self.error_log = ErrorLog(self)
# create main stack of pages
self.Stack = QStackedWidget(self)
self.Stack.addWidget(self.welcome.get_page())
self.Stack.addWidget(self.data_select.get_page())
self.Stack.addWidget(self.bulk_analysis.get_page())
self.Stack.addWidget(self.imaging.get_page())
self.Stack.addWidget(self.show_data.get_page())
self.Stack.addWidget(self.error_log.get_page())
self.Stack.setCurrentIndex(0)
self.mainLayout.addWidget(self.Stack)
# toolbar
close = QAction(QtGui.QIcon('./imgs/quit.png'), 'Exit', self)
close.setShortcut('Ctrl+Q')
close.triggered.connect(self.close_application)
home = QAction(QtGui.QIcon('./imgs/home.png'), 'Home', self)
home.setShortcut('Ctrl+H')
home.triggered.connect(lambda: self.change_layout(0))
data = QAction(QtGui.QIcon('./imgs/graph.png'), 'Data analysis', self)
data.setShortcut('Ctrl+D')
data.triggered.connect(lambda: self.change_layout(1))
bulk = QAction(QtGui.QIcon('./imgs/analysis.png'),
'Bulk analysis', self)
bulk.setShortcut('Ctrl+A')
bulk.triggered.connect(lambda: self.change_layout(2))
imaging = QAction(QtGui.QIcon('./imgs/imaging.png'), 'Imaging', self)
imaging.setShortcut('Ctrl+I')
imaging.triggered.connect(lambda: self.change_layout(3))
table = QAction(QtGui.QIcon('./imgs/table.png'), 'Show data', self)
table.setShortcut('Ctrl+T')
table.triggered.connect(lambda: self.change_layout(4))
error = QAction(QtGui.QIcon('./imgs/error.png'),
'Show error log', self)
error.setShortcut('Ctrl+E')
error.triggered.connect(lambda: self.change_layout(5))
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(home)
self.toolbar.addAction(data)
self.toolbar.addAction(bulk)
self.toolbar.addAction(imaging)
self.toolbar.addAction(table)
self.toolbar.addAction(error)
self.toolbar.addAction(close)
def change_layout(self, i):
self.Stack.setCurrentIndex(i)
def close_application(self):
choice = QMessageBox.question(self, 'Quit!',
"Are you sure you want to quit?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
sys.exit()
else:
pass
# Application bootstrap: build the Qt application, show the main window,
# and run the event loop until the user quits.
app = QApplication(sys.argv)
window = MainWindow()
window.show()
# BUG FIX: propagate Qt's exit status to the shell instead of discarding it.
sys.exit(app.exec_())
| StarcoderdataPython |
1820313 | <reponame>shalevy1/coco-annotator
import imantics as im
import json
from mongoengine import *
from .datasets import DatasetModel
from .categories import CategoryModel
from .events import Event
from flask_login import current_user
class AnnotationModel(DynamicDocument):
    """MongoDB document for a single image annotation (COCO-style)."""

    # Fields exported when serializing to COCO format.
    COCO_PROPERTIES = ["id", "image_id", "category_id", "segmentation",
                       "iscrowd", "color", "area", "bbox", "metadata",
                       "keypoints"]

    # Auto-incrementing integer primary key.
    id = SequenceField(primary_key=True)
    # Parent image/category/dataset references (plain ints, not RefFields).
    image_id = IntField(required=True)
    category_id = IntField(required=True)
    dataset_id = IntField()
    # COCO geometry: polygon list, pixel area, [x, y, w, h] box.
    segmentation = ListField(default=[])
    area = IntField(default=0)
    bbox = ListField(default=[0, 0, 0, 0])
    iscrowd = BooleanField(default=False)
    creator = StringField(required=True)
    width = IntField()
    height = IntField()
    # Display color as a hex string; assigned randomly on first save.
    color = StringField()
    keypoints = ListField(default=[])
    metadata = DictField(default={})
    paper_object = ListField(default=[])
    # Soft-delete bookkeeping.
    deleted = BooleanField(default=False)
    deleted_date = DateTimeField()
    # Time spent annotating, plus an audit trail of events.
    milliseconds = IntField(default=0)
    events = EmbeddedDocumentListField(Event)

    def __init__(self, image_id=None, **data):
        # Local import avoids a circular import with .images.
        from .images import ImageModel

        # If a known image is given, inherit its dimensions and dataset.
        if image_id is not None:
            image = ImageModel.objects(id=image_id).first()
            if image is not None:
                data['image_id'] = image_id
                data['width'] = image.width
                data['height'] = image.height
                data['dataset_id'] = image.dataset_id
        super(AnnotationModel, self).__init__(**data)

    def save(self, copy=False, *args, **kwargs):
        # Unless this is a copy, reset metadata to the dataset's defaults.
        if self.dataset_id and not copy:
            dataset = DatasetModel.objects(id=self.dataset_id).first()
            if dataset is not None:
                self.metadata = dataset.default_annotation_metadata.copy()
        # First save: pick a random display color.
        if self.color is None:
            self.color = im.Color.random().hex
        # Attribute the save to the logged-in user, or 'system' otherwise.
        if current_user:
            self.creator = current_user.username
        else:
            self.creator = 'system'
        return super(AnnotationModel, self).save(*args, **kwargs)

    def is_empty(self):
        """True if the annotation has no polygons or zero area."""
        return len(self.segmentation) == 0 or self.area == 0

    def mask(self):
        """ Returns binary mask of annotation """
        # NOTE(review): 'np' and 'cv2' are never imported in this module, so
        # this method raises NameError as written — confirm the intended
        # imports (numpy, OpenCV) and add them at module level.
        mask = np.zeros((self.height, self.width))
        pts = [
            np.array(anno).reshape(-1, 2).round().astype(int)
            for anno in self.segmentation
        ]
        mask = cv2.fillPoly(mask, pts, 1)
        return mask

    def clone(self):
        """ Creates a clone """
        # Round-trip through JSON, dropping the primary key so the clone
        # gets a fresh id on save.
        create = json.loads(self.to_json())
        del create['_id']
        return AnnotationModel(**create)

    def __call__(self):
        # Convert to an imantics Annotation for export/visualization.
        category = CategoryModel.objects(id=self.category_id).first()
        if category:
            category = category()
        data = {
            'image': None,
            'category': category,
            'color': self.color,
            'polygons': self.segmentation,
            'width': self.width,
            'height': self.height,
            'metadata': self.metadata
        }
        return im.Annotation(**data)

    def add_event(self, e):
        """Append an audit event atomically (no full-document save)."""
        self.update(push__events=e)
__all__ = ["AnnotationModel"]
| StarcoderdataPython |
3596935 | <gh_stars>0
#!/usr/bin/env python
# NOTE: this is a Python 2 module (ConfigParser import, print statements).
import os.path
import ConfigParser
import subprocess
from pymongo import MongoClient
# Settings file: all runtime configuration lives in /etc/otfnfv.conf.
CONFIG_FILE = '/etc/otfnfv.conf'
if not os.path.isfile(CONFIG_FILE):
    print "Make sure you have a %s file in /etc !" % 'otfnfv.conf'
    exit(1)
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
# Source path
PATH = config.get('system', 'path')
# Network interface
INTERFACE = config.get('system', 'interface')
# Ryu config (SDN controller): install path, application module, REST port.
RYU_PATH = config.get('ryu', 'path')
RYU_APP = config.get('ryu', 'app')
RYU_PORT = config.getint('ryu', 'port')
# Full path of the Ryu application to launch.
RYU_APP_PATH = RYU_PATH + RYU_APP
def connect(conn_request):
    """
    Open a MongoDB connection using the configured host.

    :param conn_request: 'remote' selects the [mongodb] remote_host
        setting; any other value selects the local host setting.
    :return: [client, database] pair
    """
    if conn_request == 'remote':
        mongo_host = config.get('mongodb', 'remote_host')
    else:
        mongo_host = config.get('mongodb', 'host')
    mongo_port = config.getint('mongodb', 'port')
    database_name = config.get('mongodb', 'db')
    client = MongoClient(mongo_host, mongo_port)
    database = client[database_name]
    return [client, database]
def execute(cmd):
    """
    Execute a command in another process.

    Non-blocking: the Popen handle is discarded, so the child is never
    waited on or reaped by this function.
    """
    subprocess.Popen(cmd)
def exit_network(signal, frame):
    """
    On program exit or crash, do the necessary cleanup.

    Signal-handler signature (signal, frame). Kills all helper processes,
    cleans up Mininet, resets the MongoDB control document, clears rules,
    and finally kills the main otfnfv process.
    """
    # Order matters: helper processes first, then the Mininet cleanup.
    commands = [
        (['pkill', '-f', 'network.py'], 'Network'),
        (['pkill', '-f', 'ryu-manager'], 'Ryu'),
        (['pkill', '-f', 'controller.py'], 'Interface Controller'),
        (['pkill', '-f', 'filter.py'], 'Filter Manager'),
        (['mn', '-c', '-v', 'output'], 'Cleaning network'),
    ]
    for cmd in commands:
        print "Killing %s" % cmd[1]
        subprocess.call(cmd[0])
    mg, db = connect(None)
    control = db['control']
    rules = db['rules']
    print "Reset control document"
    # Replace the control document with a pristine default state.
    control.delete_one({})
    doc_control = {
        'vnf': {
            'to_instantiate': '',
            'to_kill': '',
            'create_connection': '',
            'working': False,
        },
        'rule': {
            'dirty': False
        },
        'network': {
            'stop': False,
            'switch': []
        }
    }
    control.insert_one(doc_control)
    # Remove all created rules
    rules.delete_many({})
    print "Killing main process"
    # This also terminates the current process (it matches 'otfnfv').
    subprocess.call(['pkill', '-f', 'otfnfv'])
| StarcoderdataPython |
1781913 | import re
class RegexGenerator():
    """Generate a regex that matches strings shaped like an example string.

    Runs of same-class characters collapse into counted tokens:
    digits -> ``\\d{n}``, letters -> ``\\w{n}``, whitespace -> ``\\s{n}``.
    Every other ("special") character is emitted individually as a
    single-character class such as ``[-]``. Characters are NOT escaped, so
    inputs containing ``]``, ``^`` or ``\\`` may still yield an invalid
    or surprising regex.
    """

    def __init__(self, string_pattern_to_detect):
        self.string_pattern_to_detect = string_pattern_to_detect
        self.regex_string = ""
        self.create_regex()

    def create_regex(self):
        """Build the regex in a single pass over the example string.

        BUG FIX: the previous index-juggling implementation emitted wrong
        or duplicated tokens when the string ended with (or consisted
        solely of) a special character — e.g. 'a-' produced '\\w{1}[a]'
        and '-' produced '[-][-][-]'. Grouping consecutive characters by
        class removes those edge cases while preserving the output for
        ordinary inputs.
        """
        from itertools import groupby

        parts = []
        for char_type, run in groupby(self.string_pattern_to_detect,
                                      key=self.determine_char_type):
            chars = list(run)
            if char_type == "DIGIT":
                parts.append("\\d{%d}" % len(chars))
            elif char_type == "CHARACTER":
                parts.append("\\w{%d}" % len(chars))
            elif char_type == "WHITESPACE":
                parts.append("\\s{%d}" % len(chars))
            else:
                # Specials are emitted one bracket class per character,
                # matching the original output format.
                parts.extend("[" + c + "]" for c in chars)
        self.regex_string = "".join(parts)

    def get_regex(self):
        """Return the generated regex string."""
        return self.regex_string

    def set_regex_strings(self, current_type, last_index_of_type, idx):
        # Retained for backward compatibility; no longer used internally.
        if current_type == "DIGIT":
            self.regex_string += ("\\d{"+str(last_index_of_type)+"}")
        elif current_type == "CHARACTER":
            self.regex_string += ("\\w{"+str(last_index_of_type)+"}")
        elif current_type == "WHITESPACE":
            self.regex_string += ("\\s{"+str(last_index_of_type)+"}")
        elif current_type == "SPECIAL":
            self.regex_string += "[" + self.string_pattern_to_detect[idx-1] + "]"

    def check_if_valid(self):
        """Return True if the generated regex fully matches the example."""
        return re.fullmatch(self.regex_string,
                            self.string_pattern_to_detect) is not None

    def determine_char_type(self, char):
        """Classify *char* as DIGIT, CHARACTER, WHITESPACE or SPECIAL."""
        if char.isnumeric():
            return "DIGIT"
        elif char.isalpha():
            return "CHARACTER"
        elif char.isspace():
            return "WHITESPACE"
        else:
            return "SPECIAL"


# Easy test code for self contained runs.
# myGen = RegexGenerator("2021-05-26T20:52:38.000Z")
# myGen = RegexGenerator("aaaa0aaaa")
# print(myGen.get_regex())
6636610 | #
# Copyright (c) 2015-2018 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
import os
import tempfile
import pytest
from prometheus_client.parser import text_string_to_metric_families
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse, Response, Request
import talisker.statsd
import talisker.endpoints
import talisker.revision
from talisker.endpoints import StandardEndpointMiddleware
from tests.test_metrics import counter_name
@pytest.fixture
def wsgi_app(status='200', headers=[], body=''):
    """Build a minimal WSGI app returning the given status/headers/body.

    Used both as a pytest fixture (with the defaults) and called directly
    with keyword arguments (see test_pass_thru in this file). The mutable
    default ``headers=[]`` is never mutated here, so it is harmless.
    """
    def app(environ, start_response):
        start_response(status, headers)
        return body
    return app


@pytest.fixture
def client(wsgi_app):
    """Werkzeug test client wrapping *wsgi_app* in the talisker
    StandardEndpointMiddleware under test."""
    app = StandardEndpointMiddleware(wsgi_app)
    return Client(app, BaseResponse)
def set_networks(monkeypatch, networks):
    # Point TALISKER_NETWORKS at *networks* for the duration of a test.
    monkeypatch.setitem(os.environ, 'TALISKER_NETWORKS', networks)


@talisker.endpoints.private
def protected(self, request):
    # Minimal view guarded by the @private decorator; 'self' is unused
    # here (the decorator expects a method-style callable).
    return Response(status=200)


def get_response(ip, forwarded=None):
    """Call the protected view as if from *ip*, optionally with an
    X-Forwarded-For header, and return the werkzeug Response."""
    req_dict = {'REMOTE_ADDR': ip}
    if forwarded:
        req_dict['HTTP_X_FORWARDED_FOR'] = forwarded
    return protected(None, Request(req_dict))
def test_private_no_config(monkeypatch):
set_networks(monkeypatch, '')
assert get_response(b'127.0.0.1').status_code == 200
assert get_response(b'1.2.3.4').status_code == 403
assert get_response(b'1.2.3.4', '127.0.0.1').status_code == 200
# double check unicode input
assert get_response('127.0.0.1').status_code == 200
assert get_response('1.2.3.4').status_code == 403
def test_private_with_config(monkeypatch):
set_networks(monkeypatch, '10.0.0.0/8')
assert get_response(b'127.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1, 10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1, 5.6.7.8').status_code == 403
assert get_response(b'1.2.3.4').status_code == 403
assert get_response(b'1.2.3.4', '5.6.7.8').status_code == 403
assert get_response(b'10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '5.6.7.8, 10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '10.0.0.1, 5.6.7.8').status_code == 403
def test_private_with_multiple_config(monkeypatch):
set_networks(monkeypatch, '10.0.0.0/8 192.168.0.0/24')
assert get_response(b'127.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1, 10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '127.0.0.1, 5.6.7.8').status_code == 403
assert get_response(b'1.2.3.4').status_code == 403
assert get_response(b'1.2.3.4', '5.6.7.8').status_code == 403
assert get_response(b'10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '5.6.7.8, 10.0.0.1').status_code == 200
assert get_response(b'1.2.3.4', '10.0.0.1, 5.6.7.8').status_code == 403
assert get_response(b'192.168.0.1').status_code == 200
assert get_response(b'1.2.3.4', '192.168.0.1').status_code == 200
assert get_response(b'1.2.3.4', '5.6.7.8, 192.168.0.1').status_code == 200
assert get_response(b'1.2.3.4', '192.168.0.1, 5.6.7.8').status_code == 403
def test_private_response_template(monkeypatch):
set_networks(monkeypatch, '')
resp = get_response(b'1.2.3.4')
assert b"IP address 1.2.3.4" in resp.data
assert b"REMOTE_ADDR: 1.2.3.4" in resp.data
assert b"X-Forwarded-For: None" in resp.data
resp = get_response(b'1.2.3.4', '10.0.0.1, 192.168.0.1')
assert b"IP address 192.168.0.1" in resp.data
assert b"REMOTE_ADDR: 1.2.3.4" in resp.data
assert b"X-Forwarded-For: 10.0.0.1, 192.168.0.1" in resp.data
def test_unknown_endpoint(client):
response = client.get('/_status/unknown')
# passed through to app
assert response.status_code == 200
def test_pass_thru():
c = client(wsgi_app(body='test'))
response = c.get('/something')
assert response.status_code == 200
assert response.data == b'test'
def test_index_endpoint(client):
response = client.get('/_status')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
def test_index_trailing_slash(client):
response = client.get('/_status/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
def test_ping(client, monkeypatch):
monkeypatch.chdir(tempfile.mkdtemp())
response = client.get('/_status/ping')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
assert response.data == b'unknown\n'
def test_check_no_app_url():
talisker.revision.set('unknown')
c = client(wsgi_app('404'))
response = c.get('/_status/check')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
assert response.data == b'unknown\n'
def test_check_with_app_url():
def app(e, sr):
"""Implements custom check check"""
if e['PATH_INFO'] == '/_status/check':
sr('200', [])
return b'app implemented check'
else:
sr('404', [])
return ''
c = client(app)
response = c.get('/_status/check')
assert response.data == b'app implemented check'
def test_check_with_no_app_url_iterator():
talisker.revision.set('unknown')
def app(e, sr):
yield b'app'
sr('404', [])
yield b'iterator'
c = client(app)
response = c.get('/_status/check')
assert response.data == b'unknown\n'
def test_check_with_app_url_iterator():
def app(e, sr):
yield b'app'
sr('200', [])
yield b'iterator'
c = client(app)
response = c.get('/_status/check')
assert response.data == b'appiterator'
def test_check_with_exc_info():
def app(e, sr):
try:
raise Exception('test')
except Exception:
sr(500, [], exc_info=1)
return ''
c = client(app)
response = c.get('/_status/check')
assert response.data == b'error'
assert response.status_code == 500
def test_sentry(client):
response = client.get('/_status/test/sentry',
environ_overrides={'REMOTE_ADDR': b'1.2.3.4'})
assert response.status_code == 403
with pytest.raises(talisker.endpoints.TestException):
client.get('/_status/test/sentry',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
def test_statsd_metric(client, context):
statsd = talisker.statsd.get_client()
env = {'statsd': statsd,
'REMOTE_ADDR': b'127.0.0.1'}
response = client.get('/_status/test/statsd', environ_overrides=env)
assert context.statsd[0] == 'test:1|c'
assert response.status_code == 200
def test_metrics(client):
response = client.get('/_status/test/prometheus',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
response = client.get('/_status/metrics',
environ_overrides={'REMOTE_ADDR': b'1.2.3.4'})
assert response.status_code == 403
response = client.get('/_status/metrics',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
assert list(text_string_to_metric_families(response.data.decode()))
def test_metrics_no_prometheus(client, monkeypatch):
monkeypatch.setattr(
talisker.endpoints, 'pkg_is_installed', lambda x: False)
response = client.get(
'/_status/metrics', environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 501
response = client.get(
'/_status/test/prometheus',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 501
def test_prometheus_metric(client):
response = client.get('/_status/test/prometheus',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
response = client.get('/_status/metrics',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
output = response.data.decode('utf8')
name = counter_name('test_total')
assert '# HELP {} Multiprocess metric\n'.format(name) in output
assert '# TYPE {} counter'.format(name) in output
assert '{} 1.0'.format(name) in output
def test_info_packages(client):
response = client.get('/_status/info/packages',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
def test_info_workers(client):
response = client.get('/_status/info/workers',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
def test_info_objgraph(client):
response = client.get('/_status/info/objgraph',
environ_overrides={'REMOTE_ADDR': b'127.0.0.1'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
| StarcoderdataPython |
1787546 | """Transforms on raw wav samples."""
import random
import numpy as np
import librosa
import torch
from torch.utils.data import Dataset
def should_apply_transform(prob=0.5):
    """Randomly decide whether a transform should be applied.

    Returns True with probability *prob* (drawn uniformly from [0, 1)).
    """
    roll = random.random()
    return roll < prob
class ChangeAmplitude(object):
    """Changes amplitude of an audio randomly."""

    def __init__(self, amplitude_range=(0.7, 1.1)):
        # Uniform range from which the gain factor is drawn.
        self.amplitude_range = amplitude_range

    def __call__(self, data):
        if not should_apply_transform():
            return data

        # Scale the raw samples by a random gain.
        data['input'] = data['input'] * random.uniform(*self.amplitude_range)
        return data


class ChangeSpeedAndPitchAudio(object):
    """Change the speed of an audio. This transform also changes the pitch of the audio."""

    def __init__(self, max_scale=0.2):
        self.max_scale = max_scale

    def __call__(self, data):
        if not should_apply_transform():
            return data

        samples = data['input']
        # Random relative speed change in [-max_scale, +max_scale].
        scale = random.uniform(-self.max_scale, self.max_scale)
        speed_fac = 1.0 / (1 + scale)
        # Resample via linear interpolation; note the output length
        # differs from the input length when speed changes.
        data['input'] = np.interp(np.arange(0, len(samples), speed_fac), np.arange(0,len(samples)), samples).astype(np.float32)
        return data


class StretchAudio(object):
    """Stretches an audio randomly (time-stretch, pitch preserved)."""

    def __init__(self, max_scale=0.2):
        self.max_scale = max_scale

    def __call__(self, data):
        if not should_apply_transform():
            return data

        # Random stretch rate in [1 - max_scale, 1 + max_scale].
        scale = random.uniform(-self.max_scale, self.max_scale)
        data['input'] = librosa.effects.time_stretch(data['input'], 1+scale)
        return data
class TimeshiftAudio(object):
    """Shifts an audio randomly in time by up to *max_shift_seconds*.

    The signal is zero-padded on one side and truncated on the other so
    the output has the same number of samples as the input.
    """

    def __init__(self, max_shift_seconds=0.2):
        self.max_shift_seconds = max_shift_seconds

    def __call__(self, data):
        if not should_apply_transform():
            return data

        samples = data['input']
        sample_rate = data['sample_rate']
        # BUG FIX: random.randint requires integer bounds; sample_rate *
        # max_shift_seconds is generally a float, which raises a TypeError
        # on modern Python 3 (deprecated since 3.10).
        max_shift = int(sample_rate * self.max_shift_seconds)
        shift = random.randint(-max_shift, max_shift)
        a = -min(0, shift)  # left padding (negative shift)
        b = max(0, shift)   # right padding (positive shift)
        samples = np.pad(samples, (a, b), "constant")
        # Trim back to the original length from the appropriate end.
        data['input'] = samples[:len(samples) - a] if a else samples[b:]
        return data
class ToMelSpectrogram(object):
    """Creates the mel spectrogram from an audio sample dict.

    NOTE(review): the original docstring claimed a 32x32 result; the
    actual shape is (n_mels, n_frames) and depends on input length —
    confirm against callers.
    """

    def __init__(self, n_mels=32):
        self.n_mels = n_mels

    def __call__(self, data):
        samples = data['input']
        sample_rate = data['sample_rate']
        s = librosa.feature.melspectrogram(samples, sr=sample_rate, n_mels=self.n_mels)
        # Convert power spectrogram to dB, referenced to the peak value.
        data['input'] = librosa.power_to_db(s, ref=np.max)
        return data


class DataToMelSpectrogram(object):
    """Like ToMelSpectrogram, but operates directly on a raw sample array
    with a fixed sample rate, instead of on a sample dict."""

    def __init__(self, sample_rate=16000, n_mels=32):
        self.n_mels = n_mels
        self.sample_rate = sample_rate

    def __call__(self, samples):
        s = librosa.feature.melspectrogram(samples, sr=self.sample_rate, n_mels=self.n_mels)
        return librosa.power_to_db(s, ref=np.max)
class AddNoise(object):
    """Adds white Gaussian noise to a raw sample array."""

    def __init__(self, intensity=0.005):
        # Scale factor applied to the unit-variance noise.
        self.intensity = intensity

    def __call__(self, data):
        # NOTE(review): unlike the other transforms in this module, which
        # operate on a {'input': ..., 'sample_rate': ...} dict, this one
        # treats *data* as the raw sample array itself (len(data),
        # data + ...). Confirm which convention callers rely on.
        if not should_apply_transform():
            return data

        # Adding white noise
        wn = np.random.randn(len(data))
        data = data + self.intensity * wn
        return data
class ToTensor(object):
    """Converts one entry of the sample dict into a float tensor.

    Reads ``data[np_name]``, optionally applies ``(x - mean) / std``
    normalization, and stores the result under ``data[tensor_name]``.
    """

    def __init__(self, np_name, tensor_name, normalize=None):
        self.np_name = np_name
        self.tensor_name = tensor_name
        self.normalize = normalize

    def __call__(self, data):
        tensor = torch.FloatTensor(data[self.np_name])
        if self.normalize is not None:
            mean, std = self.normalize
            tensor = (tensor - mean) / std
        data[self.tensor_name] = tensor
        return data
| StarcoderdataPython |
3551390 | <gh_stars>1-10
"""
Copyright (c) 2013, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of <NAME> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#!/usr/bin/env python
import argparse
import codecs
import locale
import sys
#import htmlmin
from . import Minifier
parser = argparse.ArgumentParser(
description='Minify HTML',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('input_file',
nargs='?',
metavar='INPUT',
help='File path to html file to minify. Defaults to stdin.',
)
parser.add_argument('output_file',
nargs='?',
metavar='OUTPUT',
help="File path to output to. Defaults to stdout.",
)
parser.add_argument('-c', '--remove-comments',
help=(
'''When set, comments will be removed. They can be kept on an individual basis
by starting them with a '!': <!--! comment -->. The '!' will be removed from
the final output. If you want a '!' as the leading character of your comment,
put two of them: <!--!! comment -->.
'''),
action='store_true')
parser.add_argument('-s', '--remove-empty-space',
help=(
'''When set, this removes empty space betwen tags in certain cases.
Specifically, it will remove empty space if and only if there a newline
character occurs within the space. Thus, code like
'<span>x</span> <span>y</span>' will be left alone, but code such as
' ...
</head>
<body>
...'
will become '...</head><body>...'. Note that this CAN break your
html if you spread two inline tags over two lines. Use with caution.
'''),
action='store_true')
parser.add_argument('--remove-all-empty-space',
help=(
'''When set, this removes ALL empty space betwen tags. WARNING: this can and
likely will cause unintended consequences. For instance, '<i>X</i> <i>Y</i>'
will become '<i>X</i><i>Y</i>'. Putting whitespace along with other text will
avoid this problem. Only use if you are confident in the result. Whitespace is
not removed from inside of tags, thus '<span> </span>' will be left alone.
'''),
action='store_true')
parser.add_argument('--keep-optional-attribute-quotes',
help=(
'''When set, this keeps all attribute quotes, even if they are optional.
'''),
action='store_true')
parser.add_argument('-H', '--in-head',
help=(
'''If you are parsing only a fragment of HTML, and the fragment occurs in the
head of the document, setting this will remove some extra whitespace.
'''),
action='store_true')
parser.add_argument('-k', '--keep-pre-attr',
help=(
'''HTMLMin supports the propietary attribute 'pre' that can be added to elements
to prevent minification. This attribute is removed by default. Set this flag to
keep the 'pre' attributes in place.
'''),
action='store_true')
parser.add_argument('-a', '--pre-attr',
help=(
'''The attribute htmlmin looks for to find blocks of HTML that it should not
minify. This attribute will be removed from the HTML unless '-k' is
specified. Defaults to 'pre'.
'''),
default='pre')
parser.add_argument('-p', '--pre-tags',
metavar='TAG',
help=(
'''By default, the contents of 'pre', and 'textarea' tags are left unminified.
You can specify different tags using the --pre-tags option. 'script' and 'style'
tags are always left unmininfied.
'''),
nargs='*',
default=['pre', 'textarea'])
parser.add_argument('-e', '--encoding',
help=("Encoding to read and write with. Default 'utf-8'.\n\n"),
default='utf-8',
)
def main():
    """Entry point for the htmlmin command line tool.

    Reads HTML from INPUT (or stdin), feeds it line-by-line to a Minifier
    configured from the command line options, and writes the minified
    result to OUTPUT (or stdout).
    """
    args = parser.parse_args()
    minifier = Minifier(
        remove_comments=args.remove_comments,
        remove_empty_space=args.remove_empty_space,
        remove_optional_attribute_quotes=not args.keep_optional_attribute_quotes,
        pre_tags=args.pre_tags,
        keep_pre=args.keep_pre_attr,
        pre_attr=args.pre_attr,
    )

    # BUG FIX: the input and output file handles were previously never
    # closed; use context managers so they are closed deterministically.
    if args.input_file:
        with codecs.open(args.input_file, encoding=args.encoding) as inp:
            for line in inp:
                minifier.input(line)
    else:
        inp = codecs.getreader(
            sys.stdin.encoding or locale.getpreferredencoding())(sys.stdin)
        for line in inp:
            minifier.input(line)

    if args.output_file:
        with codecs.open(args.output_file, 'w',
                         encoding=args.encoding) as out:
            out.write(minifier.output)
    else:
        print(minifier.output)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3201872 | """
.. currentmodule:: feinsum.einsum
.. autoclass:: FusedEinsum
.. autoclass:: VeryLongAxis
.. autoclass:: EinsumAxisAccess
.. autoclass:: FreeAxis
.. autoclass:: SummationAxis
.. autoclass:: Argument
.. autoclass:: EinsumOperand
.. autoclass:: IntermediateResult
.. autoclass:: ContractionSchedule
.. autoclass:: SizeParam
Helper routines
^^^^^^^^^^^^^^^
.. autofunction:: get_trivial_contraction_schedule
.. autofunction:: get_opt_einsum_contraction_schedule
"""
from __future__ import annotations
import abc
import numpy as np
from pyrsistent.typing import PMap as PMapT
from pyrsistent import pmap
from typing import Union, Tuple, Any, FrozenSet, List
from dataclasses import dataclass
from functools import cached_property, cache
from more_itertools import zip_equal as zip
from pytools import UniqueNameGenerator
IntegralT = Union[int, np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64]
INT_CLASSES = (int, np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
ShapeComponentT = Union[IntegralT, "SizeParam"]
ShapeT = Tuple[ShapeComponentT, ...]
@dataclass(frozen=True, eq=True, repr=True)
class VeryLongAxis:
    """
    Describes a dimension length which is to be assumed to be very large.
    """
    # TODO: Record the threshold over which an axis could be considered as
    # "VeryLong."


@dataclass(frozen=True, eq=True, repr=True)
class SizeParam:
    """A named, symbolic dimension length (a size parameter)."""
    name: str


@dataclass(frozen=True, repr=True, eq=True)
class EinsumAxisAccess(abc.ABC):
    """
    Base class for axis access types in an einsum expression.
    """


@dataclass(frozen=True, repr=True, eq=True)
class FreeAxis(EinsumAxisAccess):
    """
    Records the axis of an einsum argument over which contraction is not performed.

    .. attribute:: output_index

        Position of the corresponding index in the einsum's output.
    """
    output_index: int


@dataclass(frozen=True, repr=True, eq=True)
class SummationAxis(EinsumAxisAccess):
    """
    Records an index in an einsum expression over which reduction is performed.
    Sometimes also referred to as an axis with a corresponding "dummy index" in
    Ricci Calculus.

    .. attribute:: index

        An integer which is unique to a reduction index of an einsum.
    """
    index: int
@dataclass(frozen=True, eq=True, repr=True)
class FusedEinsum:
    """
    A fused einsum expression: several einsums with identical subscripts
    evaluated together, sharing index names and operand shapes.

    .. attribute:: shape

    .. attribute:: ndim

    .. automethod:: index_to_dim_length

    .. automethod:: get_subscripts
    """
    arg_shapes: Tuple[ShapeT, ...]
    value_to_dtype: PMapT[str, np.dtype[Any]]
    access_descriptors: Tuple[Tuple[EinsumAxisAccess, ...], ...]
    use_matrix: Tuple[Tuple[FrozenSet[str], ...], ...]
    index_names: PMapT[EinsumAxisAccess, str]

    @property
    def noutputs(self) -> int:
        # One row of use_matrix per fused output.
        return len(self.use_matrix)

    # NOTE(review): functools.cache on instance methods keeps a reference to
    # every instance for the cache's lifetime (flake8-bugbear B019);
    # presumably acceptable for a small, frozen set of einsums — confirm.
    @cache
    def index_to_dim_length(self) -> PMapT[EinsumAxisAccess, ShapeComponentT]:
        """Map each axis-access descriptor to its dimension length,
        asserting consistency across all operands that share an index."""
        index_to_dim = {}
        # NOTE: ``zip`` here is more_itertools.zip_equal (module import),
        # so mismatched shape/descriptor lengths raise instead of truncating.
        for arg_shape, arg_axes in zip(self.arg_shapes,
                                       self.access_descriptors):
            for dim, index in zip(arg_shape, arg_axes):
                if dim not in index_to_dim:
                    index_to_dim[index] = dim
                else:
                    assert dim == index_to_dim[index]

        return pmap(index_to_dim)

    @cached_property
    def shape(self) -> ShapeT:
        """Output shape: free-axis lengths ordered by output position."""
        free_index_to_dim = {idx: dim
                             for idx, dim in self.index_to_dim_length().items()
                             if isinstance(idx, FreeAxis)}
        # Every output position 0..n-1 must be accounted for.
        assert all(FreeAxis(idim) in free_index_to_dim
                   for idim in range(len(free_index_to_dim)))

        return tuple(dim
                     for _, dim in sorted(free_index_to_dim.items(),
                                          key=lambda x: x[0].output_index))

    @property
    def ndim(self) -> int:
        return len(self.shape)

    @cache
    def get_subscripts(self) -> str:
        """
        Returns the subscripts used in the building the *einsum* from it.
        """
        return (",".join("".join(self.index_names[axis]
                                 for axis in axes)
                         for axes in self.access_descriptors)
                + "->"
                + "".join(self.index_names[FreeAxis(i)]
                          for i in range(self.ndim))
                )

    def copy(self, **kwargs: Any) -> FusedEinsum:
        """Return a copy with the given fields replaced."""
        from dataclasses import replace
        return replace(self, **kwargs)
class Argument(abc.ABC):
    """
    An abstract class denoting an argument to an einsum in
    :class:`ContractionSchedule`. See :attr:`ContractionSchedule.arguments`.
    """


@dataclass(frozen=True, eq=True, repr=True)
class IntermediateResult(Argument):
    """
    An :class:`Argument` representing an intermediate result available during
    the current contraction.
    """
    # Name under which the intermediate result was stored.
    name: str


@dataclass(frozen=True, eq=True, repr=True)
class EinsumOperand(Argument):
    """
    An :class:`Argument` representing the *ioperand*-th argument that was
    passed to the parent einsum whose :class:`ContractionSchedule` is being
    specified.
    """
    # Zero-based position of the operand in the parent einsum.
    ioperand: int
@dataclass(frozen=True, eq=True, repr=True)
class ContractionSchedule:
    """
    Records the schedule in which contractions are to be performed in an einsum
    as a series of einsums with the i-th einsum having subscript
    ``subscript[i]`` operating on ``arguments[i]`` and writing its result to
    ``result_names[i]``.

    .. attribute:: result_names

        Names of the result generated by each contraction in the schedule.

    .. attribute:: arguments

        A :class:`tuple` containing a :class:`tuple` of :class:`Argument`\\ s
        for each contraction in the schedule.

    .. attribute:: nsteps
    """
    subscripts: Tuple[str, ...]
    result_names: Tuple[str, ...]
    arguments: Tuple[Tuple[Argument, ...], ...]

    def __post_init__(self) -> None:
        # The three parallel sequences must describe the same number of steps.
        nsteps = len(self.subscripts)
        assert nsteps == len(self.result_names)
        assert nsteps == len(self.arguments)

    @property
    def nsteps(self) -> int:
        """
        Returns the number of steps involved in scheduling the einsum.
        """
        return len(self.subscripts)

    def copy(self, **kwargs: Any) -> ContractionSchedule:
        """Returns a copy of *self* with the fields in *kwargs* replaced."""
        import dataclasses
        return dataclasses.replace(self, **kwargs)
def get_trivial_contraction_schedule(einsum: FusedEinsum) -> ContractionSchedule:
    """
    Returns the :class:`ContractionSchedule` for *einsum* scheduled as a single
    contraction.
    """
    # Every original operand feeds the one-and-only contraction step.
    operands = tuple(EinsumOperand(iop)
                     for iop in range(len(einsum.arg_shapes)))
    return ContractionSchedule(subscripts=(einsum.get_subscripts(),),
                               result_names=("_fe_out",),
                               arguments=(operands,))
def get_opt_einsum_contraction_schedule(expr: FusedEinsum,
                                        **opt_einsum_kwargs: Any,
                                        ) -> ContractionSchedule:
    """
    Returns a :class:`ContractionSchedule` as computed by
    :func:`opt_einsum.contract_path`.

    :param opt_einsum_kwargs: kwargs to be passed to
        :func:`opt_einsum.contract_path`. Additionally, the key
        ``long_dim_length`` (default ``1_000_000``) is consumed here: any axis
        whose length is not a concrete integer is modeled with this length
        when costing the contraction path.

    .. note::

        The following defaults are populated in *opt_einsum_kwargs*, if left
        unspecified:

        - ``optimize="optimal"``
        - ``use_blas=False``
    """
    import opt_einsum

    from feinsum.make_einsum import array

    long_dim_length = opt_einsum_kwargs.pop("long_dim_length", 1_000_000)
    opt_einsum_kwargs.setdefault("optimize", "optimal")
    opt_einsum_kwargs.setdefault("use_blas", False)

    # Model each operand as a float64 array for path costing.
    # BUG FIX: the guard previously read `d if isinstance(op_shape,
    # INT_CLASSES)`, testing the whole shape tuple (always false) — so every
    # axis, even concretely-sized ones, was replaced by long_dim_length. The
    # per-dimension test below matches the evident intent.
    _, path = opt_einsum.contract_path(
        expr.get_subscripts(),
        *[array([d if isinstance(d, INT_CLASSES) else long_dim_length
                 for d in op_shape],
                "float64")
          for op_shape in expr.arg_shapes],
        **opt_einsum_kwargs)

    # Operands still available for contraction, in opt_einsum's ordering.
    current_args: List[Argument] = [
        EinsumOperand(i)
        for i in range(path.input_subscripts.count(",") + 1)]

    vng = UniqueNameGenerator()
    subscripts: List[str] = []
    result_names: List[str] = []
    arguments: List[Tuple[Argument, ...]] = []
    for contraction in path.contraction_list:
        arg_indices, _, subscript, _, _ = contraction
        arguments.append(tuple(current_args[idx]
                               for idx in arg_indices))
        subscripts.append(subscript)
        result_names.append(vng("_fe_tmp"))
        # Consumed operands drop out of the pool; the freshly-produced
        # intermediate is appended — mirroring opt_einsum's bookkeeping.
        current_args = ([arg
                         for idx, arg in enumerate(current_args)
                         if idx not in arg_indices]
                        + [IntermediateResult(result_names[-1])])

    assert len(current_args) == 1
    # The final step's result is the einsum's output, not a temporary.
    result_names[-1] = vng("_fe_out")

    return ContractionSchedule(tuple(subscripts),
                               tuple(result_names),
                               tuple(arguments))
| StarcoderdataPython |
8102282 | <reponame>iamjohnnym/jane
from flask import render_template, flash, redirect, session, url_for
from app import app, db, modules
from models import User
from app.modules.domain.view import mod as domain
from app.modules.database.view import mod as database
from app.modules.user.view import mod as user
from app.modules.system.main import System
# Mount each feature module's blueprint on the application
# (order matches the original explicit registrations).
for _blueprint in (domain, database, user):
    app.register_blueprint(_blueprint)
@app.route('/', methods = ['GET', 'POST'])
@app.route('/index', methods = ['GET', 'POST'])
def index():
    """
    Render the control-panel overview dashboard: system facts, usage graphs,
    service states and quick links into the domain/database/user sections.
    """
    s = System()
    system = {'Hostname': s.getHostname(),
              'IP Address(es)': s.getIps(),
              'Operating System': s.getOs(),
              'Kernel Version': s.getKernel(),
              'zeusCp Version': s.getZcpVersion(),
              }
    disk_used = s.getDiskUsed()
    disk_total = s.getDiskTotal()
    # BUG FIX (x2): a stray trailing comma after the closing brace made
    # `graph` a 1-tuple wrapping the dict (breaking template iteration), and
    # the disk percentage was computed inverted as total/used. It is now
    # used/total, guarded against a zero total.
    # NOTE(review): assumes the template scales the fraction (cf.
    # getPercentPhyMemory for RAM) — confirm the expected scale.
    graph = {'Disk Usage - Used | Total': {'used': disk_used,
                                           'total': disk_total,
                                           'percent': (disk_used / disk_total
                                                       if disk_total else 0),
                                           },
             'RAM Usage - Used | Total': {'used': s.getUsedPhyMemory(),
                                          'total': s.getTotalPhyMemory(),
                                          'percent': s.getPercentPhyMemory(),
                                          },
             }
    services = {'Web Service': s.getWebService(),
                'Database Service': s.getDatabaseService(),
                }
    view_more = {'Domains': {'number': s.getNumberOfDomains(),
                             'url': '/domains',
                             },
                 'Databases': {'number': s.getNumberOfDatabases(),
                               'url': '/databases',
                               },
                 'Users': {'number': s.getNumberOfUsers(),
                           'url': '/users',
                           },
                 }
    return render_template("sb-admin/index.html",
                           title = 'overview',
                           system = system,
                           services = services,
                           graph = graph,
                           view_more = view_more,
                           )
@app.errorhandler(404)
def page_not_found(e):
    """Serve the themed 404 page together with the matching HTTP status."""
    body = render_template("sb-admin/404.html")
    return body, 404
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.