id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
49148 | import json
from flask import Flask
from flask import render_template
import csv
import os
import pandas as pd
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
APP_STATIC = os.path.join(APP_ROOT, 'static')
app = Flask(__name__)
@app.route('/')
def index():
    # Serve the landing page; 'abc' is passed through as template variable `name`.
    return render_template('index.html', name='abc')
@app.route('/names')
def names():
    """Return every sample name as 'BB_<SAMPLEID>', sorted by id, as JSON."""
    frame = pd.read_csv(os.path.join(APP_STATIC, 'metadata.csv'))
    sample_ids = frame.sort_values(by='SAMPLEID')['SAMPLEID'].tolist()
    payload = ['BB_%s' % sid for sid in sample_ids]
    return app.response_class(
        response=json.dumps(payload),
        status=200,
        mimetype='application/json'
    )
@app.route('/otu')
def otu():
    """Return the second column of static/otu.csv as a JSON list."""
    values = []
    with open(os.path.join(APP_STATIC, 'otu.csv')) as handle:
        for record in csv.reader(handle, delimiter=','):
            values.append(record[1])
    return app.response_class(
        response=json.dumps(values),
        status=200,
        mimetype='application/json'
    )
@app.route('/metadata/<sample>')
def metadata(sample):
    """Return the metadata row for one sample as a JSON object.

    ``sample`` may be a prefixed name like ``BB_940`` or a bare id; only
    the id part is matched against column 0 of metadata.csv.  Returns an
    empty object when no row matches.
    """
    parts = sample.split('_')
    # Accept both 'BB_<id>' and bare '<id>'.  (The original kept the whole
    # split *list* when there was no underscore, so bare ids never matched.)
    sample_id = parts[1] if len(parts) > 1 else parts[0]
    data = {}
    with open(os.path.join(APP_STATIC, 'metadata.csv')) as csvfile:
        file_reader = csv.reader(csvfile, delimiter=',')
        for row in file_reader:
            # No break on purpose: a later duplicate row overwrites earlier ones,
            # matching the original scan-to-end behavior.
            if row[0] == sample_id:
                data['ETHNICITY'] = row[2]
                data['GENDER'] = row[3]
                data['AGE'] = row[4]
                data['BBTYPE'] = row[6]
                data['LOCATION'] = row[7]
                data['SAMPLEID'] = row[0]
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/wfreq/<sample>')
def wfreq(sample):
    """Return washing frequencies (column 5) for one sample as a JSON list.

    ``sample`` may be 'BB_<id>' or a bare id.
    """
    # Left-in debugger removed: the original called pdb.set_trace() here,
    # which froze the request in production.
    parts = sample.split('_')
    # Accept both 'BB_<id>' and bare '<id>' (the original kept the split
    # *list* when there was no underscore, so bare ids never matched).
    sample_id = parts[1] if len(parts) > 1 else parts[0]
    data = []
    with open(os.path.join(APP_STATIC, 'metadata.csv')) as csvfile:
        file_reader = csv.reader(csvfile, delimiter=',')
        for row in file_reader:
            if row[0] == sample_id:
                data.append(row[5])
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/samples/<sample>')
def sample(sample):
    """Return the top-10 OTU ids and values for one sample column as JSON.

    Falls back to empty lists when the requested column does not exist.
    """
    data = {}
    df = pd.read_csv(os.path.join(APP_STATIC, 'samples.csv'))
    selected_sample = sample.upper()
    try:
        # Sort once; the original sorted the frame twice for the same result.
        ranked = df.sort_values(by=selected_sample, ascending=False)
        data['otu_ids'] = ranked['otu_id'].tolist()[:10]
        data['sample_values'] = ranked[selected_sample].tolist()[:10]
    except KeyError:
        # Unknown sample column (was a bare `except:` that hid all errors).
        data['sample_values'] = []
        data['otu_ids'] = []
    response = app.response_class(
        response=json.dumps(data),
        status=200,
        mimetype='application/json'
    )
    return response
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
1711076 | <reponame>all-of-us/raw-data-repository<gh_stars>10-100
"""remove primary key from biobank_order_identifier_history
Revision ID: 7d63fbc6d9ca
Revises: <PASSWORD>
Create Date: 2019-08-20 10:33:02.458709
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade, e.g. upgrade_rdr for 'rdr'."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade, e.g. downgrade_rdr for 'rdr'."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the composite primary key so history rows are no longer
    # uniqueness-constrained on (system, value, version).
    op.execute('ALTER TABLE biobank_order_identifier_history DROP PRIMARY KEY;')
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the composite primary key removed by upgrade_rdr().
    op.execute('ALTER TABLE biobank_order_identifier_history ADD CONSTRAINT contacts_pk PRIMARY KEY (`system`, `value`, `version`);')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema change for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema change for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| StarcoderdataPython |
54788 | <reponame>sprotg/animal-avatar-generator<filename>src/animal_avatar/shapes/patterns.py
from animal_avatar.utils.colors import darken
PATTERNS = (
lambda color:
f'<path fill="{darken(color, -30)}" '
'd="M156 387.1c-57.8-12.3-96.7-42-96.7-107 0-9.4.8-18.6 2.4-27.6 '
'19.1 3.4 39.3 17 53.6 38.1a105 105 0 015 8.2 73.6 73.6 0 0021 '
'23.8c4.9 3.6 9.5 8.3 13.3 14 12.3 18.2 12.6 40 1.3 50.5z"/>',
lambda color:
f'<ellipse cx="323.8" cy="217.4" fill="{darken(color, -30)}" '
'rx="52.3" ry="77.6" transform="rotate(-32.5 323.8 217.4)"/>',
lambda color:
f'<path fill="{darken(color, 30)}" '
'd="M235 161.3c14.4 27.5 0 71-41.1 115.2-31.8 34.1-86.6 16.8-101-10.8s7.5-67.4 48.9-89 78.9-43 93.3-15.4z"/>',
)
| StarcoderdataPython |
3366906 | <gh_stars>0
import os
# Say it is TEST environment
TESTING = True

# Statement for enabling the development environment
DEBUG = True

# Define the application directory (like doing a pwd)
# NOTE(review): os.path.dirname("config.py") is "", so this resolves to the
# *current working directory*, not necessarily the directory containing
# config.py — confirm this is intended.
BASE_DIR = os.path.abspath(os.path.dirname("config.py"))

# Define the database - we are working with
# SQLite for this example
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + db_path
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + BASE_DIR + '/test.db'
DATABASE_CONNECT_OPTIONS = {}

# DB_NAME = os.path.basename(db_path)
# DB_NAME = 'test.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False

# Application threads. A common general assumption is
# using 2 per available processor core - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2

# # Enable CSRF WTF (by default, True)
# WTF_CSRF_ENABLED = True

# Enable protection against *Cross-site Request Forgery (CSRF)*
# Disabled for testing
CSRF_ENABLED = False

# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"

# Secret key for signing cookies
SECRET_KEY = "secret"

# Upload csv folder
UPLOAD_FOLDER = f"{BASE_DIR}/app/static/uploads/"

# Inventory file name
INVENTORY_FILE = "inventory.csv"

# It will instruct Flask to print out the steps it goes through to
# locate templates on every render_template call.
# EXPLAIN_TEMPLATE_LOADING = True
| StarcoderdataPython |
123626 | <filename>Back-End/Python/Basics/Part -1 - Functional/01 - Variables-Memory/01var_memory.py
my_var = [1, 2, 3, 4]
my_num = 10

# id() is the object's memory address in CPython; hex() here shows the
# *value* of my_num in hex, not its address.
print(id(my_num))
print(hex(my_num))

# REFERENCE COUNTING
import ctypes


def ref_count(address):
    # Read the ob_refcnt field directly at the object's address
    # (CPython-specific; does not add a reference of its own).
    return ctypes.c_long.from_address(address).value


print(ref_count(id(my_var)))
# >>> 1

import sys

print((sys.getrefcount(my_var)))
# >>> 2
# sys.getrefcount() takes my_var as an argument, so it receives (and
# temporarily holds) an extra reference — the count it reports is one
# higher than the real one.  Use ref_count()/from_address() instead.
141923 | <reponame>IMBINGO95/FairMOT<filename>utils_BINGO/Imgs_Related.py
import cv2
import numpy as np
import time
import re
import shutil
import matplotlib.pyplot as plt
import os
import json
import codecs
import random
import time
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DEFAULT_FONT = cv2.FONT_HERSHEY_SIMPLEX
def calculate_biggest_box(img_dir):
    """Histogram the width/height/area of every .jpg under ``img_dir``.

    Saves a 3-panel histogram figure as ``1.png`` one level above
    ``img_dir`` and prints the largest height and width found.
    """
    img_list = os.listdir(img_dir)
    h = list()
    w = list()
    for item in img_list:
        if item[-3:] != 'jpg':
            continue
        img = cv2.imread(os.path.join(img_dir, item))
        shape = img.shape
        h.append(shape[0])
        w.append(shape[1])
    h = np.array(h)
    w = np.array(w)
    area = h * w
    plt.figure(figsize=(100, 20))
    plt.rcParams.update({'font.size': 60})  # set font size
    for index, his in enumerate((w, h, area)):
        plt.subplot(3, 1, index + 1)
        num = 'num:{:->8}\n'.format(len(his))
        max_score = 'max:{:.4f},'.format(np.max(his))
        min_score = 'min:{:.4f}\n'.format(np.min(his))
        mean_score = '(r)mean:{:.4f},'.format(np.mean(his))
        median_score = '(g)median:{:.4f}'.format(np.median(his))
        scale = np.histogram(his, bins=500, range=(0, int(np.max(his)) + 1))
        plt.hist(his, bins=500, range=(0, int(np.max(his)) + 1))
        # draw mean and median line in the scores histogram
        plt.axvline(x=np.mean(his), ymin=np.min(scale[0]), ymax=np.max(scale[0]), linewidth=5, color='r')
        plt.axvline(x=np.median(his), ymin=np.min(scale[0]), ymax=np.max(scale[0]), linewidth=5, color='g')
        plt.title(index)
        plt.ylabel('count')
        plt.xlabel(num + max_score + min_score + mean_score + median_score)
        plt.grid(True)
    plt.subplots_adjust(hspace=0.5)  # set gap between subplots
    plt.tight_layout()
    plt.savefig(os.path.join(img_dir, '../1.png'))
    plt.close()
    # BUG FIX: the original printed the *builtin* `dir` instead of img_dir.
    print('Figure in ', img_dir, ' saved !')
    # BUG FIX: the original dumped the full h/w arrays instead of their maxima.
    print('max h = {}, max w = {}'.format(np.max(h), np.max(w)))
def calculate_biggest_box_json(dir):
    """Histogram torso-box width/height/area from ``dir/Annotations.json``.

    The box is the bounding rectangle of keypoints 5/6/11/12 (COCO-style
    shoulder/hip indices — TODO confirm the keypoint convention).  Saves
    a 3-panel histogram figure as ``1.png`` inside ``dir``.
    """
    json_file = os.path.join(dir, 'Annotations.json')
    with open(json_file, 'r') as f:
        data = json.load(f)
    h = list()
    w = list()
    for item in data:
        if item['Label'] == 0:
            continue
        keypoints = item['keypoints']
        # Crop target rectangle from the torso keypoints.
        ul_x = round(float(min(keypoints[3 * 5], keypoints[3 * 11])))
        ul_y = round(float(min(keypoints[3 * 5 + 1], keypoints[3 * 6 + 1])))
        br_x = round(float(max(keypoints[3 * 6], keypoints[3 * 12])))
        br_y = round(float(max(keypoints[3 * 11 + 1], keypoints[3 * 12 + 1])))
        i_width = br_x - ul_x
        i_height = br_y - ul_y
        h.append(i_height)
        w.append(i_width)
    h = np.array(h)
    w = np.array(w)
    area = h * w
    plt.figure(figsize=(100, 20))
    plt.rcParams.update({'font.size': 60})  # set font size
    for index, his in enumerate((w, h, area)):
        plt.subplot(3, 1, index + 1)
        num = 'num:{:->8}\n'.format(len(his))
        max_score = 'max:{:.4f},'.format(np.max(his))
        min_score = 'min:{:.4f}\n'.format(np.min(his))
        mean_score = '(r)mean:{:.4f},'.format(np.mean(his))
        median_score = '(g)median:{:.4f}'.format(np.median(his))
        scale = np.histogram(his, bins=500, range=(0, int(np.max(his)) + 1))
        plt.hist(his, bins=500, range=(0, int(np.max(his)) + 1))
        # draw mean and median line in the scores histogram
        plt.axvline(x=np.mean(his), ymin=np.min(scale[0]), ymax=np.max(scale[0]), linewidth=5, color='r')
        plt.axvline(x=np.median(his), ymin=np.min(scale[0]), ymax=np.max(scale[0]), linewidth=5, color='g')
        plt.title(index)
        plt.ylabel('count')
        plt.xlabel(num + max_score + min_score + mean_score + median_score)
        plt.grid(True)
    plt.subplots_adjust(hspace=0.5)  # set gap between subplots
    plt.tight_layout()
    plt.savefig(os.path.join(dir, '1.png'))
    plt.close()
    print('Figure in ', dir, ' saved !')
    # BUG FIX: the original dumped the full h/w arrays instead of their maxima.
    print('max h = {}, max w = {}'.format(np.max(h), np.max(w)))
def move_file(dir, train_count=8000):
    """Split an images+annotations dataset into train/test subsets.

    Shuffles the JPEGs under ``dir/JPEGImages`` and copies the first
    ``train_count`` of them (with their matching ``.xml`` files from
    ``dir/Annotations``) into ``dir/train/...``; the remainder goes to
    ``dir/test/...``.  Output directories are created if missing.
    """
    Jpeg_dir = os.path.join(dir, 'JPEGImages')
    Anno_dir = os.path.join(dir, 'Annotations')
    imgs = os.listdir(Jpeg_dir)
    random.shuffle(imgs)
    train_list = imgs[0:train_count]
    test_list = imgs[train_count:]
    for split_name, split_imgs in (('train', train_list), ('test', test_list)):
        jpeg_out = os.path.join(dir, split_name, 'JPEGImages')
        anno_out = os.path.join(dir, split_name, 'Annotations')
        # Create output folders up front (the original assumed they existed).
        os.makedirs(jpeg_out, exist_ok=True)
        os.makedirs(anno_out, exist_ok=True)
        for item in split_imgs:
            name = item[:-4]
            shutil.copyfile(os.path.join(Jpeg_dir, item), os.path.join(jpeg_out, item))
            shutil.copyfile(os.path.join(Anno_dir, name + '.xml'), os.path.join(anno_out, name + '.xml'))
def sort_img(dir, json_file):
    """Filter out crops that are too small (or otherwise unwanted).

    For every entry in the AlphaPose-style ``json_file``, copies the image
    from ``dir`` into a sibling ``After_sort_Negative_vis`` directory,
    skipping images smaller than 130x60 px.
    """
    Jpeg_dir = dir
    target_dir = os.path.join(dir, '..', 'After_sort_Negative_vis')
    os.makedirs(target_dir, exist_ok=True)
    with open(json_file, 'r') as f:
        data = json.load(f)
    for item in data:
        image_id = item['image_id']
        img = cv2.imread(os.path.join(Jpeg_dir, image_id))
        H, W = img.shape[0], img.shape[1]
        # Reject crops that are too small to be useful.
        if H < 130 or W < 60:
            continue
        # NOTE: a keypoint-orientation filter and a skeleton-visualization
        # pass existed here as commented-out code and were removed.
        shutil.copyfile(os.path.join(Jpeg_dir, image_id), os.path.join(target_dir, image_id))
def regular_test(root_path):
    """Collect video files named like 'ch0*.mp4' / 'Ch0*.mp4' from ``root_path``.

    Returns (and prints) a dict keyed by the 4th character of the file name
    (the camera digit for names like 'ch01.mp4') mapping to the file name.
    NOTE: the '.' before 'mp4' in the pattern is unescaped, so it matches any
    character — kept for backward compatibility.
    """
    # build the regular format
    re_videoName = re.compile(r'(c|C)h0\w*.mp4')
    videoNames = {}
    for videoName in os.listdir(root_path):
        if re_videoName.match(videoName):
            videoNames[videoName[3]] = videoName
    print(videoNames)
    # Return the mapping so callers can use it (the original only printed it).
    return videoNames
def T_move_file(path1, path2, target):
    """Copy every .jpg listed in ``path1`` from ``path2`` into ``target``.

    Note: the file *names* come from path1, but the bytes are copied from
    path2 — the two directories are expected to share file names.
    """
    os.makedirs(target, exist_ok=True)
    for filename in os.listdir(path1):
        if filename.split('.')[-1] == 'jpg':
            shutil.copy(os.path.join(path2, filename), os.path.join(target, filename))
if __name__ == '__main__':
root_dir = '/datanew/hwb/data/WG_Num/Negative_vis'
json_file = 'AlphaPose_WG_num.json'
sort_img(root_dir,os.path.join(root_dir,json_file))
# sort_img(img_dir)
| StarcoderdataPython |
58585 |
# Extracts all pending transactions from the mempool.
def getTxnsFromPool(MasterObj):
    """Drain MasterObj.mempool and return every transaction it contained.

    BUG FIX: the original removed items from the list *while iterating it*,
    which skips every other element and leaves half the pool behind.
    """
    selected = list(MasterObj.mempool)
    for txn in selected:
        MasterObj.mempool.remove(txn)
        print('Transaction {} selected'.format(txn.getTxHash()))
    return selected
# Returns the total amount of fees, in the given coin, across transactions.
def getTransactionsTotalFee(CoinObj, *Transactions):
    """Sum the fee amounts denominated in ``CoinObj`` over all transactions."""
    return sum(
        fee.get()
        for txn in Transactions
        for fee in txn.getFees()
        if fee.coin() == CoinObj
    )
def CryptoNightMinerFnc(q, block_root_hash, diff):
    """Brute-force a nonce whose CryptoNight hash meets difficulty ``diff``.

    Puts a dict with the winning hex hash, the nonce, the integer hash rate
    and the total hash count onto the multiprocessing queue ``q``.
    """
    import binascii, time
    import pycryptonight

    max_target = 2 ** 256 - 1
    started_at = time.time()
    attempts = 0
    nonce = 0
    for _ in range(max_target):
        digest = pycryptonight.cn_slow_hash(bytes(block_root_hash + str(nonce).encode()), 4, 0, 1)
        hex_digest = binascii.hexlify(digest)
        attempts += 1
        if max_target / int(hex_digest, 16) < diff:
            nonce += 1
            continue
        elapsed = time.time() - started_at
        # Integer-truncated rate, exactly as the original computed it.
        rate = int(int(attempts) / int(elapsed))
        q.put({'hash': hex_digest, 'nonce': nonce, 'hrate': rate, 'trate': attempts})
        return
## CryptonightMiner
class CryptonightMiner:
    """Background miner: builds candidate blocks from the mempool, mines them
    with CryptoNight proof-of-work and appends them to the chain."""

    def __init__(self,ChainObject,UseCPPBinary=False):
        # ``UseCPPBinary`` is accepted but not used in this implementation.
        self.chain = ChainObject
        self.shutdown = False        # set True to stop the miner loop
        self.miner_address = None    # reward address, set by Start()
        self.running = False         # True while the miner thread is alive
        self.hrate = 0               # last measured hash rate
        # The miner thread is declared here but only started by Start().
        import threading
        def miner_thread():
            from apollon.utxo import CoinbaseInUtxo, LagacyOutUtxo, createFeeInputUtxo
            from apollon.transaction import CoinbaseTransaction
            from apollon.block import BlockConstruct
            # Signal that the thread is running.
            self.running = True
            # Snapshot the current miner address for this session.
            curr_miner = self.miner_address
            # Keep producing new blocks until shutdown is requested.
            while not self.shutdown:
                # Pull the pending transactions out of the memory pool.
                cur_transactions = getTxnsFromPool(self.chain)
                # Fetch the last block's metadata to chain the next block.
                last_block_mdata = self.chain.getLastBlockMetaData(True)
                next_block_height = last_block_mdata['block_height'] + 1
                last_block_hash = last_block_mdata['block_hash']
                # Build the coinbase in/out UTXOs per coin on this chain.
                coinbase_utxo_pairs = list()
                for i in self.chain.getChainCoins():
                    # Does this coin pay a block reward at this height?
                    has_reward = i.hasRewardForBlock(next_block_height)
                    # Total transaction fees payable in this coin.
                    transaction_total_fees = getTransactionsTotalFee(i, *cur_transactions)
                    # Case 1: both a block reward and transaction fees exist.
                    if has_reward == True and transaction_total_fees != 0:
                        # Input UTXO for the block reward.
                        reward_in_utxo = i.createNewRewardInputUtxo(next_block_height)
                        # Input UTXO for the collected fees.
                        fee_in_utxo = createFeeInputUtxo(i, cur_transactions)
                        # Should part of the fees be burned?
                        if i.minerForceBurnFee() == True:
                            # Amount to burn and remaining miner payout.
                            burn_value = i.calcMinerBurningAmountValue(fee_in_utxo.getCoinValue(True))
                            reciver_value_total = reward_in_utxo.getCoinValue(True) + (fee_in_utxo.getCoinValue(True) - burn_value)
                            # Two output UTXOs: miner payout and burn output.
                            miner_outxo = LagacyOutUtxo(curr_miner, reciver_value_total, i, *[reward_in_utxo, fee_in_utxo])
                            burn_outxo = LagacyOutUtxo(curr_miner, burn_value, i, *[reward_in_utxo, fee_in_utxo])
                            # Collect all UTXOs for the coinbase transaction.
                            coinbase_utxo_pairs.append(reward_in_utxo)
                            coinbase_utxo_pairs.append(fee_in_utxo)
                            coinbase_utxo_pairs.append(miner_outxo)
                            coinbase_utxo_pairs.append(burn_outxo)
                        else:
                            # Single output: reward + full fees to the miner.
                            miner_outxo = LagacyOutUtxo(curr_miner, reward_in_utxo.getCoinValue(True) + fee_in_utxo.getCoinValue(True), i, *[reward_in_utxo, fee_in_utxo])
                            coinbase_utxo_pairs.append(reward_in_utxo)
                            coinbase_utxo_pairs.append(fee_in_utxo)
                            coinbase_utxo_pairs.append(miner_outxo)
                    # Case 2: reward only, no fees.
                    elif has_reward == True and transaction_total_fees == 0:
                        reward_in_utxo = i.createNewRewardInputUtxo(next_block_height)
                        reward_out_utxo = LagacyOutUtxo(curr_miner, reward_in_utxo.getCoinValue(True), i, reward_in_utxo)
                        coinbase_utxo_pairs.append(reward_in_utxo)
                        coinbase_utxo_pairs.append(reward_out_utxo)
                    # Case 3: no reward, fees only.
                    # NOTE(review): this branch looks unfinished — it only sets
                    # reward_in_utxo = None and never adds fee UTXOs (TODO in source).
                    elif has_reward == False and transaction_total_fees != 0:
                        reward_in_utxo = None
                # Build the coinbase transaction from all collected UTXOs.
                coinbase = CoinbaseTransaction(*coinbase_utxo_pairs, BlockNo=next_block_height)
                # Full transaction list: coinbase first, then the mempool txns.
                totalls = list(); totalls.append(coinbase); totalls = totalls + cur_transactions
                # Difficulty for the next block.
                cdiff = self.chain.getBlockDiff(next_block_height)
                # Construct the candidate block.
                from apollon.atime import ATimeString
                try: new_block = BlockConstruct(last_block_hash, next_block_height, curr_miner, ATimeString.now(), cdiff, *totalls)
                except Exception as E: raise Exception(E)
                # Only mine blocks that validate under the chain rules.
                if new_block.isValidateObject() == True and new_block.validateBlockTransactions() == True:
                    # Run proof-of-work, then append the mined block.
                    try: nwoblck = self.MineBlock(new_block, cdiff); print('New Blocke Mined: {} @ {} :: {}'.format(nwoblck.getHeight(), nwoblck.getBlockHash(), nwoblck.getBlockTimestampe()))
                    except Exception as E: raise Exception(E)
                    try: self.chain.addBlock(nwoblck)
                    except Exception as E: raise Exception(E)
                else: print('Invalid New Block, abort')
            # Signal that the thread has finished.
            self.running = False
        self.miner_thread = threading.Thread(target=miner_thread)

    # Starts mining. Returns 0 on success, 1 if the thread failed to come up.
    def Start(self, MinerAddress):
        # Validate the reward address type.
        from apollon.apollon_address import LagacyAddress, PhantomAddress
        assert isinstance(MinerAddress, LagacyAddress) or isinstance(MinerAddress, PhantomAddress)
        # Refuse to start twice.
        if self.miner_address != None or self.running != False: raise Exception('Miner alrady running')
        # Start the miner thread.
        print('Starting Miner')
        self.miner_address = MinerAddress
        self.miner_thread.start()
        # Poll for up to ~200ms until the thread reports it is running.
        import time
        for i in range(2*10):
            if self.running == True and self.miner_address is not None: print('Miner started'); return 0
            time.sleep(0.01)
        # The miner could not be started.
        print('Miner start, aborted')
        return 1

    # Returns the last measured hash rate.
    def getHashRate(self): return self.hrate

    # Miner status: 0 = running, 2 = never started, 1 = stopped.
    def Status(self):
        if self.running == True: return 0
        elif self.running == False and self.miner_address is None: return 2
        else: return 1

    # Returns the block currently being mined (not implemented).
    def getUnminedBlock(self):
        return

    # Mines the given constructed block and returns the resulting MinedBlock.
    def MineBlock(self, constructed_block, diff):
        # Validate the block constructor.
        from apollon.block import BlockConstruct, MinedBlock
        assert isinstance(constructed_block, BlockConstruct)
        # The block must satisfy the blockchain rules.
        assert constructed_block.isValidateObject() == True
        # All transactions must validate.
        assert constructed_block.validateBlockTransactions() == True
        # Run the proof-of-work loop in a separate spawned process.
        import multiprocessing as mp
        ctx = mp.get_context('spawn')
        q = ctx.Queue()
        # Wait for the miner process to deliver a result.
        p = ctx.Process(target=CryptoNightMinerFnc, args=(q, constructed_block.getRootHash(True), diff))
        p.start()
        resolv = q.get()
        p.terminate()
        # Build the mined block and cross-check root hash and PoW hash.
        mined_block = MinedBlock.fromConstructWithNonce(constructed_block, resolv['nonce'])
        if mined_block.getRootHash(True) != constructed_block.getRootHash(True): raise Exception('Unkown error')
        if mined_block.getBlockHash(True) != resolv['hash']: raise Exception('Not same hash')
        # Record the measured hash rate.
        self.hrate = resolv['hrate']
        # Return the mined block.
        return mined_block
13119 | <reponame>gruzzlymug/ddg-2018<gh_stars>1-10
import os
import psutil
import subprocess
import threading
import sys
from threading import Timer
import select
from player_abstract import AbstractPlayer
class PlainPlayer(AbstractPlayer):
    """Player runner that launches a bot via run.sh/run.bat in a subprocess
    (no sandboxing), with pause/resume and log-streaming support."""

    def __init__(self, socket_file, working_dir, local_dir=None,
                 player_key="", player_mem_limit=256, player_cpu=20):
        super().__init__(socket_file, working_dir, local_dir, None, None, player_key, player_mem_limit, player_cpu)
        self.paused = False     # whether the child process is suspended
        self.streaming = False  # whether log-streaming threads are running
        self.process = None     # psutil.Popen handle, set by start()

    def stream_logs(self, stdout=True, stderr=True, line_action=lambda line: print(line.decode())):
        """Start daemonless threads that feed each child output line to line_action."""
        assert not self.streaming
        self.streaming = True
        if stdout:
            threading.Thread(target=self._stream_logs, args=(self.process.stdout, line_action)).start()
        if stderr:
            threading.Thread(target=self._stream_logs, args=(self.process.stderr, line_action)).start()

    def _stream_logs(self, stream, line_action):
        # Forward lines until the process handle is cleared by destroy().
        for line in stream:
            if self.process is None:
                return
            line_action(line)

    def start(self):
        """Launch the player's run script with the engine environment variables."""
        if sys.platform == 'win32':
            args = [os.path.join(self.working_dir, 'run.bat')]
            # things break otherwise
            env = dict(os.environ)
        else:
            args = ['sh', os.path.join(self.working_dir, 'run.sh')]
            # Path needs to be passed through, otherwise some compilers (e.g gcc) can get confused and not find things
            env = {'PATH': os.environ['PATH']}
        env['PLAYER_KEY'] = str(self.player_key)
        env['RUST_BACKTRACE'] = '1'
        env['BC_PLATFORM'] = self._detect_platform()
        if isinstance(self.socket_file, tuple):
            # tcp port
            env['TCP_PORT'] = str(self.socket_file[1])
        else:
            env['SOCKET_FILE'] = self.socket_file
        cwd = self.working_dir
        self.process = psutil.Popen(args, env=env, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)

    def pause(self):
        """Suspend the player's process tree (no-op on Windows)."""
        # pausing too slow on windows
        if sys.platform == 'win32': return
        if not self.paused:
            self.paused = True
            suspend(self.process)

    def unpause(self, timeout=None):
        """Resume the player's process tree (no-op on Windows).
        ``timeout`` is accepted for interface compatibility but unused here."""
        # pausing too slow on windows
        if sys.platform == 'win32': return
        if self.paused:
            resume(self.process)
            self.paused = False

    def destroy(self):
        """Tear down the player process and its children."""
        if self.process is not None:
            tmp = self.process
            # This will signal to the log thread that everything is going to be shut down
            # and ignore any future messages. In particular bash may log something like 'Terminated: <PID>'
            # which would pollute the output of this script.
            self.process = None
            reap(tmp)
            self.process = None
        # NOTE(review): indentation was lost in this copy; super().destroy() is
        # placed outside the `if` so base cleanup always runs — confirm against
        # the original upstream source.
        super().destroy()
def reap(process, timeout=3):
    """Terminate (then kill) all children of ``process``, then the process itself.

    Best-effort: any failure is assumed to mean the process exited early.
    """
    def on_terminate(proc):
        # Hook for psutil.wait_procs; intentionally silent.
        pass
    try:
        procs = process.children(recursive=True)
        # First pass: polite SIGTERM to every child.
        for p in procs:
            p.terminate()
        gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
        if alive:
            # Second pass: SIGKILL the stragglers.
            for p in alive:
                p.kill()
            gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
            if alive:
                # give up
                for p in alive:
                    # BUG FIX: the original applied '%' to a str.format-style
                    # string, raising TypeError (then swallowed by the except).
                    print("process {} survived SIGKILL; giving up".format(p.pid))
        process.kill()
    except Exception:
        print("Killing failed; assuming process exited early.")
def suspend(process):
    """Suspend ``process``'s direct children, then the process itself.

    Each call is best-effort: failures on individual processes are ignored.
    """
    procs = process.children(recursive=False)
    # to enterprising players reading this code:
    # yes, it is possible to escape the pausing using e.g. `nohup` when running without docker.
    # however, that won't work while running inside docker. Sorry.
    for p in procs:
        try:
            p.suspend()
        except Exception:
            pass
    try:
        # BUG FIX: the original re-suspended the last loop child (`p`) here,
        # so the parent process itself was never suspended.
        process.suspend()
    except Exception:
        pass
def resume(process):
    """Resume ``process``'s children (recursively), then the process itself.

    Each call is best-effort: failures on individual processes are ignored.
    NOTE(review): suspend() uses recursive=False while this uses
    recursive=True, as in the original — confirm the asymmetry is intended.
    """
    procs = process.children(recursive=True)
    for p in procs:
        try:
            p.resume()
        except Exception:
            pass
    try:
        # BUG FIX: the original re-resumed the last loop child (`p`) here,
        # so the parent process itself was never resumed.
        process.resume()
    except Exception:
        pass
| StarcoderdataPython |
3346874 | """
This script is used to test if mypy understands that the Nullable type is always False-y.
"""
from __future__ import annotations
from dataclasses import dataclass
from dataclasses_jsonschema.type_defs import Nullable
@dataclass
class Example:
    # Nullable marks the field as JSON-nullable for schema generation;
    # the runtime value is still a plain ``str | None``.
    name: Nullable[str | None] = None


example = Example("sienna")
assert example.name  # If this assert passes we know `name` is a string (because it isn't None or Nullable)
name_upper = example.name.upper()
| StarcoderdataPython |
3251476 | <reponame>safelix/dino
import os
import pathlib
import torch
import torchvision
import torchvision.datasets as datasets
# Resolve ../../data relative to the current working directory and ensure it
# exists (mkdir without parents=True requires the parent to already exist).
cwd = pathlib.Path().resolve()
path_to_data = cwd.joinpath('../../data')
path_to_data.mkdir(exist_ok=True)

# Download the MNIST training set into the data directory.
mnist_trainset = datasets.MNIST(root=path_to_data,
                                download=True)
| StarcoderdataPython |
1623777 | <gh_stars>0
from sklearn.decomposition import PCA
import torch
import numpy as np
import utils
import torch.sparse
import pdb
'''
Classes for two linear models.
Linear model: PCA, used to compared with trained supervised models learned from kahip partitions.
Linear model: random projections, used to compared with trained supervised models learned using kahip partitions.
'''
class PCASolver():
    """One-component PCA split.

    Standardizes features, projects onto the top principal component and
    classifies points (0/1) by which side of the training projections'
    median they fall on.
    """
    #Guaranteed to only need top component
    def __init__(self, dataset, opt):
        # ``opt`` is unused; kept for interface parity with the other solvers.
        if isinstance(dataset, torch.Tensor):
            # FIX: .detach().cpu().numpy() instead of torch.tensor(tensor),
            # which copies with a UserWarning and fails on grad-tracking tensors.
            dataset = dataset.detach().cpu().numpy()
        # Scale features: per-feature mean/std, std clipped to avoid blow-up.
        dataset_t = np.transpose(dataset)
        self.ds_mean = dataset_t.mean(axis=-1, keepdims=True)
        dataset_t = dataset_t - self.ds_mean
        self.ds_std = dataset_t.std(axis=-1, keepdims=True).clip(min=0.1)
        dataset_t = dataset_t / self.ds_std
        dataset = np.transpose(dataset_t)
        self.pca = PCA(n_components=1)
        # shape: n_sample x 1
        self.pca.fit(dataset)
        out = self.pca.transform(dataset)
        self.median = np.median(out)

    def predict(self, query):
        """Classify each row of the 2-D ``query`` as 0/1 by its PCA projection
        relative to the training median.  Returns a 1-D np array."""
        if isinstance(query, torch.Tensor):
            query = query.detach().cpu().numpy()
        cls = np.zeros(len(query))
        # Apply the training standardization before projecting.
        query = np.transpose((np.transpose(query) - self.ds_mean) / self.ds_std)
        out = self.pca.transform(query).reshape(-1)
        cls[out >= self.median] = 1
        return cls
from scipy.stats import ortho_group
'''
Another linear method, random projection.
'''
class RPSolver():
    """Random-projection split.

    Projects mean-centered points onto a random orthogonal matrix and
    classifies (0/1) by the sign of the summed projection — a linear
    baseline to compare against learned partitions.
    """
    def __init__(self, dataset, opt):
        # ``opt`` is unused; kept for interface parity with the other solvers.
        if isinstance(dataset, torch.Tensor):
            # FIX: .detach().cpu().numpy() instead of torch.tensor(tensor),
            # which copies with a UserWarning and fails on grad-tracking tensors.
            dataset = dataset.detach().cpu().numpy()
        self.data_mean = dataset.mean(axis=0)
        # Random orthogonal projection (draws from the global numpy RNG).
        self.orth_mx = ortho_group.rvs(dataset.shape[-1])

    def predict(self, query):
        """Classify each row of the 2-D ``query`` as 0/1.  Returns a 1-D np array."""
        if isinstance(query, torch.Tensor):
            query = query.detach().cpu().numpy()
        query = query - self.data_mean
        # Project onto the sum of the orthogonal basis vectors.
        out = np.matmul(query, self.orth_mx).sum(axis=-1)
        cls = np.zeros(len(query))
        cls[out > 0] = 1
        return cls
'''
Search tree solver.
'''
class STSolver():
    """Search-tree split solver: finds a linear separator by taking the top
    eigenvector of a kNN-affinity objective over (a subset of) the dataset."""

    '''
    Input:
    -dataset: dataset for current node, ie subset of full dataset.
    -ranks: nearest neighbor ranks matrix (as original distances), indices are as original dataset. ranks
    for index i includes the i itself.
    -idx: indices of dataset used in cur iteration, indices are wrt original dataset.
    '''
    def __init__(self, dataset, ranks, idx, opt):
        if isinstance(dataset, np.ndarray):
            dataset = torch.from_numpy(dataset).to(utils.device)
            idx = torch.from_numpy(idx).to(utils.device)
        # Append a bias component to every point.
        # NOTE(review): the original comment said "1's" but the code appends
        # zeros — confirm which is intended.
        dataset = torch.cat((dataset, torch.zeros(len(dataset), 1, device=utils.device)), dim=-1)
        if len(dataset) != len(ranks):
            # Subset case: map original-dataset indices to local positions
            # (-1 marks points not present in this node's subset).
            long_vec = -torch.ones(len(ranks), device=utils.device)
            src_vec = torch.cuda.FloatTensor(range(len(idx)))
            long_vec.scatter_(dim=0, index=idx, src=src_vec)
            long_vec = long_vec.cpu().numpy()
            sparse_idx_l = []
            for i, t in enumerate(idx):
                # Keep only neighbors that also belong to this subset.
                cur_vec = []
                for j in ranks[t]:
                    if long_vec[j] != -1:
                        cur_vec.append(long_vec[j])
                idx_i = torch.cat((torch.ones(1, len(cur_vec), dtype=torch.int64, device=utils.device)*i, torch.cuda.LongTensor(cur_vec).unsqueeze(0)), dim=0)
                sparse_idx_l.append(idx_i)
            # 2 x number of non-zero entries
            sparse_idx = torch.cat(sparse_idx_l, dim=-1)
        else:
            # Full-dataset case: every (i, ranks[i][k]) pair is an edge.
            range_vec = torch.cuda.LongTensor(range(len(dataset))).unsqueeze(-1).repeat(1, ranks.size(-1))
            ranks = ranks.to(utils.device)
            sparse_idx = torch.cat((range_vec.view(1, -1), ranks.view(1, -1)), dim=0)
        # Symmetrize the adjacency by adding the transposed edge list.
        sparse_idx1 = torch.clone(sparse_idx)
        sparse_idx1[0] = sparse_idx[1]
        sparse_idx1[1] = sparse_idx[0]
        sparse_idx = torch.cat((sparse_idx, sparse_idx1), dim=-1)
        sparse_val = torch.ones(sparse_idx.size(-1), device=utils.device)
        sparse_vec = torch.sparse.FloatTensor(sparse_idx, sparse_val, torch.Size([len(dataset), len(dataset)]))
        # Coalesce duplicates, then reset all values to 1 (0/1 adjacency).
        sparse_vec = sparse_vec.coalesce()
        sparse_vec = torch.sparse.FloatTensor(sparse_vec._indices(), torch.ones_like(sparse_vec._values()), torch.Size([len(dataset), len(dataset)]) )
        # Balance regularizer weight: graph density (edges / n^2).
        lamb = sparse_vec._values().sum().item()/len(dataset)**2 #.001
        print('lamb {}'.format(lamb))
        ones = torch.ones(1, dataset.size(0), device=utils.device)
        # Objective matrix: affinity term minus lamb-weighted balance term.
        W = torch.mm(torch.sparse.mm(sparse_vec.t(), dataset).t(), dataset) - lamb*torch.mm(torch.mm(dataset.t(), ones.t()), torch.mm(ones, dataset))
        # NOTE(review): torch.eig is deprecated/removed in newer torch —
        # migrating to torch.linalg.eig would need care with complex parts.
        eval_, evec_ = torch.eig(W, eigenvectors=True)
        eval_ = eval_[:, 0]
        evec_ = evec_.t()
        # Keep the eigenvector with the largest (real) eigenvalue.
        max_idx = torch.argmax(eval_)
        self.top_evec = evec_[max_idx]
        self.top_evec = self.top_evec.cpu().numpy()

    '''
    Input: k here to satisfy uniform interface with kmeans solver.
    query: 2D vec
    Output:
    -1 D np array
    '''
    def predict(self, query):
        # Append the bias component (1's) matching the training augmentation.
        query = np.concatenate((query, np.ones((len(query), 1))), axis=-1)
        # Sign of the projection onto the top eigenvector decides the class.
        projected = (self.top_evec * query).sum(-1)
        cls = np.zeros(len(query))
        cls[projected > 0] = 1
        return cls
| StarcoderdataPython |
3217398 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 16:11:52 2018
Functions for LARFP_lumen_segmentation_CGV.py
@author: clauvasq
"""
# import packages
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
io.use_plugin('tifffile')
from skimage.filters import threshold_otsu
from skimage import filters
from skimage import morphology
from scipy import ndimage
import cv2
pw_desktop = '/Users/clauvasq/Desktop/'
def med_filter(stack, med_sel=7):
    """Median-filter an image stack slice by slice.

    Parameters
    ----------
    stack : ndarray
        Cyst image stack with dimensions [Z, Y, X].
    med_sel : int, default = 7
        Radius of the disk selem used for the median filter.

    Returns
    -------
    ndarray
        Median-filtered copy of the stack.
    """
    filtered = stack.copy()
    selem = morphology.disk(med_sel)  # hoisted: same selem for every slice
    for z, plane in enumerate(stack):
        filtered[z] = filters.median(plane, selem=selem)
    return filtered
def otsu_morph_seg(stack, hole_size=2048, opt=1):
    """
    Function segments z-stack on a per-slice basis.
    This is to try to remove errors of lumen segmentation caused by actin loops
    in the lumen.
    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X], otsu-thresholded
    hole_size : int, default = 2048
        for filling in holes in otsu segmentation
    opt : 1, 2, 3
        option 1: otsu segment each slice, then remove small holes
        option 2: morph. closing (dilation then erosion) w/ selem=disk(9),
            then remove small holes
        option 3: for squiggly lumens, closing, remove small holes, erosion,
            and then opening
    Returns
    -------
    bin_stack : ndarray
        ndarray of cyst, with segmented lumen, hopefully
    """
    bin_stack = stack.copy()
    for i in range(len(stack)):
        z_slice = stack[i, :, :]
        # only process slices that contain any signal
        if np.count_nonzero(z_slice) > 0:
            if opt == 1:
                # per-slice Otsu threshold, then fill small holes
                otsu_slice = threshold_otsu(z_slice)
                bin_slice = z_slice > otsu_slice
                bin1 = np.array(morphology.remove_small_holes(bin_slice, hole_size),
                                dtype=np.uint8)
            elif opt == 2:
                # closing bridges gaps in the lumen wall before hole filling
                bin_slice = morphology.binary_closing(z_slice, selem=morphology.disk(9))
                bin1 = np.array(morphology.remove_small_holes(bin_slice, hole_size),
                                dtype=np.uint8)
            else:
                # opt 3: closing -> fill holes -> erosion -> opening,
                # for irregular ("squiggly") lumens
                z1 = morphology.binary_closing(z_slice)
                z2 = morphology.remove_small_holes(z1, hole_size)
                z3 = morphology.binary_erosion(z2, selem=morphology.disk(5))
                bin1 = morphology.binary_opening(z3, selem=morphology.disk(2))
            bin_stack[i, :, :] = bin1
        else:
            # empty input slice -> empty output slice
            bin_stack[i, :, :] = np.zeros(np.shape(stack[i, :, :]))
    return bin_stack
def two_cell_seg(stack, hole_size=128, disk_size=3, obj_size=100):
    """Segment a z-stack with two cells and bright actin enrichment/lumen.

    Otsu-thresholds the max projection of the stack, then per slice fills
    small holes, applies a morphological closing and removes small objects.

    Parameters
    ----------
    stack : ndarray
        Image stack of the cyst with dimensions [Z, Y, X].
    hole_size : int, default = 128
        Maximum hole area to fill.
    disk_size : int, default = 3
        Radius of the disk selem used for the closing.
    obj_size : int, default = 100
        Minimum object area; anything smaller is removed.

    Returns
    -------
    bin_stack : ndarray (uint8)
        Binary stack with the segmented lumen, hopefully.
    """
    bin_stack = stack.copy()
    # one global threshold from the max projection keeps slices consistent
    otsu_value = threshold_otsu(np.max(stack, 0))
    thresholded = np.array(stack > otsu_value, dtype=np.uint8)
    closing_selem = morphology.disk(disk_size)
    for z, mask in enumerate(thresholded):
        filled = morphology.remove_small_holes(mask, hole_size)
        closed = morphology.closing(filled, selem=closing_selem)
        bin_stack[z] = morphology.remove_small_objects(closed, min_size=obj_size)
    return np.array(bin_stack, dtype=np.uint8)
def dim_signal_seg(stack, med_sel=5, otsu_factor=1.5, hole_size=1024, obj_size=500):
    """Segment a z-stack of cells with dim signal.

    Thresholds at `otsu_factor` times the Otsu value of the max projection,
    then per slice fills small holes and removes small objects.

    Parameters
    ----------
    stack : ndarray
        Image stack of the cyst with dimensions [Z, Y, X].
    med_sel : int, default = 5
        Radius of the median-filter selem. NOTE(review): currently unused --
        the per-slice median/Otsu step is commented out in the original.
    otsu_factor : float, default = 1.5
        Multiplier applied to the global Otsu value before thresholding.
    hole_size : int, default = 1024
        Maximum hole area to fill.
    obj_size : int, default = 500
        Minimum object area; anything smaller is removed.

    Returns
    -------
    bin_stack : ndarray (uint8)
        Binary stack with the segmented lumen, hopefully.
    """
    bin_stack = stack.copy()
    otsu_value = threshold_otsu(np.max(stack, 0))
    thresholded = np.array(stack > otsu_factor * otsu_value, dtype=np.uint8)
    for z, mask in enumerate(thresholded):
        filled = morphology.remove_small_holes(mask, hole_size)
        bin_stack[z] = morphology.remove_small_objects(filled, min_size=obj_size)
    return np.array(bin_stack, dtype=np.uint8)
def eight_bit_seg(stack, hole_size=2048):
    """Segment the lumen of an **8-bit** z-stack on a per-slice basis.

    Each slice is dilated, has small holes filled, and is eroded back with a
    slightly larger disk.

    Parameters
    ----------
    stack : ndarray
        Otsu-thresholded stack of the cyst with dimensions [Z, Y, X].
    hole_size : int, default = 2048
        Maximum hole area to fill.

    Returns
    -------
    bin_stack : ndarray
        Stack with the segmented lumen, hopefully.
    """
    bin_stack = stack.copy()
    erosion_selem = morphology.disk(4)
    for z, z_slice in enumerate(stack):
        dilated = morphology.binary_dilation(z_slice)
        filled = morphology.remove_small_holes(dilated, hole_size)
        bin_stack[z] = morphology.binary_erosion(filled, selem=erosion_selem)
    return bin_stack
def eight_bit_cyst_seg(stack, disk_size=7):
    """Segment the whole cyst from an 8-bit z-stack on a per-slice basis.

    Thresholds each slice at its mean, closes, fills holes, erodes, then
    removes small objects.

    Parameters
    ----------
    stack : ndarray
        Image stack of the cyst with dimensions [Z, Y, X].
    disk_size : int, default = 7
        Radius of the disk selem used for the erosion.

    Returns
    -------
    bin_stack : ndarray
        Stack with the segmented cyst.
    """
    bin_stack = stack.copy()
    for i in range(len(stack)):
        z_slice = stack[i, :, :]
        z1 = z_slice > z_slice.mean()
        z2 = morphology.binary_closing(z1, selem=morphology.disk(3))
        # pass the hole size positionally: the keyword was renamed from
        # `min_size` to `area_threshold` in scikit-image 0.16, so the old
        # keyword raises TypeError on current versions
        z3 = morphology.remove_small_holes(z2, 8192)
        z4 = morphology.binary_erosion(z3, selem=morphology.disk(disk_size))
        z5 = morphology.remove_small_objects(z4, 2048)
        bin_stack[i, :, :] = z5
    return bin_stack
def lumen_post(stack, disk_size=5):
    """Erode every z-slice with a disk of radius `disk_size`.

    Intended as an occasional post-processing step after contour finding on
    the lumen segmentation.
    """
    erosion_selem = morphology.disk(disk_size)
    post_stack = np.copy(stack)
    for z, z_slice in enumerate(stack):
        post_stack[z] = morphology.binary_erosion(z_slice, selem=erosion_selem)
    return post_stack
def cyst_edge(stack, low_pct=0.01, hi_pct=0.99, plot=False):
    """
    Determine the z-slice extent of the cyst.

    Projects the stack in Y (or X), takes the mean along X (or Y) to get a
    per-z intensity line, then uses the cumulative sum with `low_pct` and
    `hi_pct` as lower/upper area fractions. The lower bound is the minimum of
    the Y- and X-derived bounds, the upper bound the maximum.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    low_pct : 0-1
        lower bound of area under intensity curve
    hi_pct : 0-1
        upper bound of area under intensity curve
    plot : default = False
        if True, then plots out projections, mean line projection, and cumsum

    Returns
    -------
    z_lower, z_upper : int, int
        bounds, inclusive, of z-slices that include the cyst
    """
    # project image along Y and X, respectively
    im_projY = stack.sum(1)
    im_projX = stack.sum(2)
    # take mean along X and Y, respectively
    lineProjY = np.mean(im_projY, 1)
    lineProjX = np.mean(im_projX, 1)
    # determine edges of peak: find where the cumulative area crosses the
    # requested fractions of the total
    lineProjY_csum = np.cumsum(lineProjY)
    lineProjX_csum = np.cumsum(lineProjX)
    Y_csum = lineProjY_csum[-1]
    X_csum = lineProjX_csum[-1]
    z_fromY = [np.where(lineProjY_csum > low_pct*Y_csum)[0][0],
               np.where(lineProjY_csum > hi_pct*Y_csum)[0][0]]
    z_fromX = [np.where(lineProjX_csum > low_pct*X_csum)[0][0],
               np.where(lineProjX_csum > hi_pct*X_csum)[0][0]]
    # min of the two lower bounds, max of the two upper bounds.
    # FIX: the original used min() for the upper bound although the docstring
    # promised the max; in practice both line projections are proportional to
    # the per-slice intensity sum, so the results coincide, but max() matches
    # the documented contract.
    z_lower = min(z_fromY[0], z_fromX[0])
    z_upper = max(z_fromY[1], z_fromX[1])
    # plotting
    if plot == True:
        fig, ax = plt.subplots(nrows=2, ncols=3)
        ax[0, 0].imshow(im_projY)
        ax[1, 0].imshow(im_projX)
        ax[0, 1].plot(lineProjY)
        ax[1, 1].plot(lineProjX)
        ax[0, 2].plot(lineProjY_csum)
        ax[1, 2].plot(lineProjX_csum)
    return z_lower, z_upper
def bgsub_zyx_morph(stack, sel_e=7, hole_size=2048, obj_size=512, sel_e2=5, opt=2):
    """
    Segmentation of whole cyst via background subtraction in z direction,
    y direction, and x direction.
    (1) median filters
    (2) background subtractions (MOG2, one fresh subtractor per axis)
    (3) morphological operations to clean up
    (4) median filter again to smooth segmentation
    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    sel_e : int, default = 7
        size of selem in disk for first morphological erosion
    hole_size : int, default = 2048
        size of holes to remove
    obj_size : int, default = 512
        size of objects to remove
    sel_e2 : int, default = 5
        size of selem in disk for second morphological erosion (opt 1 only)
    opt : 1 or 2, default = 2
        different order of morphological operations, option 2 seems to work
        better...
    Returns
    -------
    med_stack : ndarray
        ndarray of cyst, with segmented cyst
    """
    # light median filter to suppress noise before background subtraction
    med_stack = med_filter(stack, med_sel=3)
    Z, Y, X = stack.shape
    z_fgm = np.copy(stack)
    y_fgm = np.copy(stack)
    x_fgm = np.copy(stack)
    # initialize background subtraction; MOG2 is stateful, so a fresh
    # subtractor is created per axis and slices are fed in order
    # go through each z_slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for z in range(Z):
        frame = med_stack[z, :, :]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        z_fgm[z, :, :] = fgmask_2
    # go through each y-slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for y in range(Y):
        frame = med_stack[:, y, :]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        y_fgm[:, y, :] = fgmask_2
    # go through each x-slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for x in range(X):
        frame = med_stack[:, :, x]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        x_fgm[:, :, x] = fgmask_2
    # sum the three binary foreground masks: each voxel gets 0-3 "votes"
    all_fgm = z_fgm + y_fgm + x_fgm
    # otsu threshold on the vote volume
    ot_fgm = all_fgm.copy()
    ot_th = threshold_otsu(all_fgm)
    ot_fgm = np.array(all_fgm > ot_th, dtype=np.uint8)
    # morphological operations
    morph_fgm = ot_fgm.copy()
    if opt == 1:
        # erode -> fill holes -> remove objects -> erode again
        for z in range(Z):
            z_slice = ot_fgm[z, :, :]
            z_erode = morphology.binary_erosion(z_slice, selem=morphology.disk(sel_e))
            z_fill = morphology.remove_small_holes(z_erode, hole_size)
            z_rmobj = morphology.remove_small_objects(z_fill, min_size=obj_size)
            z_erode2 = morphology.binary_erosion(z_rmobj, selem=morphology.disk(sel_e2))
            morph_fgm[z, :, :] = z_erode2
    if opt == 2:
        # fill holes first, then erode and remove objects (single erosion)
        for z in range(Z):
            z_slice = ot_fgm[z, :, :]
            z_fill = morphology.remove_small_holes(z_slice, hole_size)
            z_erode = morphology.binary_erosion(z_fill, selem=morphology.disk(sel_e))
            z_rmobj = morphology.remove_small_objects(z_erode, min_size=obj_size)
            morph_fgm[z, :, :] = z_rmobj
    morph_fgm = np.array(morph_fgm, dtype=np.uint8)
    # final median filter smooths the binary segmentation
    med_stack = med_filter(morph_fgm, med_sel=7)
    return med_stack
# Andrew - this is the function used for hESC colony segmenation :D
def bgsub_zyx_otsu(stack):
    """
    Segmentation of whole colony via background subtraction in z direction,
    y direction, and x direction.
    (1) median filters
    (2) background subtractions (MOG2, one fresh subtractor per axis)
    (3) otsu filter on the summed foreground masks
    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    Returns
    -------
    ot_fgm : ndarray
        ndarray of preliminary segmentation of colony
    """
    # light median filter to suppress noise before background subtraction
    med_stack = med_filter(stack, med_sel=3)
    Z, Y, X = stack.shape
    z_fgm = np.copy(stack)
    y_fgm = np.copy(stack)
    x_fgm = np.copy(stack)
    # initialize background subtraction; MOG2 is stateful, so a fresh
    # subtractor is created per axis and slices are fed in order
    # go through each z_slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for z in range(Z):
        frame = med_stack[z, :, :]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        z_fgm[z, :, :] = fgmask_2
    # go through each y-slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for y in range(Y):
        frame = med_stack[:, y, :]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        y_fgm[:, y, :] = fgmask_2
    # go through each x-slice, bkg subtract
    fgbg = cv2.createBackgroundSubtractorMOG2()
    for x in range(X):
        frame = med_stack[:, :, x]
        fgmask = fgbg.apply(frame)
        fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
        x_fgm[:, :, x] = fgmask_2
    # sum the three binary foreground masks: each voxel gets 0-3 "votes"
    all_fgm = z_fgm + y_fgm + x_fgm
    # otsu threshold on the vote volume
    ot_fgm = all_fgm.copy()
    ot_th = threshold_otsu(all_fgm)
    ot_fgm = np.array(all_fgm > ot_th, dtype=np.uint8)
    return ot_fgm
# NOTE(review): this redefinition shadows the identical cyst_edge defined
# earlier in the file -- consider deleting one copy.
def cyst_edge(stack, low_pct=0.01, hi_pct=0.99, plot=False):
    """
    Determine the z-slice extent of the cyst.

    Projects the stack in Y (or X), takes the mean along X (or Y) to get a
    per-z intensity line, then uses the cumulative sum with `low_pct` and
    `hi_pct` as lower/upper area fractions. The lower bound is the minimum of
    the Y- and X-derived bounds, the upper bound the maximum.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    low_pct : 0-1
        lower bound of area under intensity curve
    hi_pct : 0-1
        upper bound of area under intensity curve
    plot : default = False
        if True, then plots out projections, mean line projection, and cumsum

    Returns
    -------
    z_lower, z_upper : int, int
        bounds, inclusive, of z-slices that include the cyst
    """
    # project image along Y and X, respectively
    im_projY = stack.sum(1)
    im_projX = stack.sum(2)
    # take mean along X and Y, respectively
    lineProjY = np.mean(im_projY, 1)
    lineProjX = np.mean(im_projX, 1)
    # determine edges of peak: find where the cumulative area crosses the
    # requested fractions of the total
    lineProjY_csum = np.cumsum(lineProjY)
    lineProjX_csum = np.cumsum(lineProjX)
    Y_csum = lineProjY_csum[-1]
    X_csum = lineProjX_csum[-1]
    z_fromY = [np.where(lineProjY_csum > low_pct*Y_csum)[0][0],
               np.where(lineProjY_csum > hi_pct*Y_csum)[0][0]]
    z_fromX = [np.where(lineProjX_csum > low_pct*X_csum)[0][0],
               np.where(lineProjX_csum > hi_pct*X_csum)[0][0]]
    # min of the two lower bounds, max of the two upper bounds.
    # FIX: the original used min() for the upper bound although the docstring
    # promised the max; in practice both line projections are proportional to
    # the per-slice intensity sum, so the results coincide, but max() matches
    # the documented contract.
    z_lower = min(z_fromY[0], z_fromX[0])
    z_upper = max(z_fromY[1], z_fromX[1])
    # plotting
    if plot == True:
        fig, ax = plt.subplots(nrows=2, ncols=3)
        ax[0, 0].imshow(im_projY)
        ax[1, 0].imshow(im_projX)
        ax[0, 1].plot(lineProjY)
        ax[1, 1].plot(lineProjX)
        ax[0, 2].plot(lineProjY_csum)
        ax[1, 2].plot(lineProjX_csum)
    return z_lower, z_upper
def cyst_post(stack, disk_size=5, disk_size_y=1, seq_erosions=True):
    """
    post contour finding of cyst segmentation. Goes through stack in z, erodes,
    then smoothes. Then goes through stack y, erodes, then smooths. Then, based
    on depth of z, applies larger erosions (larger z-slices have larger selems)
    Parameters
    ----------
    stack : ndarray
        ndarray of cyst segmenation with dimensions [Z, Y, X]
    disk_size : int, default = 5
        default size of selem for z-based erosion & smoothing
    disk_size_y : int, default = 1
        default size of selem for y-based erosion & smoothing
    seq_erosions : bool, default = True
        if True, erode the deeper half of the cyst with progressively larger
        structuring elements (slices near the top bound are eroded hardest)
    Returns
    -------
    stack3 : ndarray
        ndarray of cyst segmenation
    """
    # searches through stack in z and applies erosion, and then smooths w/
    # median filter
    stack1 = np.copy(stack)
    for z in range(len(stack)):
        z_slice = stack[z, :, :]
        z_erode = morphology.binary_erosion(z_slice, selem=morphology.disk(disk_size))
        z_smooth = filters.median(z_erode, selem=morphology.disk(disk_size))
        stack1[z, :, :] = z_smooth
    stack2 = np.copy(stack1)
    # searches through stack in y and applies erosion, and then smooths w/
    # median filter
    for y in range(np.shape(stack)[1]):
        y_slice= stack1[:, y, :]
        y_erode = morphology.binary_erosion(y_slice)
        y_smooth = filters.median(y_erode, selem=morphology.disk(disk_size_y))
        stack2[:, y, :] = y_smooth
    # finds bounds of cyst and then for deepest (large z) slices, erodes more
    stack3 = stack2.copy()
    if seq_erosions == True:
        Z = stack2.shape[0]
        LB, UB = cyst_edge(stack2)
        upper_range = int((UB-LB)/2)
        mid_cyst = LB + upper_range
        # candidate erosion radii; only the last two (8 and 4) are used below
        sel_range = [16, 12, 8, 4]
        for i in range(mid_cyst, Z):
            z_slice = stack2[i, :, :]
            if i >=UB+2:
                # well past the cyst top: blank the slice entirely
                z_erode = np.zeros(z_slice.shape)
            elif i < UB+2 and i >= UB - int(upper_range/4):
                # near the top bound: small erosion
                disk_e = morphology.disk(sel_range[3])
                z_erode = morphology.binary_erosion(z_slice, selem=disk_e)
            elif i < UB - int(upper_range/4) and i >= UB - int(upper_range/2):
                # a bit deeper: slightly larger erosion
                disk_e = morphology.disk(sel_range[2])
                z_erode = morphology.binary_erosion(z_slice, selem=disk_e)
            else:
                z_erode = z_slice
            stack3[i, :, :] = z_erode
        stack3 = np.array(stack3 > 0, dtype=np.uint8)
    else:
        stack3 = np.array(stack3 > 0, dtype=np.uint8)
    return stack3
def lumen_contours(stack, object_size=512, longest=True):
    """
    Find a lumen contour in each z-slice of a binary stack and fill it.

    Parameters
    ----------
    stack : binary ndarray
        ndarray of cyst with segmented lumen
    object_size : int, default = 512
        minimum size object kept after the contour is filled
    longest : boolean, default = True
        if True use the longest contour, otherwise the second longest (useful
        when the cyst outline shows up in the lumen segmentation)

    Returns
    -------
    out_stack : ndarray
        ndarray with the filled contour on each z-slice
    """
    out_stack = 0*stack
    for i in range(len(stack)):
        out_slice = 0*stack[i,:,:]
        # cv2 3.x returns (image, contours, hierarchy) while cv2 4.x returns
        # (contours, hierarchy); slicing the last two elements supports both
        contours, hierarchy = cv2.findContours(stack[i,:,:], cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)[-2:]
        if len(contours) > 0:
            lengths = [c.shape[0] for c in contours]
            longest_contour = np.argmax(lengths)
            # NOTE: with a single contour this argmax falls back to index 0,
            # i.e. the longest contour again (original behavior preserved)
            second_longest = np.argmax([lengths[j] if j != longest_contour else 0
                                        for j in range(len(contours))])
            if longest:
                contourp = contours[longest_contour][:,0,:].T
            else:
                contourp = contours[second_longest][:,0,:].T
            # rasterize the contour, fill its interior and drop small blobs
            out_slice[contourp[1,:],contourp[0,:]] = 255.
            out_slice = ndimage.binary_fill_holes(out_slice)
            out_slice = morphology.remove_small_objects(out_slice, min_size=object_size)
            out_stack[i,:,:] = out_slice
    return out_stack
def lumen_contours_multiple(stack, object_size=512):
    """
    Find the three longest lumen contours in each z-slice and fill each.

    Parameters
    ----------
    stack : binary ndarray
        ndarray of cyst with segmented lumen
    object_size : int, default = 512
        minimum size object kept after each contour is filled

    Returns
    -------
    out_stack, out_stack2, out_stack3 : ndarray
        filled longest, second-longest and third-longest contours per slice.
        When a slice has fewer than three contours the argmax fallback picks
        index 0, so outputs may duplicate the longest contour (unchanged from
        the original behavior).
    """
    out_stack = 0*stack
    out_stack2 = 0*stack
    out_stack3 = 0*stack
    for i in range(len(stack)):
        out_slice = 0*stack[i,:,:]
        out_slice2 = 0*stack[i,:,:]
        out_slice3 = 0*stack[i,:,:]
        # cv2 3.x returns (image, contours, hierarchy) while cv2 4.x returns
        # (contours, hierarchy); slicing the last two elements supports both
        contours, hierarchy = cv2.findContours(stack[i,:,:], cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)[-2:]
        if len(contours) > 0:
            lengths = [c.shape[0] for c in contours]
            longest_contour = np.argmax(lengths)
            second_longest = np.argmax([lengths[j] if j != longest_contour else 0
                                        for j in range(len(contours))])
            third_longest = np.argmax([lengths[j]
                                       if j not in (longest_contour, second_longest)
                                       else 0
                                       for j in range(len(contours))])
            contourp1 = contours[longest_contour][:, 0, :].T
            contourp2 = contours[second_longest][:, 0, :].T
            contourp3 = contours[third_longest][:, 0, :].T
            # rasterize each contour, fill its interior, drop small blobs
            out_slice[contourp1[1, :], contourp1[0, :]] = 255.
            out_slice2[contourp2[1, :], contourp2[0, :]] = 255.
            out_slice3[contourp3[1, :], contourp3[0, :]] = 255.
            out_slice = ndimage.binary_fill_holes(out_slice)
            out_slice = morphology.remove_small_objects(out_slice, min_size=object_size)
            out_slice2 = ndimage.binary_fill_holes(out_slice2)
            out_slice2 = morphology.remove_small_objects(out_slice2, min_size=object_size)
            out_slice3 = ndimage.binary_fill_holes(out_slice3)
            out_slice3 = morphology.remove_small_objects(out_slice3, min_size=object_size)
        out_stack[i,:,:] = out_slice
        out_stack2[i, :, :] = out_slice2
        out_stack3[i, :, :] = out_slice3
    return out_stack, out_stack2, out_stack3
def plot_stack_save(im, colormap='gray', pw=pw_desktop,
                    name='python_fig', subtitle=False):
    """
    Plots all the slices in a z-stack in a subplot grid & saves it as a png.

    Parameters
    ----------
    im : ndarray
        image stack with dimensions [Z, Y, X]
    colormap : default = 'gray'
    pw : default = pw_desktop
        directory in which to save the png
    name : default = 'python_fig'
        file name (without extension) for the png
    subtitle : default = False
        if True, labels each z-slice with its index

    Returns
    -------
    fig, axes : matplotlib Figure and axes array
    """
    Z = im.shape[0]
    # builtin int(): np.int was deprecated in NumPy 1.20 and removed in 1.24
    nrows = int(np.ceil(np.sqrt(Z)))
    ncols = int(Z // nrows + 1)
    # twice as many columns as needed are created; the surplus axes are
    # deleted below -- kept as-is to preserve the original layout
    fig, axes = plt.subplots(nrows, ncols*2, figsize=(3*ncols, 1.5*nrows))
    for z in range(Z):
        i = z // ncols
        j = z % ncols
        axes[i, j].imshow(im[z, ...], interpolation='nearest', cmap=colormap)
        axes[i, j].set_xticks([])
        axes[i, j].set_yticks([])
        if subtitle == True:
            axes[i, j].set_title('z = '+str(z), fontsize=8)
    # Remove empty plots
    for ax in axes.ravel():
        if not(len(ax.images)):
            fig.delaxes(ax)
    fig.tight_layout()
    fig.savefig(pw + name + '.png', bbox_inches='tight')
    return (fig, axes)
def plot_stack_overlay(im_seg, im, pw=pw_desktop, name='python_fig',
                       subtitle=True):
    """
    Plots all the slices in a z-stack, overlays the segmentation on each, and
    saves the figure as a png.

    Parameters
    ----------
    im_seg : ndarray
        image stack of segmentation with dimensions [Z, Y, X]
    im : ndarray
        image stack with dimensions [Z, Y, X]
    pw : default = pw_desktop
        directory in which to save the png
    name : default = 'python_fig'
        file name (without extension) for the png
    subtitle : default = True
        if True, labels each z-slice with its index

    Returns
    -------
    fig, axes : matplotlib Figure and axes array
    """
    Z = im.shape[0]
    # builtin int(): np.int was deprecated in NumPy 1.20 and removed in 1.24
    nrows = int(np.ceil(np.sqrt(Z)))
    ncols = int(Z // nrows + 1)
    fig, axes = plt.subplots(nrows, ncols*2, figsize=(3*ncols, 1.5*nrows))
    for z in range(Z):
        i = z // ncols
        j = z % ncols
        axes[i, j].imshow(im[z, ...], interpolation='nearest', cmap='gray')
        axes[i, j].imshow(im_seg[z, ...], interpolation='nearest', alpha=0.4)
        axes[i, j].set_xticks([])
        axes[i, j].set_yticks([])
        if subtitle == True:
            axes[i, j].set_title('z = '+str(z), fontsize=8)
    # Remove empty plots
    for ax in axes.ravel():
        if not(len(ax.images)):
            fig.delaxes(ax)
    fig.tight_layout()
    fig.savefig(pw + name + '.png', bbox_inches='tight')
    # FIX: previously returned `fig, ax`, where `ax` was the leftover loop
    # variable from the delaxes loop; return the full axes array like
    # plot_stack_save does
    return fig, axes
# need to figure out how to get animation to work in spyder
def play_stack(stack, cbar=False, pw=pw_desktop, filename='test'):
    """
    Animate a z-stack slice by slice and save the animation as an mp4.

    Parameters
    ----------
    stack : ndarray
        image stack with dimensions [Z, Y, X]
    cbar : bool, default = False
        if True, draw a colorbar on the first frame
    pw : str, default = pw_desktop
        directory in which to save the mp4
    filename : str, default = 'test'
        file name (without extension) for the mp4
    """
    # FIX: `animation` was never imported at module level, so this function
    # raised NameError when called; import it locally here
    from matplotlib import animation

    fig = plt.figure()

    def animfunc(i):
        im = plt.imshow(stack[i, :, :], animated=True, cmap='Greys_r')
        if cbar and i == 0:
            plt.colorbar()
        plt.title(i)

    animator = animation.FuncAnimation(fig, animfunc, frames=len(stack),
                                       interval=150, blit=False,
                                       repeat_delay=5000)
    plt.draw()
    plt.show()
    animator.save(pw + filename + '.mp4')
    return
| StarcoderdataPython |
3258416 | import logging
from bentoml.utils.log import configure_logging
def test_configure_logging_default():
    """Defaults: INFO level, console + file handlers on all bentoml loggers."""
    configure_logging()
    for logger_name, file_handler in [
        ("bentoml", "local"),
        ("bentoml.prediction", "prediction"),
        ("bentoml.feedback", "feedback"),
    ]:
        logger = logging.getLogger(logger_name)
        assert logger.level == logging.INFO
        assert logger.propagate is False
        assert [h.name for h in logger.handlers] == ["console", file_handler]
def test_configure_logging_custom_level():
    """A custom logging_level applies to the root bentoml logger only."""
    configure_logging(logging_level=logging.ERROR)
    root_logger = logging.getLogger("bentoml")
    assert root_logger.level == logging.ERROR
    assert root_logger.propagate is False
    assert [h.name for h in root_logger.handlers] == ["console", "local"]
    # the prediction/feedback loggers keep their fixed INFO level
    for logger_name, file_handler in [
        ("bentoml.prediction", "prediction"),
        ("bentoml.feedback", "feedback"),
    ]:
        child = logging.getLogger(logger_name)
        assert child.level == logging.INFO
        assert child.propagate is False
        assert [h.name for h in child.handlers] == ["console", file_handler]
def test_configure_logging_console_disabled():
    """Disabling console logging leaves only the file handlers attached."""
    configure_logging(console_logging_enabled=False)
    for logger_name, file_handler in [
        ("bentoml", "local"),
        ("bentoml.prediction", "prediction"),
        ("bentoml.feedback", "feedback"),
    ]:
        logger = logging.getLogger(logger_name)
        assert logger.level == logging.INFO
        assert logger.propagate is False
        assert [h.name for h in logger.handlers] == [file_handler]
def test_configure_logging_file_disabled():
    """Disabling file logging leaves only the console handler attached."""
    configure_logging(file_logging_enabled=False)
    for logger_name in ["bentoml", "bentoml.prediction", "bentoml.feedback"]:
        logger = logging.getLogger(logger_name)
        assert logger.level == logging.INFO
        assert logger.propagate is False
        assert [h.name for h in logger.handlers] == ["console"]
def test_configure_logging_advanced():
    """A user-supplied dictConfig is honored when advanced mode is enabled."""
    config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "test_formatter": {"format": "[%(asctime)s] %(levelname)s - %(message)s"}
        },
        "handlers": {
            "test_handler": {
                "level": "WARN",
                "formatter": "test_formatter",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
            }
        },
        "loggers": {
            "test_logger": {
                "handlers": ["test_handler"],
                "level": "WARN",
                "propagate": False,
            }
        },
    }
    configure_logging(advanced_enabled=True, advanced_config=config)
    logger = logging.getLogger("test_logger")
    assert logger.level == logging.WARN
    assert logger.propagate is False
    assert [h.name for h in logger.handlers] == ["test_handler"]
| StarcoderdataPython |
18945 | """Commands module common setup."""
from importlib import import_module
from typing import Sequence
def available_commands():
    """Index available commands."""
    commands = [
        ("help", "Print available commands"),
        ("provision", "Provision an agent"),
        ("start", "Start a new agent process"),
    ]
    return [{"name": name, "summary": summary} for name, summary in commands]
def load_command(command: str):
    """Load the module corresponding with a named command.

    Returns None when no command with that name is registered.
    """
    for cmd in available_commands():
        if cmd["name"] == command:
            # an explicit "module" entry wins; otherwise derive the module
            # path from the command name within this package
            module_path = cmd.get("module") or f"{__package__}.{cmd['name']}"
            return import_module(module_path)
    return None
def run_command(command: str, argv: Sequence[str] = None):
    """Execute a named command with command line arguments.

    Falls back to the "help" command when the name is unknown.
    """
    target = load_command(command)
    if target is None:
        target = load_command("help")
    target.execute(argv)
| StarcoderdataPython |
134843 | <gh_stars>1-10
from typing import Optional
from spark_auto_mapper_fhir.classproperty import genericclassproperty
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.extensions.us_core.ethnicity_item import EthnicityItem
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
class Ethnicity(ExtensionBase):
    """US Core ethnicity extension element (FHIR)."""

    # noinspection PyPep8Naming
    def __init__(
        self,
        url: Optional[FhirUri] = None,
        extension: Optional[FhirList[EthnicityItem]] = None,
    ) -> None:
        """Create the us-core-ethnicity extension, defaulting the url to the
        canonical StructureDefinition URI when none is given."""
        effective_url = url if url else Ethnicity.codeset
        super().__init__(url=effective_url, extension=extension)

    # noinspection PyMethodParameters
    @genericclassproperty
    def codeset(cls) -> FhirUri:
        """Canonical StructureDefinition URI for this extension."""
        return "http://hl7.org/fhir/us/core/StructureDefinition/us-core-ethnicity"
| StarcoderdataPython |
1704462 | """
This module contains a class for discrete
1-dimensional exponential families. The main
uses for this class are exact (post-selection)
hypothesis tests and confidence intervals.
"""
import numpy as np
import warnings
from ..truncated import find_root
def crit_func(test_statistic, left_cut, right_cut):
    """
    A generic critical function for an interval, with weights at the
    endpoints:

        (test_statistic < CL) + (test_statistic > CR)
        + gammaL * (test_statistic == CL)
        + gammaR * (test_statistic == CR)

    where (CL, gammaL) = left_cut and (CR, gammaR) = right_cut.

    Parameters
    ----------
    test_statistic : np.float
        Observed value of test statistic.
    left_cut : (float, float)
        (CL, gammaL): left endpoint and weight at exactly that endpoint
        (should be in [0,1]).
    right_cut : (float, float)
        (CR, gammaR): right endpoint and weight at exactly that endpoint
        (should be in [0,1]).

    Returns
    -------
    decision : np.float
    """
    CL, gammaL = left_cut
    CR, gammaR = right_cut
    # full rejection outside the interval ...
    decision = 1. * ((test_statistic < CL) + (test_statistic > CR))
    # ... plus partial (randomized) rejection exactly at the endpoints
    if gammaL != 0:
        decision = decision + gammaL * (test_statistic == CL)
    if gammaR != 0:
        decision = decision + gammaR * (test_statistic == CR)
    return decision
class discrete_family(object):
    def __init__(self, sufficient_stat, weights):
        r"""
        A discrete 1-dimensional
        exponential family with reference measure $\sum_j w_j \delta_{X_j}$
        and sufficient statistic `sufficient_stat`. For any $\theta$, the distribution
        is

        .. math::

            P_{\theta} = \sum_{j} e^{\theta X_j - \Lambda(\theta)} w_j \delta_{X_j}

        where

        .. math::

            \Lambda(\theta) = \log \left(\sum_j w_j e^{\theta X_j} \right).

        Parameters
        ----------
        sufficient_stat : `np.float((n))`
        weights : `np.float(n)`

        Notes
        -----
        The weights are normalized to sum to 1.
        """
        # sort support points by sufficient statistic, keeping weights paired
        # (ties are broken by weight, since sorted() compares the pairs)
        xw = np.array(sorted(zip(sufficient_stat, weights)))
        self._x = xw[:,0]
        self._w = xw[:,1]
        # cache log-weights for the numerically-stable pdf computation
        self._lw = np.log(xw[:,1])
        self._w /= self._w.sum() # make sure they are a pmf
        self.n = len(xw)
        # NaN forces the pdf/partition cache to be built on first theta set
        self._theta = np.nan
    @property
    def theta(self):
        """
        The natural parameter of the family.
        """
        return self._theta

    @theta.setter
    def theta(self, _theta):
        # Setting theta (re)computes and caches the pdf and partition
        # function; skipped when theta is unchanged.
        if _theta != self._theta:
            _thetaX = _theta * self.sufficient_stat + self._lw
            _largest = _thetaX.max() - 5 # try to avoid over/under flow, 5 seems arbitrary
            _exp_thetaX = np.exp(_thetaX - _largest)
            _prod = _exp_thetaX
            self._partition = np.sum(_prod)
            self._pdf = _prod / self._partition
            # restore the scale factored out above for numerical stability
            self._partition *= np.exp(_largest)
            self._theta = _theta
    @property
    def partition(self):
        r"""
        Partition function at `self.theta`:

        .. math::

            \sum_j e^{\theta X_j} w_j

        Only available once the theta setter has built the cache; before
        that this returns None (implicitly).
        """
        if hasattr(self, "_partition"):
            return self._partition

    @property
    def sufficient_stat(self):
        """
        Sufficient statistics of the exponential family (sorted).
        """
        return self._x

    @property
    def weights(self):
        """
        Normalized weights (pmf) of the exponential family.
        """
        return self._w
    def pdf(self, theta):
        r"""
        Density of $P_{\theta}$ with respect to the reference measure,
        evaluated at every support point.

        Parameters
        ----------
        theta : float
            Natural parameter.

        Returns
        -------
        pdf : np.float
            Array of probabilities, one per support point.
        """
        self.theta = theta # compute partition if necessary
        return self._pdf
    def cdf(self, theta, x=None, gamma=1):
        r"""
        The cumulative distribution function of $P_{\theta}$ with
        weight `gamma` at `x`

        .. math::

            P_{\theta}(X < x) + \gamma * P_{\theta}(X = x)

        Parameters
        ----------
        theta : float
            Natural parameter.
        x : float (optional)
            Where to evaluate CDF. If None, the CDF is returned at every
            support point.
        gamma : float (optional)
            Weight given to the atom at `x`.

        Returns
        -------
        cdf : np.float
        """
        pdf = self.pdf(theta)
        if x is None:
            # vectorized CDF over the whole support; the (1 - gamma) term
            # downweights the atom at each point itself
            return np.cumsum(pdf) - pdf * (1 - gamma)
        else:
            tr = np.sum(pdf * (self.sufficient_stat < x))
            if x in self.sufficient_stat:
                tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
            return tr
def ccdf(self, theta, x=None, gamma=0, return_unnorm=False):
    r"""
    The complementary cumulative distribution function
    (i.e. survival function) of $P_{\theta}$ with
    weight `gamma` at `x`

    .. math::
        P_{\theta}(X > x) + \gamma * P_{\theta}(X = x)

    Parameters
    ----------
    theta : float
        Natural parameter.
    x : float (optional)
        Where to evaluate CCDF.  If None, the CCDF is returned at every
        support point (vectorized).
    gamma : float(optional)
        Weight given at `x`.
    return_unnorm : bool (optional)
        NOTE(review): accepted but never used in the body -- confirm
        whether an unnormalized tail sum was intended.

    Returns
    -------
    ccdf : np.float
    """
    pdf = self.pdf(theta)
    if x is None:
        # Reversed cumsum gives P(X >= x_j); subtracting (1-gamma)*pmf
        # leaves weight gamma on the atom at each x_j.
        return np.cumsum(pdf[::-1])[::-1] - pdf * (1 - gamma)
    else:
        # Scalar: strict upper tail plus gamma times the atom at x.
        tr = np.sum(pdf * (self.sufficient_stat > x))
        if x in self.sufficient_stat:
            tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
        return tr
def E(self, theta, func):
    r"""
    Expectation of `func` under $P_{\theta}$.

    Parameters
    ----------
    theta : float
        Natural parameter.
    func : callable
        Assumed to be vectorized.

    Returns
    -------
    E : np.float
    """
    values = func(self.sufficient_stat)
    return np.sum(values * self.pdf(theta))
def Var(self, theta, func):
    r"""
    Variance of `func` under $P_{\theta}$.

    Computed as the expectation of the squared centered values, which is
    more stable than E[f^2] - E[f]^2.

    Parameters
    ----------
    theta : float
        Natural parameter.
    func : callable
        Assumed to be vectorized.

    Returns
    -------
    var : np.float
    """
    mean = self.E(theta, func)
    centered_sq = lambda x: (func(x) - mean) ** 2
    return self.E(theta, centered_sq)
def Cov(self, theta, func1, func2):
    r"""
    Covariance of `func1` and `func2` under $P_{\theta}$.

    Parameters
    ----------
    theta : float
        Natural parameter.
    func1, func2 : callable
        Assumed to be vectorized.

    Returns
    -------
    cov : np.float
    """
    mean1 = self.E(theta, func1)
    mean2 = self.E(theta, func2)
    cross = lambda x: (func1(x) - mean1) * (func2(x) - mean2)
    return self.E(theta, cross)
def two_sided_acceptance(self, theta, alpha=0.05, tol=1e-6):
    r"""
    Compute cutoffs of UMPU two-sided test.

    Parameters
    ----------
    theta : float
        Natural parameter.
    alpha : float (optional)
        Size of two-sided test.
    tol : float
        Tolerance for root-finding.

    Returns
    -------
    left_cut : (float, float)
        Boundary and randomization weight for left endpoint.
    right_cut : (float, float)
        Boundary and randomization weight for right endpoint.
    """
    # Bug fix: the previous version only computed the cuts when
    # `theta != self._theta`.  That (a) raised AttributeError when theta
    # already equalled self._theta (e.g. after a prior call to pdf(theta))
    # before any cuts existed, and (b) silently returned stale cuts when
    # called again with the same theta but a different alpha.  Always
    # recompute; the cuts are still cached on self for inspection.
    CL = np.max([x for x in self.sufficient_stat if self._critCovFromLeft(theta, (x, 0), alpha) >= 0])
    gammaL = find_root(lambda x: self._critCovFromLeft(theta, (CL, x), alpha), 0., 0., 1., tol)
    CR, gammaR = self._rightCutFromLeft(theta, (CL, gammaL), alpha)
    self._left_cut, self._right_cut = (CL, gammaL), (CR, gammaR)
    return self._left_cut, self._right_cut
def two_sided_test(self, theta0, observed, alpha=0.05, randomize=True, auxVar=None):
    r"""
    Perform UMPU two-sided test.

    Parameters
    ----------
    theta0 : float
        Natural parameter under null hypothesis.
    observed : float
        Observed sufficient statistic.
    alpha : float (optional)
        Size of two-sided test.
    randomize : bool
        Perform the randomized test (or conservative test).
    auxVar : [None, float]
        If randomizing and not None, use this as the random uniform variate.

    Returns
    -------
    decision : np.bool
        Is the null hypothesis $H_0:\theta=\theta_0$ rejected?

    Notes
    -----
    An auxiliary uniform variable carries out the randomized test; larger
    auxVar treats the observation as slightly "larger".  With
    randomize=False the conservative (non-randomized) test is performed.
    """
    if randomize:
        if auxVar is None:
            auxVar = np.random.random()
        rejects_left = self._test2RejectsLeft(theta0, observed, alpha, auxVar)
        rejects_right = self._test2RejectsRight(theta0, observed, alpha, auxVar)
    else:
        rejects_left = self._test2RejectsLeft(theta0, observed, alpha)
        rejects_right = self._test2RejectsRight(theta0, observed, alpha)
    return rejects_left or rejects_right
def one_sided_test(self, theta0, observed, alternative='greater', alpha=0.05, randomize=True, auxVar=None):
    r"""
    Perform UMPU one-sided test.

    Parameters
    ----------
    theta0 : float
        Natural parameter under null hypothesis.
    observed : float
        Observed sufficient statistic.
    alternative : str
        One of ['greater', 'less'].
    alpha : float (optional)
        Size of the one-sided test.
    randomize : bool
        Perform the randomized test (or conservative test).
    auxVar : [None, float]
        If randomizing and not None, use this as the random uniform variate.

    Returns
    -------
    decision : np.bool
        Is the null hypothesis $H_0:\theta=\theta_0$ rejected?

    Notes
    -----
    An auxiliary uniform variable carries out the randomized test; larger
    auxVar treats the observation as slightly "larger".  With
    randomize=False the conservative (non-randomized) test is performed.
    """
    if alternative not in ['greater', 'less']:
        raise ValueError('alternative must be one of ["greater", "less"]')
    self.theta = theta0
    if randomize:
        if auxVar is None:
            auxVar = np.random.random()
        if alternative == 'greater':
            pval = self.ccdf(theta0, observed, gamma=auxVar)
        else:
            pval = self.cdf(theta0, observed, gamma=auxVar)
    else:
        # Non-randomized: rely on the conservative default gammas
        # (ccdf: gamma=0, cdf: gamma=1).
        if alternative == 'greater':
            pval = self.ccdf(theta0, observed)
        else:
            pval = self.cdf(theta0, observed)
    return pval < alpha
def interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
"""
Form UMAU confidence interval.
Parameters
----------
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
lower, upper : float
Limits of confidence interval.
"""
if randomize:
if auxVar is None:
auxVar = np.random.random()
upper = self._inter2Upper(observed, auxVar, alpha, tol)
lower = self._inter2Lower(observed, auxVar, alpha, tol)
else:
upper = self._inter2Upper(observed, 1., alpha, tol)
lower = self._inter2Lower(observed, 0., alpha, tol)
return lower, upper
def equal_tailed_interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
    """
    Form interval by inverting
    equal-tailed test with $\\alpha/2$ in each tail.

    Parameters
    ----------
    observed : float
        Observed sufficient statistic.
    alpha : float (optional)
        Size of two-sided test.
    randomize : bool
        Accepted for API compatibility; this non-randomized inversion
        does not use it.
    auxVar : [None, float]
        Accepted for API compatibility; unused here.
    tol : float
        Tolerance for root-finding.

    Returns
    -------
    lower, upper : float
        Limits of confidence interval.
    """
    # Use the first two moments at the current theta to bracket the search.
    # NOTE(review): `self.theta` may still be NaN if no theta has ever been
    # set on this family -- confirm callers set a reference value first.
    mu = self.E(self.theta, lambda x: x)
    sigma = np.sqrt(self.Var(self.theta, lambda x: x))
    lb = mu - 20 * sigma
    ub = mu + 20 * sigma
    F = lambda th: self.cdf(th, observed)
    # Bug fix: `tol` was accepted but never forwarded to find_root.
    L = find_root(F, 1.0 - 0.5 * alpha, lb, ub, tol)
    U = find_root(F, 0.5 * alpha, lb, ub, tol)
    return L, U
def equal_tailed_test(self, theta0, observed, alpha=0.05):
    r"""
    Perform a (non-randomized) equal-tailed two-sided test using the
    mid-p value (gamma=0.5).

    Parameters
    ----------
    theta0 : float
        Natural parameter under null hypothesis.
    observed : float
        Observed sufficient statistic.
    alpha : float (optional)
        Nominal size of the test.

    Returns
    -------
    decision : np.bool
        Is the null hypothesis $H_0:\theta=\theta_0$ rejected?

    Notes
    -----
    NOTE(review): rejecting when min(pval, 1-pval) < alpha places alpha in
    *each* tail (overall size roughly 2*alpha); for an overall size-alpha
    test the threshold would be 0.5*alpha.  Confirm the intended
    convention before relying on the stated size.
    """
    pval = self.cdf(theta0, observed, gamma=0.5)
    return min(pval, 1-pval) < alpha
def one_sided_acceptance(self, theta,
                         alpha=0.05,
                         alternative='greater',
                         tol=1e-6):
    r"""
    Compute the acceptance region cutoffs of UMPU one-sided test.

    TODO: Include randomization?

    Parameters
    ----------
    theta : float
        Natural parameter.
    alpha : float (optional)
        Size of the one-sided test.
    alternative : str
        One of ['greater', 'less'].
    tol : float
        Tolerance for root-finding (currently unused here).

    Returns
    -------
    acceptance : (float, float)
        Lower and upper endpoints of the acceptance region.
    """
    if alternative == 'greater':
        # Right-tailed test: reject large values whose mid-p survival
        # function drops to alpha or below.
        F = self.ccdf(theta, gamma=0.5)
        cutoff = np.min(self.sufficient_stat[F <= alpha])
        acceptance = (-np.inf, cutoff)
    elif alternative == 'less':
        # Bug fix: the left-tailed test must threshold the CDF (lower
        # tail).  The previous code reused the survival function (ccdf),
        # which selects the upper tail and yields a wrong cutoff.
        F = self.cdf(theta, gamma=0.5)
        cutoff = np.max(self.sufficient_stat[F <= alpha])
        acceptance = (cutoff, np.inf)
    else:
        raise ValueError("alternative should be one of ['greater', 'less']")
    return acceptance
def equal_tailed_acceptance(self, theta0, alpha=0.05):
    r"""
    Compute the acceptance region cutoffs of the
    equal-tailed test (without randomization), so the
    size may not be exactly $\alpha$.

    Parameters
    ----------
    theta0 : float
        Natural parameter under null hypothesis.
    alpha : float (optional)
        Size of two-sided test.

    Returns
    -------
    lower_cut, upper_cut : float
        Left and right boundaries of the acceptance region.
    """
    F = self.cdf(theta0, gamma=0.5)
    lower_cut = np.max(self.sufficient_stat[F <= 0.5 * alpha])
    upper_cut = np.min(self.sufficient_stat[F >= 1 - 0.5 * alpha])
    return lower_cut, upper_cut
# Private methods
def _rightCutFromLeft(self, theta, leftCut, alpha=0.05):
    """
    Given C1, gamma1, choose C2, gamma2 to make E(phi(X)) = alpha
    """
    C1, gamma1 = leftCut
    # Level already spent in the left tail by (C1, gamma1).
    alpha1 = self.cdf(theta, C1, gamma1)
    if alpha1 >= alpha:
        # Left tail alone exhausts the level: no finite right cutoff.
        return (np.inf, 1)
    else:
        alpha2 = alpha - alpha1
        # Strict upper-tail probabilities at every support point.
        P = self.ccdf(theta, gamma=0)
        # Smallest support point whose strict upper tail is below alpha2.
        idx = np.nonzero(P < alpha2)[0].min()
        cut = self.sufficient_stat[idx]
        # Tilted pmf mass at the cutoff point.
        pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
        ccdf_term = P[idx]
        # Randomization weight that spends exactly the remaining level.
        gamma2 = (alpha2 - ccdf_term) / pdf_term
        return (cut, gamma2)
def _leftCutFromRight(self, theta, rightCut, alpha=0.05):
    """
    Given C2, gamma2, choose C1, gamma1 to make E(phi(X)) = alpha
    """
    C2, gamma2 = rightCut
    # Level already spent in the right tail by (C2, gamma2).
    alpha2 = self.ccdf(theta, C2, gamma2)
    if alpha2 >= alpha:
        # Right tail alone exhausts the level: no finite left cutoff.
        return (-np.inf, 1)
    else:
        alpha1 = alpha - alpha2
        # Strict lower-tail probabilities at every support point.
        P = self.cdf(theta, gamma=0)
        # Largest support point whose strict lower tail is below alpha1.
        idx = np.nonzero(P < alpha1)[0].max()
        cut = self.sufficient_stat[idx]
        cdf_term = P[idx]
        # Tilted pmf mass at the cutoff point.
        pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
        # Randomization weight that spends exactly the remaining level.
        gamma1 = (alpha1 - cdf_term) / pdf_term
        return (cut, gamma1)
def _critCovFromLeft(self, theta, leftCut, alpha=0.05):
    """
    Covariance of X with phi(X), where phi(X) is the level-alpha test
    with left cutoff (C1, gamma1).  Returns -inf when no finite right
    cutoff exists.
    """
    C1, gamma1 = leftCut
    C2, gamma2 = self._rightCutFromLeft(theta, leftCut, alpha)
    if C2 == np.inf:
        return -np.inf
    test_fn = lambda x: crit_func(x, (C1, gamma1), (C2, gamma2))
    return self.Cov(theta, lambda x: x, test_fn)
def _critCovFromRight(self, theta, rightCut, alpha=0.05):
    """
    Covariance of X with phi(X), where phi(X) is the level-alpha test
    with right cutoff (C2, gamma2).  Returns +inf when no finite left
    cutoff exists.
    """
    C2, gamma2 = rightCut
    C1, gamma1 = self._leftCutFromRight(theta, rightCut, alpha)
    if C1 == -np.inf:
        return np.inf
    test_fn = lambda x: crit_func(x, (C1, gamma1), (C2, gamma2))
    return self.Cov(theta, lambda x: x, test_fn)
def _test2RejectsLeft(self, theta, observed, alpha=0.05, auxVar=1.):
    """
    Return True if `observed` falls in the left lobe of the UMPU
    two-sided rejection region.

    The auxiliary uniform variate carries out the randomized test:
    larger auxVar treats the observation as "larger", hence LESS likely
    to reject on the left.  auxVar = 1 is the conservative choice.
    """
    crit_cov = self._critCovFromLeft(theta, (observed, auxVar), alpha)
    return crit_cov > 0
def _test2RejectsRight(self, theta, observed, alpha=0.05, auxVar=0.):
    """
    Return True if `observed` falls in the right lobe of the UMPU
    two-sided rejection region.

    The auxiliary uniform variate carries out the randomized test:
    larger auxVar treats the observation as "larger", hence MORE likely
    to reject on the right.  auxVar = 0 is the conservative choice.
    """
    crit_cov = self._critCovFromRight(theta, (observed, 1. - auxVar), alpha)
    return crit_cov < 0
def _inter2Upper(self, observed, auxVar, alpha=0.05, tol=1e-6):
    """
    upper bound of two-sided umpu interval
    """
    if observed < self.sufficient_stat[0] or (observed == self.sufficient_stat[0] and auxVar <= alpha):
        return -np.inf # observed, auxVar too small, every test rejects left
    if observed > self.sufficient_stat[self.n - 2] or (observed == self.sufficient_stat[self.n - 2] and auxVar == 1.):
        return np.inf # observed, auxVar too large, no test rejects left
    # Root of the reject-left indicator in theta.
    # NOTE(review): the bracket [-1, 1] assumes the family is scaled so the
    # relevant theta lies in that range -- confirm against callers.
    return find_root(lambda theta: -1*self._test2RejectsLeft(theta, observed, alpha, auxVar), -0.5, -1., 1., tol)
def _inter2Lower(self, observed, auxVar, alpha=0.05, tol=1e-6):
    """
    lower bound of two-sided umpu interval
    """
    if observed > self.sufficient_stat[self.n-1] or (observed == self.sufficient_stat[self.n-1] and auxVar >= 1.-alpha):
        return np.inf # observed, auxVar too large, every test rejects right
    if observed < self.sufficient_stat[1] or (observed == self.sufficient_stat[1] and auxVar == 0.):
        return -np.inf # observed, auxVar too small, no test rejects right
    # Root of the reject-right indicator in theta.
    # NOTE(review): the bracket [-1, 1] assumes the family is scaled so the
    # relevant theta lies in that range -- confirm against callers.
    return find_root(lambda theta: 1.*self._test2RejectsRight(theta, observed, alpha, auxVar), 0.5, -1., 1., tol)
| StarcoderdataPython |
1754955 | <reponame>DewMaple/opencv-learning<gh_stars>0
import argparse
import cv2
import imutils
from utils import find_image
class Stitcher:
    # Work-in-progress image stitcher: currently only extracts SIFT
    # keypoints/descriptors from both images and prints the keypoints.
    def stitch(self, images, key_points):
        # Load the two image paths given in `images` and detect SIFT features.
        # NOTE(review): `key_points` is accepted but unused, and no feature
        # matching / homography / warping is performed yet -- presumably TODO.
        image_1 = cv2.imread(images[0])
        image_2 = cv2.imread(images[1])
        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(image_1, None)
        kp2, des2 = sift.detectAndCompute(image_2, None)
        print(kp1)
        print(kp2)
def resize(image, width):
    """Resize `image` to the given width, preserving its aspect ratio."""
    h, w = image.shape[:2]
    scale = width / float(w)
    new_size = (width, int(h * scale))
    return cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
if __name__ == '__main__':
    # CLI argument parsing kept for reference; image paths are hard-coded below.
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-f", "--first", help="path to the first image")
    # ap.add_argument("-s", "--second", help="path to the second image")
    #
    # args = vars(ap.parse_args())
    args = {"first": find_image('mpv-shot0001.jpg'), 'second': find_image('mpv-shot0002.jpg')}
    print(args)
    imageA = cv2.imread(args["first"])
    imageB = cv2.imread(args["second"])
    # Normalize both images to the same width, then tilt the second one.
    imageA = resize(imageA, width=800)
    imageB = resize(imageB, width=800)
    imageB = imutils.rotate(imageB, -12)
    # show the images
    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    # cv2.imshow("Keypoint Matches", vis)
    # cv2.imshow("Result", result)
    cv2.waitKey(0)
| StarcoderdataPython |
151440 | # Required for Python to search this directory for module files
# We only export public API here.
from .commitmessage import CommitMessage
from .detection import find_checkout_root, default_scm, detect_scm_system
from .git import Git, AmbiguousCommitError
from .scm import SCM, AuthenticationError, CheckoutNeedsUpdate
from .svn import SVN
| StarcoderdataPython |
4807649 | """ ShowOspfv3SummaryPrefix.py
IOSXE parser for the following show command:
* show ospfv3 summary-prefix
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Or, Optional, Use, Default
# ===============================================
# Schema for 'show ospfv3 summary-prefix'
# Optional: allowing either ipv4 or ipv6 or both
# ===============================================
class ShowOspfv3SummaryPrefixSchema(MetaParser):
    # Expected parsed structure, keyed by OSPFv3 process id.  `Any()` keys
    # are the summary/null-route prefixes found in the device output.
    schema = {
        'process_id': {
            Any(): {
                'address_family': str,
                'router_id': str,
                'null_route': {
                    Any(): {
                        'null_metric': str,
                    },
                },
                'summary': {
                    Any(): {
                        'sum_type': str,
                        'sum_tag': int,
                        'sum_metric': int
                    },
                },
            },
        },
    }
# ====================================
# Parser for 'ShowOspfv3SummaryPrefix'
# ====================================
class ShowOspfv3SummaryPrefix(ShowOspfv3SummaryPrefixSchema):
    """
    Parser for 'show ospfv3 summary-prefix'.

    Sample device output:
    Router#sh ospfv3 summary-prefix
    OSPFv3 10000 address-family ipv6 (router-id 10.2.2.21)
    10:2::/96 Metric <unreachable>
    10:2:2::/96 Metric 111, External metric type 2, Tag 111
    Router#
    """
    cli_command = 'show ospfv3 summary-prefix'

    def cli(self, output=None):
        # Execute the command on the device unless raw output was supplied.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output
        # init var
        ret_dict = {}
        ospf_id = ""
        # OSPFv3 10000 address-family ipv6 (router-id 10.2.2.21)
        p1 = re.compile(
            r'^OSPFv3 +(?P<ospf_id>(\d+)) +address-family +(?P<address_family>(\S+)) +\(router-id +(?P<router_id>(\S+))\)')
        # Null-route line, e.g.: 10:2::/96 Metric <unreachable>
        p2 = re.compile(r'^(?P<null_prefix>(\S+)) +.* Metric\s+(?P<null_metric>(\S+$))')
        # Summary line, e.g.: 10:2:2::/96 Metric 111, External metric type 2, Tag 111
        p3 = re.compile(
            r'^(?P<sum_prefix>(\S+)) +.* Metric\s+(?P<sum_metric>(\d+)),.* +type +(?P<sum_type>(\d)),\s+Tag +(?P<sum_tag>(\S+))')
        for line in out.splitlines():
            line = line.strip()
            # Process header: open a new per-process sub-dict.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                ret_dict['process_id'] = {}
                ospf_id = group['ospf_id']
                ret_dict['process_id'][ospf_id] = {}
                ret_dict['process_id'][ospf_id]['null_route'] = {}
                ret_dict['process_id'][ospf_id]['summary'] = {}
                ret_dict['process_id'][ospf_id]['address_family'] = group['address_family']
                ret_dict['process_id'][ospf_id]['router_id'] = group['router_id']
                continue
            # Null-route prefix (matched before the stricter summary regex).
            m = p2.match(line)
            if m:
                group = m.groupdict()
                if group['null_prefix']:
                    n_prefix = group['null_prefix']
                    ret_dict['process_id'][ospf_id]['null_route'][n_prefix] = {}
                    ret_dict['process_id'][ospf_id]['null_route'][n_prefix]['null_metric'] = group['null_metric']
                continue
            # Summary prefix with metric, external type and tag.
            m = p3.match(line)
            if m:
                group = m.groupdict()
                if group['sum_prefix']:
                    prefix = group['sum_prefix']
                    ret_dict['process_id'][ospf_id]['summary'][prefix] = {}
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_metric'] = int(group['sum_metric'])
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_type'] = group['sum_type']
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_tag'] = int(group['sum_tag'])
                continue
        return ret_dict
| StarcoderdataPython |
5379 | <filename>leaderboard-server/leaderboard-server.py
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import simplejson as json
from leaderboard.leaderboard import Leaderboard
import uwsgidecorators
import signalfx
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)
# Redis-backed leaderboard for the "starship" game.
highscore_lb_starship = Leaderboard('highscores-starship',host='redis-instance')
# SignalFx ingest client pointed at the local OpenTelemetry collector.
sfx = signalfx.SignalFx(ingest_endpoint='http://otelcol:9943').ingest('token-at-collector')
def parseData(row):
    """Build a SignalFx counter datapoint from `row` and send it.

    `row` must contain 'ip' (used as a dimension) and 'shots' (the value).
    """
    counter = {
        "dimensions": {"ip": row["ip"]},  # dimension
        "metric": "starship.shots",
        "value": row["shots"],
    }
    counters = [counter]
    print('Sending data:', counters)
    sfx.send(counters=counters)
@app.route('/health')
def health():
    """Liveness probe endpoint."""
    return '{"status":"OK"}', 200
@app.route('/leaders/<game>')
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def returnLeaders(game):
    """Return the full leaderboard for `game` (empty object for unknown games)."""
    if game != "starship":
        return '{}', 200
    return json.dumps(highscore_lb_starship.all_leaders()), 200
@app.route('/submitScores', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitScores():
    """Record a score submission for the "starship" leaderboard.

    Expects a JSON body with "game", "aduser" and "score".  Submissions
    for other games are accepted and ignored.  Returns HTTP 400 for
    malformed payloads instead of crashing with a KeyError (HTTP 500).
    """
    content = request.get_json(force=True)
    print('Content:', content)
    if not isinstance(content, dict):
        return '{"status":"BAD_REQUEST"}', 400
    if content.get("game") == "starship":
        # Bug fix: a missing "aduser"/"score" previously raised KeyError.
        if "aduser" not in content or "score" not in content:
            return '{"status":"BAD_REQUEST"}', 400
        highscore_lb_starship.rank_member(content["aduser"], content["score"])
    return '{"status":"OK"}', 200
@app.route("/get_my_ip", methods=["GET"])
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
def get_my_ip():
if 'X-Real-Ip' in request.headers:
return jsonify({'ip':request.headers['X-Real-Ip']}), 200
else:
return jsonify({'ip':'-'}), 200
#return json.dumps({k:v for k, v in request.headers.items()}), 200
@app.route('/submitShots', methods=['POST'])
@cross_origin(origin='localhost',headers=['Content-Type','application/json'])
def submitShots():
    # Accepts a JSON body with "game" and optionally "shots"; for the
    # "starship" game, forwards a shots counter (tagged with the client IP)
    # to SignalFx via parseData().
    content = request.get_json(force=True)
    print('Content:', content)
    shotSubmission = {}
    totalShots = 0
    if "game" in content:
        if content["game"] == "starship":
            # Missing "shots" defaults to 0 (still reported).
            if "shots" in content:
                totalShots = content["shots"]
            shotSubmission["shots"] = totalShots
            # Client IP as seen behind the reverse proxy, '-' if absent.
            if 'X-Real-Ip' in request.headers:
                shotSubmission["ip"] = request.headers['X-Real-Ip']
            else:
                shotSubmission["ip"] = "-"
            parseData(shotSubmission)
    return '{"status":"OK"}', 200
if __name__ == '__main__':
    # Bind to all interfaces so the containerized service is reachable.
    app.run(host='0.0.0.0', port=6001)
| StarcoderdataPython |
3287685 | <filename>Warmup-1/missing_char.py
# MISSING_CHAR
def missing_char(str, n):
    """Return a copy of `str` with the character at index `n` removed."""
    front = str[:n]
    back = str[n + 1:]
    return front + back
class Solution:
    def rob(self, num):
        """House-robber DP: maximum sum of non-adjacent elements of `num`.

        Tracks two running totals instead of materializing the whole DP
        table: `skip` (best if the previous house was not robbed) and
        `take` (best if it was).
        """
        skip = take = 0
        for value in num:
            skip, take = max(skip, take), skip + value
        return max(skip, take)
1777671 | <filename>attacks/methods/refinement_tricks.py<gh_stars>10-100
"""
- UNUSED in current submission -
Various ideas for refining an adversarial example by modifying the pixels in certain patterns.
NOTE:
- BiasedBoundaryAttack contains these patterns, and supersedes this implementation.
TODO: Add Salt&pepper noise, or gauss, to the jitter pattern
TODO: Maybe there are other mods we can try - contrast/brightness adaptation?
"""
import numpy as np
import foolbox
from utils.util import eval_distance, eval_distance_rounded, FoolboxL2Distance
from attacks.methods import mod_boundary_attack
def refinement_loop(remote_model, X_orig, X_aex, label, is_targeted, stop_early_diff=0., sample_gen=None):
    # Runs a couple of refinements in a loop: jitter pass, per-pixel (L0)
    # pass, then a short boundary-attack pass, stopping when the model-call
    # budget (~900 calls) is exhausted or gains become negligible.
    # Returns a ROUNDED image and dist!
    assert hasattr(remote_model, "adv_get_n_calls"), "Please wrap the model in a RemoteModelWrapper."
    dist_orig = eval_distance_rounded(X_aex, X_orig)
    dist_pre = dist_orig
    print("At {:.2f}. Starting final reduction...".format(dist_pre))
    for i in range(5): # TODO: repeat until call / time limit
        print("Reduction pass {}...".format(i + 1))
        print("Jittered L_inf pass:")
        n_calls_start = remote_model.adv_get_n_calls()
        # Only the first pass runs in "optimistic" mode (larger steps).
        X_aex = refine_jitter(remote_model, X_orig=X_orig, X_aex=X_aex, label=label, is_targeted=is_targeted, stop_early_diff=0.005, optimistic=(i == 0))
        n_calls_current = remote_model.adv_get_n_calls()
        print("Used {} calls.".format(n_calls_current - n_calls_start))
        if n_calls_current > 900:
            break
        print("L0 pass:")
        n_calls_start = n_calls_current
        X_aex = refine_pixels(remote_model, X_orig=X_orig, X_aex=X_aex, label=label, is_targeted=is_targeted, stop_early_diff=0.005, optimistic=(i == 0))
        n_calls_current = remote_model.adv_get_n_calls()
        print("Used {} calls.".format(n_calls_current - n_calls_start))
        if n_calls_current > 900:
            break
        # If the first passes weren't successful, another boundary attack won't do anything.
        dist_temp = eval_distance_rounded(X_aex, X_orig)
        if abs(dist_temp - dist_pre) < 0.0001:
            break
        print("Boundary pass:")
        n_calls_start = n_calls_current
        X_aex = refine_with_boundary_attack(remote_model, X_orig, X_aex, label, is_targeted=is_targeted,
                                            iterations=4, step=0.005, sample_gen=sample_gen)
        n_calls_current = remote_model.adv_get_n_calls()
        print("Used {} calls.".format(n_calls_current - n_calls_start))
        dist_current = eval_distance_rounded(X_aex, X_orig)
        print("Reduced by {:.2f} to {:.2f} ({:.1%}).".format(dist_pre - dist_current, dist_current, 1. - dist_current / dist_pre))
        if abs(dist_pre - dist_current) < stop_early_diff:
            break
        if n_calls_current > 900:
            break
        dist_pre = dist_current
    print("Total reduction by {:.2f} to {:.2f} ({:.1%}).".format(dist_orig - dist_pre, dist_pre, 1. - dist_pre / dist_orig))
    return np.clip(np.around(X_aex), 0, 255), dist_pre
def refine_pixels(remote_model, X_orig, X_aex, label, is_targeted, optimistic=True, stop_early_diff=0.):
    # Try to refine even further by modifying single pixels
    # Returns an UNROUNDED image!
    # Ideas (translated from German):
    # - Replace entire pixels.
    # - Replace brightness only (keep the color; the model seems generally salient to it -- investigate?)
    # - Modify individual regions
    X_aex = np.float32(X_aex)
    X_orig = np.float32(X_orig)
    X_orig_norm = X_orig / 255.
    img_shape = X_orig.shape
    # vis_debug = False
    # if vis_debug:
    #     # DEBUG: Plot histogram of the diffs.
    #     diff_vec = np.reshape(X_aex - X_orig, -1)
    #     import matplotlib.pyplot as plt
    #     import seaborn as sns
    #     sns.distplot(diff_vec, kde=True)
    #     plt.show(block=True)
    # Sort indices of the pixels, descending by difference to original.
    i_highest_diffs = np.argsort(np.abs(X_aex - X_orig), axis=None)[::-1]
    X_candidate_best = X_aex
    dist_best = np.linalg.norm(X_candidate_best / 255. - X_orig_norm)
    n_px_to_change = 1
    n_tries = 100
    cur_ind = 0
    for i in range(n_tries):
        X_candidate = X_candidate_best.copy()
        # Try and replace n pixels at once.
        i_pxs = i_highest_diffs[cur_ind: cur_ind + n_px_to_change]
        for i_px in i_pxs:
            i_px = np.unravel_index(i_px, img_shape)
            X_candidate[i_px] = X_orig[i_px]
        # Abort early if we don't stand to gain much.
        dist_candidate = np.linalg.norm(np.float32(X_candidate) / 255. - X_orig_norm)
        if dist_best - dist_candidate < stop_early_diff:
            break
        pred_label = np.argmax(remote_model.predictions(np.uint8(np.clip(np.around(X_candidate), 0, 255))))
        if not (pred_label == label) == is_targeted:
            # If failed: advance by one. BUT if we tried multiple, then try again and reduce the size.
            if n_px_to_change == 1:
                cur_ind += 1
            else:
                n_px_to_change = 1
            continue
        else:
            # Success: advance by one. Also be optimistic and increase the "batch" size.
            cur_ind += n_px_to_change
            if optimistic:
                n_px_to_change += 1
        if dist_candidate < dist_best:
            print("New dist - unrounded: {:.2f}, reduced by {:.1%}".format(dist_candidate, 1. - dist_candidate/dist_best))
            dist_best = dist_candidate
            X_candidate_best = X_candidate
    return X_candidate_best
def refine_jitter(remote_model, X_orig, X_aex, label, is_targeted, optimistic=True, stop_early_diff=0.):
    # Picks pixels from a jitter pattern, and slowly blends them towards the original.
    # The rationale is: if we modify the image in a jitter pattern, it becomes very high-frequency,
    # and the model will probably filter most of our modification.
    # Returns an UNROUNDED image!
    X_aex = np.float32(X_aex)
    X_orig = np.float32(X_orig)
    X_orig_norm = X_orig / 255.
    img_shape = X_orig.shape
    # Try several grid spacings, carrying the best image forward each time.
    for jitter_width in [2, 7, 11, 19]:
        X_candidate_best = X_aex.copy()
        dist_best = np.linalg.norm(X_candidate_best / 255. - X_orig_norm)
        # Prepare a jitter mask with XOR (alternating). TODO: we could really improve this pattern. S&P noise, anyone?
        # NOTE(review): the mask is hard-coded to 64x64x3 images -- confirm input size.
        jitter_mask = np.empty((64, 64, 3), dtype=np.bool)
        for i in range(64):
            for j in range(64):
                jitter_mask[i, j, :] = (i % jitter_width == 0) ^ (j % jitter_width == 0)
        jitter_diff = np.zeros(img_shape, dtype=np.float32)
        jitter_diff[jitter_mask] = (X_candidate_best - X_orig)[jitter_mask]
        n_tries = 100
        # Optimistic mode starts with a doubled blend step.
        eps = 2. / n_tries if optimistic else 1. / n_tries
        for i in range(n_tries):
            X_candidate = X_candidate_best - eps * jitter_diff
            # Abort early if we don't stand to gain much.
            dist_candidate = np.linalg.norm(X_candidate / 255. - X_orig_norm)
            if dist_best - dist_candidate < stop_early_diff:
                break
            pred_label = np.argmax(remote_model.predictions(np.uint8(np.clip(np.around(X_candidate), 0, 255))))
            if not (pred_label == label) == is_targeted:
                # Failure: reduce eps.
                eps /= 2.
                continue
            # Success: grow the step and keep the candidate.
            eps *= 1.3
            print("New dist - unrounded: {:.2f}, reduced by {:.1%}, jitter_width={}".format(dist_candidate, 1. - dist_candidate/dist_best, jitter_width))
            dist_best = dist_candidate
            X_candidate_best = X_candidate
        # Continue next it (different jitter spacing) with the best from here.
        X_aex = X_candidate_best
    return X_aex
def refine_line_binary(remote_model, X_orig, X_aex, label, is_targeted, n_tries=8):
    # Does a binary line search from x_aex to the original image.
    # Returns a ROUNDED image!
    perturbation = X_aex - X_orig
    x_best = X_aex
    factor_best = 1.
    factor = .5
    for i in range(n_tries):
        # Blend towards the original by the current factor and test it.
        x_new = X_orig + factor * perturbation
        x_rounded = np.clip(np.round(x_new), 0, 255)
        pred_clsid = np.argmax(remote_model.predictions(x_rounded))
        if (pred_clsid == label) == is_targeted:
            # Still adversarial: keep it and search closer to the original.
            x_best = x_rounded # This time rounding shouldn't make a difference, as we're not iterating on this
            factor_best = factor
            factor /= 2.
        else:
            # Lost adversariality: bisect back towards the last good factor.
            factor = factor + (factor_best - factor) / 2.
    if factor_best < 1:
        dist = eval_distance_rounded(x_best, X_orig)
        print("New dist - unrounded: Managed to reduce dist to {:.2f}".format(dist))
    return x_best
def refine_with_boundary_attack(remote_model, X_orig, X_aex, label, is_targeted=False, iterations=8, step=2e-2, stop_early_diff=None,
                                sample_gen=None, normal_factor=0.0):
    # Uses the vanilla BoundaryAttack to refine an existing AEx.
    # NOTE(review): the original comment said "RETURNS ROUNDED IMG AND DIST",
    # but the function returns only the (rounded) image.
    print_details = False
    # Clip (for strictness), but don't round.
    X_aex = np.clip(np.float32(X_aex), 0, 255)
    dist_before = eval_distance(np.round(X_aex), X_orig)
    # RemoteModelWrapper should have cached the original prediction, so it's not expensive
    original_label = np.argmax(remote_model.predictions(X_orig)) if is_targeted else label
    attack = mod_boundary_attack.BoundaryAttack()
    criterion = foolbox.criteria.TargetClass(label) if is_targeted else foolbox.criteria.Misclassification()
    adv_obj = foolbox.adversarial.Adversarial(remote_model, criterion, original_image=X_orig, original_class=original_label, distance=FoolboxL2Distance)
    x_new = attack(adv_obj, iterations=iterations, spherical_step=2*step, source_step=step, step_adaptation=1.5, max_directions=5,
                   tune_batch_size=False, starting_point=X_aex, log_every_n_steps=1 if print_details else iterations,
                   stop_early_diff=stop_early_diff, sample_gen=sample_gen, normal_factor=normal_factor)
    if x_new is not None:
        # Only accept the result if it is still adversarial after rounding
        # and actually closer to the original than the starting point.
        x_rounded = np.clip(np.round(x_new), 0, 255)
        dist_rounded = eval_distance(x_rounded, X_orig)
        if dist_rounded < dist_before:
            pred_clsid = np.argmax(remote_model.predictions(x_rounded))
            if (pred_clsid == label) == is_targeted:
                return x_rounded
    return X_aex
| StarcoderdataPython |
4827498 | <gh_stars>0
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import pandas as pd
import skimage.io as io
import numpy as np
import matplotlib.pyplot as plt
# Load data
class LandmarksDataset(Dataset):
    """Landmarks dataset."""
    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_metadata = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
        # Bidict from ids to labels to keep labels within [0, num_classes]
        self.id_to_label = dict()
        self.label_to_id = dict()
        self.num_classes = 0

    def __len__(self):
        return len(self.landmarks_metadata)

    def __getitem__(self, idx):
        # Image path layout: <root_dir>/<landmark_id>/<id>.jpg
        landmark_id = self.landmarks_metadata['landmark_id'][idx]
        id = self.landmarks_metadata['id'][idx]
        img_name = self.root_dir + str(landmark_id) + "/" + str(id) + ".jpg"
        image = io.imread(img_name)
        # If id is not seen, add to id2label bidict.
        # NOTE(review): labels are assigned lazily in access order, so the
        # id->label mapping depends on the order items are fetched -- confirm
        # this is acceptable for the training setup.
        if landmark_id not in self.id_to_label:
            self.id_to_label[landmark_id] = self.num_classes
            self.label_to_id[self.num_classes] = landmark_id
            self.num_classes += 1
        if self.transform:
            image = self.transform(image)
        sample = {'image': image, 'label': self.id_to_label[landmark_id]}
        return sample
class RandomDataset(Dataset):
    """Random dataset with input dimensions input_dims.

    Samples are drawn once from a standard normal; labels are either the
    supplied sequence (when useLabels is True) or random binary labels.
    """
    def __init__(self, num_samples=1, input_dims=1, useLabels=False, labels=None):
        """
        Args:
            num_samples (int): Number of random samples to generate.
            input_dims (int): Dimensionality of each sample.
            useLabels (bool): Use the supplied `labels` instead of random ones.
            labels (sequence, optional): Explicit labels, used when useLabels.
        """
        # Bug fix: `labels=[]` was a mutable default argument shared across
        # calls; use None as the sentinel instead.
        if labels is None:
            labels = []
        self.input_dims = input_dims
        # Initialize dataset
        self.dataset = np.random.normal(size=(num_samples, input_dims))
        # Initialize labels
        self.labels = labels if useLabels else np.random.randint(0, 2, size=num_samples)

    def __len__(self):
        return self.dataset.shape[0]

    def __getitem__(self, idx):
        return {'image': torch.FloatTensor(self.dataset[idx]),
                'label': torch.from_numpy(np.array(self.labels[idx])).float()}
### SIAMESE DATA SAMPLER ###
class SiameseDataset(Dataset):
    """Wraps a labeled dataset into (image0, image1, same/different) pairs
    for Siamese training."""
    def __init__(self, dataset):
        """
        Args:
            dataset: Any indexable dataset whose items are dicts with
                'image' and 'label' keys.
        """
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        # Get two items, with 50% chance similarity/dissimilarity.
        # NOTE(review): `idx` is ignored; pairs are sampled at random, and
        # after 10 failed draws the last (possibly mismatched) pair is kept.
        landmark_id0, landmark_id1 = None, None
        should_be_similar = np.random.randint(2)
        for i in range(10):
            idx0, idx1 = np.random.choice(len(self.dataset), size=2)
            landmark_id0 = self.dataset[idx0]['label']
            landmark_id1 = self.dataset[idx1]['label']
            if (should_be_similar and (landmark_id0 == landmark_id1)): break
            if (not should_be_similar and (landmark_id0 != landmark_id1)): break
        # Return sample; label is 1.0 for a dissimilar pair, 0.0 for similar.
        sample = {'image0': self.dataset[idx0]['image'], 'image1': self.dataset[idx1]['image'],
                  'label': torch.from_numpy(np.array(int(landmark_id0 != landmark_id1))).float()}
        return sample
if __name__ == "__main__":
landmark_dataset = LandmarksDataset(csv_file='small-dataset/small-dataset.csv',
root_dir='small-dataset/',
transform=transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.RandomCrop(244),
transforms.ToTensor()]))
print("Dataset size: " + str(len(landmark_dataset)))
print("Row 0: " + str(landmark_dataset[0]))
siamese_landmark_dataset = SiameseDataset(dataset=landmark_dataset)
sample = next(iter(siamese_landmark_dataset))
image0, image1, label = sample['image0'], sample['image1'], sample['label']
plt.imshow(image0.transpose(0, 2).transpose(0, 1))
plt.show()
plt.imshow(image1.transpose(0, 2).transpose(0, 1))
plt.show()
print(label)
print(landmark_dataset[0]['label'])
random_dataset = RandomDataset(input_dims=1)
print(random_dataset[0]['label'])
| StarcoderdataPython |
1609124 | <gh_stars>100-1000
#dummy layer for splitting the model into multiple evaluations
import numpy
import theano
import denet.common.logging as logging
from denet.layer import AbstractLayer
class SplitLayer(AbstractLayer):
type_name = "split"
def __init__(self, layers, json_param={}):
super().__init__(layer_index=len(layers))
self.enabled = json_param.get("enabled", True)
self.has_split = self.enabled
self.input = layers[-1].output
self.output_shape = self.input_shape = layers[-1].output_shape
if self.enabled:
self.output = theano.shared(numpy.zeros(self.output_shape).astype(theano.config.floatX), str(self) + " - output")
else:
self.output = self.input
logging.verbose("Adding", self, "layer - input:", self.input_shape, "enabled:", self.enabled)
#on forward split phase store input in output
def split_forward(self):
return [(self.output, self.input)]
#on backward split phase store gradient
def split_backward(self, cost, known_grads):
return [(self.output, theano.tensor.grad(cost, self.output, known_grads=known_grads))]
#known gradients after split_backward()
def split_known_grads(self):
return {self.input:self.output}
def export_json(self):
json = super().export_json()
json.update({"enabled": self.enabled})
return json
def parse_desc(layers, name, tags, params):
if name != "SPLIT":
return False
layers.append(SplitLayer(layers))
return True
| StarcoderdataPython |
27421 | from .. app.pyefi.ttyp import ttyP
ttyP(0, "0 - ttyP test")
ttyP(1, "1 - header")
ttyP(2, "2 - bold")
ttyP(3, "3 - okblue")
ttyP(4, "4 - okgreen")
ttyP(5, "5 - underline")
ttyP(6, "6 - warning")
ttyP(7, "7 - fail")
| StarcoderdataPython |
6638 | <filename>netbox/extras/forms.py
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect,
CommentField, ContentTypeMultipleChoiceField, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField,
JSONField, SlugField, StaticSelect2, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import ConfigContext, CustomField, ImageAttachment, JournalEntry, ObjectChange, Tag
from .utils import FeatureQuery
#
# Custom fields
#
class CustomFieldForm(forms.Form):
"""
Extend Form to include custom field support.
"""
model = None
def __init__(self, *args, **kwargs):
if self.model is None:
raise NotImplementedError("CustomFieldForm must specify a model class.")
self.custom_fields = []
super().__init__(*args, **kwargs)
# Append relevant custom fields to the form instance
obj_type = ContentType.objects.get_for_model(self.model)
for cf in CustomField.objects.filter(content_types=obj_type):
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field()
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
class CustomFieldModelForm(forms.ModelForm):
"""
Extend ModelForm to include custom field support.
"""
def __init__(self, *args, **kwargs):
self.obj_type = ContentType.objects.get_for_model(self._meta.model)
self.custom_fields = []
super().__init__(*args, **kwargs)
self._append_customfield_fields()
def _append_customfield_fields(self):
"""
Append form fields for all CustomFields assigned to this model.
"""
# Append form fields; assign initial values if modifying and existing object
for cf in CustomField.objects.filter(content_types=self.obj_type):
field_name = 'cf_{}'.format(cf.name)
if self.instance.pk:
self.fields[field_name] = cf.to_form_field(set_initial=False)
self.fields[field_name].initial = self.instance.custom_field_data.get(cf.name)
else:
self.fields[field_name] = cf.to_form_field()
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
def clean(self):
# Save custom field data on instance
for cf_name in self.custom_fields:
self.instance.custom_field_data[cf_name[3:]] = self.cleaned_data.get(cf_name)
return super().clean()
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
def _append_customfield_fields(self):
# Append form fields
for cf in CustomField.objects.filter(content_types=self.obj_type):
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field(for_csv_import=True)
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
class CustomFieldBulkEditForm(BulkEditForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.custom_fields = []
self.obj_type = ContentType.objects.get_for_model(self.model)
# Add all applicable CustomFields to the form
custom_fields = CustomField.objects.filter(content_types=self.obj_type)
for cf in custom_fields:
# Annotate non-required custom fields as nullable
if not cf.required:
self.nullable_fields.append(cf.name)
self.fields[cf.name] = cf.to_form_field(set_initial=False, enforce_required=False)
# Annotate this as a custom field
self.custom_fields.append(cf.name)
class CustomFieldFilterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.obj_type = ContentType.objects.get_for_model(self.model)
super().__init__(*args, **kwargs)
# Add all applicable CustomFields to the form
custom_fields = CustomField.objects.filter(content_types=self.obj_type).exclude(
filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
)
for cf in custom_fields:
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field(set_initial=True, enforce_required=False)
#
# Tags
#
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = [
'name', 'slug', 'color', 'description'
]
fieldsets = (
('Tag', ('name', 'slug', 'color', 'description')),
)
class TagCSVForm(CSVModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = Tag.csv_headers
help_texts = {
'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
}
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Add add/remove tags fields
self.fields['add_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
self.fields['remove_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
class TagFilterForm(BootstrapMixin, forms.Form):
model = Tag
q = forms.CharField(
required=False,
label=_('Search')
)
content_type_id = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.filter(FeatureQuery('tags').get_query()),
required=False,
label=_('Tagged object type')
)
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
widget=forms.MultipleHiddenInput
)
color = forms.CharField(
max_length=6,
required=False,
widget=ColorSelect()
)
description = forms.CharField(
max_length=200,
required=False
)
class Meta:
nullable_fields = ['description']
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
regions = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False
)
site_groups = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False
)
sites = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False
)
device_types = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False
)
roles = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False
)
platforms = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False
)
cluster_groups = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False
)
clusters = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False
)
tenant_groups = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
tenants = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False
)
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
data = JSONField(
label=''
)
class Meta:
model = ConfigContext
fields = (
'name', 'weight', 'description', 'is_active', 'regions', 'site_groups', 'sites', 'roles', 'device_types',
'platforms', 'cluster_groups', 'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
)
class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=ConfigContext.objects.all(),
widget=forms.MultipleHiddenInput
)
weight = forms.IntegerField(
required=False,
min_value=0
)
is_active = forms.NullBooleanField(
required=False,
widget=BulkEditNullBooleanSelect()
)
description = forms.CharField(
required=False,
max_length=100
)
class Meta:
nullable_fields = [
'description',
]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
field_order = [
'q', 'region_id', 'site_group_id', 'site_id', 'role_id', 'platform_id', 'cluster_group_id', 'cluster_id',
'tenant_group_id', 'tenant_id',
]
q = forms.CharField(
required=False,
label=_('Search')
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Regions')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site groups')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
label=_('Sites')
)
device_type_id = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False,
label=_('Device types')
)
role_id = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False,
label=_('Roles')
)
platform_id = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False,
label=_('Platforms')
)
cluster_group_id = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False,
label=_('Cluster groups')
)
cluster_id = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False,
label=_('Clusters')
)
tenant_group_id = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
label=_('Tenant groups')
)
tenant_id = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False,
label=_('Tenant')
)
tag = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
to_field_name='slug',
required=False,
label=_('Tags')
)
#
# Filter form for local config context data
#
class LocalConfigContextFilterForm(forms.Form):
local_context_data = forms.NullBooleanField(
required=False,
label=_('Has local config context data'),
widget=StaticSelect2(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
#
# Image attachments
#
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = ImageAttachment
fields = [
'name', 'image',
]
#
# Journal entries
#
class JournalEntryForm(BootstrapMixin, forms.ModelForm):
comments = CommentField()
class Meta:
model = JournalEntry
fields = ['assigned_object_type', 'assigned_object_id', 'kind', 'comments']
widgets = {
'assigned_object_type': forms.HiddenInput,
'assigned_object_id': forms.HiddenInput,
}
class JournalEntryBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=JournalEntry.objects.all(),
widget=forms.MultipleHiddenInput
)
kind = forms.ChoiceField(
choices=JournalEntryKindChoices,
required=False
)
comments = forms.CharField(
required=False,
widget=forms.Textarea()
)
class Meta:
nullable_fields = []
class JournalEntryFilterForm(BootstrapMixin, forms.Form):
model = JournalEntry
q = forms.CharField(
required=False,
label=_('Search')
)
created_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
created_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
created_by_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
assigned_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
kind = forms.ChoiceField(
choices=add_blank_choice(JournalEntryKindChoices),
required=False,
widget=StaticSelect2()
)
#
# Change logging
#
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
model = ObjectChange
q = forms.CharField(
required=False,
label=_('Search')
)
time_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
time_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
action = forms.ChoiceField(
choices=add_blank_choice(ObjectChangeActionChoices),
required=False,
widget=StaticSelect2()
)
user_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
changed_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
#
# Scripts
#
class ScriptForm(BootstrapMixin, forms.Form):
_commit = forms.BooleanField(
required=False,
initial=True,
label="Commit changes",
help_text="Commit changes to the database (uncheck for a dry-run)"
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Move _commit to the end of the form
commit = self.fields.pop('_commit')
self.fields['_commit'] = commit
@property
def requires_input(self):
"""
A boolean indicating whether the form requires user input (ignore the _commit field).
"""
return bool(len(self.fields) > 1)
| StarcoderdataPython |
139532 | import os
import tqdm
import torch
import numpy as np
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.decode_helper import extract_dets_from_outputs
from lib.helpers.decode_helper import decode_detections
class Tester(object):
def __init__(self, cfg, model, data_loader, logger):
self.cfg = cfg
self.model = model
self.data_loader = data_loader
self.logger = logger
self.class_name = data_loader.dataset.class_name
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if self.cfg.get('resume_model', None):
load_checkpoint(model = self.model,
optimizer = None,
filename = cfg['resume_model'],
logger = self.logger,
map_location=self.device)
self.model.to(self.device)
def test(self):
torch.set_grad_enabled(False)
self.model.eval()
results = {}
progress_bar = tqdm.tqdm(total=len(self.data_loader), leave=True, desc='Evaluation Progress')
for batch_idx, (inputs, calibs, coord_ranges, _, info) in enumerate(self.data_loader):
# load evaluation data and move data to current device.
inputs = inputs.to(self.device)
calibs = calibs.to(self.device)
coord_ranges = coord_ranges.to(self.device)
# the outputs of centernet
outputs = self.model(inputs,coord_ranges,calibs,K=50,mode='test')
dets = extract_dets_from_outputs(outputs=outputs, K=50)
dets = dets.detach().cpu().numpy()
# get corresponding calibs & transform tensor to numpy
calibs = [self.data_loader.dataset.get_calib(index) for index in info['img_id']]
info = {key: val.detach().cpu().numpy() for key, val in info.items()}
cls_mean_size = self.data_loader.dataset.cls_mean_size
dets = decode_detections(dets = dets,
info = info,
calibs = calibs,
cls_mean_size=cls_mean_size,
threshold = self.cfg['threshold'])
results.update(dets)
progress_bar.update()
# save the result for evaluation.
self.save_results(results)
progress_bar.close()
def save_results(self, results, output_dir='./outputs'):
output_dir = os.path.join(output_dir, 'data')
os.makedirs(output_dir, exist_ok=True)
for img_id in results.keys():
out_path = os.path.join(output_dir, '{:06d}.txt'.format(img_id))
f = open(out_path, 'w')
for i in range(len(results[img_id])):
class_name = self.class_name[int(results[img_id][i][0])]
f.write('{} 0.0 0'.format(class_name))
for j in range(1, len(results[img_id][i])):
f.write(' {:.2f}'.format(results[img_id][i][j]))
f.write('\n')
f.close()
| StarcoderdataPython |
3313860 | """Event topics."""
import enum
class RobotEventTopics(str, enum.Enum):
"""All robot-server event topics."""
HARDWARE_EVENTS = "hardware_events"
| StarcoderdataPython |
3253494 | from django.test import TestCase
class UserTests(TestCase):
# testing load of donate page
def test_donate_page_load(self):
response = self.client.get('/donate/')
self.assertEqual(response.status_code, 200)
# testing load of relevant templates to donate page
def test_donate_page_templates_load(self):
response = self.client.get('/donate/')
self.assertTemplateUsed(response, "donation/donation.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateUsed(response, "donation/includes/intro.html")
| StarcoderdataPython |
1783618 | <reponame>wotsushi/competitive-programming
from functools import reduce
L, A, B, M = map(int, input().split())
MOD = M
class ModInt:
def __init__(self, x):
self.x = x % MOD
def __str__(self):
return str(self.x)
__repr__ = __str__
def __add__(self, other):
return (
ModInt(self.x + other.x) if isinstance(other, ModInt) else
ModInt(self.x + other)
)
def __sub__(self, other):
return (
ModInt(self.x - other.x) if isinstance(other, ModInt) else
ModInt(self.x - other)
)
def __mul__(self, other):
return (
ModInt(self.x * other.x) if isinstance(other, ModInt) else
ModInt(self.x * other)
)
def __truediv__(self, other):
return (
ModInt(
self.x * pow(other.x, MOD - 2, MOD)
) if isinstance(other, ModInt) else
ModInt(self.x * pow(other, MOD - 2, MOD))
)
def __pow__(self, other):
return (
ModInt(
pow(self.x, other.x, MOD)
) if isinstance(other, ModInt) else
ModInt(pow(self.x, other, MOD))
)
def __radd__(self, other):
return ModInt(other + self.x)
def __rsub__(self, other):
return ModInt(other - self.x)
def __rmul__(self, other):
return ModInt(other * self.x)
def __rtruediv__(self, other):
return ModInt(other * pow(self.x, MOD - 2, MOD))
def __rpow__(self, other):
return ModInt(pow(other, self.x, MOD))
def iceil(a, b):
return (a + b - 1) // b
# 等差数列を桁数が同一の要素ごとに区切る
# 区切った等差数列の各要素について、それを末尾に追加する操作を線形変換とみなす
# 線形変換はダブリングで高速に計算する
T = {}
x = A
while x <= A + B * (L - 1):
k = len(str(x))
r = iceil(
min(
10**k,
A + B * (L - 1) + 1
) - x,
B
)
T[k] = r
x += r * B
def mut_mul(X, Y):
return [
[
sum(X[i][k] * Y[k][j] for k in range(len(Y)))
for j in range(len(Y[0]))
]
for i in range(len(X))
]
def mut_pow(X, n):
if n == 0:
return [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]
else:
Y = mut_pow(X, n // 2)
return (
mut_mul(Y, Y) if n % 2 == 0 else
mut_mul(mut_mul(Y, Y), X)
)
def f(p, q, k, r):
Y = mut_pow(
[
[ModInt(10**k), ModInt(1), ModInt(0)],
[ModInt(0), ModInt(1), ModInt(1)],
[ModInt(0), ModInt(0), ModInt(1)]
],
r
)
Z = mut_mul(
Y,
[
[p],
[q],
[ModInt(B)]
]
)
return (Z[0][0], Z[1][0])
ans, _ = reduce(
lambda acc, kr: f(acc[0], acc[1], kr[0], kr[1]),
sorted(T.items()),
(ModInt(0), ModInt(A))
)
print(ans)
| StarcoderdataPython |
1692821 | # pip3 install blynk-library-python
# sudo pip3 install adafruit-circuitpython-shtc3
#from __future__ import print_function
import BlynkLib
import time
#import busio
#import board
#import adafruit_shtc3
import RPi.GPIO as GPIO
#time.sleep(40)
BLYNK_AUTH = '<KEY>'
blynk = BlynkLib.Blynk(BLYNK_AUTH, server='blynk.honey.co.th', port=8080)
#i2c = busio.I2C(board.SCL, board.SDA)
#sht = adafruit_shtc3.SHTC3(i2c)
relay1 = 37
relay2 = 35
relay3 = 33
relay4 = 31
relay5 = 29
relay6 = 15
relay7 = 13
relay8 = 11
#statustimer = '0'
GPIO.setmode(GPIO.BOARD)
# GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(relay1, GPIO.OUT)
GPIO.setup(relay2, GPIO.OUT)
GPIO.setup(relay3, GPIO.OUT)
GPIO.setup(relay4, GPIO.OUT)
GPIO.setup(relay5, GPIO.OUT)
GPIO.setup(relay6, GPIO.OUT)
GPIO.setup(relay7, GPIO.OUT)
GPIO.setup(relay8, GPIO.OUT)
@blynk.on("connected")
def blynk_connected():
print("Updating values from the server...")
blynk.sync_virtual(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)
print("status OK")
# @blynk.on("V22")
# def v22_write_handler(value):
# #print('Current slider value: {}'.format(value[0]))
# statustimer = format(value[0])
# print(type(statustimer))
# print(statustimer)
#timer = x22
@blynk.on("V1")
def v1_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x1 = format(value[0])
if x1 == "1":
GPIO.output(relay1, GPIO.HIGH)
print("relay1-work")
else:
GPIO.output(relay1, GPIO.LOW)
print("relay1-not-work")
@blynk.on("V2")
def v2_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x2 = format(value[0])
if x2 == "1":
GPIO.output(relay2, GPIO.HIGH)
blynk.virtual_write(34, 255)
blynk.virtual_write(35, 0)
print("relay2-work")
else:
GPIO.output(relay2, GPIO.LOW)
blynk.virtual_write(34, 0)
blynk.virtual_write(35, 255)
print("relay2-not-work")
@blynk.on("V3")
def v3_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x3 = format(value[0])
if x3 == "1":
GPIO.output(relay3, GPIO.HIGH)
GPIO.output(relay1, GPIO.HIGH)
blynk.virtual_write(24, 255)
blynk.virtual_write(25, 0)
print("relay1-work")
else:
GPIO.output(relay3, GPIO.LOW)
GPIO.output(relay1, GPIO.LOW)
blynk.virtual_write(24, 0)
blynk.virtual_write(25, 255)
print("relay3-not-work")
@blynk.on("V4")
def v4_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x4 = format(value[0])
if x4 == "1":
GPIO.output(relay4, GPIO.HIGH)
GPIO.output(relay1, GPIO.HIGH)
blynk.virtual_write(26, 255)
blynk.virtual_write(27, 0)
print("relay4-work")
else:
GPIO.output(relay4, GPIO.LOW)
GPIO.output(relay1, GPIO.LOW)
blynk.virtual_write(26, 0)
blynk.virtual_write(27, 255)
print("relay4-not-work")
@blynk.on("V5")
def v5_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x5 = format(value[0])
if x5 == "1":
GPIO.output(relay5, GPIO.HIGH)
blynk.virtual_write(28, 255)
blynk.virtual_write(29, 0)
print("relay5-work")
else:
GPIO.output(relay5, GPIO.LOW)
blynk.virtual_write(28, 0)
blynk.virtual_write(29, 255)
print("relay5-not-work")
@blynk.on("V6")
def v6_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x6 = format(value[0])
if x6 == "1":
GPIO.output(relay6, GPIO.HIGH)
blynk.virtual_write(30, 255)
blynk.virtual_write(31, 0)
print("relay6-work")
else:
GPIO.output(relay6, GPIO.LOW)
blynk.virtual_write(30, 0)
blynk.virtual_write(31, 255)
print("relay6-not-work")
@blynk.on("V7")
def v7_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x7 = format(value[0])
if x7 == "1":
GPIO.output(relay7, GPIO.HIGH)
blynk.virtual_write(32, 255)
blynk.virtual_write(33, 0)
print("relay7-work")
else:
GPIO.output(relay7, GPIO.LOW)
blynk.virtual_write(32, 0)
blynk.virtual_write(33, 255)
print("relay7-not-work")
@blynk.on("V8")
def v8_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x8 = format(value[0])
if x8 == "1":
GPIO.output(relay8, GPIO.HIGH)
print("relay8-work")
else:
GPIO.output(relay8, GPIO.LOW)
print("relay8-not-work")
@blynk.on("V9")
def v9_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x9 = format(value[0])
if x9 == "1":
GPIO.output(relay1, GPIO.HIGH)
blynk.virtual_write(1, 1)
print("relay1-work")
else:
GPIO.output(relay1, GPIO.LOW)
blynk.virtual_write(1, 0)
print("relay1-not-work")
@blynk.on("V10")
def v10_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x10 = format(value[0])
if x10 == "1":
GPIO.output(relay2, GPIO.HIGH)
blynk.virtual_write(2, 1)
blynk.virtual_write(34, 255)
blynk.virtual_write(35, 0)
print("relay2-work")
else:
GPIO.output(relay2, GPIO.LOW)
blynk.virtual_write(2, 0)
blynk.virtual_write(34, 0)
blynk.virtual_write(35, 255)
print("relay2-not-work")
@blynk.on("V11")
def v11_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x11 = format(value[0])
if x11 == "1":
GPIO.output(relay3, GPIO.HIGH)
GPIO.output(relay1, GPIO.HIGH)
blynk.virtual_write(3, 1)
blynk.virtual_write(24, 255)
blynk.virtual_write(25, 0)
print("relay3-work")
else:
GPIO.output(relay3, GPIO.LOW)
GPIO.output(relay1, GPIO.LOW)
blynk.virtual_write(3, 0)
blynk.virtual_write(24, 0)
blynk.virtual_write(25, 255)
print("relay3-not-work")
@blynk.on("V12")
def v12_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x12 = format(value[0])
if x12 == "1":
GPIO.output(relay4, GPIO.HIGH)
GPIO.output(relay1, GPIO.HIGH)
blynk.virtual_write(4, 1)
blynk.virtual_write(26, 255)
blynk.virtual_write(27, 0)
print("relay4-work")
else:
GPIO.output(relay4, GPIO.LOW)
GPIO.output(relay1, GPIO.LOW)
blynk.virtual_write(4, 0)
blynk.virtual_write(26, 0)
blynk.virtual_write(27, 255)
print("relay4-not-work")
@blynk.on("V13")
def v13_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x13 = format(value[0])
if x13 == "1":
GPIO.output(relay5, GPIO.HIGH)
blynk.virtual_write(5, 1)
blynk.virtual_write(28, 255)
blynk.virtual_write(29, 0)
print("relay5-work")
else:
GPIO.output(relay5, GPIO.LOW)
blynk.virtual_write(5, 0)
blynk.virtual_write(28, 0)
blynk.virtual_write(29, 255)
print("relay5-not-work")
@blynk.on("V14")
def v14_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x14 = format(value[0])
if x14 == "1":
GPIO.output(relay6, GPIO.HIGH)
blynk.virtual_write(6, 1)
blynk.virtual_write(30, 255)
blynk.virtual_write(31, 0)
print("relay6-work")
else:
GPIO.output(relay6, GPIO.LOW)
blynk.virtual_write(6, 0)
blynk.virtual_write(30, 0)
blynk.virtual_write(31, 255)
print("relay6-not-work")
@blynk.on("V15")
def v15_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x15 = format(value[0])
if x15 == "1":
GPIO.output(relay7, GPIO.HIGH)
blynk.virtual_write(7, 1)
blynk.virtual_write(32, 255)
blynk.virtual_write(33, 0)
print("relay7-work")
else:
GPIO.output(relay7, GPIO.LOW)
blynk.virtual_write(7, 0)
blynk.virtual_write(32, 0)
blynk.virtual_write(33, 255)
print("relay7-not-work")
@blynk.on("V16")
def v16_write_handler(value):
#print('Current slider value: {}'.format(value[0]))
x16 = format(value[0])
if x16 == "1":
# print("timer-on")
# if statustimer == '1':
GPIO.output(relay8, GPIO.HIGH)
blynk.virtual_write(8, 1)
print("relay8-work")
else:
GPIO.output(relay8, GPIO.LOW)
blynk.virtual_write(8, 0)
print("relay8-not-work")
# @blynk.on("readV17")
# def v17_read_handler():
# temperature, relative_humidity = sht.measurements
# blynk.virtual_write(17, temperature)
# @blynk.on("readV18")
# def v18_read_handler():
# temperature, relative_humidity = sht.measurements
# blynk.virtual_write(18, relative_humidity)
while True:
blynk.run()
| StarcoderdataPython |
42833 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Defines a class for the COMPAS dataset."""
import pandas as pd
import numpy as np
from .base_wrapper import BasePerformanceDatasetWrapper
from tempeh.constants import FeatureType, Tasks, DataTypes, ClassVars, CompasDatasets # noqa
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
def recover_categorical_encoding_for_compas_race(data):
return np.array(list(map(lambda tuple: "".join(list(tuple)), zip(
[
"African-American" if is_column_true else "" for is_column_true in data[:, 9]
],
[
"Caucasian" if is_column_true else "" for is_column_true in data[:, 10]
]))))
class CompasPerformanceDatasetWrapper(BasePerformanceDatasetWrapper):
    """COMPAS Datasets"""

    # Maps each dataset id to (loader function, target column name,
    # per-column feature types after one-hot encoding).
    dataset_map = {
        CompasDatasets.COMPAS: (compas_data_loader, "two_year_recid",
                                [FeatureType.NOMINAL] + [FeatureType.CONTINUOUS] * 5 +
                                [FeatureType.NOMINAL] * 8)
    }

    # Maps each dataset id to (task kind, data type, (n_rows, n_cols)).
    metadata_map = {
        CompasDatasets.COMPAS: (Tasks.BINARY, DataTypes.TABULAR, (6172, 14))
    }

    # Placeholders; concrete values are attached to generated subclasses by
    # generate_dataset_class() via the ClassVars.* attribute names.
    load_function = None
    feature_type = None
    target_col = None

    def __init__(self, drop_race=True, drop_sex=True):
        """Initializes the COMPAS dataset """
        # NOTE(review): this reads self._target_col / self._size /
        # self._feature_type while the class declares target_col /
        # feature_type above -- presumably the ClassVars.* constants used in
        # generate_dataset_class() map to the underscored names; confirm.
        bunch = type(self).load_function()
        target = bunch[self._target_col].astype(int)
        bunch.drop(self._target_col, axis=1, inplace=True)
        bunch = bunch.astype(float)
        super().__init__(bunch, target, nrows=self._size[0], data_t=self._feature_type)
        self._features = list(bunch)
        if drop_race:
            # Keep the race encoding around for fairness analysis, then drop
            # the race columns from the feature matrices.
            self._race_train = recover_categorical_encoding_for_compas_race(self._X_train)
            self._race_test = recover_categorical_encoding_for_compas_race(self._X_test)
            # race is in columns 9-10 because the super class constructor removes the target
            self._X_train = np.delete(self._X_train, np.s_[9:11], axis=1)
            self._X_test = np.delete(self._X_test, np.s_[9:11], axis=1)
            del[self._features[9:11]]
        if drop_sex:
            # Column 0 is the binarized "sex" feature (0=Female, 1=Male).
            self._sex_train = self._X_train[:, 0]
            self._sex_test = self._X_test[:, 0]
            self._X_train = np.delete(self._X_train, 0, axis=1)
            self._X_test = np.delete(self._X_test, 0, axis=1)
            del[self._features[0]]
        self._target_names = np.unique(target)

    @classmethod
    def generate_dataset_class(cls, name, nrows=None):
        """Generate a dataset class.

        :param name: the name of the dataset
        :type name: str
        :param nrows: number of rows to resize the dataset to
        :type nrows: int
        :rtype: cls
        """
        load_function, target_col, feature_type = cls.dataset_map[name]
        task, data_type, size = cls.metadata_map[name]
        if nrows is not None:
            size = (nrows, size[1])
        class_name = name.title() + "PerformanceDatasetWrapper"
        return type(class_name, (cls, ), {ClassVars.LOAD_FUNCTION: load_function,
                                          ClassVars.FEATURE_TYPE: feature_type,
                                          ClassVars.TASK: task,
                                          ClassVars.DATA_TYPE: data_type,
                                          ClassVars.SIZE: size,
                                          ClassVars.TARGET_COL: target_col})
| StarcoderdataPython |
185847 | """Produce a greys cmap."""
from pyiem.plot.use_agg import plt
def main():
    """Go Main Go.

    Dump all 256 entries of the reversed-greys colormap as
    '<index> <R> <G> <B>' lines (0-255 per channel).
    """
    greys = plt.get_cmap("Greys_r")
    for idx in range(256):
        r, g, b, _alpha = greys(idx / 255.0)
        print("%s %.0f %.0f %.0f" % (idx, r * 255, g * 255, b * 255))
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
8981 | <reponame>Ramsha04/kits19-2d-reproduce
import os
from os.path import join, isdir
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import nibabel as nib
import numpy as np
import json
from .resample import resample_patient
from .custom_augmentations import resize_data_and_seg, crop_to_bbox
class Preprocessor(object):
    """
    Preprocesses the original dataset (interpolated).
    Procedures:
        * Resampled all volumes to have a thickness of 3mm.
        * Clipped to [-30, 300] HU
        * z-score standardization (zero mean and unit variance)
            * Standardization per 3D image instead of ACROSS THE WHOLE
              TRAINING SET
        * save as .npy array
            * imaging.npy
            * segmentation.npy (if with_masks)
    """

    def __init__(self, in_dir, out_dir, cases=None, kits_json_path=None,
                 bbox_json_path=None, clip_values=[-30, 300], with_mask=True,
                 fg_classes=[0, 1, 2], resize_xy_shape=(256, 256)):
        """
        Attributes:
            in_dir (str): directory with the input data. Should be the
                kits19/data directory.
            out_dir (str): output directory where you want to save each case
            cases: list of case folders to preprocess
            kits_json_path (str): path to the kits.json file in the kits19/data
                directory. This only should be specfied if you're resampling.
                Defaults to None.
            bbox_json_path (str): path to the bbox_stage1.json file made from
                stage1 post-processing. Triggers cropping to the bboxes.
                Defaults to None.
            clip_values (list, tuple): values you want to clip CT scans to.
                Defaults to None for no clipping.
            with_mask (bool): whether or not to preprocess with masks or no
                masks. Applicable to preprocessing test set (no labels
                available).
            fg_classes (list): of foreground class indices
                if None, doesn't gather fg class stats.
            resize_xy_shape (tuple): (h, w) to resize each axial slice to,
                or None to skip resizing.
        """
        self.in_dir = in_dir
        self.out_dir = out_dir
        self._load_kits_json(kits_json_path)
        self._load_bbox_json(bbox_json_path)
        self.clip_values = clip_values
        self.with_mask = with_mask
        self.fg_classes = fg_classes
        if not self.with_mask:
            assert self.fg_classes is None, \
                "When with_mask is False, fg_classes must be None."
        self.cases = cases
        # automatically collecting all of the case folder names
        if self.cases is None:
            self.cases = [os.path.join(self.in_dir, case)
                          for case in os.listdir(self.in_dir)
                          if case.startswith("case")]
            self.cases = sorted(self.cases)
            assert len(self.cases) > 0, \
                "Please make sure that in_dir refers to the proper directory."
        # making directory if out_dir doesn't exist
        if not isdir(out_dir):
            os.mkdir(out_dir)
            print("Created directory: {0}".format(out_dir))
        # BUG FIX: tuple(None) raised TypeError, so the documented
        # "resize_xy_shape is None -> no resizing" path was unreachable.
        self.resize_xy_shape = tuple(resize_xy_shape) \
            if resize_xy_shape is not None else None

    def gen_data(self, save_fnames=["imaging", "segmentation"]):
        """
        Generates and saves preprocessed data as numpy arrays (n, x, y).
        Args:
            save_fnames (List[str]): save names for [image, seg] respectively.
                DOESN'T INCLUDE THE .npy
        Returns:
            None
        """
        # Generating data and saving them recursively
        for case in tqdm(self.cases):
            x_path, y_path = join(case, "imaging.nii.gz"), join(case, "segmentation.nii.gz")
            image = nib.load(x_path).get_fdata()[None]
            label = nib.load(y_path).get_fdata()[None] if self.with_mask \
                else None
            preprocessed_img, preprocessed_label = self.preprocess(image,
                                                                   label,
                                                                   case)
            if self.bbox_dict is not None:
                preprocessed_img, preprocessed_label = self.crop_case_to_bbox(preprocessed_img,
                                                                              preprocessed_label,
                                                                              case)
            self.save_imgs(preprocessed_img, preprocessed_label, case,
                           save_fnames=save_fnames)

    def preprocess(self, image, mask, case=None):
        """
        Clipping, cropping, and resampling.
        Args:
            image: numpy array
                shape (c, n, x, y)
            mask: numpy array or None
                shape (c, n, x, y)
            case (str): path to a case folder
        Returns:
            tuple of:
                - preprocessed image
                    shape: (n, x, y)
                - preprocessed mask or None
                    shape: (n, x, y)
        """
        raw_case = Path(case).name  # raw case name, i.e. case_00000
        # resampling
        if self.kits_json is not None:
            for info_dict in self.kits_json:
                # guaranteeing that the info is corresponding to the right
                # case
                if info_dict["case_id"] == raw_case:
                    case_info_dict = info_dict
                    break
            # NOTE(review): if kits.json has no entry for raw_case,
            # case_info_dict is unbound here and a NameError is raised --
            # kits.json is assumed to cover every case.
            # resampling the slices axis to 3mm
            orig_spacing = (case_info_dict["captured_slice_thickness"],
                            case_info_dict["captured_pixel_width"],
                            case_info_dict["captured_pixel_width"])
            target_spacing = (3,) + orig_spacing[1:]
            image, mask = resample_patient(image, mask, np.array(orig_spacing),
                                           target_spacing=np.array(target_spacing))
        if self.clip_values is not None:
            image = np.clip(image, self.clip_values[0], self.clip_values[1])
        if self.resize_xy_shape is not None:
            # image coming in : shape (c, n, h, w); mask is same shape
            zdim_size = image.shape[1]
            resize_xy_shape = (zdim_size,) + self.resize_xy_shape
            image, mask = resize_data_and_seg(image, size=resize_xy_shape,
                                              seg=mask)
        image = standardize_per_image(image)
        mask = mask.squeeze() if mask is not None else mask
        return (image.squeeze(), mask)

    def save_imgs(self, image, mask, case,
                  save_fnames=["imaging", "segmentation"]):
        """
        Saves an image and mask pair as .npy arrays in the KiTS19 file structure
        Args:
            image: numpy array
            mask: numpy array
            case: path to a case folder (each element of self.cases)
            save_fnames (List[str]): save names for [image, seg] respectively.
                DOESN'T INCLUDE THE .npy
        """
        for fname in save_fnames:
            assert not ".npy" in fname, \
                "Filenames in save_fnames should not include .npy in the name."
        # saving the generated dataset
        # output dir in KiTS19 format
        # extracting the raw case folder name
        case_raw = Path(case).name  # extracting the raw case folder name
        out_case_dir = join(self.out_dir, case_raw)
        # checking to make sure that the output directories exist
        if not isdir(out_case_dir):
            os.mkdir(out_case_dir)
        np.save(os.path.join(out_case_dir, f"{save_fnames[0]}.npy"), image)
        if mask is not None:
            np.save(os.path.join(out_case_dir, f"{save_fnames[1]}.npy"), mask)

    def save_dir_as_2d(self, base_fnames=["imaging", "segmentation"],
                       delete3dcase=False):
        """
        Takes preprocessed 3D numpy arrays and saves them as slices
        in the same directory.
        Arrays must have shape (n, h, w).
        Args:
            base_fnames (List[str]): names to read for [image, seg] respectively.
                DOESN'T INCLUDE THE .npy
            delete3dcase (bool): whether or not to delete the 3D volume after
                saving the 2D sliced versions
        """
        for fname in base_fnames:
            assert not ".npy" in fname, \
                "Filenames in base_fnames should not include .npy in the name."
        self.pos_per_class_dict = {}  # saves slices per class
        self.pos_per_slice_dict = defaultdict(list)  # saves classes per slice
        # Generating data and saving them recursively
        for case in tqdm(self.cases):
            # output dir in KiTS19 format
            case_raw = Path(case).name  # extracting the raw case folder name
            out_case_dir = join(self.out_dir, case_raw)
            # checking to make sure that the output directories exist
            if not isdir(out_case_dir):
                os.mkdir(out_case_dir)
            # assumes the .npy files have shape: (d, h, w)
            paths = [join(out_case_dir, f"{base_fnames[0]}.npy"),
                     join(out_case_dir, f"{base_fnames[1]}.npy")]
            image, label = np.load(paths[0]), np.load(paths[1])
            self.save_3d_as_2d(image, label, case_raw, out_case_dir)
            # to deal with colaboratory storage limitations
            if delete3dcase:
                os.remove(paths[0]), os.remove(paths[1])
        if self.fg_classes is not None:
            self._save_pos_slice_dict()

    def save_3d_as_2d(self, image, mask, case_raw, out_case_dir):
        """
        Saves a 3D volume as separate 2D arrays for each slice across the
        axial axis. The naming convention is as follows:
            imaging_{parsed_slice_idx}.npy
            segmentation_{parsed_slice_idx}.npy
        where parsed_slice_idx is just the slice index but filled with
        zeros until it hits 3 digits (so sorting is easier.)
        Args:
            image: numpy array with shape (n, h, w)
            mask: numpy array with shape (n, h, w), or None
            case_raw: raw case folder name, i.e. case_00000
            out_case_dir: output directory for this case's slice files
        """
        # saving the generated dataset
        # iterates through all slices and saves them individually as 2D arrays
        assert len(image.shape) == 3, \
            "Image shape should be (n, h, w)"
        slice_idx_per_class = defaultdict(list)
        for slice_idx in range(image.shape[0]):
            # naming
            slice_idx_str = parse_slice_idx_to_str(slice_idx)
            case_str = f"{case_raw}_{slice_idx_str}"
            if mask is not None:
                label_slice = mask[slice_idx]
                # appending fg slice indices
                if self.fg_classes is not None:
                    for label_idx in self.fg_classes:
                        if label_idx != 0 and (label_slice == label_idx).any():
                            slice_idx_per_class[label_idx].append(slice_idx)
                            self.pos_per_slice_dict[case_str].append(label_idx)
                        elif label_idx == 0 and np.sum(label_slice) == 0:
                            # for completely blank labels
                            slice_idx_per_class[label_idx].append(slice_idx)
                            self.pos_per_slice_dict[case_str].append(label_idx)
            self._save_slices(image, mask, out_case_dir=out_case_dir,
                              slice_idx=slice_idx, slice_idx_str=slice_idx_str)
        if self.fg_classes is not None:
            self.pos_per_class_dict[case_raw] = slice_idx_per_class

    def _save_pos_slice_dict(self):
        """
        Saves the foreground (positive) class dictionaries:
            - slice_indices.json
                saves the slice indices per class
                {
                    case: {fg_class1: [slice indices...],
                           fg_class2: [slice indices...],
                           ...}
                }
            - classes_per_slice.json
                the keys are not cases, but the actual filenames that are
                being read.
                {
                    case_slice_idx_str: [classes_in_slice],
                    case_slice_idx_str2: [classes_in_slice],
                }
        """
        save_path_per_slice = join(self.out_dir, "classes_per_slice.json")
        # saving the dictionaries
        print(f"Logged the classes in {self.fg_classes} for each slice at",
              f"{save_path_per_slice}.")
        with open(save_path_per_slice, "w") as fp:
            json.dump(self.pos_per_slice_dict, fp)
        save_path = join(self.out_dir, "slice_indices.json")
        # saving the dictionaries
        print(f"Logged the slice indices for each class in {self.fg_classes} at",
              f"{save_path}.")
        with open(save_path, "w") as fp:
            json.dump(self.pos_per_class_dict, fp)

    def _save_slices(self, image, mask, out_case_dir, slice_idx,
                     slice_idx_str):
        """
        For saving the slices in self.save_3d_as_2d()
        """
        np.save(join(out_case_dir, f"imaging_{slice_idx_str}.npy"),
                image[slice_idx])
        if mask is not None:
            label_slice = mask[slice_idx]
            np.save(join(out_case_dir, f"segmentation_{slice_idx_str}.npy"),
                    label_slice)

    def _load_kits_json(self, json_path):
        """
        Loads the kits.json file into `self.kits_json`
        """
        if json_path is None:
            self.kits_json = None
            print("`kits_json_path is empty, so not resampling.`")
        else:
            with open(json_path, "r") as fp:
                self.kits_json = json.load(fp)

    def _load_bbox_json(self, json_path):
        """
        Loads the bbox json file into `self.bbox_dict`
        """
        if json_path is None:
            self.bbox_dict = None
            # BUG FIX: message previously read "bbox_json_path, so not
            # cropping..." (missing "is empty").
            print("bbox_json_path is empty, so not cropping volumes to their bbox.")
        else:
            with open(json_path, "r") as fp:
                self.bbox_dict = json.load(fp)

    def crop_case_to_bbox(self, image, label, case):
        """
        Crops a 3D image and 3D label to the corresponding bounding box.
        """
        # NOTE(review): assumes bbox_dict is keyed by the same `case` string
        # that gen_data() passes in -- confirm against bbox_stage1.json.
        bbox_coord = self.bbox_dict[case]
        # BUG FIX: original referenced the undefined name `bbox` for the
        # image and passed `case` instead of the coordinates for the label.
        return (crop_to_bbox(image, bbox_coord), crop_to_bbox(label, bbox_coord))
def standardize_per_image(image):
    """
    Z-score standardization per image.

    Shifts the image to zero mean and scales it to unit variance using the
    statistics of this single image (not the whole training set).
    """
    centered = image - image.mean()
    return centered / image.std()
def parse_slice_idx_to_str(slice_idx):
    """
    Zero-pad the slice index to (at least) three digits for the
    2D .npy files generated by io.preprocess.Preprocessor.
    Naming convention: {type of slice}_{case}_{slice_idx}
        * padding keeps lexicographic order equal to numeric order,
          so sorting files is easier when stacking
    """
    return str(slice_idx).zfill(3)
| StarcoderdataPython |
3375287 | <gh_stars>1-10
import numpy as np
# b|b|b
# -----
# b|b|b
# -----
# b|b|b
def printBoard(game):
    """Print the 3x3 board: cells joined by '|', rows separated by '-----'.

    Args:
        game: 3x3 numpy chararray whose cells are byte strings (decoded
              before printing).
    """
    # BUG FIX: the original compared ints with `is` (identity, not equality),
    # which is a CPython small-int implementation detail and a SyntaxWarning
    # on modern Python. The row/column bookkeeping is also simplified:
    # output is identical (separator line between consecutive rows).
    for row in range(3):
        if row > 0:
            print("-----")
        print("|".join(game[row][col].decode() for col in range(3)))
def printCoordinates():
    """Print the cell coordinates 1-9 in the board layout.

    Output:
        1|2|3
        -----
        4|5|6
        -----
        7|8|9
    """
    # BUG FIX: the original called str(...).decode() (AttributeError on
    # Python 3), referenced an undefined name `game`, and never incremented
    # `count`. Rewritten to do what the `count = 1` seed clearly intended:
    # number the cells left-to-right, top-to-bottom.
    count = 1
    for row in range(3):
        if row > 0:
            print("-----")
        cells = []
        for _ in range(3):
            cells.append(str(count))
            count += 1
        print("|".join(cells))
def main():
    """Build an empty ('b'-filled) 3x3 board, echo its first row, print it."""
    board = np.chararray((3, 3))
    board[:] = b"b"
    print(board[0])
    printBoard(board)
# Script entry point. (BUG FIX: a dataset separator token had been fused onto
# the main() call line, making the file unparsable.)
if __name__ == "__main__":
    main()
1664436 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Kumagai group.
import numpy as np
from pydefect.analyzer.defect_charge_distribution import RadialDist
from pymatgen import Structure, Lattice, Spin
from pymatgen.io.vasp import Chgcar
def test():
    """Smoke-test RadialDist distance data and histogram on a 3A cubic cell."""
    structure = Structure(Lattice.cubic(3), ["H"], [[0, 0, 0]])
    grids = {
        "total": np.array([[[3] * 3] * 3] * 3),
        "diff": np.array([[[-1] * 3] * 3] * 3),
    }
    chgcar = Chgcar(structure, grids)
    dist = RadialDist(chgcar, [0, 0, 0])
    # 1 on-site point + 6 face neighbours + 12 edge neighbours.
    assert len(dist.distances_data) == 1 + 6 + 12
    first = dist.distances_data[0]
    np.testing.assert_almost_equal(first[0], np.array([-1 / 3, -1 / 3, 0.0]))
    assert first[1] == np.sqrt(2)
    assert first[2] == 24
    np.testing.assert_almost_equal(first[3], np.array([-1, -1, 0.0]))
    hist_data, half_point, summed = dist.histogram(Spin.up)
    np.testing.assert_almost_equal(hist_data[0], [5.00000000e-02, 1.16355283e-03])
| StarcoderdataPython |
1671535 | import hashlib
from datetime import datetime
class Marvel:
    """Builds the auth parameters (ts, apikey, hash) for Marvel API calls."""

    def __init__(self, private_key, public_key):
        self.private_key = private_key
        self.public_key = public_key

    def get_auth_data(self):
        """Return the ts/apikey/hash dict required by the Marvel API.

        The hash is md5(ts + private_key + public_key), per the API's
        server-side authentication scheme.
        """
        ts = datetime.now().timestamp()
        raw = f'{ts}{self.private_key}{self.public_key}'
        digest = hashlib.md5(raw.encode('utf-8')).hexdigest()
        return dict(ts=ts, apikey=self.public_key, hash=digest)
| StarcoderdataPython |
121798 | from setuptools import setup
# Read the README once so it can ship as the package's long description.
# BUG FIX: open with an explicit encoding instead of the locale default.
with open('README.md', encoding='utf-8') as readme_file:
    readme = readme_file.read()

setup(
    name='malwarefeeds',
    version='0.1.0',
    description='An aggregator for malware feeds.',
    long_description=readme,
    # Tell PyPI how to render the README (it is Markdown).
    long_description_content_type='text/markdown',
    packages=['malwarefeeds'],
    url='https://github.com/neriberto/malwarefeeds',
    license='BSD 3-Clause License',
    author='<NAME>',
    author_email='<EMAIL>',
    install_requires=["feedparser"],
    entry_points={
        "console_scripts": [
            "malwarefeeds=malwarefeeds:main"
        ],
    }
)
| StarcoderdataPython |
1726134 | # Adding a Line Feature to a Vector Layer
# https://github.com/GeospatialPython/Learn/raw/master/paths.zip
# Load the paths shapefile and append one 3-vertex polyline feature to it.
path_layer = QgsVectorLayer('/qgis_data/paths/paths.shp', 'Paths', "ogr")
path_layer.isValid()
provider = path_layer.dataProvider()
vertices = [
    QgsPoint(430841.61703, 5589485.34838),
    QgsPoint(432438.36523, 5575114.61462),
    QgsPoint(447252.64015, 5567663.12304),
]
geometry = QgsGeometry.fromPolyline(vertices)
feature = QgsFeature()
feature.setGeometry(geometry)
provider.addFeatures([feature])
path_layer.updateExtents()
| StarcoderdataPython |
17811 | <reponame>jacobtobias/s3parq<filename>tests/test_publish_parq.py
import pytest
from mock import patch
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import boto3
from string import ascii_lowercase
import random
from dfmock import DFMock
import s3parq.publish_parq as parq
import s3fs
from moto import mock_s3
@mock_s3
class Test:
    """Tests for s3parq.publish_parq against a moto-mocked S3."""

    def setup_s3(self):
        """Create a mock S3 bucket; return random (bucket, key) names."""
        def rand_string():
            return ''.join([random.choice(ascii_lowercase) for x in range(0, 10)])
        bucket = rand_string()
        key = rand_string()
        s3_client = boto3.client('s3')
        s3_client.create_bucket(Bucket=bucket)
        return bucket, key

    def setup_df(self):
        """Build a 100-row mock dataframe; return (column names, dataframe)."""
        df = DFMock(count=100)
        df.columns = {"grouped_col": {"option_count": 4, "option_type": "string"},
                      "text_col": "string",
                      "int_col": "integer",
                      "float_col": "float"}
        df.generate_dataframe()
        # (removed a no-op bare `df.dataframe` expression statement)
        return tuple(df.columns.keys()), df.dataframe

    def test_works_without_partitions(self):
        columns, dataframe = self.setup_df()
        bucket, key = self.setup_s3()
        partitions = []
        parq.publish(bucket=bucket, key=key,
                     dataframe=dataframe, partitions=partitions)

    def test_accepts_valid_partitions(self):
        columns, dataframe = self.setup_df()
        parq.check_partitions(columns, dataframe)

    def test_reject_non_column_partitions(self):
        columns, dataframe = self.setup_df()
        with pytest.raises(ValueError):
            parq.check_partitions(('banana',), dataframe)

    def test_reject_timedelta_dataframes(self):
        columns, dataframe = self.setup_df()
        bucket, key = self.setup_s3()
        partitions = ['text_col']
        dataframe['time_col'] = pd.Timedelta('1 days')
        with pytest.raises(NotImplementedError):
            parq.publish(bucket=bucket, key=key,
                         dataframe=dataframe, partitions=partitions)

    def test_reject_protected_name_partitions(self):
        assert parq._check_partition_compatibility("shoe")
        assert parq._check_partition_compatibility("all") is False

    def test_generates_partitions_in_order(self):
        columns, dataframe = self.setup_df()
        bucket, key = self.setup_s3()
        partitions = columns[:1]
        with patch('s3parq.publish_parq.boto3', return_value=True) as mock_boto3:
            with patch('s3parq.publish_parq.pq.write_to_dataset', return_value=None) as mock_method:
                parq._gen_parquet_to_s3(bucket, key, dataframe, partitions)
                arg, kwarg = mock_method.call_args
                assert kwarg['partition_cols'] == partitions

    def test_input_equals_output(self):
        columns, dataframe = self.setup_df()
        bucket, key = self.setup_s3()
        s3_path = f"s3://{bucket}/{key}"
        partitions = [columns[0]]
        parq.publish(bucket=bucket, key=key,
                     dataframe=dataframe, partitions=partitions)
        from_s3 = pq.ParquetDataset(s3_path, filesystem=s3fs.S3FileSystem())
        s3pd = from_s3.read().to_pandas()
        # (removed unused local `pre_df`)
        assert set(zip(s3pd.int_col, s3pd.float_col, s3pd.text_col, s3pd.grouped_col)) - \
            set(zip(dataframe.int_col, dataframe.float_col,
                    dataframe.text_col, dataframe.grouped_col)) == set()

    def test_reject_empty_dataframe(self):
        dataframe = pd.DataFrame()
        bucket, key = self.setup_s3()
        # (removed unused local `s3_path`)
        with pytest.raises(ValueError):
            parq.publish(bucket=bucket, key=key,
                         dataframe=dataframe, partitions=[])

    def test_set_metadata_correctly(self):
        columns, dataframe = self.setup_df()
        bucket, key = self.setup_s3()
        s3_client = boto3.client('s3')
        partitions = ['grouped_col']
        parq.publish(bucket=bucket, key=key,
                     dataframe=dataframe, partitions=partitions)
        for obj in s3_client.list_objects(Bucket=bucket)['Contents']:
            if obj['Key'].endswith(".parquet"):
                meta = s3_client.get_object(
                    Bucket=bucket, Key=obj['Key'])['Metadata']
                assert meta['partition_data_types'] == str(
                    {"grouped_col": "string"})

    '''
    ## timedeltas no good
    def test_timedeltas_rejected(self):
        bucket = MockHelper().random_name()
        key = MockHelper().random_name()
        s3_client = boto3.client('s3')
        s3_client.create_bucket(Bucket=bucket)
        df = DFMock(count=100)
        df.columns = {"timedelta_col": "timedelta", "int_col": "int", "float_col": "float",
                      "bool_col": "boolean", "grouped_col": {"option_count": 4, "option_type": "string"}}
        df.generate_dataframe()
        with pytest.raises(NotImplementedError):
            parq = pub_parq.S3PublishParq(
                dataframe=df.dataframe, bucket=bucket, key=key, partitions=['grouped_col'])
    '''
| StarcoderdataPython |
4816091 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import pathlib
import tempfile
from io import StringIO
from sys import getsizeof
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.text import slugify
import pypandoc
# Media root for uploads; settings.PANPUB_MEDIA overrides the default.
# (Replaces the hasattr/if-else with the equivalent getattr idiom.)
PANPUB_MEDIA = getattr(settings, 'PANPUB_MEDIA', 'panpub-media')
class Crafter(models.Model):
    # One-to-one extension profile of the auth User (auto-created via the
    # post_save receiver at the bottom of this module).
    user = models.OneToOneField(User, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.user)

    def get_absolute_url(self):
        return reverse('crafter_detail', args=[str(self.pk), ])

    def claims(self, claim_type=None):
        """Return this crafter's Claim rows, optionally filtered by a
        valid claim type code ('CRT', 'CUR' or 'MED')."""
        claims = Claim.objects.filter(crafter=self)
        if claim_type in ['CRT', 'CUR', 'MED']:
            claims = claims.filter(claim_type=claim_type)
        return claims

    def collectives(self):
        """Return the Collectives this crafter is a member of."""
        collectives = Collective.objects.filter(members__in=[self, ])
        return collectives
class Collective(models.Model):
    # A named group of crafters; can nest into other collectives via
    # `circles` and may point at a Text used as its manifesto.
    name = models.CharField(max_length=100)
    circles = models.ManyToManyField('self',
                                     blank=True)
    members = models.ManyToManyField(Crafter,
                                     blank=True)
    manifeste = models.ForeignKey('Text',
                                  models.SET_NULL,
                                  blank=True,
                                  null=True,
                                  )

    def get_absolute_url(self):
        return reverse('collective_detail', args=[str(self.pk), ])

    def __str__(self):
        return self.name
class Corpus(models.Model):
    """A named, licensed grouping of Content items."""
    name = models.CharField(max_length=100)
    datestamp = models.DateField(null=True)
    description = models.TextField(blank=True)
    license = models.CharField(max_length=100)
    ready = models.BooleanField(default=False)
    is_exhibit = models.BooleanField(default=False)

    def get_absolute_url(self):
        return reverse('corpus_detail', args=[str(self.pk), ])

    def __str__(self):
        return self.name

    def filefriendly_name(self):
        """Slugified name, safe for use in file names."""
        return slugify(self.name)

    @staticmethod
    def only():
        """Corpuses that are not also Content rows (multi-table inheritance).

        BUG FIX: declared without `self` but not marked @staticmethod, so
        calling it on an instance raised TypeError.
        """
        contents = Content.objects.values_list('pk', flat=True)
        return Corpus.objects.exclude(pk__in=contents)

    def get_contents(self):
        return Content.objects.filter(corpuses=self)

    def add_content(self, pk):
        """Attach the Content with primary key `pk` to this corpus."""
        if Content.objects.filter(pk=pk).exists():
            content = Content.objects.get(pk=pk)
            content.corpuses.add(self)

    def sup_content(self, pk):
        """Detach the Content with primary key `pk` from this corpus."""
        if Content.objects.filter(pk=pk).exists():
            content = Content.objects.get(pk=pk)
            # BUG FIX: related managers have no .delete(obj); .remove() is the
            # M2M detach operation. The original also called
            # self.delete(content), which passes `content` as the `using`
            # database alias -- clearly unintended, so it was dropped.
            content.corpuses.remove(self)

    def publish(self):
        # NOTE(review): only sets the flag in memory; the caller must .save().
        self.ready = True

    def claims(self):
        """All Claim rows attached to this corpus' contents."""
        contents = self.get_contents()
        claims = Claim.objects.filter(content__in=contents)
        return claims

    def claimers(self):
        """Distinct crafters holding a claim on any content of this corpus."""
        claims = self.claims()
        claimers = claims.values('crafter__pk').distinct()
        crafters = Crafter.objects.filter(pk__in=claimers)
        return crafters
class Content(Corpus):
    # A Corpus that carries an actual work. Crafters are linked through the
    # Claim model (creator/curator/mediator roles).
    claims = models.ManyToManyField(
        Crafter,
        through='Claim',
        through_fields=('content', 'crafter'),
    )
    # Corpuses this content belongs to; no reverse accessor is created.
    corpuses = models.ManyToManyField(Corpus, related_name='+')

    def get_absolute_url(self):
        return reverse('content_detail', args=[str(self.pk), ])

    def __str__(self):
        return self.name
class Text(Content):
    """Textual content; normalized to Markdown via pandoc on save."""
    # (input format code, display label) pairs pandoc can convert from.
    pandoc_formats = (
        ('markdown', 'Markdown'),
        ('gfm', 'Markdown (github-flavour)'),
        ('latex', 'LaTeX'),
        ('docx', 'Word docx'),
        ('odt', 'OpenDocument ODT'),
        ('html', 'HTML'),
        ('mediawiki', 'MediaWiki markup'),
        ('rst', 'reStructuredText'),
        ('json', 'JSON'),
        ('native', 'Haskell (pandoc-native)'),
    )
    input_type = models.CharField(max_length=10,
                                  choices=pandoc_formats,
                                  default='markdown')
    # todo: homemade validator. quickwin: FileExtensionAllowed() ?
    document = models.FileField(
        upload_to='{}/texts/'.format(PANPUB_MEDIA),
    )

    def get_absolute_url(self):
        return reverse('text_detail', args=[str(self.pk), ])

    def save(self, *args, **kwargs):
        """Convert the uploaded document to Markdown, then save the model."""
        try:
            data = self.document.read()
            data = pypandoc.convert_text(data, to='md', format=self.input_type)
            datafile = StringIO(data)
            # Content-addressed filename: sha256 of the converted text.
            dataname = hashlib.sha256(data.encode()).hexdigest()
            self.document = InMemoryUploadedFile(
                datafile,
                'FileField',
                '{}.md'.format(dataname),
                'text/markdown',
                # BUG FIX: was getsizeof(datafile), the StringIO object's
                # in-memory size -- the upload size must be the content's
                # byte length.
                len(data.encode('UTF-8')),
                'UTF-8',
            )
        except Exception:
            # BUG FIX: re-raise the original error instead of a bare
            # `raise Exception`, which destroyed the cause and traceback.
            raise
        else:
            super(Text, self).save(*args, **kwargs)

    def available_pubformats(self):
        # pdf requires xetex
        return ('gfm',
                'html',
                'markdown',
                'docx',
                'epub',
                'odt',
                )

    def export(self, pubformat='markdown'):
        """Convert the stored Markdown to `pubformat`.

        Returns:
            (file bytes, download filename, byte length)
        Raises:
            Exception: if `pubformat` is not an available publish format, or
                if the pandoc conversion fails (original error re-raised).
        """
        if pubformat not in self.available_pubformats():
            raise Exception
        try:
            with tempfile.NamedTemporaryFile() as f:
                outpath = pathlib.Path(tempfile.tempdir,
                                       f.name).as_posix()
                pypandoc.convert_file(self.document.path,
                                      pubformat,
                                      format='md',
                                      outputfile=outpath)
                f.seek(0)
                datafile = f.read()
        except Exception:
            # BUG FIX: preserve the original exception and traceback.
            raise
        else:
            filelen = len(datafile)
            filename = '{}.{}'.format(self.filefriendly_name(),
                                      pubformat)
            return datafile, filename, filelen
class Dataset(Content):
    """Tabular dataset content; formats mirror what tablib supports."""
    tablib_formats = ('csv',
                      'json',
                      'xls',
                      'yaml',
                      'tsv',
                      'html',
                      'xlsx',
                      'ods',
                      'dbf',
                      'df',
                      )
    document = models.FileField(
        upload_to='{}/datasets/'.format(PANPUB_MEDIA),
    )

    def get_absolute_url(self):
        return reverse('dataset_detail', args=[str(self.pk), ])

    def save(self, *args, **kwargs):
        # Stub: dataset normalisation on save not implemented yet.
        pass

    def available_pubformats(self):
        # BUG FIX: `tuple + 'latex'` raised TypeError (can't concatenate
        # tuple and str); 'latex' must be wrapped in a 1-tuple.
        return self.tablib_formats + ('latex',)  # TODO

    def export(self, pubformat='csv'):
        # Stub: export not implemented yet.
        pass
class Picture(Content):
    # Image content; format tuples mirror Pillow's codec support.
    # Formats Pillow can both read and write.
    pillow_formats = ('bmp',
                      'eps',
                      'gif',
                      'icns',
                      'ico',
                      'im',
                      'jpeg',
                      'jpeg2000',
                      'msp',
                      'pcx',
                      'png',
                      'ppm',
                      'sgi',
                      'spider',
                      'tga',
                      'tiff',
                      'webp',
                      'xbm',
                      )
    # Readable formats: read-only codecs plus the read/write set above.
    pillow_r_formats = ('blp',
                        'cur',
                        'dcx',
                        'dds',
                        'fli',
                        'flc',
                        'fpx',
                        'ftex',
                        'gbr',
                        'gd',
                        'imt',
                        'iptc/naa',
                        'mcidas',
                        'mic',
                        'mpo',
                        'pcd',
                        'pixar',
                        'psd',
                        'wal',
                        'xpm',
                        ) + pillow_formats
    # Writable formats: write-only codecs plus the read/write set above.
    pillow_w_formats = ('palm',
                        'pdf',
                        'xvthumbnail',
                        ) + pillow_formats
    document = models.FileField(
        upload_to='{}/pictures/'.format(PANPUB_MEDIA),
    )

    def get_absolute_url(self):
        return reverse('picture_detail', args=[str(self.pk), ])

    def save(self, *args, **kwargs):
        # Stub: image normalisation on save not implemented yet.
        pass

    def available_pubformats(self):
        # Export targets are the formats Pillow can write.
        return self.pillow_w_formats

    def export(self, pubformat='png'):
        # Stub: export not implemented yet.
        pass
class Record(Content):
    # Audio/recording content; file stored under the panpub media root.
    document = models.FileField(upload_to='{}/records/'.format(PANPUB_MEDIA))
class OutsideLink(models.Model):
    # Placeholder model; fields not defined yet.
    pass
class Claim(models.Model):
    # Through-model linking a Crafter to a Content with a role.
    CREATOR = 'CRT'
    CURATOR = 'CUR'
    MEDIATOR = 'MED'
    CLAIMS = (
        (CREATOR, 'creator'),
        (CURATOR, 'curator'),
        (MEDIATOR, 'mediator'),
    )
    content = models.ForeignKey(Content, on_delete=models.CASCADE)
    crafter = models.ForeignKey(Crafter, on_delete=models.CASCADE)
    claim_type = models.CharField(
        max_length=3,
        choices=CLAIMS,
        default=CREATOR
    )

    def __str__(self):
        return "{} has a {} claim on {}".format(self.crafter,
                                                self.claim_type,
                                                self.content)
@receiver(post_save, sender=User)
def create_crafter(sender, instance, created, **kwargs):
    # Auto-provision a Crafter profile for every newly created User.
    if created:
        Crafter.objects.create(user=instance)
@receiver(post_save, sender=User)
def update_crafter(sender, instance, **kwargs):
    """Re-save the related Crafter whenever its User is saved."""
    # BUG FIX: the original forwarded the signal's **kwargs (signal, created,
    # raw, using, update_fields, ...) into Model.save(), which accepts none
    # of `signal`/`created`/`raw` and raised TypeError on every User save.
    instance.crafter.save()
| StarcoderdataPython |
3281471 | from evaluator.music_demixing import MusicDemixingPredictor
import numpy as np
# Score the model locally and report per-metric mean and median.
print("Calculating scores for local run...")
predictor = MusicDemixingPredictor(model_name='tdf+demucs0.5')
raw_scores = predictor.scoring()
score_matrix = np.array([list(per_track.values()) for per_track in raw_scores.values()])
print(np.mean(score_matrix, 0), np.median(score_matrix, 0))
| StarcoderdataPython |
4837570 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2018, 9t9it and Contributors
# See license.txt
from __future__ import unicode_literals
from frappe.utils import getdate
import unittest
from toolz import pluck
from park_management.park_management.report.item_consumption_report.helpers \
import generate_intervals
class TestItemConsumptionReport(unittest.TestCase):
    """Unit tests for the report helper `generate_intervals` covering the
    Weekly / Monthly / Yearly period types (count, keys, labels, and
    start/end dates of the generated intervals)."""

    def test_generate_intervals(self):
        # No period type -> no intervals.
        actual = generate_intervals(None, '2012-12-12', '2012-12-12')
        expected = []
        self.assertEqual(actual, expected)

    def test_generate_intervals_weekly(self):
        actual = len(
            generate_intervals('Weekly', '2012-08-19', '2012-09-12')
        )
        expected = 5
        self.assertEqual(actual, expected)

    def test_generate_intervals_weekly_keys(self):
        # Keys are '<yy>W<isoweek>' strings.
        actual = list(
            pluck(
                'key',
                generate_intervals('Weekly', '2012-08-19', '2012-09-12')
            )
        )
        expected = ['12W33', '12W34', '12W35', '12W36', '12W37']
        self.assertEqual(actual, expected)

    def test_generate_intervals_weekly_labels(self):
        # Weekly labels are the Monday (week start) of each interval.
        actual = list(
            pluck(
                'label',
                generate_intervals('Weekly', '2012-08-19', '2012-08-24')
            )
        )
        expected = ['2012-08-13', '2012-08-20']
        self.assertEqual(actual, expected)

    def test_generate_intervals_weekly_start_dates(self):
        actual = list(
            pluck(
                'start_date',
                generate_intervals('Weekly', '2012-08-19', '2012-08-24')
            )
        )
        expected = [getdate('2012-08-13'), getdate('2012-08-20')]
        self.assertEqual(actual, expected)

    def test_generate_intervals_weekly_end_dates(self):
        actual = list(
            pluck(
                'end_date',
                generate_intervals('Weekly', '2012-08-19', '2012-08-24')
            )
        )
        expected = [getdate('2012-08-19'), getdate('2012-08-26')]
        self.assertEqual(actual, expected)

    def test_generate_intervals_monthly(self):
        actual = len(
            generate_intervals('Monthly', '2012-08-12', '2012-12-12')
        )
        expected = 5
        self.assertEqual(actual, expected)

    def test_generate_intervals_monthly_keys(self):
        # Keys are '<yy>M<month>' strings.
        actual = list(
            pluck(
                'key',
                generate_intervals('Monthly', '2012-12-12', '2013-03-19')
            )
        )
        expected = ['12M12', '13M01', '13M02', '13M03']
        self.assertEqual(actual, expected)

    def test_generate_intervals_monthly_labels(self):
        actual = list(
            pluck(
                'label',
                generate_intervals('Monthly', '2012-12-12', '2013-04-19')
            )
        )
        expected = ['Dec 12', 'Jan 13', 'Feb 13', 'Mar 13', 'Apr 13']
        self.assertEqual(actual, expected)

    def test_generate_intervals_monthly_start_dates(self):
        actual = list(
            pluck(
                'start_date',
                generate_intervals('Monthly', '2012-12-12', '2013-01-19')
            )
        )
        expected = [getdate('2012-12-01'), getdate('2013-01-01')]
        self.assertEqual(actual, expected)

    def test_generate_intervals_monthly_end_dates(self):
        actual = list(
            pluck(
                'end_date',
                generate_intervals('Monthly', '2012-12-12', '2013-01-19')
            )
        )
        expected = [getdate('2012-12-31'), getdate('2013-01-31')]
        self.assertEqual(actual, expected)

    def test_generate_intervals_yearly(self):
        actual = len(
            generate_intervals('Yearly', '2012-08-12', '2012-12-12')
        )
        expected = 1
        self.assertEqual(actual, expected)

    def test_generate_intervals_yearly_keys(self):
        # Keys are '<yy>Y' strings.
        actual = list(
            pluck(
                'key',
                generate_intervals('Yearly', '2012-12-12', '2013-03-19')
            )
        )
        expected = ['12Y', '13Y']
        self.assertEqual(actual, expected)

    def test_generate_intervals_yearly_labels(self):
        actual = list(
            pluck(
                'label',
                generate_intervals('Yearly', '2012-12-12', '2013-04-19')
            )
        )
        expected = ['2012', '2013']
        self.assertEqual(actual, expected)

    def test_generate_intervals_yearly_start_dates(self):
        actual = list(
            pluck(
                'start_date',
                generate_intervals('Yearly', '2012-12-12', '2013-01-19')
            )
        )
        expected = [getdate('2012-01-01'), getdate('2013-01-01')]
        self.assertEqual(actual, expected)

    def test_generate_intervals_yearly_end_dates(self):
        actual = list(
            pluck(
                'end_date',
                generate_intervals('Yearly', '2012-12-12', '2013-01-19')
            )
        )
        expected = [getdate('2012-12-31'), getdate('2013-12-31')]
        self.assertEqual(actual, expected)
| StarcoderdataPython |
108798 | <reponame>dacosta2213/cfdi<filename>cfdi/cfdi/doctype/cfdi/cfdi.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, C0D1G0 B1NAR10 and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.file_manager import save_url
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cint, flt
import shutil
import os
import sys
import time
import base64
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import threading
from xml.dom import minidom
import requests
from datetime import datetime
import collections
import random
import string
from datetime import date
from datetime import *
import re
class CFDI(Document):
    # Controller for the CFDI doctype; no custom behavior yet.
    pass
@frappe.whitelist()
def ticket(source_name, target_doc=None):
    """Map a Sales Invoice (and its items) onto a new CFDI document.

    The invoice name is stored in the CFDI `ticket` field; each invoice item
    becomes a CFDI Item with the Spanish-named pricing fields below.
    """
    doclist = get_mapped_doc("Sales Invoice", source_name, {
        "Sales Invoice": {
            "doctype": "CFDI",
            "field_map": {
                "name": "ticket",
            }
        },
        "Sales Invoice Item": {
            "doctype": "CFDI Item",
            "field_map": {
                "rate": "precio_de_venta",
                "net_rate": "precio_unitario_neto",
                "amount": "monto",
                "parent": "fuente",
                "net_amount": "precio_neto",
                # "impuesto": "tax",
            }
        }
    }, target_doc)
    return doclist
# RG - SmarterWeb (PAC) integration starts here
@frappe.whitelist()
def validar_rfc(url, token, rfc):
    """Look up *rfc* in the PAC's LRFC (registered taxpayers) service.

    Args:
        url: base URL of the SmarterWeb API.
        token: bearer token for the API.
        rfc: RFC (Mexican tax id) to validate.

    Returns:
        The raw response body (JSON text) from the service.
    """
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    # timeout added so a hung PAC endpoint cannot block the worker forever
    response = requests.request("GET", url + "/lrfc/" + rfc, headers=headers, timeout=300)
    frappe.errprint(response.text)
    return response.text
@frappe.whitelist()
def cancel_by_uuid(url, token, docname, rfc, uuid):
    """Cancel a stamped CFDI at the PAC.

    On success, every source Sales Invoice referenced by the CFDI's items
    is flagged back to 'Sin Timbrar'.

    Args:
        url: base URL of the SmarterWeb API.
        token: bearer token.
        docname: CFDI document name.
        rfc: issuer RFC.
        uuid: fiscal UUID of the stamped CFDI to cancel.

    Returns:
        The raw response body from the PAC.
    """
    c = frappe.get_doc("CFDI", docname)
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    # timeout keeps a hung PAC from blocking the worker indefinitely
    response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers, timeout=300)
    if response.json().get('status') == 'error':
        # title fixed: this path cancels, it does not stamp ("timbrar")
        frappe.msgprint((response.json().get('message')), "ERROR ENCONTRADO AL CANCELAR")
    else:
        for d in c.items:
            frappe.db.set_value("Sales Invoice", d.fuente, 'cfdi_status', 'Sin Timbrar')
    return response.text
@frappe.whitelist()
def cancel_by_uuid_pago(docname):
    """Cancel the stamped payment complement (REP) of a Payment Entry.

    Reads the PAC connection data from the company's "Configuracion CFDI"
    record, posts the cancellation for the entry's `uuid_pago`, and on
    success marks the Payment Entry's cfdi_status as 'Cancelado'.

    Args:
        docname: Payment Entry name.

    Returns:
        The raw response body from the PAC.
    """
    c = frappe.get_doc("Payment Entry", docname)
    uuid = c.uuid_pago
    d = frappe.get_doc("Configuracion CFDI", c.company)
    url = 'http://' + d.url
    token = d.token
    rfc = d.rfc_emisor
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    # timeout keeps a hung PAC from blocking the worker indefinitely
    response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers, timeout=300)
    if response.json().get('status') == 'error':
        # title fixed: this path cancels, it does not stamp ("timbrar")
        frappe.msgprint((response.json().get('message')), "ERROR ENCONTRADO AL CANCELAR")
    else:
        frappe.db.set_value("Payment Entry", c.name, 'cfdi_status', 'Cancelado')
        frappe.msgprint(str(c.name) + " Cancelada exitosamente " )
    return response.text
@frappe.whitelist()
def cancel_by_uuid_egreso(url, token, docname, rfc, uuid):
    """Cancel a stamped credit note (CFDI Nota de Credito) at the PAC.

    On success the note's cfdi_status is set to 'Cancelado'.

    Args:
        url: base URL of the SmarterWeb API.
        token: bearer token.
        docname: CFDI Nota de Credito document name.
        rfc: issuer RFC.
        uuid: fiscal UUID of the stamped credit note.

    Returns:
        The raw response body from the PAC.
    """
    c = frappe.get_doc("CFDI Nota de Credito", docname)
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    # timeout keeps a hung PAC from blocking the worker indefinitely
    response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers, timeout=300)
    if response.json().get('status') == 'error':
        frappe.msgprint((response.json().get('message')), "ERROR ENCONTRADO AL CANCELAR")
    else:
        frappe.db.set_value("CFDI Nota de Credito", c.name, 'cfdi_status', 'Cancelado')
        frappe.msgprint(str(c.name) + " Cancelada exitosamente " )
    return response.text
# RG - Timbrado de CFDI
@frappe.whitelist()
def issue(url, token, docname, version, b64=False):
    """Stamp ("timbrar") a CFDI invoice through the SmarterWeb PAC.

    Builds the XML with genera_xml(), posts it as a multipart body, and on
    success: writes the stamped XML and QR PNG under the site's public files,
    attaches the XML to the CFDI, copies the SAT stamp data (UUID, seals,
    certificate, timestamp) onto the document and marks the CFDI plus its
    source Sales Invoices as 'Timbrado'.

    Args:
        url: base URL of the SmarterWeb API.
        token: bearer token.
        docname: CFDI document name.
        version: issue endpoint version segment.
        b64: unused; kept for API compatibility.

    Returns:
        The parsed JSON response from the PAC.
    """
    xml = genera_xml(docname)
    # random multipart boundary, same scheme as issue_pago (replaces the old
    # hard-coded boundary, per the original RG note)
    lst = [random.choice(string.ascii_letters + string.digits) for n in range(30)]
    boundary = "".join(lst)
    payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: " \
        "form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
    }
    # timeout added; the old debug prints of payload/headers were removed
    # because they leaked the bearer token into the error log
    response = requests.request("POST", url + "/cfdi33/issue/" + version + "/", data=payload.encode('utf-8'), headers=headers, timeout=300)
    frappe.errprint(response.json())
    if response.json().get('status') == 'error':
        if response.json().get('messageDetail'):
            frappe.msgprint((response.json().get('message')) + ". <b>Detalle del Error: </b>" + (response.json().get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
        else:
            frappe.msgprint((response.json().get('message')) , "ERROR DE SERVIDOR")
    else:
        c = frappe.get_doc("CFDI", docname)
        fechaxml = str(c.creation)
        uuid = response.json().get('data').get('uuid')
        cfdi_recibido = response.json().get('data').get('cfdi')
        dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + c.name + "_" + fechaxml[0:10]
        # write the stamped XML next to the site's public files
        with open(dest + '.xml', "w+") as f:
            f.write(cfdi_recibido)
        # attach the XML; URL is site-relative (fixed: the old code inserted an
        # extra '/public/files/' segment, producing a broken attachment link)
        save_url("/files/" + c.name + "_" + fechaxml[0:10] + ".xml", c.name + "_" + fechaxml[0:10] + ".xml", "CFDI", c.name, "Home/Attachments", 0)
        # decode and store the QR code PNG
        qr = response.json().get('data').get('qrCode')
        with open(dest + ".png", "wb") as png:
            png.write(base64.b64decode(qr))
        frappe.db.set_value("CFDI", c.name, 'qr', "/files/" + c.name + "_" + fechaxml[0:10] + ".png")
        # persist stamp metadata and flag document + source invoices as stamped
        frappe.db.set_value("CFDI", c.name, 'cfdi_status', 'Timbrado')
        for d in c.items:
            frappe.db.set_value("Sales Invoice", d.fuente, 'cfdi_status', 'Timbrado')
        frappe.db.set_value("CFDI", c.name, 'SelloCFD', response.json().get('data').get('selloCFDI'))
        frappe.db.set_value("CFDI", c.name, 'cadenaOriginalSAT', response.json().get('data').get('cadenaOriginalSAT'))
        frappe.db.set_value("CFDI", c.name, 'FechaTimbrado', response.json().get('data').get('fechaTimbrado'))
        frappe.db.set_value("CFDI", c.name, 'uuid', uuid)
        frappe.db.set_value("CFDI", c.name, 'NoCertificadoSAT', response.json().get('data').get('noCertificadoSAT'))
        frappe.db.set_value("CFDI", c.name, 'SelloSAT', response.json().get('data').get('selloSAT'))
        mensaje = str(c.name)+" TIMBRADO EXITOSO . <a class= 'alert-info' href='https://" + frappe.local.site + "/files/" + c.name + "_" + fechaxml[0:10] + ".xml' download> Descarga XML </a>"
        frappe.msgprint(mensaje)
    return response.json()
def genera_xml(docname):
    """Render the CFDI 3.3 XML ("I" comprobante) for CFDI document *docname*.

    Builds the cfdi:Comprobante envelope, one cfdi:Concepto per item row and
    an aggregated cfdi:Traslados block (IVA "002" / IEPS "003" grouped by
    rate). Returns the unsigned XML string; signing/stamping is done by the
    PAC in issue().
    """
    tieneiva = 0     # kept for parity with genera_xml_egreso; never read here
    notieneiva = 0   # set when at least one item carries no IVA/IEPS tax
    c = frappe.get_doc("CFDI", docname)
    cant = len(c.items)
    mytime = datetime.strptime('0800','%H%M').time()
    # fecha_actual = datetime.combine(c.posting_date,mytime).isoformat()[0:19]  # dacosta - stamp with posting_date instead
    # issue date = server "now" shifted -480 min (fixed UTC-8 offset, no DST)
    fecha_actual = (datetime.now()- timedelta(minutes=480)).isoformat()[0:19]
    serie = c.naming_series.replace('-','')
    folio = c.name.replace(serie,'')
    FormaPago = c.forma_de_pago
    SubTotal = 0
    # discounts are emitted as a fixed "0.00" in the templates below
    Total = '%.2f' % c.total  # provisional; recomputed after taxes are summed
    TipoDeComprobante = c.tipo_de_comprobante
    MetodoPago = c.metodo_pago
    LugarExpedicion = c.lugar_expedicion
    NoCertificado = c.no_certificado
    rfc_emisor = c.rfc_emisor
    nombre_emisor = c.nombre_emisor
    regimen_fiscal = c.regimen_fiscal
    # XML-escape '&' (fixed: the old replace('&','&') was a no-op) and strip
    # characters the PAC rejects
    tax_id = c.tax_id.replace('&','&amp;')
    nombre_receptor = c.customer_name.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ñ','N').replace('ñ','n').replace('Ü', 'U')
    tipo = []       # tax type code per taxed item ("002" IVA / "003" IEPS)
    tasa = []       # rate string per taxed item
    cantidad = []   # tax amount per taxed item
    cfdi_items = ""
    cfdi_traslados = ""
    for d in c.items:
        NoIdentificacion = d.item_code.replace('"','')
        ClaveProdServ = d.clave_producto
        ClaveUnidad = d.clave_unidad
        Cantidad = d.qty
        Unidad = d.stock_uom
        ValorUnitario = '%.2f' % d.precio_unitario_neto
        Importe = '%.2f' % d.precio_neto
        idx = d.idx
        Descripcion = d.item_name
        SubTotal = round(SubTotal + float(d.precio_neto), 2)
        if d.tipo_de_impuesto == "IVA":
            TrasladosBase = '%.2f' % d.precio_neto
            Impuesto = "002"
            TasaOCuota = .01 * float(d.tax)
            ImpuestosTrasladosTasaOCuota='%.6f' % TasaOCuota
            Importetax = '%.2f' % (TasaOCuota * float(d.precio_neto))
            tipo.append(Impuesto)
            tasa.append(ImpuestosTrasladosTasaOCuota)
            cantidad.append(Importetax)
        elif d.tipo_de_impuesto == "IEPS":
            TrasladosBase = '%.2f' % d.precio_neto
            Impuesto="003"
            TasaOCuota = .01 * float(d.tax)
            ImpuestosTrasladosTasaOCuota='%.6f' % TasaOCuota
            Importetax = '%.2f' % (TasaOCuota * float(d.precio_neto))
            tipo.append(Impuesto)
            tasa.append(ImpuestosTrasladosTasaOCuota)
            cantidad.append(Importetax)
        else:
            # tax-exempt item: emit a 0% traslado on the concept
            notieneiva = 1
            TrasladosBase= '%.2f' % d.precio_neto
            Impuesto="002"
            TasaOCuota="0.000000"
            # fixed: this name is interpolated by the template below but was
            # never assigned on this branch (KeyError for exempt items)
            ImpuestosTrasladosTasaOCuota = "0.000000"
            Importetax= "0.00"
        cfdi_items += """
        <cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="0.00">
        <cfdi:Impuestos>
        <cfdi:Traslados>
        <cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="Tasa" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
        </cfdi:Traslados>
        </cfdi:Impuestos>
        </cfdi:Concepto>""".format(**locals())
    uso_cfdi = c.uso_cfdi
    cTipo = collections.Counter(tipo)
    cTasa = collections.Counter(tasa)
    total_impuesto = 0
    TotalImpuestosTrasladados = 0.0
    # Aggregate tax amounts per (type, rate) pair.
    # NOTE(review): range(0,cant) indexes tipo/tasa/cantidad, but exempt items
    # are not appended to those lists, so a document mixing taxed and exempt
    # rows raises IndexError here; `b`/`t` also stay unbound when no pair
    # matches, and TotalImpuestosTrasladados keeps only the LAST group's sum.
    # Behavior left unchanged pending confirmation of the intended totals.
    for w, val1 in cTipo.items():
        for y, val2 in cTasa.items():
            suma = 0.0
            for z in range(0,cant):
                if (tasa[z] == y) and (tipo[z] == w):
                    suma = suma+float(cantidad[z])
                    b = y
                    t = w
            total_impuesto = total_impuesto + suma
            TotalImpuestosTrasladados = round(suma,2)
            if(suma>0):
                cfdi_traslados += """
        <cfdi:Traslado Impuesto="{t}" TipoFactor="Tasa" TasaOCuota="{b}" Importe="{TotalImpuestosTrasladados}"/>""".format(**locals())
            else:
                cfdi_traslados += """
        <cfdi:Traslado Impuesto="{t}" TipoFactor="Tasa" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
    Total = round(SubTotal + TotalImpuestosTrasladados, 2)
    cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd"
Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Descuento="0.00" Moneda="MXN" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">
<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{regimen_fiscal}"/>
<cfdi:Receptor Rfc="{tax_id}" Nombre="{nombre_receptor}" UsoCFDI="{uso_cfdi}"/>
<cfdi:Conceptos>""".format(**locals())
    cfdi += cfdi_items
    cfdi_conceptos = """
</cfdi:Conceptos>
<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
<cfdi:Traslados> """.format(**locals())
    cfdi += cfdi_conceptos
    cfdi += cfdi_traslados
    cfdi += """
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Comprobante>
""".format(**locals())
    frappe.errprint(cfdi)
    return cfdi
# RG- Para los complementos de pago (REP)
@frappe.whitelist()
def issue_pago(url, token, docname, version,user_id,user_password,folder,nombre_emisor,no_certificado, b64=False):
    """Stamp the payment complement (REP) of a Payment Entry through the PAC.

    Builds the pago10 XML with genera_xml_pago(), posts it as a multipart
    body, and on success writes the stamped XML + QR PNG to the site's public
    files, attaches the XML to the Payment Entry and stores the SAT stamp
    data (uuid_pago, seals, certificate, timestamp) on the document.

    Args:
        url/token: PAC endpoint and bearer token.
        docname: Payment Entry name.
        version: issue endpoint version segment.
        user_id/user_password/folder/nombre_emisor/no_certificado: forwarded
            to genera_xml_pago.
        b64: unused; kept for API compatibility.

    Returns:
        The parsed JSON response from the PAC.
    """
    # RG - POST request to the SmarterWeb server
    xml = genera_xml_pago(docname,url,user_id,user_password,folder,nombre_emisor,no_certificado)
    frappe.errprint(xml)
    # random 30-char multipart boundary (the old fixed one is kept commented)
    # boundary = "----=_Part_11_11939969.1490230712432"
    lst = [random.choice(string.ascii_letters + string.digits) for n in range(30)]
    boundary = "".join(lst)
    payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
    }
    response = requests.request("POST", url + "/cfdi33/issue/" + version + "/" , data=payload.encode('utf-8'), headers=headers, timeout=300)
    liga = url + "/cfdi33/issue/" + version + "/"
    if response.json().get('status') == 'error':
        if response.json().get('messageDetail'):
            frappe.msgprint((response.json().get('message')) + ". <b>Detalle del Error: </b>" + (response.json().get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
        else:
            frappe.msgprint((response.json().get('message')) , "ERROR DE SERVIDOR")
    else:
        c = frappe.get_doc("Payment Entry", docname)
        uuid = response.json().get('data').get('uuid')
        fechaxml = str(c.creation)
        # write the stamped XML under the site's public files and attach it
        cfdi_recibido = response.json().get('data').get('cfdi')
        dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + c.name + "_" + fechaxml[0:10]
        f = open( dest + '.xml',"w+")
        f.write(cfdi_recibido)
        f.close()
        save_url( "/files/" + c.name + "_" + fechaxml[0:10] + ".xml" , c.name + "_" + fechaxml[0:10] + ".xml" , "Payment Entry" , c.name , "Home/Attachments" , 0)
        # decode and store the QR code PNG
        qr = response.json().get('data').get('qrCode')
        png = open( dest + ".png", "wb")
        png.write(base64.b64decode(qr))
        png.close()
        frappe.db.set_value("Payment Entry",c.name, 'qr', "/files/" + c.name + "_" + fechaxml[0:10] + ".png")
        # persist stamp metadata on the Payment Entry
        frappe.db.set_value("Payment Entry",c.name, 'cfdi_status', 'Timbrado')
        frappe.db.set_value("Payment Entry",c.name, 'SelloCFD', response.json().get('data').get('selloCFDI'))
        frappe.db.set_value("Payment Entry",c.name, 'cadenaOriginalSAT', response.json().get('data').get('cadenaOriginalSAT'))
        frappe.db.set_value("Payment Entry",c.name, 'FechaTimbrado', response.json().get('data').get('fechaTimbrado') )
        frappe.db.set_value("Payment Entry",c.name, 'uuid_pago', uuid)
        frappe.db.set_value("Payment Entry",c.name, 'NoCertificadoSAT', response.json().get('data').get('noCertificadoSAT') )
        frappe.db.set_value("Payment Entry",c.name, 'SelloSAT', response.json().get('data').get('selloSAT') )
        frappe.msgprint(str(c.name) + " Timbrada exitosamente " )
    return response.json()
def genera_xml_pago(docname, url,user_id,user_password,folder,nombre_emisor,no_certificado):
    """Render the CFDI 3.3 payment complement (REP, pago10) XML for a Payment Entry.

    Emits a "P"-type cfdi:Comprobante with one pago10:DoctoRelacionado per
    invoice reference, carrying the running balances (ImpSaldoAnt/ImpPagado/
    ImpSaldoInsoluto) across the MXN/foreign-currency combinations, plus an
    extra pago10:Pago (FormaDePagoP "17") for factoring ("factoraje") entries.
    Also persists the updated `monto_pendiente` on each referenced invoice.

    The url/user_id/user_password/folder parameters are accepted for interface
    compatibility but are not used when building the XML (the old no-op local
    re-assignments — including a corrupted `user_password` line that broke the
    module — were removed).

    Returns the unsigned XML string.
    """
    Fecha = (datetime.now()- timedelta(minutes=480)).isoformat()[0:19]
    c = frappe.get_doc("Payment Entry", docname)
    cliente = frappe.get_doc("Customer", c.party_name)
    # prefer the explicit stamping date stored on the Payment Entry when set
    Fecha = c.fecha.isoformat()[0:19] if c.fecha else (datetime.now()- timedelta(minutes=480)).isoformat()[0:19]
    RegimenFiscal = c.regimen_fiscal
    # receiver: the factoring bank for factoraje payments, otherwise the customer
    if c.es_factoraje == 1:
        b = frappe.get_doc("Bank", c.banco)
        RfcReceptor = b.tax_id.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
    else:
        RfcReceptor = cliente.tax_id.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
    if c.es_factoraje == 1:
        NombreReceptor = c.banco
    else:
        NombreReceptor = c.party_name.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
    LugarExpedicion = c.lugar_expedicion
    mytime = datetime.strptime('1200','%H%M').time()
    FechaContabilizacion = datetime.combine(c.posting_date,mytime).isoformat()[0:19]
    Serie = c.naming_series.replace('-','')
    Folio = c.name.replace(Serie,'')
    rfc_emisor = c.rfc_emisor
    NoCertificado = no_certificado
    FormaDePagoP = c.forma_de_pago
    Monto = '%.2f' % c.received_amount
    IdDocumento = c.documento_relacionado
    MetodoDePagoDR = c.metodo_pago_cfdi
    NumOperacion = c.reference_no
    ImpSaldoAnt = '%.2f' % c.impsaldoanterior
    ImpSaldoInsoluto = '%.2f' % (c.impsaldoanterior - c.received_amount )
    cfdi_pago= """<?xml version="1.0" encoding="utf-8" ?>
<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:pago10="http://www.sat.gob.mx/Pagos" xsi:schemaLocation=" http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/Pagos http://www.sat.gob.mx/sitio_internet/cfd/Pagos/Pagos10.xsd" Version="3.3"
Serie="{Serie}" Folio="{Folio}" Fecha="{Fecha}" Sello="" NoCertificado="{NoCertificado}" Certificado="" SubTotal="0" Moneda="XXX" Total="0" TipoDeComprobante="P" LugarExpedicion="{LugarExpedicion}">
<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{RegimenFiscal}"/>
<cfdi:Receptor Rfc="{RfcReceptor}" Nombre="{NombreReceptor}" UsoCFDI="P01"/>
<cfdi:Conceptos>
<cfdi:Concepto ClaveProdServ="84111506" Cantidad="1" ClaveUnidad="ACT" Descripcion="Pago" ValorUnitario="0" Importe="0">
</cfdi:Concepto>
</cfdi:Conceptos>
<cfdi:Complemento>
<pago10:Pagos Version="1.0">""".format(**locals())
    TipoCambioP = c.target_exchange_rate
    MonedaP = c.paid_to_account_currency
    if MonedaP != 'MXN':  # payment made in a foreign currency: emit TipoCambioP
        cfdi_pago+="""
<pago10:Pago FechaPago="{FechaContabilizacion}" FormaDePagoP="{FormaDePagoP}" TipoCambioP ="{TipoCambioP}" MonedaP="{MonedaP}" Monto="{Monto}" NumOperacion="{NumOperacion}">""".format(**locals())
    else:
        cfdi_pago+="""
<pago10:Pago FechaPago="{FechaContabilizacion}" FormaDePagoP="{FormaDePagoP}" MonedaP="MXN" Monto="{Monto}" NumOperacion="{NumOperacion}">""".format(**locals())
    for x in c.references:
        si = frappe.get_doc('Sales Invoice', x.reference_name)
        MonedaDR = si.currency
        TipoCambioDR = None if si.currency == "MXN" else ('%2f' % si.conversion_rate)
        IdDocumento = x.uuid
        SerieCFDI = si.naming_series
        FolioCFDI = si.name.replace(SerieCFDI,'')
        MetodoPago = si.metodo_pago
        ImpSaldoAnt = '%.2f' % x.monto_pendiente
        ImpPagado = '%.2f' % x.pagado
        parc = 0
        frappe.errprint(x.reference_name)
        # NumParcialidad = count of already-submitted payment references
        # against this invoice (includes the current one)
        parcialidades = frappe.db.sql("""SELECT * from `tabPayment Entry Reference` WHERE reference_name = %s AND docstatus = 1""",(x.reference_name),as_dict=1)
        for conteo in parcialidades:
            parc += 1
        ImpSaldoInsoluto= '%.2f' % (float(ImpSaldoAnt) - float(ImpPagado))
        frappe.db.set_value("Sales Invoice",si.name, 'monto_pendiente', ImpSaldoInsoluto)
        frappe.errprint(IdDocumento)
        cfdi_pago+="""
<pago10:DoctoRelacionado IdDocumento="{IdDocumento}" Serie="{SerieCFDI}" Folio="{FolioCFDI}" """.format(**locals())
        if TipoCambioDR:  # invoice issued in a foreign currency
            ImpSaldoAnt = '%.2f' % ( flt(x.outstanding_amount) / flt(TipoCambioDR) )
            if si.monto_pendiente > 1:
                ImpSaldoAnt = '%.2f' % ( si.monto_pendiente )
            if MonedaP == MonedaDR:  # invoice and payment share the currency (e.g. both USD)
                ImpSaldoAnt = '%.2f' % (x.monto_pendiente)
                ImpPagado = '%.2f' % (flt(x.pagado) / flt(c.tipo_cambio))
                ImpSaldoInsoluto = '%.2f' % (float(ImpSaldoAnt) - float(ImpPagado))
                frappe.errprint("Saldo Anterior: " + ImpSaldoAnt + "Pago: " + ImpPagado + "Saldo Nuevo: " + ImpSaldoInsoluto)
                # clamp rounding overshoot: never report a negative balance
                if float(ImpSaldoInsoluto) < 0 and float(ImpPagado) > float(ImpSaldoAnt):
                    ImpSaldoInsoluto = '%.2f' % 0
                    ImpPagado = '%.2f' % flt(ImpSaldoAnt)
                    frappe.errprint("Saldo Anterior: " + ImpSaldoAnt + "Pago: " + ImpPagado + "Saldo Nuevo: " + ImpSaldoInsoluto)
                frappe.db.set_value("Sales Invoice",si.name, 'monto_pendiente', ImpSaldoInsoluto)
                cfdi_pago+="""MonedaDR="{MonedaDR}" MetodoDePagoDR="{MetodoPago}" NumParcialidad="{parc}" ImpSaldoAnt="{ImpSaldoAnt}" ImpPagado="{ImpPagado}" ImpSaldoInsoluto="{ImpSaldoInsoluto}"/>
""".format(**locals())
            else:  # invoice in a foreign currency, payment in MXN: convert the payment
                tipocambio = round(c.tipo_cambio, 4)
                ImpPagado = round(x.pagado / tipocambio, 2)
                ImpSaldoAnt = '%.2f' % (x.monto_pendiente)
                ImpSaldoInsoluto = '%.2f' % (float(ImpSaldoAnt) - float(ImpPagado))
                # clamp rounding overshoot: never report a negative balance
                if float(ImpSaldoInsoluto) < 0 and float(ImpPagado) > float(ImpSaldoAnt):
                    ImpSaldoInsoluto = '%.2f' % 0
                    ImpPagado = '%.2f' % flt(ImpSaldoAnt)
                    frappe.errprint("Saldo Anterior: " + ImpSaldoAnt + "Pago: " + ImpPagado + "Saldo Nuevo: " + ImpSaldoInsoluto)
                frappe.db.set_value("Sales Invoice",si.name, 'monto_pendiente', ImpSaldoInsoluto)
                cfdi_pago+="""TipoCambioDR="{tipocambio}" MonedaDR="{MonedaDR}" MetodoDePagoDR="{MetodoPago}" NumParcialidad="{parc}" ImpSaldoAnt="{ImpSaldoAnt}" ImpPagado="{ImpPagado}" ImpSaldoInsoluto="{ImpSaldoInsoluto}"/>
""".format(**locals())
        else:  # invoice issued in MXN
            if MonedaP != "MXN":  # payment in a foreign currency: emit TipoCambioDR
                cfdi_pago+="""TipoCambioDR="{TipoCambioP}" MonedaDR="{MonedaDR}" MetodoDePagoDR="{MetodoPago}" NumParcialidad="{parc}" ImpSaldoAnt="{ImpSaldoAnt}" ImpPagado="{ImpPagado}" ImpSaldoInsoluto="{ImpSaldoInsoluto}"/>
""".format(**locals())
            else:
                cfdi_pago+="""MonedaDR="{MonedaDR}" MetodoDePagoDR="{MetodoPago}" NumParcialidad="{parc}" ImpSaldoAnt="{ImpSaldoAnt}" ImpPagado="{ImpPagado}" ImpSaldoInsoluto="{ImpSaldoInsoluto}"/>
""".format(**locals())
    if c.es_factoraje == 1:
        # extra "compensation" payment (FormaDePagoP 17) covering the factoring
        # spread; reuses the last reference's Serie/Folio/IdDocumento
        pagado_imp = round(c.total_allocated_amount -c.base_received_amount, 2)
        parc = parc + 1
        cfdi_pago+="""</pago10:Pago>
<pago10:Pago Monto="{pagado_imp}" MonedaP="{MonedaP}" FormaDePagoP="17" FechaPago="{FechaContabilizacion}">
<pago10:DoctoRelacionado Serie="{SerieCFDI}" Folio="{FolioCFDI}" NumParcialidad="{parc}" MonedaDR="{MonedaP}" MetodoDePagoDR="PUE" ImpSaldoInsoluto="0.00" ImpSaldoAnt="{pagado_imp}" ImpPagado="{pagado_imp}" IdDocumento="{IdDocumento}"/>
""".format(**locals())
    cfdi_pago+="""</pago10:Pago>
</pago10:Pagos>
</cfdi:Complemento>
</cfdi:Comprobante>""".format(**locals())
    return cfdi_pago
@frappe.whitelist()
def parcialidades_pe(doc,method=None):
    """Number each invoice reference of a Payment Entry with its installment.

    For every row in doc.references, counts the previously submitted Payment
    Entries that reference the same Sales Invoice (compared by `fecha` when
    the related entry carries one, otherwise by `creation`) and stores the
    resulting 1-based installment number in the row's `no_parcialidad`.
    Intended as a doc_event hook (hence the unused *method* argument).
    """
    for item in doc.references:
        parc = 1
        # all payment references that point at this Sales Invoice
        parcialidades = frappe.get_list('Payment Entry Reference', filters={'reference_name':item.reference_name}, fields=['parent'])
        for parcialidad in parcialidades:
            pe_rel = frappe.get_doc('Payment Entry',parcialidad.parent)
            if pe_rel.fecha:
                # prefer the explicit CFDI date when the related entry has one
                pago = frappe.db.get_value('Payment Entry',{'name':parcialidad.parent,'fecha':['<',doc.fecha],'docstatus':['=',1]},'posting_date')
            else:
                pago = frappe.db.get_value('Payment Entry',{'name':parcialidad.parent,'creation':['<',doc.creation],'docstatus':['=',1]},'posting_date')
            if pago:
                parc = parc + 1
        item.no_parcialidad = parc
    return
# MAPA DE complementos
# Num_Operacion = c.name
# FormaDePago = c.forma_de_pago
#
# PAGO10 - Iterar sobre reference
# id documento=uuid_pago
# serie y folio = reference_name
# met pago = 'PPD'
# ImpSaldoAnt=total_moneda_original
# ImpPagado=alocated_amount
# ImpSaldoInsoluto=ImpSaldoAnt - ImpPagado
# RG-Para las notas de credito CFDI
@frappe.whitelist()
def issue_egreso(url, token, docname, version, b64=False):
    """Stamp a credit note ("E" comprobante) through the SmarterWeb PAC.

    Builds the XML with genera_xml_egreso(), posts it as a multipart body,
    and on success writes the stamped XML + QR PNG to the site's public
    files, attaches the XML to the CFDI Nota de Credito and stores the SAT
    stamp data on the document.

    Args:
        url/token: PAC endpoint and bearer token.
        docname: CFDI Nota de Credito name.
        version: issue endpoint version segment.
        b64: unused; kept for API compatibility.

    Returns:
        The parsed JSON response from the PAC.
    """
    # RG - POST request to the SmarterWeb server
    xml = genera_xml_egreso(docname)
    boundary = "----=_Part_11_11939969.1490230712432"
    payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: " \
        "form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
    }
    response = requests.request("POST", url + "/cfdi33/issue/" + version + "/" , data=payload.encode('utf-8'), headers=headers)
    liga = url + "/cfdi33/issue/" + version + "/"
    frappe.errprint(response.json())
    frappe.errprint(payload)
    # NOTE(review): this logs the Authorization bearer token to the error log —
    # consider removing before production use
    frappe.errprint(headers)
    frappe.errprint(liga)
    if response.json().get('status') == 'error':
        frappe.msgprint((response.json().get('message')), "ERROR ENCONTRADO AL TIMBRAR")
    else:
        # RG - pull the stamp data out of the response and persist it
        c = frappe.get_doc("CFDI Nota de Credito", docname)
        uuid = response.json().get('data').get('uuid')
        fechaxml = str(c.creation)
        # write the stamped XML under the site's public files and attach it
        cfdi_recibido = response.json().get('data').get('cfdi')
        dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + c.name + "_" + fechaxml[0:10]
        f = open( dest + '.xml',"w+")
        f.write(cfdi_recibido)
        f.close()
        save_url( "/files/" + c.name + "_" + fechaxml[0:10] + ".xml" , c.name + "_" + fechaxml[0:10] + ".xml" , "CFDI Nota de Credito" , c.name , "Home/Attachments" , 0)
        # decode and store the QR code PNG
        qr = response.json().get('data').get('qrCode')
        png = open( dest + ".png", "wb")
        png.write(base64.b64decode(qr))
        png.close()
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'qr', "/files/" + c.name + "_" + fechaxml[0:10] + ".png")
        # persist stamp metadata on the credit note
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'cfdi_status', 'Timbrado')
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'sellocfd', response.json().get('data').get('selloCFDI'))
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'cadenaoriginalsat', response.json().get('data').get('cadenaOriginalSAT'))
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'fechatimbrado', response.json().get('data').get('fechaTimbrado') )
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'uuid', uuid)
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'nocertificadosat', response.json().get('data').get('noCertificadoSAT') )
        frappe.db.set_value("CFDI Nota de Credito",c.name, 'sellosat', response.json().get('data').get('selloSAT') )
        frappe.msgprint(str(c.name) + " Timbrada exitosamente " )
    return response.json()
def genera_xml_egreso(docname):
    """Render the CFDI 3.3 credit-note ("E" comprobante) XML for a
    CFDI Nota de Credito document.

    Emits the comprobante envelope (with currency/exchange-rate attributes),
    the cfdi:CfdiRelacionados block linking the substituted invoices'
    UUIDs, one cfdi:Concepto per item and a single aggregated IVA traslado.
    Returns the unsigned XML string; stamping is done in issue_egreso().
    """
    tieneiva = 0     # set when at least one item carries 16% IVA
    notieneiva = 0   # set when at least one item is tax-exempt
    c = frappe.get_doc("CFDI Nota de Credito", docname)
    # dacosta - stamp using the document's posting_date at a fixed 08:00
    mytime = datetime.strptime('0800','%H%M').time()
    fecha_actual = datetime.combine(c.posting_date,mytime).isoformat()[0:19]
    serie = c.naming_series.replace('-','')
    folio = c.name.replace(serie,'')
    FormaPago = c.forma_de_pago
    SubTotal = float(c.total_neto)
    Total = '%.2f' % (c.total)
    TipoDeComprobante = "E"
    MetodoPago = c.metodo_pago
    LugarExpedicion = c.lugar_expedicion
    NoCertificado = c.no_certificado
    rfc_emisor = c.rfc_emisor
    # XML-escape '&' (fixed: the old replace('&','&') was a no-op) and strip
    # characters the PAC rejects
    nombre_emisor = c.nombre_emisor.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
    regimen_fiscal = c.regimen_fiscal
    tax_id = c.tax_id.replace('&','&amp;')
    nombre_receptor = c.customer_name.replace('&','&amp;').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
    uso_cfdi = c.uso_cfdi
    tipo_rel = c.tipo_de_relacion
    Currency = c.currency
    if Currency == 'MXN':
        TipoCambio = 1
    else:
        TipoCambio = '%.4f' % float(c.conversion_rate)
    cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd"
Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Moneda="{Currency}" TipoCambio = "{TipoCambio}" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
    site = frappe.local.site
    # related-CFDI block: link the UUID of every substituted invoice
    cfdi+= """
<cfdi:CfdiRelacionados TipoRelacion="{tipo_rel}">""".format(**locals())
    for d in c.si_sustitucion:
        cfdi+="""
<cfdi:CfdiRelacionado UUID="{d.uuid}"/>""".format(**locals())
    cfdi+="""
</cfdi:CfdiRelacionados>""".format(**locals())
    cfdi+="""
<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{regimen_fiscal}"/>
<cfdi:Receptor Rfc="{tax_id}" Nombre="{nombre_receptor}" UsoCFDI="{uso_cfdi}"/>
<cfdi:Conceptos>""".format(**locals())
    for d in c.items:
        NoIdentificacion = d.item_code
        ClaveProdServ = d.clave_producto
        ClaveUnidad = d.clave_unidad
        Cantidad = d.qty
        Unidad = d.stock_uom
        ValorUnitario = '%.2f' % d.precio_unitario_neto
        Importe = '%.2f' % d.precio_neto
        idx =d.idx
        Descripcion = d.item_name
        if d.tax == 16:
            tieneiva = 1
            TrasladosBase = '%.2f' % d.precio_neto
            Impuesto = "002"
            TasaOCuota = "0.160000"
            Importetax = '%.2f' % d.impuestos_totales
        else:
            notieneiva = 1
            TrasladosBase= '%.2f' % d.precio_neto
            Impuesto="002"
            TasaOCuota="0.000000"
            Importetax= "0.00"
        cfdi += """
<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="Tasa" TasaOCuota="{TasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
    TotalImpuestosTrasladados='%.2f' % c.total_impuestos
    TotalIva = '%.2f' % c.total_iva
    frappe.errprint(TotalIva)
    TotalIeps = '%.2f' % c.total_ieps
    cfdi += """
</cfdi:Conceptos>
<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
<cfdi:Traslados>
""".format(**locals())
    # NOTE(review): a single exempt row forces the 0% summary traslado even
    # when other rows carry 16% IVA — behavior left unchanged
    if notieneiva == 1:
        cfdi += """ <cfdi:Traslado Impuesto="002" TipoFactor="Tasa" TasaOCuota="0.000000" Importe="0.00"/>""".format(**locals())
    else:
        cfdi += """ <cfdi:Traslado Impuesto="002" TipoFactor="Tasa" TasaOCuota="0.160000" Importe="{TotalIva}"/>""".format(**locals())
    cfdi += """
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Comprobante>
""".format(**locals())
    frappe.errprint(cfdi)
    return cfdi
# Dev note: direct Sales Invoice stamping (timbrado) helpers below
@frappe.whitelist()
def sales_invoice_timbrado(url, token, docname, version, b64=False):
# RG - POST request al server de swarterweb
xml = sales_invoice_timbrado_xml(docname)
frappe.errprint(xml)
lst = [random.choice(string.ascii_letters + string.digits) for n in range(30)]
boundary = "".join(lst)
payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: " \
"form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
headers = {
'Authorization': "bearer " + token,
'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
}
response = requests.request("POST", url + "/cfdi33/issue/" + version + "/" , data=payload.encode('utf-8'), headers=headers)
liga = url + "/cfdi33/issue/" + version + "/"
frappe.errprint(response.json())
if response.json().get('status') == 'error':
if response.json().get('messageDetail'):
frappe.msgprint((response.json().get('message')) + ". <b>Detalle del Error: </b>" + (response.json().get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
else:
frappe.msgprint((response.json().get('message')) , "ERROR DE SERVIDOR")
else:
# RG- Recuperar el response y manejar la info pa grabar los archivos/datos en el CFDI
c = frappe.get_doc("Sales Invoice", docname)
uuid = response.json().get('data').get('uuid')
cfdi_recibido = response.json().get('data').get('cfdi')
fechaxml = str(c.creation)
dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + c.name + "_" + fechaxml[0:10]
f = open( dest + '.xml',"w+")
f.write(cfdi_recibido)
f.close()
save_url( "/files/" + c.name + "_" + fechaxml[0:10] + ".xml" , c.name + "_" + fechaxml[0:10] + ".xml" , "Sales Invoice" , c.name , "Home/Attachments" , 0)
qr = response.json().get('data').get('qrCode')
png = open( dest + ".png", "wb")
png.write(base64.b64decode(qr))
png.close()
frappe.db.set_value("Sales Invoice",c.name, 'qr', "/files/" + c.name + "_" + fechaxml[0:10] + ".png")
frappe.db.set_value("Sales Invoice",c.name, 'cfdi_status', 'Timbrado')
frappe.db.set_value("Sales Invoice",c.name, 'sellocfd', response.json().get('data').get('selloCFDI'))
frappe.db.set_value("Sales Invoice",c.name, 'cadenaoriginalsat', response.json().get('data').get('cadenaOriginalSAT'))
frappe.db.set_value("Sales Invoice",c.name, 'fechatimbrado', response.json().get('data').get('fechaTimbrado') )
frappe.db.set_value("Sales Invoice",c.name, 'uuid', uuid)
frappe.db.set_value("Sales Invoice",c.name, 'nocertificadosat', response.json().get('data').get('noCertificadoSAT') )
frappe.db.set_value("Sales Invoice",c.name, 'sellosat', response.json().get('data').get('selloSAT') )
mensaje = "TIMBRADO EXITOSO . <a class= 'alert-info' href='https://" + frappe.local.site + "/files/" + c.name + "_" + fechaxml[0:10] + ".xml' download> Descarga XML </a>"
frappe.msgprint(mensaje)
return ["TIMBRADO EXITOSO!",mensaje,uuid,xml]
def sales_invoice_timbrado_xml(docname):
	"""Build the (unsigned) CFDI 3.3 XML string for a Sales Invoice.

	Reads the Sales Invoice *docname*, its Customer and the company's
	"Configuracion CFDI" record; renders one <cfdi:Concepto> per invoice
	item while accumulating grouped <cfdi:Traslado> tax summary nodes,
	then assembles the <cfdi:Comprobante> envelope. When
	``c.comercio_exterior == 1`` the ComercioExterior 1.1 complement
	(Emisor/Receptor domiciles and Mercancias) is appended instead of the
	<cfdi:Impuestos> summary. Sello/NoCertificado/Certificado are left
	empty — the PAC fills them at stamping time. Returns the XML string.
	"""
	c = frappe.get_doc("Sales Invoice", docname)
	cliente = frappe.get_doc("Customer", c.customer)
	cant = len(c.items)  # item count; also bounds the tax-grouping index loop below
	company = frappe.get_doc("Configuracion CFDI", c.company)
	# horaminuto = c.posting_time
	# frappe.errprint(h oraminuto)
	# mytime = horaminuto.strftime("%H:%M:%S")
	# frappe.errprint(horaminuto)
	# return
	#fecha_actual = datetime.combine(c.posting_date,mytime).isoformat()[0:19] #dacosta - para hacer que se timbre con la fecha de posting_date
	descuento = round(c.discount_amount, 2)
	# Stamp date taken from the document creation timestamp, ISO format, seconds precision.
	fecha_actual = (c.creation).isoformat()[0:19]
	# Serie/Folio derived from the naming series: folio is the name minus the series prefix.
	serie = c.naming_series.replace('-','')
	folio = c.name.replace(serie,'')
	# frappe.errprint(c.name.replace(serie,''))
	FormaPago = c.forma_de_pago
	#SubTotal = '%.2f' % c.net_total
	SubTotal = 0  # accumulated from item amounts inside the item loop below
	Total = '%.2f' % (c.grand_total)  # overwritten later as SubTotal - descuento + taxes
	# Total = 3509.40
	TipoDeComprobante = 'I'  # 'I' = Ingreso (income) voucher
	# TipoCambio = 1 if c.currency = "MXN" else '%2f' % c.conversion_rate
	MetodoPago = c.metodo_pago
	LugarExpedicion = company.lugar_expedicion
	Currency = c.currency
	if Currency == 'MXN':
		TipoCambio = 1
	else:
		TipoCambio = '%.4f' % c.conversion_rate
	rfc_emisor = company.rfc_emisor
	# NOTE(review): the '&' -> '&' (and '@' -> '@') replaces below are no-ops; they look
	# like they were meant to escape '&' as '&amp;' for XML — confirm intent before relying
	# on them. The accented-vowel replaces strip diacritics for the PAC.
	nombre_emisor = company.nombre_emisor.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
	regimen_fiscal = company.regimen_fiscal
	tax_id = cliente.tax_id.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
	nombre_receptor = c.customer_name.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
	uso_cfdi = c.uso_cfdi
	# Parallel per-item accumulators used later to group taxes by (Impuesto, TasaOCuota).
	tipo = []       # tax code per item, e.g. '002' (IVA) / '003' (IEPS)
	tasa = []       # formatted TasaOCuota per item
	cantidad = []   # formatted tax amount per item
	cfdi_items = ""
	cfdi_traslados = ""
	cfdi_mercancias = ""
	if c.comercio_exterior == 1:
		cfdi_mercancias = """
		<cce11:Mercancias>
		""".format(**locals())
	# --- One <cfdi:Concepto> per invoice item -------------------------------
	for x in c.items:
		i = frappe.get_doc("Item", x.item_code)
		if c.comercio_exterior == 1:
			arancelaria = frappe.get_doc("Fraccion Arancelaria", i.fraccion_arancelaria)
			UMT = arancelaria.umt
		else:
			arancelaria = ""
			UMT = ""
		NoIdentificacion = x.item_code.replace('"','').replace('&','&')
		ClaveProdServ = i.clave_producto
		ClaveUnidad = i.clave_unidad
		Cantidad = x.qty
		Unidad = x.stock_uom
		ValorUnitario = '%.2f' % x.rate
		Importe = '%.2f' % x.amount
		idx = x.idx
		# Strip Quill editor HTML wrappers from the rich-text description.
		Descripcion = x.description.replace('"','').replace('&','&').replace('<div class="ql-editor read-mode"><p>','').replace('<div><p>','').replace('</p></div>','').replace('<br>','').replace('<p>','').replace('</p>','').replace('<div class=ql-editor read-mode>','').replace('@','@').replace('<strong>','').replace('</strong>','')
		des = round(x.descuento*x.qty, 2)  # per-line discount = unit discount * qty
		TrasladosBase= '%.2f' % (float(x.amount) - float(des))
		SubTotal = round(SubTotal + float(x.amount), 2)
		TasaOCuota = .01 * float(x.tasa)  # percent -> fraction
		ImpuestosTrasladosTasaOCuota='%.6f' % TasaOCuota
		Importetax= '%.2f' % (TasaOCuota * (float(x.amount) - des))
		Tasa = 'Tasa'  # TipoFactor; switched to 'Exento' in the EXENTO branch
		FraccionArancelaria = i.fraccion_arancelaria
		UnidadAduana = i.unidad_aduana
		TipoImpuesto = x.tipo_de_impuesto
		if c.comercio_exterior == 1:
			# Foreign-trade invoices: every item becomes a <cce11:Mercancia> and its
			# tax type is forced to EXTERIOR (no per-item tax node is emitted).
			NoIdentificacion_exterior = str(NoIdentificacion) + " " + str(x.idx)
			cfdi_mercancias += """
			<cce11:Mercancia NoIdentificacion="{NoIdentificacion_exterior}" FraccionArancelaria="{FraccionArancelaria}" CantidadAduana="{Cantidad}" UnidadAduana="{UMT}" ValorUnitarioAduana="{ValorUnitario}" ValorDolares="{Importe}"/>
			""".format(**locals())
			TipoImpuesto = "EXTERIOR"
		if TipoImpuesto == 'IVA':
			Impuesto = '002'
			tipo.append(Impuesto)
			tasa.append(ImpuestosTrasladosTasaOCuota)
			cantidad.append(Importetax)
			frappe.errprint(Importetax)
			cfdi_items += """
		<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
			<cfdi:Impuestos>
				<cfdi:Traslados>
					<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
				</cfdi:Traslados>
			</cfdi:Impuestos>
		</cfdi:Concepto>""".format(**locals())
		elif TipoImpuesto == "SIN IVA":
			# Same tax code as IVA but with whatever rate the item carries (typically 0%).
			Impuesto="002"
			tipo.append(Impuesto)
			tasa.append(ImpuestosTrasladosTasaOCuota)
			cantidad.append(Importetax)
			frappe.errprint(Importetax)
			cfdi_items += """
		<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
			<cfdi:Impuestos>
				<cfdi:Traslados>
					<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
				</cfdi:Traslados>
			</cfdi:Impuestos>
		</cfdi:Concepto>""".format(**locals())
		elif TipoImpuesto == "IEPS":
			Impuesto="003"
			tipo.append(Impuesto)
			tasa.append(ImpuestosTrasladosTasaOCuota)
			cantidad.append(Importetax)
			frappe.errprint(Importetax)
			cfdi_items += """
		<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
			<cfdi:Impuestos>
				<cfdi:Traslados>
					<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
				</cfdi:Traslados>
			</cfdi:Impuestos>
		</cfdi:Concepto>""".format(**locals())
		elif TipoImpuesto == "EXENTO":
			# Exempt items: TipoFactor='Exento', no TasaOCuota/Importe on the Traslado node.
			TrasladosBase1= x.net_amount
			TrasladosBase= '%.2f' % (TrasladosBase1)
			Impuesto="002"
			ImpuestosTrasladosTasaOCuota="0.000000"
			Importetax= "0.00"
			Tasa = 'Exento'
			tipo.append(Impuesto)
			tasa.append(ImpuestosTrasladosTasaOCuota)
			cantidad.append(Importetax)
			frappe.errprint(Importetax)
			cfdi_items += """
		<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
			<cfdi:Impuestos>
				<cfdi:Traslados>
					<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}"/>
				</cfdi:Traslados>
			</cfdi:Impuestos>
		</cfdi:Concepto>""".format(**locals())
		elif TipoImpuesto == "EXTERIOR":
			# Foreign-trade concept: no per-item tax node, and nothing is appended to the
			# tipo/tasa/cantidad accumulators, so the grouping loops below are skipped.
			NoIdentificacion_exterior = str(NoIdentificacion) + " " + str(x.idx)
			cfdi_items += """
		<cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion_exterior}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
		</cfdi:Concepto>""".format(**locals())
			# tipo.append(Impuesto)
			# tasa.append(ImpuestosTrasladosTasaOCuota)
			# cantidad.append(Importetax)
			# frappe.errprint(Importetax)
			# cfdi_items += """
		# <cfdi:Concepto ClaveProdServ="{ClaveProdServ}" NoIdentificacion="{NoIdentificacion}" Cantidad="{Cantidad}" ClaveUnidad="{ClaveUnidad}" Unidad="{Unidad}" Descripcion="{Descripcion}" ValorUnitario="{ValorUnitario}" Importe="{Importe}" Descuento="{des}">
			# <cfdi:Impuestos>
				# <cfdi:Traslados>
					# <cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
				# </cfdi:Traslados>
			# </cfdi:Impuestos>
		# </cfdi:Concepto>""".format(**locals())
	# # TotalImpuestosTrasladados= 4558.38
	# --- Group per-item taxes into summary <cfdi:Traslado> nodes ------------
	# For every (tax code, rate) combination, sum the per-item amounts.
	# NOTE(review): when comercio_exterior==1 the accumulators are empty, so these
	# loops do not run at all; the 'suma = "EXTERIOR"' sentinel branch therefore
	# never executes (and would raise on 'suma > 0' if it did) — worth confirming.
	cTipo = collections.Counter(tipo)
	cTasa = collections.Counter(cTasa) if False else collections.Counter(tasa)
	total_impuesto = 0
	TotalImpuestosTrasladados = 0.00
	for w, val1 in cTipo.items():
		for y, val2 in cTasa.items():
			if c.comercio_exterior == 1:
				suma = "EXTERIOR"
			else:
				suma =0
			for z in range(0,cant):
				if (tasa[z] == y) and (tipo[z] == w):
					suma1 = suma+float(cantidad[z])
					suma = round(suma1, 2)
					b = y
					t = w
			total_impuesto = total_impuesto+suma
			TotalImpuestosTrasladados = suma
			if(suma>0):
				cfdi_traslados += """
	<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
			elif(suma==0):
				cfdi_traslados += """
	<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
			elif(suma=="EXTERIOR"):
				cfdi_traslados += ""
	Total = round(SubTotal - descuento + TotalImpuestosTrasladados, 2)
	# --- Assemble the <cfdi:Comprobante> envelope ----------------------------
	cfdi = ""
	if c.comercio_exterior == 1:
		Totalant = round(SubTotal - descuento + TotalImpuestosTrasladados, 2)
		Total = '%.2f' % (Totalant)
		cfdi_comprobante = """<?xml version="1.0" encoding="UTF-8"?>
	<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:cce11="http://www.sat.gob.mx/ComercioExterior11" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/ComercioExterior11 http://www.sat.gob.mx/sitio_internet/cfd/ComercioExterior11/ComercioExterior11.xsd"
	Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
	Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Descuento="{descuento}" Moneda="{Currency}" TipoCambio = "{TipoCambio}" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
	else:
		cfdi_comprobante = """<?xml version="1.0" encoding="UTF-8"?>
	<cfdi:Comprobante xmlns:cfdi="http://www.sat.gob.mx/cfd/3" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd"
	Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
	Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Descuento="{descuento}" Moneda="{Currency}" TipoCambio = "{TipoCambio}" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
	cfdi += cfdi_comprobante
	#
	#es_sustitucion = frappe.get_value('Sales Invoice', docname,'sustituidos')
	# try:
	#site = frappe.local.site
	# if site == "demo.totall.mx":
	# CFDI substitution: link the replaced invoices via <cfdi:CfdiRelacionados>.
	if c.cfdi_sustitucion == 1:
		relacion = c.relacion
		cfdi += """
	<cfdi:CfdiRelacionados TipoRelacion="{relacion}">""".format(**locals())
		for d in c.si_sustitucion:
			cfdi+="""
	<cfdi:CfdiRelacionado UUID="{d.uuid}"/>""".format(**locals())
		cfdi+="""
	</cfdi:CfdiRelacionados>""".format(**locals())
	# if es_sustitucion:
	# 	if c.sustituidos == 1:
	# 		frappe.errprint('si existe')
	# else:
	# 	frappe.errprint('no existe')
	cfdi+= """
	<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{regimen_fiscal}"/>
	<cfdi:Receptor Rfc="{tax_id}" Nombre="{nombre_receptor}" UsoCFDI="{uso_cfdi}"/>
	<cfdi:Conceptos>""".format(**locals())
	cfdi += cfdi_items
	if c.comercio_exterior == 1:
		# Foreign trade: generic RFC, and no <cfdi:Impuestos> summary section.
		tax_id = "XAXX010101000"
		cfdi_conceptos = """
	</cfdi:Conceptos>""".format(**locals())
	else:
		cfdi_conceptos = """
	</cfdi:Conceptos>
	<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
	<cfdi:Traslados>""".format(**locals())
	cfdi += cfdi_conceptos
	cfdi += cfdi_traslados
	cfdi_complemento = ""
	cfdi_emisor = ""
	cfdi_receptor = ""
	# Default closing tags; replaced below when the ComercioExterior complement is used.
	cfdi_header = """
	</cfdi:Traslados>
	</cfdi:Impuestos>
	</cfdi:Comprobante>
	""".format(**locals())
	if c.comercio_exterior == 1:
		# Receiver's foreign address parsed out of the customer Address record.
		EDireccion = frappe.get_doc("Address", c.customer_address)
		ECalle = re.findall("[^0-9]+", EDireccion.address_line1)[0].replace('#', '')
		ENumeroExterior = re.findall("\d+", EDireccion.address_line1)[0]
		EColonia = EDireccion.county
		EEstado = EDireccion.clave_estado
		ECp = EDireccion.pincode
		#########################################
		# Country letters (UNIDECODE) for the origin
		pais = frappe.get_doc("CFDI Clave Estado", EDireccion.clave_estado)
		EPais = pais.pais
	if c.comercio_exterior == 1:
		cfdi_complemento = """
	<cfdi:Complemento>
	<cce11:ComercioExterior Version="1.1" TipoOperacion="2" ClaveDePedimento="{c.clave_pedimento}" CertificadoOrigen="0" Incoterm="{c.incoterm}" Subdivision="0" TipoCambioUSD="{TipoCambio}" TotalUSD="{Total}" xmlns:cce11="http://www.sat.gob.mx/ComercioExterior11" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/ComercioExterior11 http://www.sat.gob.mx/sitio_internet/cfd/ComercioExterior11/ComercioExterior11.xsd">""".format(**locals())
		# NOTE(review): the emitter's street/number/locality below are hard-coded;
		# only the postal code comes from configuration — confirm this is intended.
		cfdi_emisor = """
		<cce11:Emisor>
			<cce11:Domicilio Calle="López Cotilla" NumeroExterior="13" Localidad="12" Municipio="101" Estado="JAL" Pais="MEX" CodigoPostal="{company.lugar_expedicion}"/>
		</cce11:Emisor>
		""".format(**locals())
		cfdi_receptor = """
		<cce11:Receptor>
			<cce11:Domicilio Calle="{ECalle}" NumeroExterior="{ENumeroExterior}" Colonia="{EColonia}" Estado="{EEstado}" Pais="{EPais}" CodigoPostal="{ECp}"/>
		</cce11:Receptor>
		""".format(**locals())
		cfdi_header = """
	</cce11:Mercancias>
	</cce11:ComercioExterior>
	</cfdi:Complemento>
	</cfdi:Comprobante>
	""".format(**locals())
	cfdi += cfdi_complemento
	cfdi += cfdi_emisor
	cfdi += cfdi_receptor
	cfdi += cfdi_mercancias
	cfdi += cfdi_header
	frappe.errprint(cfdi)
	return cfdi
@frappe.whitelist()
def cancel_by_uuid_sales_invoice(url, token,uuid,docname, rfc):
	"""Cancel a stamped Sales Invoice CFDI at the PAC by its UUID.

	POSTs to ``<url>/cfdi33/cancel/<rfc>/<uuid>`` with the bearer *token*.
	On success sets the invoice's ``cfdi_status`` to 'Cancelado' and shows
	a confirmation; on error shows the PAC's message (and detail when
	present). Returns the raw response body text in both cases.
	"""
	c = frappe.get_doc("Sales Invoice", docname)
	headers = {
		'Authorization': "bearer " + token,
		'Content-Type': "application/json"
	}
	response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers)
	result = response.json()  # parse the JSON body once instead of on every access
	if result.get('status') == 'error':
		if result.get('messageDetail'):
			frappe.msgprint((result.get('message')) + ". <b>Detalle del Error: </b>" + (result.get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
		else:
			frappe.msgprint((result.get('message')) , "ERROR DE SERVIDOR")
	else:
		frappe.db.set_value("Sales Invoice", c.name, 'cfdi_status','Cancelado')
		frappe.msgprint(str(c.name)+ " Cancelada Exitosamente")
	return response.text
#############################################
# Carta Porte (waybill) section starts here
@frappe.whitelist()
def carta_porte_timbrado(url, token, docname, version, b64=False):
	"""Stamp ("timbrar") a Delivery Trip's Carta Porte CFDI at the PAC.

	Builds the Carta Porte XML for *docname*, POSTs it as a hand-built
	multipart body to ``<url>/cfdi33/issue/<version>/`` using the bearer
	*token*, and on success writes the stamped XML and QR PNG into the
	site's public files, attaches the XML, and records the stamping data
	(UUID, seals, stamp date, SAT certificate) on the Delivery Trip.

	Args:
		url: base URL of the PAC service.
		token: bearer token for the PAC API.
		docname: Delivery Trip name to stamp.
		version: PAC API endpoint version segment.
		b64: accepted for call compatibility; not used in this path.

	Returns:
		[status, html message, uuid, xml] on success; None after a
		msgprint when the PAC reports an error.
	"""
	# RG - POST request to the swarterweb server
	xml = carta_porte_timbrado_xml(docname)
	frappe.errprint(xml)
	# Random 30-char alphanumeric multipart boundary.
	lst = [random.choice(string.ascii_letters + string.digits) for n in range(30)]
	boundary = "".join(lst)
	payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: " \
		"form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
	headers = {
		'Authorization': "bearer " + token,
		'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
	}
	response = requests.request("POST", url + "/cfdi33/issue/" + version + "/" , data=payload.encode('utf-8'), headers=headers)
	result = response.json()  # parse the JSON body once instead of re-calling response.json()
	frappe.errprint(result)
	if result.get('status') == 'error':
		if result.get('messageDetail'):
			frappe.msgprint((result.get('message')) + ". <b>Detalle del Error: </b>" + (result.get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
		else:
			frappe.msgprint((result.get('message')) , "ERROR DE SERVIDOR")
	else:
		# RG- Take the response apart and save the files/data on the CFDI
		c = frappe.get_doc("Delivery Trip", docname)
		data = result.get('data')
		uuid = data.get('uuid')
		cfdi_recibido = data.get('cfdi')
		fechaxml = str(c.creation)
		dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + c.name + "_" + fechaxml[0:10]
		# Context managers guarantee the handles are closed even if a write fails.
		with open(dest + '.xml', "w+") as f:
			f.write(cfdi_recibido)
		save_url( "/files/" + c.name + "_" + fechaxml[0:10] + ".xml" , c.name + "_" + fechaxml[0:10] + ".xml" , "Delivery Trip" , c.name , "Home/Attachments" , 0)
		qr = data.get('qrCode')
		with open(dest + ".png", "wb") as png:
			png.write(base64.b64decode(qr))
		frappe.db.set_value("Delivery Trip",c.name, 'qr', "/files/" + c.name + "_" + fechaxml[0:10] + ".png")
		frappe.db.set_value("Delivery Trip",c.name, 'cfdi_status', 'Timbrado')
		frappe.db.set_value("Delivery Trip",c.name, 'sellocfd', data.get('selloCFDI'))
		frappe.db.set_value("Delivery Trip",c.name, 'cadenaoriginalsat', data.get('cadenaOriginalSAT'))
		frappe.db.set_value("Delivery Trip",c.name, 'fechatimbrado', data.get('fechaTimbrado') )
		frappe.db.set_value("Delivery Trip",c.name, 'uuid', uuid)
		frappe.db.set_value("Delivery Trip",c.name, 'nocertificadosat', data.get('noCertificadoSAT') )
		frappe.db.set_value("Delivery Trip",c.name, 'sellosat', data.get('selloSAT') )
		# BUGFIX: the download link previously pointed at "<uuid>.xml", but the file
		# is saved as "<name>_<date>.xml" (see above) — link the file that exists,
		# matching sales_invoice_timbrado.
		mensaje = "TIMBRADO EXITOSO . <a class= 'alert-info' href='https://" + frappe.local.site + "/files/" + c.name + "_" + fechaxml[0:10] + ".xml' download> Descarga XML </a>"
		frappe.msgprint(mensaje)
		return ["TIMBRADO EXITOSO!",mensaje,uuid,xml]
def carta_porte_timbrado_xml(docname):
c = frappe.get_doc("Delivery Trip", docname)
TranspInternac = ""
if c.transporte_internacional == 1:
TranspInternac = 'Si'
else:
TranspInternac = 'No'
company = frappe.get_doc("Configuracion CFDI", c.company)
fecha_actual = (c.creation).isoformat()[0:19]
fecha_salida = (c.departure_time).isoformat()[0:19]
serie = c.naming_series.replace('-','')
folio = c.name.replace(serie,'')
FormaPago = c.forma_de_pago
if c.tipo_de_comprobante == "I":
SubTotal = '%.2f' % c.precio_traslado
Total = round(c.precio_traslado * 1.16, 2)
else:
SubTotal = 0
Total = 0
TipoDeComprobante = c.tipo_de_comprobante
MetodoPago = c.metodo_de_pago
LugarExpedicion = company.lugar_expedicion
Currency = c.currency
if Currency == 'MXN':
TipoCambio = 1
else:
TipoCambio = '%.4f' % c.conversion_rate
rfc_emisor = company.rfc_emisor
nombre_emisor = company.nombre_emisor.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
regimen_fiscal = company.regimen_fiscal
uso_cfdi = c.uso_cfdi
##########################################
#Datos de Direccion de Origen
ODireccion = frappe.get_doc("Address", c.driver_address)
OCalle = re.findall("[^0-9]+", ODireccion.address_line1)[0].replace('#', '')
ONumeroExterior = re.findall("\d+", ODireccion.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
OClave_estado = ODireccion.clave_estado
InfOClave_estado = frappe.get_doc("CFDI Clave Estado", OClave_estado)
OPais = InfOClave_estado.pais
articulo_claveDT = c.unidad_pesocp
suma_distancia = 0
##########################################
#Datos de Direccion de destinatario
for dest in c.delivery_stops:
UCliente = dest.customer
cliente = frappe.get_doc("Customer", UCliente)
nombre_receptor = UCliente.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
tax_id = cliente.tax_id.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
rfc_receptor = cliente.tax_id
Fecha_llegada = (dest.estimated_arrival).isoformat()[0:19]
UDireccion = frappe.get_doc("Address", dest.address)
UCalle = re.findall("[^0-9]+", UDireccion.address_line1)[0].replace('#', '')
UNumeroExterior = re.findall("\d+", UDireccion.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
UClave_estado = UDireccion.clave_estado
InfUClave_estado = frappe.get_doc("CFDI Clave Estado", UClave_estado)
UPais = InfUClave_estado.pais
UCodigo_postal = UDireccion.pincode
##########################################
distancia = round(dest.distance, 2)
suma_distancia += round(distancia, 2)
##########################################
#Obtener informacion de Notra de Entrega
DN = frappe.get_doc("Delivery Note", dest.delivery_note)
cant = len(DN.items)
PesoBrutoTotal = DN.total_net_weight
cfdi_ubicacion_destino = """
<cartaporte20:Ubicacion TipoUbicacion="Destino" RFCRemitenteDestinatario="{rfc_receptor}" FechaHoraSalidaLlegada="{Fecha_llegada}" DistanciaRecorrida="{distancia}">
<cartaporte20:Domicilio Calle="{UCalle}" NumeroExterior="{UNumeroExterior}" Estado="{UClave_estado}" Pais="{UPais}" CodigoPostal="{UCodigo_postal}" />
</cartaporte20:Ubicacion>
""".format(**locals())
##########################################
#Obtener informacion de articulos en Notra de Entrega
tipo = []
tasa = []
cantidad = []
cfdi_items = ""
cfdi_traslados = ""
for articulos_nota in DN.items:
articulo_qty = articulos_nota.qty
articulo_peso = articulos_nota.total_weight
row = str(articulos_nota.idx)
NumTotalMercancias = len(row)
##########################################
#Obtener informacion del articulo en general
informacion_articulo = frappe.get_doc("Item", articulos_nota.item_code)
articulo_cps = informacion_articulo.clave_producto
articulo_cu = informacion_articulo.clave_unidad
articulo_claveUP = informacion_articulo.unidad_pesocp
material_peligroso = informacion_articulo.material_peligroso
articulo_descripcion = informacion_articulo.description
articulos_mercancias_header = """ </cartaporte20:Ubicaciones>
<cartaporte20:Mercancias PesoBrutoTotal="{PesoBrutoTotal}" UnidadPeso="{articulo_claveDT}" NumTotalMercancias="{NumTotalMercancias}" >""".format(**locals())
articulos_mercancias = """
<cartaporte20:Mercancia BienesTransp="{articulo_cps}" Descripcion="{articulo_descripcion}" Cantidad="{articulo_qty}" ClaveUnidad="{articulo_claveUP}" PesoEnKg="{articulo_peso}">
</cartaporte20:Mercancia>""".format(**locals())
NoIdentificacion = articulos_nota.item_code.replace('"','').replace('&','&')
ClaveProdServ = informacion_articulo.clave_producto
ClaveUnidad = informacion_articulo.clave_unidad
Cantidad = articulos_nota.qty
Unidad = articulos_nota.stock_uom
ValorUnitario = '%.2f' % c.precio_traslado
Importe = '%.2f' % c.precio_traslado
idx = articulos_nota.idx
Descripcion = articulos_nota.item_name.replace('"','').replace('&','&')
TrasladosBase= '%.2f' % c.precio_traslado
TasaOCuota = .01 * float(informacion_articulo.tasa)
ImpuestosTrasladosTasaOCuota='%.6f' % TasaOCuota
Importetax= '%.2f' % (TasaOCuota * (float(c.precio_traslado)))
Tasa = 'Tasa'
if informacion_articulo.tipo_de_impuesto == 'IVA':
Impuesto = '002'
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "SIN IVA":
Impuesto="002"
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "IEPS":
Impuesto="003"
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "EXENTO":
TrasladosBase1= articulos_nota.net_amount
TrasladosBase= '%.2f' % (TrasladosBase1)
Impuesto="002"
ImpuestosTrasladosTasaOCuota="0.000000"
Importetax= "0.00"
Tasa = 'Exento'
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
cTipo = collections.Counter(tipo)
cTasa = collections.Counter(tasa)
total_impuesto = 0
TotalImpuestosTrasladados = 0.00
for w, val1 in cTipo.items():
for y, val2 in cTasa.items():
suma =0
for z in range(0,cant):
if (tasa[z] == y) and (tipo[z] == w):
suma1 = suma+float(cantidad[z])
suma = round(suma1, 2)
b = y
t = w
total_impuesto = total_impuesto+suma
TotalImpuestosTrasladados = suma
if(suma>0):
cfdi_traslados += """
<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
else:
cfdi_traslados += """
<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
# Total = round(SubTotal + TotalImpuestosTrasladados, 2)
##########################################
#Si es auto transporte AutotransporteFederal
if c.via == '01':
#Obtener datos de Vehiculo
vehicle = frappe.get_doc("Vehicle", c.vehicle)
PermSCT = vehicle.tipo_permiso
NumPermisoSCT = vehicle.numero_permiso
NombreAseg = vehicle.insurance_company
NumPolizaSeguro = vehicle.policy_no
ConfigVehicular = vehicle.configuracion_vehicular
AnioModeloVM = vehicle.model
PlacaVM = c.vehicle.replace("-","")
##########################################
#Obtener datos de Operador
operador = frappe.get_doc("Driver", c.driver)
RFCOperador = operador.rfc
NumLicencia = operador.license_number
NombreOperador = operador.full_name
#Obtener datos de Direccion de Operador
DO = frappe.get_doc("Address", c.driver_address)
DOCalle = re.findall("[^0-9]+", DO.address_line1)[0].replace('#', '')
DONumeroExterior = re.findall("\d+", DO.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
InfDOClave_estado = frappe.get_doc("CFDI Clave Estado", UClave_estado)
DOPais = InfDOClave_estado.pais
DOClave_estado = DO.clave_estado
DOCodigo_postal = DO.pincode
if c.tipo_de_comprobante == "I":
cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/CartaPorte20 http://www.sat.gob.mx/sitio_internet/cfd/CartaPorte/CartaPorte20.xsd"
xmlns:cartaporte20="http://www.sat.gob.mx/CartaPorte20" Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
xmlns:cfdi="http://www.sat.gob.mx/cfd/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Moneda="{Currency}" TipoCambio = "{TipoCambio}" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
else:
cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/CartaPorte20 http://www.sat.gob.mx/sitio_internet/cfd/CartaPorte/CartaPorte20.xsd"
xmlns:cartaporte20="http://www.sat.gob.mx/CartaPorte20" Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" NoCertificado=""
xmlns:cfdi="http://www.sat.gob.mx/cfd/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Certificado="" SubTotal="{SubTotal}" Moneda="XXX" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
cfdi+= """
<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{regimen_fiscal}"/>
<cfdi:Receptor Rfc="{tax_id}" Nombre="{nombre_receptor}" UsoCFDI="{uso_cfdi}"/>
<cfdi:Conceptos>""".format(**locals())
if c.tipo_de_comprobante == "I":
cfdi += cfdi_items
else:
cfdi += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}" />
</cfdi:Conceptos>
""".format(**locals())
cfdi_conceptos = """
</cfdi:Conceptos>
<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
<cfdi:Traslados>""".format(**locals())
if c.tipo_de_comprobante == "I":
cfdi += cfdi_conceptos
cfdi += cfdi_traslados
cfdi += """
</cfdi:Traslados>
</cfdi:Impuestos>
""".format(**locals())
cfdi_carta_porte = """<cfdi:Complemento>
<cartaporte20:CartaPorte Version="2.0" TranspInternac="{TranspInternac}" TotalDistRec="{suma_distancia}">
<cartaporte20:Ubicaciones>""".format(**locals())
cfdi_ubicacion_origen = """
<cartaporte20:Ubicacion TipoUbicacion="Origen" RFCRemitenteDestinatario="{rfc_emisor}" FechaHoraSalidaLlegada="{fecha_salida}">
<cartaporte20:Domicilio Calle="{DOCalle}" NumeroExterior="{DONumeroExterior}" Estado="{DOClave_estado}" Pais="MEX" CodigoPostal="{DOCodigo_postal}" />
</cartaporte20:Ubicacion>""".format(**locals())
cfdi_autotransporte = """
<cartaporte20:Autotransporte PermSCT="{PermSCT}" NumPermisoSCT="{NumPermisoSCT}">
<cartaporte20:IdentificacionVehicular ConfigVehicular="{ConfigVehicular}" PlacaVM="{PlacaVM}" AnioModeloVM="{AnioModeloVM}" />
<cartaporte20:Seguros AseguraRespCivil="{NombreAseg}" PolizaRespCivil="{NumPolizaSeguro}"/>
</cartaporte20:Autotransporte>
</cartaporte20:Mercancias>
""".format(**locals())
cfdi_figura_transporte = """ <cartaporte20:FiguraTransporte>
<cartaporte20:TiposFigura TipoFigura="01" RFCFigura="VAAM130719H60" NumLicencia="a234567890">
</cartaporte20:TiposFigura>
</cartaporte20:FiguraTransporte>
</cartaporte20:CartaPorte>
</cfdi:Complemento>
</cfdi:Comprobante>""".format(**locals())
cfdi += cfdi_carta_porte
cfdi += cfdi_ubicacion_origen
cfdi += cfdi_ubicacion_destino
cfdi += articulos_mercancias_header
cfdi += articulos_mercancias
cfdi += cfdi_autotransporte
cfdi += cfdi_figura_transporte
frappe.errprint(cfdi)
return cfdi
@frappe.whitelist()
def cancel_by_uuid_carta_porte(url, token, uuid, docname, rfc):
    """Cancel a stamped carta-porte CFDI at the PAC by its UUID.

    On PAC error, shows the error (and detail, when present) to the user;
    on success, marks the Delivery Trip's cfdi_status as 'Cancelado'.
    Returns the raw response body either way.
    """
    c = frappe.get_doc("Delivery Trip", docname)
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers)
    # Parse the JSON body once; the original re-parsed it on every access.
    payload = response.json()
    if payload.get('status') == 'error':
        if payload.get('messageDetail'):
            frappe.msgprint(payload.get('message') + ". <b>Detalle del Error: </b>" + payload.get('messageDetail'), "ERROR DE SERVIDOR (PAC) ")
        else:
            frappe.msgprint(payload.get('message'), "ERROR DE SERVIDOR")
    else:
        frappe.db.set_value("Delivery Trip", c.name, 'cfdi_status', 'Cancelado')
        frappe.msgprint(str(c.name) + " Cancelada Exitosamente")
    return response.text
| StarcoderdataPython |
3271625 | <filename>spiders/cnjsj.py
from datetime import datetime
from urllib.parse import urlencode
import gevent
import requests
from bs4 import BeautifulSoup
from gevent.queue import Queue
from config import *
# Proxy address as "ip:port"; empty string disables proxying.
proxy = ""
# Output path for the scraped CSV (a fresh versioned file).
path_ = version_control('new')
# Maximum number of queued tasks (set once by load_id_queue).
max_len = 0
# Real-time progress counter, incremented per fetched page.
count = 0
# Queue of work ids still to crawl.
idQ = Queue()
# Column headers for the CSV output.
# NOTE(review): save_data() writes TITLE (from `config import *`), so this
# local `title` list appears to be unused -- confirm before removing.
title = ['作品编号', '作品名称', '作品分类', '作品简介', '开源代码与组件使用情况说明', '作品安装说明',
         '作品效果图', '设计思路', '设计重点和难点', '指导老师自评', '其他说明', '部署链接1', '部署链接2', '插图']
def load_id_queue(works_command: str or list):
    """Fill the global task queue with work ids.

    An empty string or empty list means "queue every id on record"
    (loaded via load_data_from_id_set, skipping the header row);
    otherwise the given id (str) or ids (list) are queued verbatim.
    """
    global max_len
    try:
        if isinstance(works_command, str):
            pending = ([works_command] if works_command
                       else load_data_from_id_set(mode='spider_key')[1:])
            for work_id in pending:
                idQ.put_nowait(work_id)
        elif isinstance(works_command, list):
            pending = works_command or load_data_from_id_set(mode='spider_key')[1:]
            for work_id in pending:
                idQ.put_nowait(work_id)
    finally:
        # Remember the queue's starting size for progress messages.
        max_len = idQ.qsize()
def save_data(flow=None, _init=False):
    """Append one row to the output CSV.

    With _init=True the file is (re)created first and the TITLE header
    row is written; the (possibly empty) `flow` row is appended after.
    """
    row = [] if flow is None else flow
    if _init:
        with open(path_, 'w', encoding='utf-8', newline='') as f:
            csv.writer(f).writerow(TITLE)
        print(magic_msg('>>> the csv file has been initialed\n>>> {}'.format(path_), 'g'))
    with open(path_, 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(row)
class CnJsjSpider(object):
    """Scraper for entries of the China Collegiate Computing Contest site.

    Fetches each entry's detail page by work id, parses the detail table
    and streams the fields into the CSV maintained by save_data().
    """

    def __init__(self, work_id=''):
        # Re-create the output CSV and fill the global id queue.
        save_data(_init=True)
        load_id_queue(work_id)

    @staticmethod
    def handle_html(key):
        """Download one entry page; return its HTML text, or None on failure."""
        global count
        # Build the detail-page URL from the work id.
        data = {
            'keys': key
        }
        url = DEMO_URL + urlencode(data)
        count += 1
        print(magic_msg('\r>>>【{}/{}】 goto {}'.format(count, max_len, url), 'c'), end='')
        # Optional proxy, taken from the module-level `proxy` ("ip:port").
        proxies = {
            'http': 'http://' + proxy
        }
        try:
            if proxy:
                res = requests.get(url, headers=SPIDER_HEADERS, proxies=proxies)
            else:
                res = requests.get(url, headers=SPIDER_HEADERS)
            # 200: page fetched normally.
            if res.status_code == 200:
                return res.text
            # 302 here indicates an IP ban; rotate the proxy.
            elif res.status_code == 302:
                print(magic_msg(text=url, text_color='r'))
        except requests.exceptions.RequestException:
            print(magic_msg(url, text_color='yellow'))
        return None

    @staticmethod
    def parse_html(html: str) -> dict:
        """Parse one entry page into an ordered field dict.

        :param html: response.text of the detail page
        :return: dict keyed by the Chinese column names used in the CSV
        """
        soup = BeautifulSoup(html, 'html.parser')
        # Detail rows of the entry table (the first three rows are headers).
        data_flow = [item.find('td').text.strip() for item in soup.find_all('tr', attrs={'bgcolor': '#ffffff'})][3:]
        # Summary cells: work id, title, ..., category.
        flag_flow = [info.text for info in soup.find_all('td', attrs={'colspan': '5'})]
        # Absolute URLs of the embedded screenshots.
        img_flow = [HOME_PAGE + img['src'] for img in soup.find_all('img')]
        try:
            link_1 = data_flow[8][5:].strip()
            link_2 = data_flow[9][5:].strip()
        except IndexError:
            # Deployment links are optional on some entries.
            link_1, link_2 = 'N/A', 'N/A'
        # Assemble the output record; slice offsets strip the field labels.
        out_flow = {
            '作品编号': HOME_PAGE + '/chaxun/?keys=' + flag_flow[0],
            '作品名称': flag_flow[1],
            '作品分类': flag_flow[-1],
            '作品简介': data_flow[0][4:].strip(),
            '开源代码与组件使用情况说明': data_flow[1][13:].strip(),
            '作品安装说明': data_flow[2][6:].strip(),
            '作品效果图': data_flow[3][5:].strip(),
            '设计思路': data_flow[4][4:].strip(),
            '设计重点和难点': data_flow[5][7:].strip(),
            '指导老师自评': data_flow[6][6:].strip(),
            '其他说明': data_flow[7][4:].strip(),
            '部署链接1': link_1,
            '部署链接2': link_2,
            '插图': img_flow
        }
        return out_flow

    def coroutines_acceleration(self, power: int):
        """Run the crawl with `power` concurrent gevent workers.

        :param power: number of coroutines; clamped to [3, max_len]
        """
        task_list = []
        # Invalid setting: fall back to a small fixed worker count.
        if power <= 0:
            power = 3
            print(magic_msg('Warning : Invalid credentials(crawl power)', 'y'))
        # More workers than tasks would just idle; clamp to the queue size.
        elif power > max_len:
            power = max_len
        print(magic_msg('>>> POWER = {}'.format(power), 'g'))
        for x in range(power):
            task = gevent.spawn(self.start_the_crawler)
            task_list.append(task)
        gevent.joinall(task_list)

    def start_the_crawler(self):
        """Worker loop: pull ids off the queue until it is empty."""
        while not idQ.empty():
            key = idQ.get_nowait()
            try:
                # Fetch and parse one entry.
                html: str = self.handle_html(key)
                flow: dict = self.parse_html(html)
                # Echo progress.
                println(flow)
                # Persist the record.
                save_data(flow=list(flow.values()))
            except Exception as et:
                # Task failed: append a note to the log and keep the worker alive.
                with open(LOG_PATH, 'a', encoding='utf-8', ) as f:
                    # Bug fix: str(...).strip('.')[0] kept only the first
                    # character of the timestamp; split('.') drops the
                    # microseconds as intended.
                    now_ = str(datetime.now()).split('.')[0]
                    log_msg = HOME_PAGE + '/chaxun/?keys={}\n'.format(key)
                    err_msg = """
                    >>>【{}】
                    ERROR_KEY:{}
                    ERROR_INFO:{}
                    """.format(now_, log_msg, et)
                    f.write(err_msg)
if __name__ == '__main__':
    # Crawl a single entry by default; POWER comes from config.
    cjs = CnJsjSpider(work_id='70775')
    cjs.coroutines_acceleration(power=POWER)
| StarcoderdataPython |
1614678 | <filename>utilities/get_system.py
# -*- coding: utf-8 -*-
# (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["<NAME>"]
# __license__ = "Apache2.0"
# __maintainer__ = "<NAME>"
# __email__ = "<EMAIL>".
from flask import Blueprint, render_template, request, redirect, session, url_for, abort
import os
# from werkzeug import secure_filename
from mongoengine import Q
import json
import requests
from database.system import System
import time
from collections import OrderedDict
# from qumulo.rest_client import RestClient
requests.packages.urllib3.disable_warnings()
def get_system():
    """Return [uuid, family, serno, model, software, build] of the stored System.

    Each value is utf-8 encoded (bytes), preserving the original contract.
    Reads the first System document from the database.
    """
    record = System.objects.first()
    fields = ('uuid', 'family', 'serno', 'model', 'software', 'build')
    return [getattr(record, name).encode('utf-8') for name in fields]
| StarcoderdataPython |
183070 | <gh_stars>10-100
import shlex
from prompt_toolkit.completion import Completer, Completion
class CommandCompleter(Completer):
    """Manage completion suggestions to the CLI."""
    def __init__(self, client):
        """Create a CLI commands completer based on the client object."""
        self.client = client
        # Swagger model definitions, used to complete `param.field` arguments.
        self.definitions = client.swagger_spec.definitions
    def get_completions(self, document, complete_event):
        """Yields CLI completion based on the input text and the client object."""
        # `position` counts how many chars of the current fragment are being
        # replaced; prompt_toolkit expects it as a negative start offset.
        for completion, position in self._text_to_completions(document.text):
            yield Completion(completion, start_position=-position)
    def _text_to_completions(self, text):
        """Convert raw text into completion suggestions."""
        try:
            words = shlex.split(text)
        except ValueError:
            # Unbalanced quotes etc.: fall back to a naive whitespace split.
            words = text.split(" ")
        operation, remaining_text = self._extract_operation(words=words)
        if callable(operation):
            # A full operation path was typed; complete its --parameters.
            return self._get_operation_params_completions(
                original_text=text,
                remaining_text=remaining_text,
                operation=operation,
            )
        # Otherwise complete the next attribute of the resolved object.
        return self._get_completion(
            original_text=text,
            remaining_text=remaining_text,
            options=dir(operation)
        )
    def _extract_operation(self, words):
        """Get the required client operation and separate it from the remaining text."""
        operation = self.client
        for word in words:
            attr = getattr(operation, word, None)
            if attr is None:
                # First unresolvable word: treat the last word as the
                # fragment still being typed.
                return operation, words[-1]
            operation = attr
        # Every word resolved; nothing left to complete.
        return operation, ""
    def _get_operation_params_completions(self, original_text, remaining_text, operation):
        """Get suggestions based on operation and remaining text."""
        # Extra chars (the "--" prefix) that are part of the fragment being
        # replaced but were stripped before matching.
        completion_offset = 0
        # Strip argument prefix
        if remaining_text.startswith("--"):
            if len(remaining_text.split("=")) == 2:
                # Already a valid param
                remaining_text = ""
            else:
                remaining_text = remaining_text[2:]
                completion_offset = 2
        # Handel definition type argument completions
        if "." in remaining_text:
            return self._get_definition_completions(
                original_text=original_text,
                remaining_text=remaining_text,
                operation=operation
            )
        if self.should_hide_completions(original_text=original_text,
                                        remaining_text=remaining_text,
                                        allowed_suffixes=(" ", "-")):
            return []
        return [("--" + attribute, len(remaining_text) + completion_offset)
                for attribute in operation.operation.params
                if attribute.startswith(remaining_text) and not attribute.startswith("_")]
    def _get_definition_completions(self, original_text, remaining_text, operation):
        """Get suggestions based on definition and remaining text.

        Resolves `param.sub` fragments through the parameter's $ref schema
        into the swagger definition, then completes the definition's fields.
        """
        param_words = remaining_text.split(".")
        # Only two words parameter completion are supported
        if len(param_words) != 2:
            return []
        param_name, sub_name = param_words
        if param_name not in operation.operation.params:
            return []
        param_object = operation.operation.params[param_name]
        param_schema = param_object.param_spec.get("schema")
        if not param_schema:
            return []
        param_ref = param_schema.get("$ref")
        if not param_ref:
            return []
        # "$ref" looks like "#/definitions/Name"; keep the trailing name.
        definition_name = param_ref.split('/')[-1]
        definition = self.definitions.get(definition_name)
        if not definition:
            return []
        return self._get_completion(
            original_text=original_text,
            remaining_text=sub_name,
            options=dir(definition())
        )
    def _get_completion(self, original_text, remaining_text, options):
        """Get completion properties based on text and possible options."""
        if self.should_hide_completions(original_text=original_text,
                                        remaining_text=remaining_text,
                                        allowed_suffixes=(" ", ".")):
            return []
        # Public options only; position = length of the fragment to replace.
        return [(option, len(remaining_text)) for option in options
                if option.startswith(remaining_text) and not option.startswith("_")]
    @staticmethod
    def should_hide_completions(original_text, remaining_text, allowed_suffixes):
        # Hide suggestions right after a completed word unless the cursor
        # follows a separator that legitimately starts a new fragment.
        return (original_text and
                not remaining_text and
                original_text[-1] not in allowed_suffixes)
| StarcoderdataPython |
65079 | """model.py"""
import torch
import torch.nn as nn
# import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def reparametrize(mu, logvar):
    """Sample z ~ N(mu, exp(logvar)) using the reparameterization trick.

    :param mu: mean tensor of the latent Gaussian
    :param logvar: log-variance tensor, same shape as mu
    :return: a differentiable sample mu + std * eps, eps ~ N(0, I)
    """
    std = logvar.div(2).exp()
    # torch.randn_like replaces the deprecated Variable(std.data.new(...)
    # .normal_()) pattern: same shape/dtype/device, eps ~ N(0, I).
    eps = torch.randn_like(std)
    return mu + std * eps
class View(nn.Module):
    """Reshape layer: applies ``tensor.view(size)`` so a reshape can be
    placed inside an ``nn.Sequential`` pipeline."""

    def __init__(self, size):
        super(View, self).__init__()
        # Target shape passed straight to Tensor.view (may contain -1).
        self.size = size

    def forward(self, tensor):
        target_shape = self.size
        return tensor.view(target_shape)
class BetaVAE_H(nn.Module):
    """Model proposed in original beta-VAE paper(Higgins et al, ICLR, 2017).

    Per-layer shape comments suggest an nc x 240 x 360 input
    (halved seven times down to 256 x 2 x 3) -- TODO confirm with callers.
    The encoder outputs 2*z_dim values: [mu, logvar] concatenated.
    """
    def __init__(self, z_dim=10, nc=3):
        super(BetaVAE_H, self).__init__()
        self.z_dim = z_dim  # latent dimensionality
        self.nc = nc        # number of input channels
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, 32, 4, 2, 1),  # B, 32, 120, 180
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),  # B, 32, 60, 90
            nn.ReLU(True),
            nn.Conv2d(32, 64, 4, 2, (3, 4)),  # B, 64, 32, 48
            nn.ReLU(True),
            nn.Conv2d(64, 64, 4, 2, 1),  # B, 64, 16, 24
            nn.ReLU(True),
            nn.Conv2d(64, 64, 4, 2, 1),  # B, 64, 8, 12
            nn.ReLU(True),
            nn.Conv2d(64, 128, 4, 2, 1),  # B, 128, 4, 6
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1),  # B, 256, 2, 3
            nn.ReLU(True),
            View((-1, 256 * 2 * 3)),  # B, 1536
            nn.Linear(1536, z_dim * 2),  # B, z_dim*2 (mu ++ logvar)
        )
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, 1536),  # B, 1536
            View((-1, 256, 2, 3)),  # B, 256, 2, 3
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1),  # B, 128, 4, 6
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1),  # B, 64, 8, 12
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 4, 2, 1),  # B, 64, 16, 24
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 4, 2, 2),  # B, 64, 32, 48
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, (1, 2)),  # B, 32, 60, 91
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),  # B, 32, 120, 180
            nn.ReLU(True),
            nn.ConvTranspose2d(32, nc, 4, 2, 1),  # B, nc, 240, 360
        )
        self.weight_init()
    def weight_init(self):
        """Apply kaiming_init to every layer of encoder and decoder."""
        for block in self._modules:
            for m in self._modules[block]:
                kaiming_init(m)
    def forward(self, x):
        """Encode x, sample z, decode; returns (x_recon, mu, logvar)."""
        distributions = self._encode(x)
        # Split the 2*z_dim encoder output into mean and log-variance.
        mu = distributions[:, :self.z_dim]
        logvar = distributions[:, self.z_dim:]
        z = reparametrize(mu, logvar)
        x_recon = self._decode(z)
        return x_recon, mu, logvar
    def _encode(self, x):
        return self.encoder(x)
    def _decode(self, z):
        return self.decoder(z)
class BetaVAE_B(BetaVAE_H):
    """Model proposed in understanding beta-VAE paper(Burgess et al, arxiv:1804.03599, 2018).

    MLP-headed convolutional VAE for nc x 64 x 64 inputs (see per-layer
    shape comments). Inherits only forward/encode/decode plumbing from
    BetaVAE_H; both networks are rebuilt here.
    """
    def __init__(self, z_dim=10, nc=1):
        # NOTE(review): this first builds BetaVAE_H's (larger) networks
        # with default sizes and immediately replaces them below --
        # wasteful but harmless. Consider calling nn.Module.__init__.
        super(BetaVAE_B, self).__init__()
        self.nc = nc
        self.z_dim = z_dim
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, 32, 4, 2, 1),  # B, 32, 32, 32
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),  # B, 32, 16, 16
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),  # B, 32, 8, 8
            nn.ReLU(True),
            nn.Conv2d(32, 32, 4, 2, 1),  # B, 32, 4, 4
            nn.ReLU(True),
            View((-1, 32 * 4 * 4)),  # B, 512
            nn.Linear(32 * 4 * 4, 256),  # B, 256
            nn.ReLU(True),
            nn.Linear(256, 256),  # B, 256
            nn.ReLU(True),
            nn.Linear(256, z_dim * 2),  # B, z_dim*2 (mu ++ logvar)
        )
        self.decoder = nn.Sequential(
            nn.Linear(z_dim, 256),  # B, 256
            nn.ReLU(True),
            nn.Linear(256, 256),  # B, 256
            nn.ReLU(True),
            nn.Linear(256, 32 * 4 * 4),  # B, 512
            nn.ReLU(True),
            View((-1, 32, 4, 4)),  # B, 32, 4, 4
            nn.ConvTranspose2d(32, 32, 4, 2, 1),  # B, 32, 8, 8
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),  # B, 32, 16, 16
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),  # B, 32, 32, 32
            nn.ReLU(True),
            nn.ConvTranspose2d(32, nc, 4, 2, 1),  # B, nc, 64, 64
        )
        self.weight_init()
    def weight_init(self):
        """Apply kaiming_init to every layer of encoder and decoder."""
        for block in self._modules:
            for m in self._modules[block]:
                kaiming_init(m)
    def forward(self, x):
        """Encode x, sample z, decode; returns (x_recon, mu, logvar)."""
        distributions = self._encode(x)
        mu = distributions[:, :self.z_dim]
        logvar = distributions[:, self.z_dim:]
        z = reparametrize(mu, logvar)
        # Reshape the decoder output back to the input's exact size.
        x_recon = self._decode(z).view(x.size())
        return x_recon, mu, logvar
    def _encode(self, x):
        return self.encoder(x)
    def _decode(self, z):
        return self.decoder(z)
def kaiming_init(m):
    """Initialise a module in place.

    Linear/Conv2d: Kaiming-normal weights, zero bias (when present).
    BatchNorm1d/2d: unit weight, zero bias (when present).
    Other module types are left untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        # kaiming_normal_ is the in-place, non-deprecated spelling of the
        # init.kaiming_normal used originally (removed in modern torch).
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
def normal_init(m, mean, std):
    """Initialise a module in place.

    Linear/Conv2d: N(mean, std) weights, zero bias (when present).
    BatchNorm1d/2d: unit weight, zero bias (when present).
    Other module types are left untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        # Bug fix: the original tested `m.bias.data is not None`, which
        # raises AttributeError when the layer was built with bias=False
        # (m.bias is None, so it has no .data attribute).
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
if __name__ == '__main__':
    # This module only defines models; nothing to run directly.
    pass
| StarcoderdataPython |
2127 | <filename>csat/django/fields.py<gh_stars>0
from lxml import etree
from django import forms
from django.db import models
class XMLFileField(models.FileField):
    """A FileField whose uploads are validated against an XML Schema."""

    def __init__(self, *args, **kwargs):
        # Path to the .xsd file; popped before FileField sees the kwargs.
        self.schema = kwargs.pop('schema')
        super(XMLFileField, self).__init__(*args, **kwargs)

    def clean(self, *args, **kwargs):
        cleaned = super(XMLFileField, self).clean(*args, **kwargs)
        # Parse the uploaded document...
        with cleaned as fh:
            doc = etree.parse(fh)
        # ...then the schema, and validate one against the other.
        with open(self.schema) as fh:
            schema = etree.XMLSchema(etree.parse(fh))
        if not schema.validate(doc):
            raise forms.ValidationError('The XML file failed to validate '
                                        'against the supplied schema.')
        return cleaned
| StarcoderdataPython |
192212 | import functools
from flask import (
request, g, redirect, url_for,
flash, render_template,
Blueprint, session
)
from werkzeug.security import check_password_hash, generate_password_hash
from blog.model import User
from blog.db import db_session
# All authentication views live under the /auth URL prefix.
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form; on POST, create the user and go to login."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        if not username:
            error = 'Username is required!'
        elif not password:
            error = 'Password is required!'
        else:
            # Reject duplicate usernames.
            existing = db_session.query(User).filter_by(username=username).first()
            error = ('User {} is already registered!'.format(username)
                     if existing is not None else None)
        if error is None:
            db_session.add(User(username=username,
                                password=generate_password_hash(password)))
            db_session.commit()
            return redirect(url_for('auth.login'))
        flash(error)
    return render_template('auth/register.html')
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form; on POST, authenticate and store uid in session."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # Look the user up unconditionally, mirroring the original flow.
        user = db_session.query(User).filter_by(username=username).first()
        if not username:
            error = 'Username is required!'
        elif not password:
            error = 'Password is required!'
        elif user is None:
            error = 'User {} is not exists!'.format(username)
        elif not check_password_hash(user.password, password):
            error = 'Password is incorrect!'
        else:
            # Credentials check out: start a fresh session.
            session.clear()
            session['uid'] = user.id
            return redirect(url_for('home'))
        flash(error)
    return render_template('auth/login.html')
@bp.before_app_request
def load_logged_in_user():
    """Before every request, attach the logged-in User (or None) to g."""
    uid = session.get('uid')
    g.user = (None if uid is None
              else db_session.query(User).filter_by(id=uid).first())
@bp.route('/logout')
def logout():
    """Drop the whole session and return to the home page."""
    session.clear()
    return redirect(url_for('home'))
def login_required(fn):
    """View decorator that bounces anonymous users to the login page."""
    @functools.wraps(fn)
    def guarded(**kwargs):
        # g.user is populated by load_logged_in_user before each request.
        return fn(**kwargs) if g.user else redirect(url_for('auth.login'))
    return guarded
| StarcoderdataPython |
104373 | # -*- coding: utf-8 -*-
"""Sanity checks for testing."""
import unittest
class TestSanity(unittest.TestCase):
    """A trivial test case."""

    def test_sanity(self):
        """Run a trivial test."""
        nothing = None
        self.assertIsNone(nothing)
| StarcoderdataPython |
1645678 | # https://github.com/Wireframe-Magazine/Wireframe56
# Wireframe #56:
import pgzrun
import pickle
# --- Boulder-Dash-style game state (Wireframe #56) ---
editorState = True       # True while the level-editor overlay is active
editorEnabled = True     # allow toggling the editor with SPACE
if editorState:
    WIDTH = 1000         # widen the window to fit the editor side panel
gameState = count = 0    # gameState: 0 = playing, 1 = squashed; count = frame counter
editItem = "blank"       # block type currently selected in the editor
editorMessage = ""
editorMessageCount = 0   # frames left to show editorMessage
# Palette buttons shown in the editor panel.
blockTypes = [
    Actor('blank', center=(900, 250)),
    Actor('soil', center=(900, 300)),
    Actor('rock', center=(900, 350)),
    Actor('gem', center=(900, 400)),
    Actor('wall', center=(900, 450))
]
loadButton = Actor('load', center=(850, 580))
saveButton = Actor('save', center=(950, 580))
# 14x20 grid of tile names ("" = empty); filled by loadMap().
items = [[] for _ in range(14)]
gems = collected = 0
rockford = Actor('rockford-1', center=(60, 100))
def draw():
    # Redraw the frame: status banner, tile grid, Rockford, editor panel.
    screen.fill((0,0,0))
    if gems == 0 and collected > 0: infoText("YOU COLLECTED ALL THE GEMS!")
    else: infoText("GEMS : "+ str(collected))
    for r in range(0, 14):
        for c in range(0, 20):
            # Rockford is drawn as an Actor below, not as a grid tile.
            if items[r][c] != "" and items[r][c] != "rockford":
                screen.blit(items[r][c], ((c*40), 40+(r*40)))
    # When squashed (gameState == 1) Rockford flashes: 1 frame in 4.
    if gameState == 0 or (gameState == 1 and count%4 == 0): rockford.draw()
    drawEditor()
def update():
    # Game tick: every 10th frame, rescan the grid (count gems, read the
    # keyboard at Rockford's tile, let rocks fall) and move Rockford.
    global count,gems
    mx = my = 0
    if count%10 == 0:
        gems = 0
        # Scan bottom-up/right-to-left so a rock that falls one row is not
        # processed again later in the same pass.
        for r in range(13, -1, -1):
            for c in range(19, -1, -1):
                if items[r][c] == "gem":
                    gems += 1
                if items[r][c] == "rockford":
                    if keyboard.left: mx = -1
                    if keyboard.right: mx = 1
                    if keyboard.up: my = -1
                    if keyboard.down: my = 1
                if items[r][c] == "rock": testRock(r,c)
        # Face left/right/forward according to horizontal input.
        rockford.image = "rockford"+str(mx)
        if gameState == 0 and editorState == False: moveRockford(mx,my)
    count += 1
def on_mouse_down(pos):
    # Editor input: paint tiles, press load/save, or pick a palette block.
    global editItem
    if editorState:
        # Convert pixel position to grid column/row (grid starts at y=40).
        c = int(pos[0]/40)
        r = int((pos[1]-40)/40)
        if loadButton.collidepoint(pos): loadMap()
        if saveButton.collidepoint(pos): saveMap()
        # Inside the editable area (border walls excluded): paint the tile.
        if r > 0 and r < 14 and c > 0 and c < 20:
            if editItem != "blank":
                items[r][c] = editItem
            else : items[r][c] = ""
        else:
            # Outside the grid: maybe a palette button was clicked.
            for b in range(0, len(blockTypes)):
                if blockTypes[b].collidepoint(pos):
                    editItem = blockTypes[b].image
def on_key_down(key):
    # SPACE toggles the editor; ESCAPE resets the game and reloads the map.
    global editorState, gameState, rockford, collected, gems
    if key == keys.SPACE and editorEnabled:
        editorState = not editorState
    if key == keys.ESCAPE:
        gems = collected = gameState = 0
        rockford = Actor('rockford-1', center=(60, 100))
        loadMap()
def infoText(t):
    """Draw the status banner text centred above the play area."""
    screen.draw.text(
        t,
        center=(400, 20),
        owidth=0.5,
        ocolor=(255, 255, 255),
        color=(255, 0, 255),
        fontsize=40,
    )
def moveRockford(x,y):
global collected
rx, ry = int((rockford.x-20)/40), int((rockford.y-40)/40)
if items[ry+y][rx+x] != "rock" and items[ry+y][rx+x] != "wall":
if items[ry+y][rx+x] == "gem": collected +=1
items[ry][rx], items[ry+y][rx+x] = "", "rockford"
rockford.pos = (rockford.x + (x*40), rockford.y + (y*40))
if items[ry+y][rx+x] == "rock" and y == 0:
if items[ry][rx+(x*2)] == "":
items[ry][rx], items[ry][rx+(x*2)], items[ry+y][rx+x] = "", "rock", "rockford"
rockford.x += x*40
def testRock(r,c):
    # Gravity for the rock at (r, c): fall straight down, or roll off the
    # top of another rock diagonally when the side and diagonal are clear.
    if items[r+1][c] == "":
        moveRock(r,c,r+1,c)
    elif items[r+1][c] == "rock" and items[r+1][c-1] == "" and items[r][c-1] == "":
        moveRock(r,c,r+1,c-1)
    elif items[r+1][c] == "rock" and items[r+1][c+1] == "" and items[r][c+1] == "":
        moveRock(r,c,r+1,c+1)
def moveRock(r1, c1, r2, c2):
    """Shift a rock from (r1,c1) to (r2,c2); squash Rockford if he is below."""
    global gameState
    items[r2][c2] = items[r1][c1]
    items[r1][c1] = ""
    # Landing directly on Rockford ends the game.
    if items[r2 + 1][c2] == "rockford":
        gameState = 1
def drawEditor():
    # Draw the editor side panel: title, ON flag, palette, current brush,
    # load/save buttons, and any transient status message.
    global editorMessageCount
    screen.draw.text("EDITOR", center = (900, 20), owidth=0.5, ocolor=(255,255,255), color=(0,0,255) , fontsize=40)
    if editorState: screen.draw.text("ON", center = (900, 50), owidth=0.5, ocolor=(255,255,255), color=(255,0,0) , fontsize=40)
    for b in range(0, len(blockTypes)):
        blockTypes[b].draw()
    # Preview of the currently selected brush.
    if editItem != "":
        screen.blit(editItem,(880,100))
    loadButton.draw()
    saveButton.draw()
    # Status message decays one frame per draw.
    if editorMessageCount > 0:
        screen.draw.text(editorMessage, center = (400, 300), owidth=0.5, ocolor=(255,255,255), color=(0,0,255) , fontsize=40)
        editorMessageCount -= 1
def loadMap():
    """Load the grid from 'mymap.map', or build the default 14x20 level.

    Sets the global `items` grid and flashes an editor status message.
    """
    global items, rockford, editorMessage, editorMessageCount
    try:
        with open('mymap.map', 'rb') as fp:
            # NOTE: pickle.load is unsafe on untrusted files; fine for a
            # locally-saved map.
            items = pickle.load(fp)
        editorMessage = "MAP LOADED"
        editorMessageCount = 200
    except IOError:
        editorMessage = "DEFAULT MAP LOADED"
        editorMessageCount = 200
        # Bug fix: rebuild the grid from scratch. The original appended the
        # default tiles to the *existing* rows, so any later failed load
        # (e.g. pressing ESC with no save file) widened every row past 20
        # columns.
        items = [[] for _ in range(14)]
        for r in range(0, 14):
            for c in range(0, 20):
                itype = "soil"
                # Border cells are indestructible walls.
                if r == 0 or r == 13 or c == 0 or c == 19:
                    itype = "wall"
                items[r].append(itype)
        items[1][1] = "rockford"
def saveMap():
    """Serialise the current grid to 'mymap.map' and flash a status message."""
    global editorMessage, editorMessageCount
    try:
        with open('mymap.map', 'wb') as fp:
            pickle.dump(items, fp)
    except IOError:
        editorMessage = "ERROR SAVING MAP"
        editorMessageCount = 200
    else:
        editorMessage = "MAP SAVED"
        editorMessageCount = 200
# Build the initial level, then hand control to Pygame Zero's game loop.
loadMap()
pgzrun.go()
| StarcoderdataPython |
1670875 | <filename>pyapp/__init__.py<gh_stars>0
from .pyapp import PyApp # noqa
# Package version
# Follows semantic versioning (https://semver.org/)
__version__ = "0.1.0-dev0"
| StarcoderdataPython |
3360115 | import base64
from docker.errors import NotFound
from armada_backend import docker_client
from armada_backend.api_run import Run
from armada_backend.api_stop import Stop
from armada_backend.models.services import get_services_by_ship
from armada_backend.utils import shorten_container_id
from armada_command import armada_api
from armada_command.consul.kv import kv_get
from armada_command.scripts.compat import json
class Restart(Run, Stop):
    """HTTP handler that restarts a service container, locally or on
    another ship (host), reusing the parameters the container was
    originally created with."""
    def on_post(self, req, resp):
        """POST entry point.

        Params: container_id (required), target_ship (optional -- restart
        on another host), force (optional -- override safety checks).
        Responds with the new short container id and its endpoints.
        """
        container_id, error = self.get_post_parameter(req, 'container_id')
        target_ship, _ = self.get_post_parameter(req, 'target_ship')
        force_restart, _ = self.get_post_parameter(req, 'force')
        if error:
            return self.status_error(resp, error)
        try:
            new_container_id, service_endpoints = self._restart_service(container_id, target_ship, force_restart)
            short_container_id = shorten_container_id(new_container_id)
            return self.status_ok(resp, {'container_id': short_container_id, 'endpoints': service_endpoints})
        except Exception as e:
            return self.status_exception(resp, "Unable to restart service", e)
    def _restart_service(self, container_id, target_ship=None, force_restart=False):
        """Dispatch to a local or remote restart after recovering the
        container's original creation parameters."""
        restart_parameters = self._get_restart_parameters(container_id)
        if not restart_parameters:
            raise Exception('Could not get RESTART_CONTAINER_PARAMETERS. Container ID: {}'.format(container_id))
        if target_ship:
            return self._restart_service_remote(container_id, restart_parameters,
                                                target_ship, force_restart)
        else:
            return self._restart_service_local(container_id, restart_parameters)
    def _get_restart_parameters(self, container_id):
        """Recover the creation parameters for a container.

        First tries the RESTART_CONTAINER_PARAMETERS env var (base64 JSON)
        from `docker inspect`; if the container no longer exists, falls
        back to the consul service registry keyed by container id.
        Returns None when nothing is found.
        """
        try:
            docker_api = docker_client.api()
            docker_inspect = docker_api.inspect_container(container_id)
            for env_var in docker_inspect['Config']['Env']:
                # Split 'KEY=VALUE' (value may itself contain '='); pad so
                # a bare KEY still unpacks.
                env_key, env_value = (env_var.strip('"').split('=', 1) + [''])[:2]
                if env_key == 'RESTART_CONTAINER_PARAMETERS':
                    return json.loads(base64.b64decode(env_value))
        except NotFound:
            # Container already gone -- look it up in the service registry.
            for service in get_services_by_ship(ship=None):
                if service.split('/')[-1] == container_id:
                    return kv_get(service).get('params')
    def _restart_service_local(self, container_id, restart_parameters):
        """Create the replacement first, then stop the old container and
        start the new one; returns (new_container_id, endpoints)."""
        new_container_id = self._create_service(**restart_parameters)
        self._stop_service(container_id)
        service_endpoints = self._start_container(new_container_id)
        return new_container_id, service_endpoints
    def _restart_service_remote(self, container_id, restart_parameters, target_ship, force_restart):
        """Restart on another ship via the armada API.

        Refuses (unless forced) when the service has mounted volumes or
        static ports, since those do not transfer across hosts.
        """
        mounted_volumes = restart_parameters.get('volumes')
        static_ports = restart_parameters.get('ports')
        if (mounted_volumes or static_ports) and not force_restart:
            error = "Cannot restart service on another host. Mounted volumes or static ports detected. \n" \
                    "\tVolumes: {0}\n" \
                    "\tPorts: {1}\n" \
                    "Use --force to restart anyway.".format(mounted_volumes, static_ports)
            raise Exception(error)
        new_container_id = self.__create_service_remote(restart_parameters, target_ship)
        self._stop_service(container_id)
        service_endpoints = self.__start_service_remote(new_container_id, target_ship)
        return new_container_id, service_endpoints
    def __create_service_remote(self, restart_parameters, target_ship):
        """Ask the target ship's API to create the container; returns its id."""
        result = armada_api.post('create', restart_parameters, ship_name=target_ship)
        if result['status'] != "ok":
            raise Exception(result['error'])
        return result['long_container_id']
    def __start_service_remote(self, container_id, target_ship):
        """Ask the target ship's API to start the container; returns endpoints."""
        start_payload = {'long_container_id': container_id}
        start_result = armada_api.post('start', start_payload, ship_name=target_ship)
        if start_result['status'] != "ok":
            raise Exception(start_result['error'])
        return start_result['endpoints']
| StarcoderdataPython |
3278522 | <reponame>dfint/dfrus
from collections import namedtuple
from enum import IntEnum, Enum, auto
class Cond(IntEnum):
    """Condition codes (the low nibble of x86 Jcc/SETcc/CMOVcc opcodes)."""
    # Primary names, in encoding order 0x0..0xF.
    (o, no, b, nb, e, ne, be, a, s, ns, p, np, l, nl, le, g) = range(16)
    # Mnemonic aliases mapping to the same encodings.
    nae = b
    not_above_equal = nae
    c = b
    ae = nb
    nc = nb
    z = e
    zero = z
    nz = ne
    not_zero = nz
    na = be
    pe = p
    po = np
    nge = l
    ge = nl
    nle = g
class RegType(Enum):
    """Broad register families used to tag Reg members."""
    general = auto()
    segment = auto()
    mm = auto()
    xmm = auto()


# (family, encoding number used in ModRM/opcode bytes, width in bytes)
RegData = namedtuple("RegData", "type,code,size")


class Reg(Enum):
    """x86 registers, each valued as a (type, code, size) tuple."""
    eax, ecx, edx, ebx, esp, ebp, esi, edi = ((RegType.general, i, 4) for i in range(8))
    ax, cx, dx, bx, sp, bp, si, di = ((RegType.general, i, 2) for i in range(8))
    al, cl, dl, bl, ah, ch, dh, bh = ((RegType.general, i, 1) for i in range(8))
    es, cs, ss, ds, fs, gs = ((RegType.segment, i, 2) for i in range(6))
    mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7 = ((RegType.mm, i, 8) for i in range(8))
    xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 = ((RegType.xmm, i, 16) for i in range(8))

    def __init__(self, *reg_data):
        reg_data = RegData(*reg_data)
        self.type = reg_data.type
        self.code = reg_data.code
        self.size = reg_data.size
        if reg_data.type == RegType.general:
            assert reg_data.size <= 4, "Fix me!"

    @property
    def parent(self):
        """Widest register aliasing this one (al/ax -> eax, mm0 -> xmm0).

        Bug fix: the original computed this eagerly in __init__ and
        compared the whole RegData tuple against RegType.mm (always
        False), so mm registers wrongly got themselves as parent. It is
        now a lazy property, which also sidesteps the fact that xmm
        members do not yet exist while mm members are being created.
        """
        if self.type == RegType.general:
            if self.size == 4:  # TODO: revisit when 64-bit registers are added
                return self
            if self.size == 2:
                return type(self)(RegData(RegType.general, self.code, 4))
            # 8-bit: ah..bh (codes 4..7) alias the same dwords as al..bl.
            return type(self)(RegData(RegType.general, self.code % 4, 4))
        if self.type == RegType.mm:
            # mm registers alias the low half of the matching xmm register.
            return type(self)(RegData(RegType.xmm, self.code, 16))
        return self

    @classmethod
    def segment(cls, code: int):
        """Segment register by encoding number (0=es .. 5=gs)."""
        return cls((RegType.segment, code, 2))

    @classmethod
    def mm(cls, code: int):
        """MMX register by encoding number."""
        return cls((RegType.mm, code, 8))

    @classmethod
    def xmm(cls, code: int):
        """XMM register by encoding number."""
        return cls((RegType.xmm, code, 16))

    def __int__(self):
        return self.code

    def __index__(self):
        return self.code

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        # Integers compare against the encoding; members by identity.
        if isinstance(other, int):
            return self.code == other
        else:
            return self is other

    def __hash__(self):
        return hash(self.value)
class Prefix(IntEnum):
    """Prefix codes"""
    # String-instruction repeat prefixes; repe/repz and repnz are the
    # architecture's alternative mnemonics for the same bytes (aliases).
    rep = 0xf3
    repe = rep
    repz = rep
    repne = 0xf2
    repnz = repne
    # Bus-lock prefix.
    lock = 0xf0
    # Operand-/address-size override prefixes.
    operand_size = 0x66
    address_size = 0x67
    # Segment-override prefixes.
    seg_es = 0x26
    seg_cs = 0x2e
    seg_ss = 0x36
    seg_ds = 0x3e
    seg_fs = 0x64
    seg_gs = 0x65
# One-byte x86 opcode values (module-level constants). The inline comments
# give the bits that get added/OR-ed into the base opcode: `width` selects
# 8- vs 32-bit operands, `dir` swaps the r/m and reg operand roles, and
# `reg`/`cond` encodings go directly into the low bits. Two-byte opcode
# sequences are stored as `bytes` objects.
jmp_near = 0xe9
jmp_short = jmp_near+2
jmp_indir = bytes([0xff, 0x20])
jcc_short = 0x70  # + cond
jcc_near = bytes([0x0f, 0x80])  # + {0,cond}
call_near = 0xe8
call_indir = bytes([0xff, 0x10])
setcc = bytes([0x0f, 0x90])
cmp_rm_imm = 0x80
cmp_rm_reg = 0x38  # | dir<<1 | width
nop = 0x90
cdq = 0x99
lea = 0x8d
ret_near = 0xc3
ret_far = 0xcb
ret_near_n = 0xc2
ret_far_d = 0xca
leave = 0xc9
int3 = 0xcc
# Stack operations.
push_reg = 0x50  # + reg
push_imm32 = 0x68
push_imm8 = push_imm32 + 2
push_indir = bytes([0xff, 0x30])  # + displacement size * 40h + base register [& sib]
pushfd = 0x9c
popfd = 0x9d
pop_reg = 0x58  # + reg
pop_rm = 0x8f
pushad = 0x60
popad = 0x61
# ALU ops, accumulator-with-immediate forms.
add_acc_imm = 0x04  # + width
sub_acc_imm = 0x2c  # + width
xor_acc_imm = 0x34  # + width
or_acc_imm = 0x0c  # + width
and_acc_imm = 0x24  # + width
test_acc_imm = 0xa8  # + width
cmp_acc_imm = 0x3c  # + width
adc_acc_imm = 0x14
sbb_acc_imm = 0x1c
# ALU ops, r/m-with-register forms.
add_rm_reg = 0x00  # + 2*dir + width
sub_rm_reg = 0x28  # + 2*dir + width
sub_reg_rm = sub_rm_reg+2  # + width
xor_rm_reg = 0x30  # + 2*dir + width
or_rm_reg = 0x08  # + 2*dir + width
and_rm_reg = 0x20  # + 2*dir + width
adc_rm_reg = 0x10
sbb_rm_reg = 0x18
op_rm_imm = 0x80
op_rm_imm8 = 0x83
xchg_rm_reg = 0x86  # + width
xchg_acc_reg = 0x90  # + reg # no width bit, so only eax and ax are acceptable
test_rm_reg = 0x84  # + width
# Data movement.
mov_reg_imm = 0xb0  # + 8*width + reg
mov_acc_mem = 0xa0  # + 2*dir + width
mov_rm_reg = 0x88  # + 2*dir + width
mov_reg_rm = mov_rm_reg+2  # + width
mov_rm_imm = 0xc6  # + width
mov_rm_seg = 0x8c  # + 2*dir
movsb = 0xa4
movsd = 0xa5
# movsw = (Prefix.operand_size, movsd)
inc_reg = 0x40  # + reg
dec_reg = 0x48  # + reg
# Opcodes after 0x0f prefix
x0f_setcc = 0x90
x0f_movzx = 0xB6
x0f_movsx = 0xBE
x0f_jcc_near = 0x80
x0f_movups = 0x10  # + dir
x0f_movaps = 0x28  # + dir
x0f_movd_mm = 0x6e  # + dir << 4 + size_flag
x0f_movq_rm_xmm = 0xd6
x0f_cmov = 0x40  # + cond
# Shift/rotate group and the test/unary group (reg field of ModRM selects op).
shift_op_rm_1 = 0xd0  # + width
shift_op_rm_cl = 0xd2  # + width
shift_op_rm_imm8 = 0xc0  # + width
test_or_unary_rm = 0xf6  # + width & MODRM (reg==0 - test; reg==1 - n/a; reg==2 through 7 - unary ops)
| StarcoderdataPython |
4816424 | from django.test import TestCase
from .models import Location,Category,Image
# Create your tests here.
class LocationTestClass(TestCase):
    """Unit tests for the Location model."""

    def setUp(self):
        # Fixture instance shared by the tests below (not saved yet).
        self.kigali = Location(name='kigali')

    def test_instance(self):
        """The fixture should be a Location instance."""
        self.assertIsInstance(self.kigali, Location)

    def test_save_method(self):
        """save_location() should persist the instance to the database."""
        self.kigali.save_location()
        locations = Location.objects.all()
        self.assertGreater(len(locations), 0)
class ImageTestClass(TestCase):
    """Unit tests for the Image model."""

    def setUp(self):
        # Fixture instance shared by the tests below (not saved yet).
        self.flower = Image(
            name='flower',
            image_url='https://images.pexels.com/photos/462118/pexels-photo-462118.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500',
            description='rose flower')

    def test_instance(self):
        """The fixture should be an Image instance."""
        self.assertIsInstance(self.flower, Image)

    def test_save_method(self):
        """save_image() should persist the instance to the database."""
        self.flower.save_image()
        images = Image.objects.all()
        self.assertGreater(len(images), 0)
145314 | <reponame>ShameekConyers/covid-socio-economic-inquiry
import pandas as pd
import requests
import io
import os
import json
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import pathlib
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
pd.options.plotting.backend = "plotly"
def fetch_json_map():
    """Return a mapping of full US state name -> two-letter postal code.

    The forward mapping (code -> name) is downloaded once from a public
    gist and cached at ./data/states.json; subsequent calls read the cache.

    Returns:
        dict mapping state name (e.g. "Alabama") to code (e.g. "AL").
    """
    cache_path = "./data/states.json"
    if not os.path.exists(cache_path):
        resp = requests.get(
            "https://gist.githubusercontent.com/mshafrir/2646763/raw/"
            "8b0dbb93521f5d6889502305335104218454c2bf/states_hash.json")
        res = json.load(io.BytesIO(resp.content))
        # Cache the download. BUGFIX: the original opened the file without
        # `with`, leaking the handle if json.dump raised.
        with open(cache_path, "w") as f:
            json.dump(res, f)
    else:
        with open(cache_path) as f:
            res = json.load(f)
    # Invert the mapping: the CDC data is keyed by full state name while
    # plotly's 'USA-states' location mode wants the two-letter codes.
    result = {v: k for k, v in res.items()}
    return result
def fetch_and_clean_tables_from_wikipedia():
    """
    Grabs the tables of interest from wikipedia.

    Returns:
        A DF that contains macro level data for each state, indexed by
        state name, with gini, poverty and urbanization columns.
    """
    gini_url = "https://en.wikipedia.org/wiki/List_of_U.S._states_by_Gini_coefficient"
    pov_url = "https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_poverty_rate"
    urb_url = "https://en.wikipedia.org/wiki/Urbanization_in_the_United_States"
    # climate_url = ""  # TODO: data sourcing of climate not straightforward like others

    def urb_state_mapping(name):
        # Strip trailing footnote markers, e.g. "Texas[note 1]" -> "Texas".
        # BUGFIX: the original used name[:name.find('[')], which returns -1
        # for names without a footnote and silently dropped their last char.
        return name.split('[')[0]

    # First we grab the dirty tables.
    gini = pd.read_html(gini_url)
    gini = gini[2]  # this gets correct table from wikipedia page
    pov = pd.read_html(pov_url)
    pov = pov[2]
    urb = pd.read_html(urb_url)
    urb = urb[-1]
    urb = urb.droplevel(level=0, axis=1)  # clean the unnecessary multiindex

    # Then we clean the tables such that the output is directly usable.
    gini.columns = gini.columns.str.replace(' ', '_')
    pov.columns = pov.columns.str.replace(' ', '_')
    urb.columns = urb.columns.str.replace(' ', '_')

    gini = gini.rename(columns={
        'State_or_federal_district': 'state',
        'Gini_Coefficient': 'gini_coef'
    })
    gini.drop(['Rank'], axis=1, inplace=True)
    gini.set_index('state', inplace=True)
    gini.columns = gini.columns.str.lower()

    pov = pov.rename(columns={
        'State': 'state',
        '2019_Poverty_rate(percent_of_persons_in_poverty)[note_2][7]': 'pov_2019',
        '2014_Poverty_Rates_(includes_unrelated_children)': 'pov_2014'
    })
    pov.drop(['Rank', 'Supplemental_Poverty_Measure_(2017–2019_average)_(Geographically_Adjusted)'], axis=1, inplace=True)
    pov.set_index('state', inplace=True)
    pov.columns = pov.columns.str.lower()

    urb = urb.rename(columns={'State/Territory': 'state',
                              '2010': 'urb_2010',
                              '2000': 'urb_2000'})
    urb = urb[['state', 'urb_2010', 'urb_2000']].copy()
    urb['state'] = urb['state'].apply(urb_state_mapping)
    urb.set_index('state', inplace=True)
    urb.columns = urb.columns.str.lower()

    # Join them all on the state index.
    macro_df = gini.merge(pov, 'inner', 'state').merge(urb, 'inner', 'state')
    return macro_df.dropna()
def fetch_mental_health_df():
    """
    Download the raw CDC 'Mental Health Care in the Last 4 Weeks' dataset.

    Returns:
        The raw dataset as a DataFrame.

    Raises:
        requests.HTTPError: if the CDC endpoint returns an error status.
    """
    res = requests.get(
        'https://data.cdc.gov/api/views/yni7-er2q/rows.csv?accessType=DOWNLOAD',
        timeout=60)  # fail fast instead of hanging forever on a dead endpoint
    res.raise_for_status()  # surface HTTP errors rather than parsing an error page as CSV
    mental_health = pd.read_csv(io.BytesIO(res.content))
    return mental_health
def chop_and_clean_mental_health(by_group: str, subgroup_name, mental_health):
    """
    Chops the original df by_group and returns a multindex of
    [time, subgroup_name].

    Args:
        by_group: value of the CDC 'Group' column to keep (e.g. "By State").
        subgroup_name: name given to the 'Subgroup' column ('state' or
            'education').
        mental_health: raw CDC dataframe as returned by
            fetch_mental_health_df().

    Returns:
        Chopped & cleaned df with a multi-index of [time_period, subgroup_name].
    """
    # Columns not needed for the analysis ('index' appears after reset_index).
    to_drop = ['Group',
               'Time Period Label',
               'Time Period End Date',
               'Suppression Flag',
               'State',
               'Time Period',
               'index',
               'Quartile Range',
               'Confidence Interval']
    # Summarized names for the verbose CDC indicator labels.
    indics_mapping = {
        'Needed Counseling or Therapy But Did Not Get It, Last 4 Weeks':
            'none',
        'Received Counseling or Therapy, Last 4 Weeks':
            'therapy',
        'Took Prescription Medication for Mental Health And/Or Received Counseling or Therapy, Last 4 Weeks':
            'either',
        'Took Prescription Medication for Mental Health, Last 4 Weeks':
            'medication'
    }
    education_mapping = {
        "Some college/Associate's degree":
            'associates',
        "Bachelor's degree or higher":
            'college',
        'High school diploma or GED':
            'highschool',
        'Less than a high school diploma':
            'none'
    }

    df = mental_health[mental_health['Group'] == '{}'.format(by_group)].copy()
    df.reset_index(inplace=True)
    df.drop(to_drop, axis=1, inplace=True)
    df.columns = df.columns.str.replace(' ', '_')
    df.columns = df.columns.str.lower()
    df = df.rename(columns={
        'time_period_start_date': 'time_period',
        'subgroup': '{}'.format(subgroup_name)
    })
    if subgroup_name == 'education':
        df['education'] = df['education'].apply(lambda x: education_mapping[x])
    elif subgroup_name == 'state':
        # Only the state flavour needs the postal-code map and the wikipedia
        # macro tables. BUGFIX: fetch_json_map() was previously called
        # unconditionally, hitting the network/disk even on the education path.
        state_mapping = fetch_json_map()
        macro_df = fetch_and_clean_tables_from_wikipedia()
        df = df.merge(macro_df, 'inner', 'state')
        df['state'] = df['state'].apply(lambda x: state_mapping[x])
    df.set_index(['time_period', '{}'.format(subgroup_name)], inplace=True)
    # Here we map the indicator variables to a summarized form.
    df['indicator'] = df['indicator'].apply(lambda x: indics_mapping[x])
    return df
def fetch_data():
    """Return [state_df, education_df], using ./data/*.h5 as an on-disk cache.

    On a cold cache the CDC dataset is downloaded and cleaned, then both
    frames are written to HDF5; on later runs they are read straight back.
    """
    if not (os.path.exists('./data/state.h5') and os.path.exists('./data/education.h5')):
        mental_health = fetch_mental_health_df()
        state_df = chop_and_clean_mental_health("By State", 'state', mental_health)
        education_df = chop_and_clean_mental_health(
            "By Education", 'education', mental_health)
        # Cache for subsequent runs (to_hdf requires the 'tables' package).
        state_df.to_hdf('./data/state.h5', key='df', mode='w')
        education_df.to_hdf('./data/education.h5', key='df', mode='w')
    else:
        state_df = pd.read_hdf('./data/state.h5', key='df')
        education_df = pd.read_hdf('./data/education.h5', key='df')
    return [state_df, education_df]
def _write_state_map(end, column, path):
    """Render a US choropleth of one boolean per-state column to *path*."""
    fig = px.choropleth(end,
                        locations=end.index,
                        color=column,
                        hover_name=end.index,
                        locationmode='USA-states')
    fig.update_layout(
        title_text='U.S. States',
        geo_scope='usa'
    )
    fig.write_image(path)


def _write_ols_summary(y_var, dep_vars, path):
    """Fit OLS of y_var on dep_vars (plus constant) and save the text summary as a PNG."""
    dep_vars = sm.add_constant(dep_vars)
    est = sm.OLS(y_var.astype(float), dep_vars.astype(float), missing='drop').fit()
    plt.clf()
    plt.rc(
        'figure',
        figsize=(12, 7))
    plt.text(
        0.01,
        0.05,
        str(est.summary()),
        {'fontsize': 10},
        fontproperties='monospace')
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(path)


def main():
    """Run the full analysis pipeline and write all figures to ./figures/."""
    [state_df, education_df] = fetch_data()
    print(state_df, education_df)

    # First we get begin and end slices. We are only concerned with those
    # who needed mental health care but got none ('none' indicator).
    beg = state_df.loc['08/19/2020']
    end = state_df.loc['06/23/2021']
    beg = beg[beg['indicator'] == 'none']
    end = end[end['indicator'] == 'none']
    beg = beg.rename(columns={'value': 'unmet_mental_health'})
    end = end.rename(columns={'value': 'unmet_mental_health'})

    # Next we test which states had a statistically significant drop and
    # which a significant increase: the later point estimate falling outside
    # the earlier confidence interval.
    end['improved_unmet_mental_health'] = end['unmet_mental_health'] < beg['lowci']
    end['worsened_unmet_mental_health'] = end['unmet_mental_health'] > beg['highci']

    # Choropleth figures for our findings (previously duplicated inline).
    _write_state_map(end, 'improved_unmet_mental_health', './figures/improved.png')
    _write_state_map(end, 'worsened_unmet_mental_health', './figures/worsened.png')

    # Then we do sample correlations; the wikipedia columns are strings
    # like "12.3%", so strip the percent signs once up front.
    print(end.columns)
    pov = end['pov_2019'].str.rstrip('%').astype('float')
    urb = end['urb_2010'].str.rstrip('%').astype('float')
    subj_df = pd.DataFrame([
        end['unmet_mental_health'],
        end['gini_coef'],
        pov,
        urb,
    ]).transpose()
    corr_df = subj_df.corr()

    # Remove duplicated info (upper triangle) from the correlation matrix.
    # BUGFIX: np.bool was removed from NumPy (>= 1.24); plain bool is correct.
    mask = np.tril(np.ones(corr_df.shape)).astype(bool)
    lt_corr_df = corr_df.where(mask).round(2)
    print(lt_corr_df)
    fig = ff.create_annotated_heatmap(
        z=lt_corr_df.to_numpy(),
        x=lt_corr_df.columns.tolist(),
        y=lt_corr_df.index.tolist(),
        zmax=1,
        zmin=-1,
        colorscale=px.colors.diverging.RdBu
    )
    # We output the correlations to a figure.
    fig.update_layout(
        yaxis_autorange='reversed',
        xaxis_showgrid=False,
        yaxis_showgrid=False,
        uniformtext_minsize=16
    )
    fig.write_image("./figures/corrs.png")

    # Simple multilinear regressions, only to test for statistical
    # significance of the macro variables. (A dead, immediately-overwritten
    # urb-only fit between model0 and model1 in the original was removed.)
    y_var = end['unmet_mental_health']

    # model0: gini + urbanization + poverty.
    dep_vars = end[['gini_coef']].copy()
    dep_vars['urb_2010'] = urb
    dep_vars['pov_2019'] = pov
    _write_ols_summary(y_var, dep_vars, './figures/model0.png')

    # model1: gini + poverty.
    dep_vars = end[['gini_coef']].copy()
    dep_vars['pov_2019'] = pov
    _write_ols_summary(y_var, dep_vars, './figures/model1.png')

    # model2: urbanization only.
    _write_ols_summary(y_var, urb, './figures/model2.png')
if __name__ == '__main__':
    # Run relative to the repository root so the relative ./data and
    # ./figures paths resolve consistently regardless of invocation dir.
    os.chdir(pathlib.Path(__file__).parent.parent.resolve())
    os.makedirs('./data/', exist_ok=True)
    # BUGFIX: main() writes images into ./figures/, which was never created.
    os.makedirs('./figures/', exist_ok=True)
    main()
| StarcoderdataPython |
6961 | # -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
# Start with an empty header so the `src += ...` continuation below never
# hits a NameError on platforms without a platform-specific section.
# BUGFIX: previously `src` was only bound on Linux.
src = ""
if platform.system() == 'Linux':
    src += """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| StarcoderdataPython |
4833067 | import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
@unittest.skipUnless(torch.is_vulkan_available(),
                     "Vulkan backend must be available for these tests.")
class TestVulkanRewritePass(TestCase):
    """Checks the JIT rewrite passes that prepare conv modules for Vulkan."""

    @staticmethod
    def validate_transformed_module(
            # To please flake
            self,
            pattern_count_map,
            data_shape,
            prepack_removal=False,
            fuse_clamping_ops=False):
        # Scripts the module, applies the requested Vulkan rewrite passes,
        # round-trips the model through (de)serialization, then asserts the
        # expected operator patterns in the resulting graph.
        module_instance = self
        scripted_model = torch.jit.script(module_instance)
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        # NOTE(review): ref_result is never used — presumably intended for a
        # numeric comparison against the transformed model; confirm.
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_vulkan_insert_prepacked_ops(scripted_model._c)
        # Fusing/folding requires a frozen module.
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
            torch._C._jit_pass_vulkan_fold_prepacking_ops(scripted_model._c)
        buffer = io.BytesIO()
        torch.jit.save(scripted_model, buffer)
        buffer.seek(0)
        deserialized_scripted_model = torch.jit.load(buffer)
        # Pattern-count convention: 0 -> pattern must appear (any count),
        # -1 -> must NOT appear, any other v -> must appear exactly v times.
        for pattern, v in pattern_count_map.items():
            if (v == 0):
                FileCheck().check(pattern).run(deserialized_scripted_model.graph)
            elif (v == -1):
                FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
            else:
                FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)

    def test_conv(self):
        # Conv params
        batch_size = 2
        input_channels_per_group = 6
        height = 16
        width = 16
        output_channels_per_group = 6
        groups = 4
        kernel_h = kernel_w = 3
        stride_h = stride_w = 1
        pad_h = pad_w = 1
        dilation = 1
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        # NOTE(review): `kernels` is unused below.
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        paddings = (pad_h, pad_w)
        dilations = (dilation, dilation)
        conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
        # NOTE(review): parentheses around a single value do NOT make a tuple;
        # this is just the int output_channels (torch.rand accepts it anyway).
        conv_bias_shape = (output_channels)

        class Conv2D(torch.nn.Module):
            # Plain conv: expect conv2d to be rewritten into the
            # vulkan_prepack prepack/run op pair.
            def __init__(self):
                super(Conv2D, self).__init__()
                self.weight = torch.nn.Parameter(torch.Tensor(torch.rand(conv_weight_shape)), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.Tensor(torch.rand(conv_bias_shape)), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                return F.conv2d(x, self.weight, self.bias,
                                self.strides, self.paddings, self.dilations, self.groups)

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)

        class Conv2DRelu(torch.nn.Module):
            # Conv followed by relu: the relu should be fused into the
            # prepacked conv when fuse_clamping_ops is enabled.
            def __init__(self):
                super(Conv2DRelu, self).__init__()
                self.weight = torch.nn.Parameter(torch.Tensor(torch.rand(conv_weight_shape)), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.Tensor(torch.rand(conv_bias_shape)), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.relu(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(), pattern_count_map, data_shape)
        # After prepack folding the prepack op disappears but relu remains.
        pattern_count_map["aten::relu"] = 1
        pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)
        # With clamp fusion the standalone relu disappears as well.
        pattern_count_map["aten::relu"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)

        class Conv2DHardtanh(torch.nn.Module):
            # Conv followed by hardtanh: same fusion expectations as relu.
            def __init__(self):
                super(Conv2DHardtanh, self).__init__()
                self.weight = torch.nn.Parameter(torch.Tensor(torch.rand(conv_weight_shape)), requires_grad=False)
                self.bias = torch.nn.Parameter(torch.Tensor(torch.rand(conv_bias_shape)), requires_grad=False)
                self.strides = strides
                self.paddings = paddings
                self.dilations = dilations
                self.groups = groups

            def forward(self, x):
                o = F.conv2d(x, self.weight, self.bias,
                             self.strides, self.paddings, self.dilations, self.groups)
                o = F.hardtanh(o)
                return o

        data_shape = (batch_size, input_channels, height, width)
        pattern_count_map = {"Tensor = aten::conv2d": -1,
                             "vulkan_prepack::conv2d_clamp_prepack": 1,
                             "vulkan_prepack::conv2d_clamp_run": 1}
        TestVulkanRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
        pattern_count_map["aten::hardtanh"] = 1
        pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DHardtanh(),
            pattern_count_map,
            data_shape,
            prepack_removal=True)
        pattern_count_map["aten::hardtanh"] = -1
        # NOTE(review): this last case reuses Conv2DRelu rather than
        # Conv2DHardtanh — possibly intentional, but looks like a typo; confirm.
        TestVulkanRewritePass.validate_transformed_module(
            Conv2DRelu(),
            pattern_count_map,
            data_shape,
            prepack_removal=True,
            fuse_clamping_ops=True)
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
    run_tests()
| StarcoderdataPython |
1665276 | import sklearn
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
#replace question marks with np.nan type
def replace_question_marks(df):
try:
df = df.replace({'?' : np.nan})
print("Replaced all '?' to np.nan")
except:
print('No question marks found')
return df
#check whether dataset is balanced
def check_class_distribution(df):
print('Class distributions:')
print(df.iloc[:,-1].value_counts())
#PCA dimension reducetion
def dimension_reduction(x_train, x_test, upper_bound=500, n_components=50,):
if x_train.shape[1] >= upper_bound:
pca = PCA(n_components=n_components, random_state=33)
pca.fit(x_train)
x_train= pd.DataFrame(pca.transform(x_train))
x_test = pd.DataFrame(pca.transform(x_test))
print("Reducing dimension form %s to %s"%(x_train.shape[1],n_components))
return x_train, x_test
#encoder for X and y string values
def encode_labels(x_train, x_test, index=None):
label_encoder = sklearn.preprocessing.LabelEncoder()
df = pd.concat([x_train,x_test],axis=0)
#encoding y labels
if index == -1:
print('Encoding y label values')
not_null_df = df[df.notnull()]
label_encoder.fit(not_null_df)
x_train = label_encoder.transform(x_train)
x_test = label_encoder.transform(x_test)
#encoding x features
else:
print('Encoding X features')
for i,t in enumerate(df.dtypes):
if t == 'object':
s_df = df.iloc[:,i]
not_null_df = s_df.loc[s_df.notnull()]
label_encoder.fit(not_null_df)
try:
x_train.iloc[:,i] = x_train.iloc[:,i].astype('float')
except:
x_train.iloc[:,i] = x_train.iloc[:,i].apply(lambda x: label_encoder.transform([x])[0] if x not in [np.nan] else x)
try:
x_test.iloc[:,i] = x_test.iloc[:,i].astype('float')
except:
x_test.iloc[:,i] = x_test.iloc[:,i].apply(lambda x: label_encoder.transform([x])[0] if x not in [np.nan] else x) #np.nan
return x_train, x_test
#put class colunmn at end of dataframe
def reorder_columns(dataFrame):
cols = dataFrame.columns.tolist()
cols = cols[1:] + cols[:1]
return dataFrame[cols]
#impute np.nan using given strategy
def impute_value(x_train, x_test, strategy):
if strategy == None:
return x_train.dropna(), x_test.dropna()
else:
imp = SimpleImputer(missing_values=np.nan, strategy=strategy)
train_type_dic = dict()#keep original train data type before impute
for i,t in enumerate(x_train.dtypes):
if t != 'object':
train_type_dic[i] = t
test_type_dic = dict()#keep original test data type before impute
for i,t in enumerate(x_test.dtypes):
if t != 'object':
test_type_dic[i] = t
x_train = pd.DataFrame(imp.fit_transform(x_train))
x_test = pd.DataFrame(imp.transform(x_test))
# for key in train_type_dic:
# x_train.iloc[:,key] = x_train.iloc[:,key].astype(train_type_dic[key])
# for key in test_type_dic:
# x_test.iloc[:,key] = x_test.iloc[:,key].astype(test_type_dic[key])
return x_train, x_test
# default normalizer -> MinMaxScaelr
def normalize_data(X_train, X_test, scaler = preprocessing.MinMaxScaler()):
# scaler = preprocessing.StandardScaler().fit(X_train)
print('Normalized data by scaler: %s'%type(scaler))
scaler = scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test
| StarcoderdataPython |
# Django database configuration for the local test environment (PostgreSQL).
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": "app_test",
        "USER": "go",
        # NOTE(review): plain-text local test credentials committed to source;
        # confirm this settings file is never used in production.
        "PASSWORD": "go",
        "HOST": "localhost",
    }
}

# Memcached-backed cache via django-pylibmc.
CACHES = {
    'default': {
        'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
        'LOCATION': 'localhost:11211',
        'TIMEOUT': 3,
        'BINARY': False,
        'OPTIONS': {  # Maps to pylibmc "behaviors"
            'tcp_nodelay': True,
            'ketama': True
        }
    }
}

# RapidSMS messaging backends.
INSTALLED_BACKENDS = {
    "HTTP": {
        "ENGINE": "rapidsms.backends.database.DatabaseBackend",
    },
}

# Apps excluded from lettuce BDD test discovery.
LETTUCE_AVOID_APPS = (
    'django_nose',
    'south',
    'django_extensions',
    'rapidsms.contrib.locations',
    'rapidsms.contrib.locations.nested',
    'bootstrap_pagination',
    'rapidsms.backends.database',
    'rapidsms.contrib.httptester',
    'djcelery',
)
| StarcoderdataPython |
3345037 | #-------------------------------------------------------------------------------
#
# http utilities
#
# Author: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from base64 import standard_b64encode
def encode_no_auth(**kwargs):
    """ Dummy encoder producing no authentication headers. """
    headers = {}
    return headers
def encode_basic_auth(username, password, **kwargs):
    """ Encode username and password as the basic HTTP access authentication
    header.
    """
    credentials = ("%s:%s" % (username, password)).encode("UTF-8")
    encoded = standard_b64encode(credentials)
    return {b"Authorization": b"Basic " + encoded}
def encode_token_auth(token, **kwargs):
    """ Encode token as the bearer authentication header. """
    # NOTE: Only ASCII characters are allowed in HTTP headers.
    header_value = b"Bearer " + token.encode("ascii")
    return {b"Authorization": header_value}
| StarcoderdataPython |
3266167 | <reponame>amitbend/Restplus_Skeleton<gh_stars>1-10
# Flask settings
FLASK_DEBUG = True # Do not use debug mode in production
# Flask-Restplus settings
# Expand operation lists ('list') in the Swagger UI by default.
RESTPLUS_SWAGGER_UI_DOC_EXPANSION = 'list'
# Validate incoming payloads against the declared API models.
RESTPLUS_VALIDATE = True
RESTPLUS_MASK_SWAGGER = False
# Do not append the default "You have requested this URI..." help to 404s.
RESTPLUS_ERROR_404_HELP = False | StarcoderdataPython |
3211673 | import pandas
from sklearn.tree import DecisionTreeClassifier
# training of the supervised learning algorithm
def trainingDecisionTree(csv_path='.\\csv\\pcaData.csv'):
    """Train a decision-tree classifier on PCA-reduced feature data.

    The CSV is expected to hold four PCA components followed by a class
    label per row (no header).

    csv_path -- path to the training CSV; defaults to the original
                hard-coded location so existing callers are unaffected.

    Returns the fitted DecisionTreeClassifier.
    """
    column_names = ['PCA1', 'PCA2', 'PCA3', 'PCA4', 'class']
    dataset = pandas.read_csv(csv_path, names=column_names)
    values = dataset.values
    # First four columns are the features, the fifth is the label.
    X = values[:, 0:4]
    Y = values[:, 4]
    model = DecisionTreeClassifier()
    model.fit(X, Y)
    return model
134678 | # Generated by Django 3.1.4 on 2020-12-09 14:25
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import paperclip.models
class Migration(migrations.Migration):
    """Initial schema migration: creates the FileType, Network, River, Tag
    and Attachment models.

    NOTE: auto-generated by Django (3.1.4); edit with care.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Lookup table of attachment file types (paperclip).
        migrations.CreateModel(
            name='FileType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=128, verbose_name='File type')),
            ],
            options={
                'verbose_name': 'File type',
                'verbose_name_plural': 'File types',
                'ordering': ['type'],
                'abstract': False,
            },
        ),
        # Simple named linestring geometry.
        migrations.CreateModel(
            name='Network',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='Network', max_length=100)),
                ('geom', django.contrib.gis.db.models.fields.LineStringField(spatial_index=False, srid=settings.SRID)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Linestring geometry with linear-referencing bounds and a width.
        migrations.CreateModel(
            name='River',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='River', max_length=100)),
                ('geom', django.contrib.gis.db.models.fields.LineStringField(spatial_index=False, srid=settings.SRID)),
                ('upstream', models.FloatField(db_index=True, null=True, verbose_name='Start position')),
                ('downstream', models.FloatField(db_index=True, null=True, verbose_name='End position')),
                ('width', models.FloatField(default=3.14159, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=100)),
            ],
        ),
        # Generic file attachment (paperclip), linked to any model via
        # contenttypes (content_type + object_id).
        migrations.CreateModel(
            name='Attachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                ('attachment_file', models.FileField(blank=True, max_length=512, upload_to=paperclip.models.attachment_upload, verbose_name='File')),
                ('author', models.CharField(blank=True, db_column='auteur', default='', help_text='Original creator', max_length=128, verbose_name='Author')),
                ('title', models.CharField(blank=True, db_column='titre', default='', help_text='Renames the file', max_length=128, verbose_name='Filename')),
                ('legend', models.CharField(blank=True, db_column='legende', default='', help_text='Details displayed', max_length=128, verbose_name='Legend')),
                ('starred', models.BooleanField(db_column='marque', default=False, help_text='Mark as starred', verbose_name='Starred')),
                ('is_image', models.BooleanField(db_index=True, default=False, editable=False, help_text='Is an image file', verbose_name='Is image')),
                ('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date')),
                ('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
                ('creator', models.ForeignKey(help_text='User that uploaded', on_delete=django.db.models.deletion.CASCADE, related_name='created_attachments', to=settings.AUTH_USER_MODEL, verbose_name='Creator')),
                ('filetype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.filetype', verbose_name='File type')),
            ],
            options={
                'verbose_name': 'Attachment',
                'verbose_name_plural': 'Attachments',
                'ordering': ['-date_insert'],
                'permissions': (('add_attachment', 'Can add attachments'), ('change_attachment', 'Can change attachments'), ('delete_attachment', 'Can delete attachments'), ('read_attachment', 'Can read attachments'), ('delete_attachment_others', "Can delete others' attachments")),
                'abstract': False,
                'default_permissions': (),
            },
        ),
    ]
| StarcoderdataPython |
29596 | """
Created on 9 Aug 2016
@author: <NAME> (<EMAIL>)
"""
import _csv
import sys
# --------------------------------------------------------------------------------------------------------------------
class Histogram(object):
    """
    Fixed-range histogram: data in [minimum, maximum] is counted in
    bin_count equally-sized bins. `path` labels the data series and is
    used to build the CSV column headers.
    """

    __HEADER_BIN = ".bin"
    __HEADER_COUNT = ".count"

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, minimum, maximum, bin_count, path):
        """
        minimum, maximum -- inclusive data range covered by the histogram
        bin_count -- number of equally-sized bins
        path -- series label used in CSV headers
        """
        self.__minimum = minimum
        self.__maximum = maximum
        self.__bin_count = bin_count
        self.__path = path

        self.__counts = [0] * bin_count
        self.__max_count = 0
        self.__delta = (maximum - minimum) / bin_count

    def __len__(self):
        return self.__bin_count

    # ----------------------------------------------------------------------------------------------------------------

    def append(self, datum):
        """
        Count datum in its bin and return (bin_index, bin_count).
        Raises ValueError if datum is outside [minimum, maximum].
        """
        # reject out-of-range
        if datum < self.__minimum or datum > self.__maximum:
            raise ValueError("datum out of range:%f" % datum)

        # compute index...
        offset = datum - self.__minimum
        index = int(offset // self.__delta)

        # BUG FIX: datum == maximum lands exactly on the upper edge and
        # previously produced index == bin_count (IndexError); clamp it
        # into the last bin instead.
        if index == self.__bin_count:
            index = self.__bin_count - 1

        # update counts...
        self.__counts[index] += 1

        if self.__counts[index] > self.__max_count:
            self.__max_count = int(self.__counts[index])

        return index, self.__counts[index]

    def to_csv(self, filename=None):
        """Write "<path>.bin,<path>.count" rows; stdout when filename is None."""
        file = sys.stdout if filename is None else open(filename, "w")
        writer = _csv.writer(file)

        writer.writerow((self.__path + Histogram.__HEADER_BIN, self.__path + Histogram.__HEADER_COUNT))

        for i in range(self.bin_count):
            writer.writerow((format(self.__bin(i), '.6f'), self.__counts[i]))

        if filename is not None:
            file.close()

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def bins(self):
        # Lower edge of every bin.
        return [self.__bin(i) for i in range(self.__bin_count)]

    @property
    def minimum(self):
        return self.__minimum

    @property
    def maximum(self):
        return self.__maximum

    @property
    def bin_count(self):
        return self.__bin_count

    @property
    def path(self):
        return self.__path

    @property
    def delta(self):
        # Width of one bin.
        return self.__delta

    @property
    def max_count(self):
        return self.__max_count

    @property
    def counts(self):
        return self.__counts

    # ----------------------------------------------------------------------------------------------------------------

    def __bin(self, index):
        """Lower edge of the bin at *index*."""
        return self.__minimum + (index * self.__delta)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Histogram:{minimum:%0.6f, maximum:%0.6f, bin_count:%d, delta:%0.6f, max_count:%d, counts:%s, " \
               "path:%s}" % \
               (self.minimum, self.maximum, self.bin_count, self.delta, self.max_count, self.counts,
                self.path)
| StarcoderdataPython |
1680791 | # Copyright 2019 The Matrix.org Foundation CIC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
import attr
from nio.crypto import TrustState, GroupSessionStore
from nio.store import (
Accounts,
MegolmInboundSessions,
DeviceKeys,
SqliteStore,
DeviceTrustState,
use_database,
use_database_atomic,
)
from peewee import SQL, DoesNotExist, ForeignKeyField, Model, SqliteDatabase, TextField
from cachetools import LRUCache
# Upper bounds for the in-memory LRU caches of media decryption metadata
# and remembered uploads (see PanStore.load_media_cache / load_upload).
MAX_LOADED_MEDIA = 10000
MAX_LOADED_UPLOAD = 10000
@attr.s
class FetchTask:
    # A pending room-history fetch: the room to fetch and the sync token
    # marking where the fetch should start.
    room_id = attr.ib(type=str)
    token = attr.ib(type=str)
@attr.s
class MediaInfo:
    """Decryption metadata for one encrypted mxc:// media item."""

    mxc_server = attr.ib(type=str)  # homeserver part of the mxc URL
    mxc_path = attr.ib(type=str)    # media-id part of the mxc URL
    key = attr.ib(type=dict)        # JSON web key the file was encrypted with
    iv = attr.ib(type=str)          # AES initialisation vector
    hashes = attr.ib(type=dict)     # ciphertext content hashes

    def to_content(self, content: Dict, mime_type: str) -> None:
        """Attach an encrypted "file" section to *content* in place.

        Mutates *content*; returns nothing (annotation corrected from
        Dict[Any, Any] to match the actual behaviour).
        """
        content["file"] = {
            "v": "v2",
            "key": self.key,
            "iv": self.iv,
            "hashes": self.hashes,
            "url": content["url"],
            "mimetype": mime_type,
        }

    def to_thumbnail(self, content: Dict, mime_type: str) -> None:
        """Attach an encrypted "thumbnail_file" section to *content* in place.

        Mutates *content*; returns nothing.
        """
        content["info"]["thumbnail_file"] = {
            "v": "v2",
            "key": self.key,
            "iv": self.iv,
            "hashes": self.hashes,
            "url": content["info"]["thumbnail_url"],
            "mimetype": mime_type,
        }
@attr.s
class UploadInfo:
    # Metadata remembered for a previously uploaded media item.
    content_uri = attr.ib(type=str)
    filename = attr.ib(type=str)
    mimetype = attr.ib(type=str)
class DictField(TextField):
    """Peewee field that stores a dict as a JSON-encoded TEXT column."""

    def python_value(self, value): # pragma: no cover
        return json.loads(value)

    def db_value(self, value): # pragma: no cover
        return json.dumps(value)
class AccessTokens(Model):
    # One access token per account; the account is the primary key, so
    # saving a new token replaces the old one.
    token = TextField()
    account = ForeignKeyField(
        model=Accounts, primary_key=True, backref="access_token", on_delete="CASCADE"
    )
class Servers(Model):
    # A configured homeserver, uniquely identified by its name.
    name = TextField()

    class Meta:
        constraints = [SQL("UNIQUE(name)")]
class ServerUsers(Model):
    # A pan user id bound to one homeserver; unique per (user, server).
    user_id = TextField()
    server = ForeignKeyField(
        model=Servers, column_name="server_id", backref="users", on_delete="CASCADE"
    )

    class Meta:
        constraints = [SQL("UNIQUE(user_id,server_id)")]
class PanSyncTokens(Model):
    # Latest sync token for a pan user (one row per user).
    token = TextField()
    user = ForeignKeyField(model=ServerUsers, column_name="user_id")

    class Meta:
        constraints = [SQL("UNIQUE(user_id)")]
class PanFetcherTasks(Model):
    # Queued room-history fetch tasks, unique per (user, room, token).
    user = ForeignKeyField(
        model=ServerUsers, column_name="user_id", backref="fetcher_tasks"
    )
    room_id = TextField()
    token = TextField()

    class Meta:
        constraints = [SQL("UNIQUE(user_id, room_id, token)")]
class PanMediaInfo(Model):
    # Persisted decryption metadata for an encrypted media item, keyed by
    # (server, mxc_server, mxc_path).
    server = ForeignKeyField(
        model=Servers, column_name="server_id", backref="media", on_delete="CASCADE"
    )
    mxc_server = TextField()
    mxc_path = TextField()
    key = DictField()
    hashes = DictField()
    iv = TextField()

    class Meta:
        constraints = [SQL("UNIQUE(server_id, mxc_server, mxc_path)")]
class PanUploadInfo(Model):
    # Persisted upload metadata, keyed by (server, content_uri).
    server = ForeignKeyField(
        model=Servers, column_name="server_id", backref="upload", on_delete="CASCADE"
    )
    content_uri = TextField()
    filename = TextField()
    mimetype = TextField()

    class Meta:
        constraints = [SQL("UNIQUE(server_id, content_uri)")]
@attr.s
class ClientInfo:
    # A Matrix user id together with the access token it authenticated with.
    user_id = attr.ib(type=str)
    access_token = attr.ib(type=str)
@attr.s
class PanStore:
    """SQLite-backed store for pan daemon state: per-server users, access
    and sync tokens, pending history-fetch tasks, and media/upload
    encryption metadata."""

    store_path = attr.ib(type=str)
    database_name = attr.ib(type=str, default="pan.db")
    database = attr.ib(type=SqliteDatabase, init=False)
    database_path = attr.ib(type=str, init=False)
    # Every model living in this database; bound and created on init.
    models = [
        Accounts,
        AccessTokens,
        Servers,
        ServerUsers,
        DeviceKeys,
        DeviceTrustState,
        PanSyncTokens,
        PanFetcherTasks,
        PanMediaInfo,
        PanUploadInfo,
    ]

    def __attrs_post_init__(self):
        # Open (or create) the database file and ensure all tables exist.
        self.database_path = os.path.join(
            os.path.abspath(self.store_path), self.database_name
        )
        self.database = self._create_database()
        self.database.connect()
        with self.database.bind_ctx(self.models):
            self.database.create_tables(self.models)

    def _create_database(self):
        """Create the SQLite handle with the pragmas the store relies on."""
        return SqliteDatabase(
            self.database_path, pragmas={"foreign_keys": 1, "secure_delete": 1}
        )

    @use_database
    def _get_account(self, user_id, device_id):
        """Return the Accounts row for (user_id, device_id), or None."""
        try:
            return Accounts.get(
                Accounts.user_id == user_id, Accounts.device_id == device_id
            )
        except DoesNotExist:
            return None

    @use_database
    def save_upload(self, server, content_uri, filename, mimetype):
        """Remember upload metadata for a content URI on the given server."""
        server = Servers.get(name=server)
        PanUploadInfo.insert(
            server=server,
            content_uri=content_uri,
            filename=filename,
            mimetype=mimetype,
        ).on_conflict_ignore().execute()

    @use_database
    def load_upload(self, server, content_uri=None):
        """Load upload metadata.

        Without a content_uri, return an LRU cache (keyed by content URI)
        holding up to MAX_LOADED_UPLOAD entries; otherwise return the
        matching UploadInfo or None.
        """
        server, _ = Servers.get_or_create(name=server)
        if not content_uri:
            upload_cache = LRUCache(maxsize=MAX_LOADED_UPLOAD)
            for i, u in enumerate(server.upload):
                if i > MAX_LOADED_UPLOAD:
                    break
                upload = UploadInfo(u.content_uri, u.filename, u.mimetype)
                upload_cache[u.content_uri] = upload
            return upload_cache
        else:
            u = PanUploadInfo.get_or_none(
                PanUploadInfo.server == server,
                PanUploadInfo.content_uri == content_uri,
            )
            if not u:
                return None
            return UploadInfo(u.content_uri, u.filename, u.mimetype)

    @use_database
    def save_media(self, server, media):
        """Persist decryption metadata for a single media item."""
        server = Servers.get(name=server)
        PanMediaInfo.insert(
            server=server,
            mxc_server=media.mxc_server,
            mxc_path=media.mxc_path,
            key=media.key,
            iv=media.iv,
            hashes=media.hashes,
        ).on_conflict_ignore().execute()

    @use_database
    def load_media_cache(self, server):
        """Return an LRU cache of MediaInfo keyed by (mxc_server, mxc_path)."""
        server, _ = Servers.get_or_create(name=server)
        media_cache = LRUCache(maxsize=MAX_LOADED_MEDIA)
        for i, m in enumerate(server.media):
            if i > MAX_LOADED_MEDIA:
                break
            media = MediaInfo(m.mxc_server, m.mxc_path, m.key, m.iv, m.hashes)
            media_cache[(m.mxc_server, m.mxc_path)] = media
        return media_cache

    @use_database
    def load_media(self, server, mxc_server=None, mxc_path=None):
        """Return the MediaInfo for one mxc URL, or None if unknown."""
        server, _ = Servers.get_or_create(name=server)
        m = PanMediaInfo.get_or_none(
            PanMediaInfo.server == server,
            PanMediaInfo.mxc_server == mxc_server,
            PanMediaInfo.mxc_path == mxc_path,
        )
        if not m:
            return None
        return MediaInfo(m.mxc_server, m.mxc_path, m.key, m.iv, m.hashes)

    @use_database_atomic
    def replace_fetcher_task(self, server, pan_user, old_task, new_task):
        """Atomically replace a queued fetch task with its successor."""
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        PanFetcherTasks.delete().where(
            PanFetcherTasks.user == user,
            PanFetcherTasks.room_id == old_task.room_id,
            PanFetcherTasks.token == old_task.token,
        ).execute()
        PanFetcherTasks.replace(
            user=user, room_id=new_task.room_id, token=new_task.token
        ).execute()

    @use_database
    def save_fetcher_task(self, server, pan_user, task):
        """Queue a history-fetch task for the given pan user."""
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        PanFetcherTasks.replace(
            user=user, room_id=task.room_id, token=task.token
        ).execute()

    @use_database
    def load_fetcher_tasks(self, server, pan_user):
        """Return all queued FetchTask objects for the given pan user."""
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        tasks = []
        for t in user.fetcher_tasks:
            tasks.append(FetchTask(t.room_id, t.token))
        return tasks

    @use_database
    def delete_fetcher_task(self, server, pan_user, task):
        """Remove a completed (or abandoned) fetch task from the queue."""
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        PanFetcherTasks.delete().where(
            PanFetcherTasks.user == user,
            PanFetcherTasks.room_id == task.room_id,
            PanFetcherTasks.token == task.token,
        ).execute()

    @use_database
    def save_token(self, server, pan_user, token):
        # type: (str, str, str) -> None
        """Save a sync token for a pan user."""
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        PanSyncTokens.replace(user=user, token=token).execute()

    @use_database
    def load_token(self, server, pan_user):
        # type: (str, str) -> Optional[str]
        """Load a sync token for a pan user.

        Returns the sync token if one is found.
        """
        server = Servers.get(name=server)
        user = ServerUsers.get(server=server, user_id=pan_user)
        token = PanSyncTokens.get_or_none(user=user)
        if token:
            return token.token
        return None

    @use_database
    def save_server_user(self, server_name, user_id):
        # type: (str, str) -> None
        """Associate a user id with a homeserver, creating rows as needed."""
        server, _ = Servers.get_or_create(name=server_name)
        ServerUsers.insert(
            user_id=user_id, server=server
        ).on_conflict_ignore().execute()

    @use_database
    def load_all_users(self):
        """Return (user_id, device_id) pairs for every stored account."""
        users = []
        query = Accounts.select(Accounts.user_id, Accounts.device_id)
        for account in query:
            users.append((account.user_id, account.device_id))
        return users

    @use_database
    def load_users(self, server_name):
        # type: (str) -> List[Tuple[str, str]]
        """Return (user_id, device_id) pairs for accounts on one server."""
        users = []
        server = Servers.get_or_none(Servers.name == server_name)
        if not server:
            return []
        server_users = []
        for u in server.users:
            server_users.append(u.user_id)
        query = Accounts.select(Accounts.user_id, Accounts.device_id).where(
            Accounts.user_id.in_(server_users)
        )
        for account in query:
            users.append((account.user_id, account.device_id))
        return users

    @use_database
    def save_access_token(self, user_id, device_id, access_token):
        """Store the access token belonging to an existing account."""
        account = self._get_account(user_id, device_id)
        assert account
        AccessTokens.replace(account=account, token=access_token).execute()

    @use_database
    def load_access_token(self, user_id, device_id):
        # type: (str, str) -> Optional[str]
        """Return the stored access token for an account, or None."""
        account = self._get_account(user_id, device_id)
        if not account:
            return None
        try:
            return account.access_token[0].token
        except IndexError:
            return None

    @use_database
    def load_all_devices(self):
        # type (str, str) -> Dict[str, Dict[str, DeviceStore]]
        """Return per-user device key and trust info for all accounts."""
        store = dict()
        query = Accounts.select()
        for account in query:
            device_store = defaultdict(dict)
            for d in account.device_keys:
                if d.deleted:
                    continue
                try:
                    trust_state = d.trust_state[0].state
                except IndexError:
                    # No explicit trust record yet - treat as unset.
                    trust_state = TrustState.unset
                keys = {k.key_type: k.key for k in d.keys}
                device_store[d.user_id][d.device_id] = {
                    "user_id": d.user_id,
                    "device_id": d.device_id,
                    "ed25519": keys["ed25519"],
                    "curve25519": keys["curve25519"],
                    "trust_state": trust_state.name,
                    "device_display_name": d.display_name,
                }
            store[account.user_id] = device_store
        return store
class KeyDroppingSqliteStore(SqliteStore):
    """SqliteStore variant that keeps only the newest Megolm inbound group
    session per (account, sender_key, room_id)."""

    @use_database
    def save_inbound_group_session(self, session):
        """Save the provided Megolm inbound group session to the database.

        Args:
            session (InboundGroupSession): The session to save.
        """
        account = self._get_account()
        assert account
        # Drop previously stored sessions for the same sender/room so only
        # the most recent one survives.
        MegolmInboundSessions.delete().where(
            MegolmInboundSessions.sender_key == session.sender_key,
            MegolmInboundSessions.account == account,
            MegolmInboundSessions.room_id == session.room_id,
        ).execute()
        super().save_inbound_group_session(session)

    @use_database
    def load_inbound_group_sessions(self):
        """Load all sessions, wrapped in a KeyDroppingGroupSessionStore."""
        store = super().load_inbound_group_sessions()
        return KeyDroppingGroupSessionStore.from_group_session_store(store)
class KeyDroppingGroupSessionStore(GroupSessionStore):
    """GroupSessionStore that keeps only the newest session per
    (room, sender_key) pair."""

    # BUG FIX: this factory takes the source store as its only argument and
    # is invoked on the class; it was missing @staticmethod, so calling it
    # on an instance would silently pass the instance as `store`.
    @staticmethod
    def from_group_session_store(store):
        """Wrap an existing GroupSessionStore, adopting its entries."""
        new_store = KeyDroppingGroupSessionStore()
        new_store._entries = store._entries
        return new_store

    def add(self, session) -> bool:
        """Add *session*, dropping older sessions for the same room/sender.

        Returns False if the exact session is already present.
        """
        room_id = session.room_id
        sender_key = session.sender_key
        if session in self._entries[room_id][sender_key].values():
            return False
        # Only keep the newest session for this sender in this room.
        self._entries[room_id][sender_key].clear()
        self._entries[room_id][sender_key][session.id] = session
        return True
| StarcoderdataPython |
1792125 | <gh_stars>1-10
#!/usr/bin/env python
__author__ = '<NAME>'
import unittest
from mock import Mock
from pyon.util.unit_test import PyonTestCase
from pyon.util.int_test import IonIntegrationTestCase
from nose.plugins.attrib import attr
from pyon.core.exception import BadRequest, NotFound
from pyon.public import RT, IonObject
from interface.services.coi.iobject_management_service import ObjectManagementServiceClient
from ion.services.coi.object_management_service import ObjectManagementService
@attr('UNIT', group='coi')
class TestObjectManagementServiceUnit(PyonTestCase):
    """Unit tests for ObjectManagementService using a mocked resource
    registry client."""

    def setUp(self):
        self.mock_clients = self._create_service_mock('object_management')
        self.oms = ObjectManagementService()
        self.oms.clients = self.mock_clients
        # A well-formed YAML object definition...
        self.yaml_definition = '''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
  # String to put in origin of TimerEvent
  event_origin: ""
  # String to put in subtype field of TimerEvent
  event_subtype: ""
'''
        # ...and one missing the ':' after event_origin, i.e. invalid YAML.
        self.bad_yaml ='''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
  # String to put in origin of TimerEvent
  event_origin ""
  # String to put in subtype field of TimerEvent
  event_subtype: ""
'''

    def rr_return_value(self):
        # Mimics resource_registry.create's (resource_id, revision) pair.
        return ['123',1]

    def test_create_object(self):
        ot = Mock()
        ot.definition = self.bad_yaml
        ot.name = "name"
        # Invalid YAML definitions must be rejected.
        with self.assertRaises(BadRequest):
            self.oms.create_object_type(ot)
        # Names containing spaces must be rejected.
        ot.name = "bad name"
        with self.assertRaises(BadRequest):
            self.oms.create_object_type(ot)
        ot.name = "name"
        ot.definition = self.yaml_definition
        self.oms.clients.resource_registry.create.return_value = self.rr_return_value()
        object_id = self.oms.create_object_type(ot)
        self.assertEqual(object_id, '123')
        self.oms.clients.resource_registry.create.assert_called_once_with(ot)

    def test_read_and_update_object(self):
        with self.assertRaises(BadRequest):
            self.oms.read_object_type(None)
        ot = Mock()
        ot.definition = self.yaml_definition
        ot.name = "name"
        ot.description = "This is just a test, don't panic"
        self.oms.clients.resource_registry.read.return_value = ot
        ot_return = self.oms.read_object_type("123")
        self.assertTrue(ot_return is ot)
        self.oms.clients.resource_registry.read.assert_called_once_with('123','')
        # Updates with a space in the name or a bad definition must fail.
        ot_return.name = "new name"
        with self.assertRaises(BadRequest):
            self.oms.update_object_type(ot_return)
        ot_return.name = "new_name"
        ot_return.definition = self.bad_yaml
        with self.assertRaises(BadRequest):
            self.oms.update_object_type(ot_return)
        ot.definition = self.yaml_definition
        self.oms.clients.resource_registry.update.return_value = ['123', 2]
        ot_id = self.oms.update_object_type(ot_return)
        self.assertEqual(ot_id, '123')
        self.oms.clients.resource_registry.update.assert_called_once_with(ot_return)

    def test_read_not_found(self):
        self.oms.clients.resource_registry.read.side_effect = NotFound
        with self.assertRaises(NotFound):
            self.oms.read_object_type("0xBADC0FFEE")
        self.oms.clients.resource_registry.read.assert_called_once_with('0xBADC0FFEE','')

    def test_delete_object(self):
        with self.assertRaises(BadRequest):
            self.oms.delete_object_type(None)
        self.oms.clients.resource_registry.delete.return_value = True
        status = self.oms.delete_object_type("123")
        self.assertEqual(status, True)
        self.oms.clients.resource_registry.delete.assert_called_once_with("123")

    def test_delete_not_found(self):
        self.oms.clients.resource_registry.delete.side_effect = NotFound
        with self.assertRaises(NotFound):
            self.oms.delete_object_type("0xBADC0FFEE")
        self.oms.clients.resource_registry.delete.assert_called_once_with('0xBADC0FFEE')
@attr('INT', group='coi')
class TestObjectManagementService(IonIntegrationTestCase):
    """Integration tests that exercise ObjectManagementService against a
    real running container."""

    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        self.oms = ObjectManagementServiceClient()

    def test_create_object(self):
        yaml_str = '''
TimerSchedulerEntry2: !Extends_AbstractSchedulerEntry
  # String to put in origin of TimerEvent
  event_origin: ""
  # String to put in subtype field of TimerEvent
  event_subtype: ""
'''
        ot = IonObject(RT.ObjectType, {"definition": yaml_str})
        object_type_id = self.oms.create_object_type(ot)
        self.assertTrue(type(object_type_id) == str)
        self.oms.delete_object_type(object_type_id)

    def test_read_and_update_object(self):
        # Create object type
        # Read object type and validate
        # Update object type
        # Read back the object type and validate
        # Delete the object type
        object_definition = '''
TimerSchedulerEntry3: !Extends_AbstractSchedulerEntry
  # String to put in origin of TimerEvent
  event_origin: ""
  # String to put in subtype field of TimerEvent
  event_subtype: ""
'''
        ot = IonObject(RT.ObjectType, {"definition": object_definition})
        object_type_id = self.oms.create_object_type(ot)
        object_type = self.oms.read_object_type(object_type_id)
        self.assertEqual(object_definition,object_type.definition)
        object_definition2 = '''
TimerSchedulerEntry3: !Extends_AbstractSchedulerEntry
  # String to put in origin of TimerEvent
  event_origin: ""
  # String to put in subtype field of TimerEvent
  event_subtype: ""
'''
        object_type.definition = object_definition2
        self.oms.update_object_type(object_type)
        object_type = self.oms.read_object_type(object_type_id)
        self.assertEqual(object_definition2, object_type.definition)
        self.oms.delete_object_type(object_type_id)

    def test_read_object_not_found(self):
        object_type_id = "0xbadc0ffee"
        with self.assertRaises(NotFound):
            self.oms.read_object_type(object_type_id)

    def test_delete_object_not_found(self):
        object_type_id = "0xbadc0ffee"
        with self.assertRaises(NotFound):
            self.oms.delete_object_type(object_type_id)
| StarcoderdataPython |
136819 | import datetime
def _beforeDawn( hour ):
EARLY = 5
return hour < EARLY
def nextDaylightDate():
    """Return today's date while it is still before dawn, else tomorrow's."""
    now = datetime.datetime.today()
    base = datetime.date.today()
    # Before 5 AM today's daylight period is still ahead of us.
    return base if now.hour < 5 else base + datetime.timedelta(1)
| StarcoderdataPython |
197015 | <filename>src/station.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import artikcloud
from artikcloud.rest import ApiException
from smbus import SMBus
import Adafruit_BMP.BMP085 as BMP085 # Actually using it for BMP180 here
import Adafruit_BBIO.GPIO as GPIO
import requests
import grove_oled
def blink(pin, blinktime=0.1):
    """ Blink a single LED

    Convenience wrapper around blinks() for one pin; the LED is held high
    for *blinktime* seconds.
    """
    blinks([pin], blinktime)
def blinks(pins, blinktime=0.1):
    """ Blink a list of LEDs

    Drives every pin in *pins* high, waits *blinktime* seconds, then
    drives them all low again.
    """
    for pin in pins:
        GPIO.output(pin, GPIO.HIGH)
    time.sleep(blinktime)
    for pin in pins:
        GPIO.output(pin, GPIO.LOW)
def restart_app():
    """Restart application through the resin Supervisor.

    Exits the process with status 0 when the supervisor accepts the
    restart request; otherwise simply returns so the caller can retry.
    """
    params = {'apikey': os.getenv('RESIN_SUPERVISOR_API_KEY')}
    payload = {'appId': os.getenv('RESIN_APP_ID')}
    supervisor_address = os.getenv('RESIN_SUPERVISOR_ADDRESS')
    print("Restarting Application")
    # BUG FIX: the address was previously also passed as the second
    # positional argument (requests.post's `data=`), sending a bogus form
    # body alongside the intended `json=` payload.
    r = requests.post("{}/v1/restart".format(supervisor_address), params=params, json=payload)
    if (r.status_code == 200):
        sys.exit(0)
if __name__ == "__main__":
# Set up GPIO pins
pin0 = "P9_14" # GPIO_50, blue, down
GPIO.setup(pin0, GPIO.OUT)
GPIO.output(pin0, GPIO.LOW)
pin1 = "P9_16" # GPIO_51, red, up
GPIO.setup(pin1, GPIO.OUT)
GPIO.output(pin1, GPIO.LOW)
grove_oled.oled_init()
grove_oled.oled_clearDisplay()
grove_oled.oled_setNormalDisplay()
grove_oled.oled_setVerticalMode()
time.sleep(.1)
blinkshort = 0.05
blinklong = 0.8
sensor = BMP085.BMP085(busnum=2, i2c_interface=SMBus, mode=BMP085.BMP085_ULTRAHIGHRES)
# ARTIK Cloud setup
api_client = artikcloud.ApiClient()
DEVICE_ID = os.getenv('ARTIKCLOUD_DEVICE_ID')
DEVICE_TOKEN = os.getenv('ARTIKCLOUD_DEVICE_TOKEN')
# Setting up ARTIK Cloud connection
artikcloud.configuration.access_token = DEVICE_TOKEN
# Setting up messaging
messages_api = artikcloud.MessagesApi()
message = artikcloud.Message()
message.type = "message"
message.sdid = "{}".format(DEVICE_ID)
# Default is to monitor the temperature
TEST_PRESSURE = True if os.getenv('TEST_PRESSURE', default='0') == '1' else False
if TEST_PRESSURE:
reading = sensor.read_pressure
printreading = '{} Pa'
else:
reading = sensor.read_temperature
printreading = '{:.1f} C'
# Holt-Winters parameters
alpha = 0.15
beta = 0.05
# Set up initial values
x = reading()
a = x
b = 0
print("{},{},{}".format(x, a, b))
try:
PERIOD = int(os.getenv('PERIOD', default='1'))
except ValueError:
PERIOD = 1
if PERIOD < 1:
PERIOD = 1
try:
SENDPERIOD = int(os.getenv('SENDPERIOD', default='600'))
except ValueError:
SENDPERIOD = 600
if SENDPERIOD < 1:
SENDPERIOD = 1
try:
# different display trashhold, in units of X unit/min, above which do long blink
SENSOR_THRESHOLD = float(os.getenv('SENSOR_THRESHOLD', default='1.0'))
except ValueError:
SENSOR_THRESHOLD = 1.0
if SENSOR_THRESHOLD < 0:
SENSOR_THRESHOLD = 1.0
i = 0
error_count = 0
trend = ''
while True:
loopstart = time.time()
x = reading()
aold, bold = a, b
a = alpha * x + (1 - alpha) * (aold + bold)
b = beta * (a - aold) + (1 - beta) * bold
print("Reading: {0:0.1f}; a[t]: {1:0.3f}; b[t]: {2:0.3f}".format(x, a, b))
# Do long blink if temperature change is more than 1 unit/min
blinktime = blinklong if abs(b) >= SENSOR_THRESHOLD / 60.0 * PERIOD else blinkshort
if abs(b) < 0.001:
blinks([pin0, pin1], blinktime)
trend += '-'
elif b < 0:
blink(pin0, blinktime)
trend += '\\'
else:
blink(pin1, blinktime)
trend += '/'
if len(trend) > 12:
trend = trend[-12:]
grove_oled.oled_setTextXY(0, 0)
grove_oled.oled_putString(printreading.format(x))
grove_oled.oled_setTextXY(1, 0)
grove_oled.oled_putString(trend)
grove_oled.oled_setTextXY(2, 0)
grove_oled.oled_putString("Location:")
grove_oled.oled_setTextXY(3, 0)
grove_oled.oled_putString(os.getenv("LOCATION", "unknown"))
message.ts = int(round(time.time() * 1000))
message.data = {'Temperature': x}
if i % SENDPERIOD == 0:
try:
response = messages_api.send_message(message)
print(response)
except ApiException as e:
print("Error sending message to ARTIK Cloud:{}".format(str(e)))
error_count += 1
except:
print("Unexpected error:{}".format(sys.exc_info()[0]))
error_count += 1
finally:
if (error_count >= 3):
restart_app()
i += 1
# Wait until the new period starts
newsleep = (loopstart + PERIOD) - time.time()
if newsleep < 0:
print("WARNING: loop took {}s while period is {}!".format(PERIOD - newsleep, PERIOD))
else:
time.sleep(newsleep)
| StarcoderdataPython |
8849 | <gh_stars>0
from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res
class OrderDataGetUseCase(uc.UseCase):
    """Use case that forwards an order request for items to the repository
    and wraps the result in a success response."""

    def __init__(self, repo):
        self.repo = repo

    def execute(self, request_object):
        # NOTE(review): request validation is commented out, so invalid
        # request objects reach the repository unchecked - confirm intended.
        #if not request_object:
            #return res.ResponseFailure.build_from_invalid_request_object(request_object)
        storage_rooms = self.repo.order(items=request_object.items)
        return res.ResponseSuccess(storage_rooms)
| StarcoderdataPython |
1623937 | """Example GNN for QM9"""
import json
import tensorflow as tf
from datetime import datetime
from pathlib import Path
from gnn import GNN, get_dataset_from_files
from gnn.initial import PadInitializer
from gnn.message_passing import FeedForwardMessage
from gnn.readout import GatedReadout
from gnn.update import GRUUpdate
def main(
    log_dir,
    training_dir,
    validation_dir,
):
    """Train the example message-passing GNN on QM9 dipole moments.

    log_dir -- directory for TensorBoard logs and the history.json dump
    training_dir / validation_dir -- directories of per-molecule *.json files
    """
    # Constants
    node_feature_names = [
        "acceptor",
        "aromatic",
        "atomic_number",
        "donor",
        "element_c",
        "element_f",
        "element_h",
        "element_n",
        "element_o",
        "hybridization_null",
        "hybridization_sp",
        "hybridization_sp2",
        "hybridization_sp3",
        "hydrogen_count",
    ]
    edge_feature_names = [
        "distance", "order_1", "order_1_5", "order_2", "order_3"
    ]
    target = "dipole_moment"
    # Training params
    batch_size = 1
    n_epochs = 20
    train_step_per_epochs = 10000
    valid_step_per_epoch = 1000
    validation_freq = 1
    # Linear decay of the learning rate over the first 20k steps.
    learning_schedule_params = {
        "decay_steps": 20000,
        "end_learning_rate": 1.84e-5,
        "initial_learning_rate": 1.84e-4,
        "power": 1.0,
    }
    # Files
    training_fn = list(Path(training_dir).glob("*.json"))
    validation_fn = list(Path(validation_dir).glob("*.json"))
    # Optimizer Loss Metric
    learning_schedule = tf.keras.optimizers.schedules.PolynomialDecay(**learning_schedule_params)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_schedule)
    loss = tf.keras.losses.MeanSquaredError()
    metrics = [tf.keras.metrics.MeanAbsoluteError()]
    # Model
    message_passing_args = {
        "aggregation_fn": tf.math.reduce_sum,
        "activation": "relu",
        "layer": tf.keras.layers.Dense,
        "num_layers": 4,
        "units": 50,
    }
    readout_args = {
        "activation": "relu",
        "gate_activation": "sigmoid",
        "layer": tf.keras.layers.Dense,
        "num_layers": 3,
        "units": 50,
    }
    model = GNN(hidden_state_size=25, message_size=25, message_passing_iterations=4,
                output_size=1, initializer=PadInitializer, message_passing=FeedForwardMessage,
                update=GRUUpdate, readout=GatedReadout, message_passing_args=message_passing_args,
                readout_args=readout_args)
    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics
    )
    # Callbacks: one timestamped TensorBoard run per invocation.
    log_name = f"gnn_qm9_{datetime.now().strftime('%Y%m%d-%H%M%S')}"
    log_subdir = log_dir / log_name
    callbacks = [
        tf.keras.callbacks.TensorBoard(
            log_dir=log_subdir, update_freq='epoch', write_images=False, histogram_freq=1)
    ]
    # Datasets
    training = get_dataset_from_files(
        training_fn, node_feature_names, edge_feature_names, target, batch_size=batch_size)
    validation = get_dataset_from_files(
        validation_fn, node_feature_names, edge_feature_names, target, batch_size=1)
    # Fit History
    loss = model.fit(
        training, epochs=n_epochs, steps_per_epoch=train_step_per_epochs,
        validation_data=validation, validation_freq=validation_freq,
        validation_steps=valid_step_per_epoch, callbacks=callbacks, use_multiprocessing=True)
    # Persist the training curves alongside the TensorBoard logs.
    json.dump(loss.history, open(Path(log_subdir) / "history.json", "w"))
if __name__ == "__main__":
# Paths
workspace_root = Path(__file__).parent.parent.absolute()
model_path = workspace_root / Path("ignnition/qm9")
training_dir = model_path / Path("data/train")
validation_dir = model_path / Path("data/validation")
log_dir = model_path / Path("logs")
main(
log_dir=log_dir,
training_dir=training_dir,
validation_dir=validation_dir,
)
| StarcoderdataPython |
1752388 | <reponame>highfestiva/life
# Author: <NAME>
# Copyright (c) 2002-2009, Righteous Games
import os
import sys
vcver = 10
NMAKE = "bin/nmake.exe"
VCBUILD = "vcpackages/vcbuild.exe"
NETBUILD = r'C:\Windows\Microsoft.NET\Framework\v4.0.30319\MSBuild.exe'
pause_on_error = False
def _getosname():
if sys.platform == "win32": return "Windows"
if sys.platform == "cygwin": return "Cygwin"
if sys.platform.startswith("linux"): return "Linux"
if sys.platform == "darwin": return "Mac"
if os.name == "nt": return "Windows"
if os.name == "posix": return "Posix"
if os.name == "mac": return "Mac"
return sys.platform
def _gethwname():
if os.name == "nt":
ccver = sys.version.split("[")[1][:-1]
if ccver.find("32 bit") >= 0:
return "x86"
if ccver.find("64 bit") >= 0:
return "x64"
import subprocess
machine = subprocess.Popen(["uname", "-m"], stdout=subprocess.PIPE).communicate()[0]
machine = str(machine.strip())
if machine.startswith("b'") and machine.endswith("'"):
machine = machine[2:-1]
return machine
def _getdatename():
import datetime
now = datetime.datetime.isoformat(datetime.datetime.now())
return now.split("T")[0].replace("-", "")
def _filetime(filename):
return os.stat(filename).st_mtime
def _verify_base_dir():
    """Abort the build unless run from the repository root (has a .git)."""
    if os.path.exists(".git"):
        return
    print("Must be in base dir to build (currently in %s)!" % os.path.abspath(os.path.curdir))
    raise BaseException("Shit hit the fan!")
def _sorted_natural(l):
import re
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key=alphanum_key)
def _getmake(builder):
    """Locate the build tool *builder* for this host.

    On Windows, searches Program Files for the newest Visual Studio, sets
    the module-global ``vcver`` accordingly, and substitutes MSBuild for
    vcbuild on MSVC >= 10.  On POSIX, falls back to /bin/make or
    /usr/bin/make.  Returns the tool path, or None when not found.
    """
    global vcver
    winprogs = os.getenv("PROGRAMFILES")
    if winprogs:
        # Probe the common Program Files directory name variants.
        if os.path.exists(winprogs+" Files"):
            winprogs += " Files"
        if os.path.exists(winprogs+" (x86)"):
            winprogs += " (x86)"
        dirname = os.getenv("VCINSTALLDIR")
        if not dirname:
            import glob
            # Pick the highest-versioned "Microsoft Visual*" install.
            names = _sorted_natural(glob.glob(os.path.join(winprogs, "Microsoft Visual*")))
            if len(names) == 0:
                print("Visual Studio not installed?")
                return None  # No Visual Studio installed.
            dirname = os.path.join(names[-1], "VC")
            if not os.path.exists(dirname):
                print("GOT HERE!", dirname)
                return None  # Visual Studio might be installed, but not VC++.
        make_exe = os.path.join(dirname, builder)
        # Side effect: record the detected MSVC major version globally.
        if dirname.find("Studio 10.0") > 0: vcver = 10
        elif dirname.find("Studio 9") > 0: vcver = 9
        elif dirname.find("Studio 8") > 0: vcver = 8
        else:
            print("Unknown MSVC version!")
            sys.exit(1)
        if vcver >= 10 and builder == VCBUILD:
            # vcbuild was replaced by MSBuild from VS2010 onwards.
            make_exe = NETBUILD
        if not os.path.exists(make_exe):
            print('Could not find %s.' % make_exe)
            make_exe = None
        #else:
        #    make_exe = '"'+make_exe+'"'
    elif os.path.exists("/bin/make"):
        make_exe = "/bin/make"
    elif os.path.exists("/usr/bin/make"):
        make_exe = "/usr/bin/make"
    else:
        make_exe = None
    #if not make_exe:
    #    print("Warning: no build environment detected!")
    return make_exe
def _getvcver():
    """Return the MSVC major version recorded by the last _getmake() call."""
    return vcver
def _hasdevenv(verbose=False):
    """Return truthy when both nmake and the VC build tool are available."""
    has_toolchain = _getmake(NMAKE) and _getmake(VCBUILD)
    if verbose and not has_toolchain:
        print("Warning: no C++ development environment detected.")
    return has_toolchain
def fixbuildcmd(args):
    """Translate legacy vcbuild-style switches into MSBuild equivalents.

    For MSVC >= 10 the build runs under MSBuild, whose command-line options
    differ from vcbuild's; "Config|Platform" pairs expand into two /p:
    options.  Older toolchains get *args* back unchanged.
    """
    if vcver >= 10:
        # vcbuild switch -> MSBuild switch. dict.get replaces the original
        # bare `except:` lookup, which silently swallowed all exceptions.
        vc10a = {'/useenv': '/p:useenv=true', '/rebuild': '/t:rebuild',
                 '/clean': '/t:clean', '/M2': '/M:2', '/M4': '/M:4'}
        translated = [vc10a.get(a, a) for a in args]
        args = []
        for b in translated:
            if '|' in b:
                args += '/p:configuration={} /p:platform={}'.format(*b.split('|')).split()
            else:
                args += [b]
    return args
def _run(cmdlist, when):
    """Run *cmdlist* as a subprocess; on failure report *when* and exit.

    On Windows, temporarily converts an MSYS-style PATH into native form
    so the child process can resolve its tools, restoring it afterwards.
    """
    path = None
    if os.name == "nt":
        path = os.getenv("PATH")
        # ":/c/" -> ";C:\" and forward to back slashes for the child env.
        winpath = path.replace(":/c/", ";C:\\").replace("/", "\\")
        os.environ["PATH"] = winpath
        cmdlist[0] = cmdlist[0].replace("c:\\", "C:\\")
    import subprocess
    #print("Running %s..." % str(cmdlist))
    rc = subprocess.call(cmdlist)
    if path:
        # Restore the original PATH.
        os.environ["PATH"] = path
    if rc != 0:
        err = "Error %i when %s!" % (rc, when)
        # Optionally pause so the error remains visible in a console window.
        input(err+"\nPress enter to continue.") if pause_on_error else print(err)
        sys.exit(1)
def _zipfiles(zf, include, filenames):
import glob
for filename in filenames:
if not include(filename):
continue
if os.path.isdir(filename):
fs = glob.glob(os.path.join(filename, "*"))
_zipfiles(zf, include, fs)
else:
zf.write(filename)
def _zipdir(dirname, include, arcname):
    """Create ZIP archive *arcname* from the contents of *dirname*.

    *include* filters entries; the archive itself is always excluded so
    it is never added to itself.
    """
    import glob
    import zipfile
    zf = zipfile.ZipFile(arcname, "w", zipfile.ZIP_DEFLATED)
    fs = glob.glob(os.path.join(dirname, "*"))
    def arc_no_inc(filename):
        # Exclude the archive being written, then defer to *include*.
        if filename.endswith(arcname):
            return False
        return include(filename)
    _zipfiles(zf, arc_no_inc, fs)
    zf.close()
def _shzipdir(dirname, arcname):
    """Zip *dirname* into *arcname* using the external `zip` tool."""
    _run(["zip","-r9",arcname,dirname], "zipping archive")
def _targzdir(dirname, arcname):
    """Create a .tar.gz of *dirname* at *arcname* via external tar + gzip.

    *arcname* is expected to end in ".gz": tar writes the bare .tar name,
    then gzip -9 compresses it into the final archive.
    """
    _run(["tar","--numeric-owner","--owner=0","--group=0","-cpf",arcname.replace('.gz',''),dirname], "taring archive")
    _run(["gzip","-9",arcname.replace('.gz','')], "gzipping archive")
def _7zdir(dirname, arcname):
    """Create a 7z archive of *dirname* at *arcname* via the external 7z tool."""
    _run(["7z","a",arcname,dirname], "7zipping archive")
| StarcoderdataPython |
1799443 | from datetime import datetime
from pytz import timezone, utc
from authz.config import Config
def now(name=Config.TIMEZONE):
    """Return the current time in timezone *name* as a naive datetime
    truncated to whole seconds.

    Args:
        name: IANA timezone name; defaults to the configured timezone.
    """
    tz = timezone(name)
    # datetime.utcnow() is deprecated; datetime.now(utc) produces the same
    # aware UTC timestamp directly.
    return datetime.now(utc).astimezone(tz).replace(
        microsecond=0, tzinfo=None)
| StarcoderdataPython |
1650332 | <reponame>shaandesai1/AIMS
from data.datasets import dataset_mnist, dataset_boston, dataset_california, dataset_mini_mnist
def get_datasets(args):
    """Resolve the dataset named by args.dataset, record its dimensions and
    split sizes on *args*, and return (train, val, test) datasets.

    Raises NotImplementedError for unknown dataset names.
    """
    print('Dataset: \t {}'.format(args.dataset.upper()))
    # name -> (n_features, n_classes, loader)
    registry = {
        'mnist': (784, 10, dataset_mnist),
        'mini-mnist': (784, 10, dataset_mini_mnist),
        'boston': (13, 1, dataset_boston),
        'california': (80000, 1, dataset_california),
    }
    if args.dataset not in registry:
        raise NotImplementedError
    args.n_features, args.n_classes, loader = registry[args.dataset]
    dataset_train, dataset_val, dataset_test = loader()
    args.train_size = len(dataset_train)
    args.val_size = len(dataset_val)
    args.test_size = len(dataset_test)
    return dataset_train, dataset_val, dataset_test
| StarcoderdataPython |
3394443 | <reponame>Arachnid/tweetengine<filename>src/tweetengine/i18n.py<gh_stars>1-10
""" load the message catalogs and provide them as ztk utilities."""
import os
from zope.interface import implements
from zope.component import getSiteManager
from zope.i18n.interfaces import (
ITranslationDomain,
INegotiator,
)
from zope.i18n import interpolate
from zope.i18n import translate
from zope.i18n import MessageFactory
from zope.i18n.translationdomain import TranslationDomain
from zope.i18n.gettextmessagecatalog import GettextMessageCatalog
from zope.i18nmessageid import Message
from chameleon.zpt import template
from chameleon.zpt.loader import TemplateLoader
from google.appengine.ext.webapp import Request
_ = MessageFactory('tweetengine')

basepath = os.path.join(os.path.dirname(__file__), 'locales')
gsm = getSiteManager()
available_languages = []
# Walk locales/<lang>/LC_MESSAGES/<domain>.mo and register a gettext
# catalog for every (language, domain) pair as an ITranslationDomain
# utility with the site manager.
for lang in os.listdir(basepath):
    if lang.endswith('.pot'):
        continue
    langpath = os.path.join(basepath, lang, 'LC_MESSAGES')
    available_languages.append(lang)
    for file in os.listdir(langpath):
        domainpath = os.path.abspath(os.path.join(langpath, file))
        if not file.endswith('.mo'):
            continue
        domainname = file[:-3]
        # Reuse an already-registered domain so multiple languages share it.
        domain = gsm.queryUtility(ITranslationDomain, domainname)
        if domain is None:
            domain = TranslationDomain(domainname)
            gsm.registerUtility(domain, ITranslationDomain, name=domainname)
        domain.addCatalog(GettextMessageCatalog(lang, domainname, domainpath))
# negotiation sucks :( because Chameleon never passes a context into the
# translate method of the template. but the target_language is passed.
# so we need to set the target language to what comes with the request as
# 'Accept-Language' header. The RequestNegotiator then get this header value
class RequestNegotiator(object):
    """INegotiator utility that picks a language from an Accept-Language
    HTTP header value (or a webapp Request carrying one)."""
    implements(INegotiator)

    def getLanguage(self, available_languages, accept_languages_header):
        # Accept either the raw header string or a Request object.
        if isinstance(accept_languages_header, Request):
            accept_languages_header = accept_languages_header.headers.get('Accept-Language', '')
        accept_languages = self.accept_languages(accept_languages_header)
        # Return the first accepted language that is actually available;
        # implicitly returns None when there is no match.
        for accepted_language in accept_languages:
            if accepted_language in available_languages:
                return accepted_language

    def accept_languages(self, browser_pref_langs):
        """Parses the request and return language list.

        browser_pref_langs is the plain Accept-Language http request header
        value.

        Stolen from Products.PloneLanguageTool, under GPL (c) Plone Foundation,
        slightly modified.
        """
        browser_pref_langs = browser_pref_langs.split(',')
        i = 0
        langs = []
        length = len(browser_pref_langs)
        # Parse quality strings and build a tuple like
        # ((float(quality), lang), (float(quality), lang))
        # which is sorted afterwards
        # If no quality string is given then the list order
        # is used as quality indicator
        for lang in browser_pref_langs:
            lang = lang.strip().lower().replace('_', '-')
            if lang:
                l = lang.split(';', 2)
                quality = []
                if len(l) == 2:
                    try:
                        q = l[1]
                        if q.startswith('q='):
                            q = q.split('=', 2)[1]
                        quality = float(q)
                    except:
                        # Malformed q=... values fall back to list order.
                        pass
                if quality == []:
                    quality = float(length-i)
                language = l[0]
                langs.append((quality, language))
                if '-' in language:
                    # Also offer the base language ("en" for "en-us"),
                    # slightly lower quality than the full tag.
                    baselanguage = language.split('-')[0]
                    langs.append((quality-0.001, baselanguage))
            i = i + 1
        # Sort and reverse it
        langs.sort()
        langs.reverse()
        # Filter quality string
        langs = map(lambda x: x[1], langs)
        return langs
# Register the negotiator as the site-wide INegotiator utility.
negotiator = RequestNegotiator()
gsm.registerUtility(negotiator, INegotiator)
# we need a smarter translation method than Chameleon default
# maybe its slower, but we can introduce caching later
def smart_translate(msgid, domain=None, mapping=None, context=None,
                    target_language=None, default=None):
    """ target_language is expected to be the http accept-language header
    """
    if msgid is None:
        return
    if target_language is not None:
        # NOTE(review): the header arrives via target_language (Chameleon
        # never passes a context), so it is forwarded as *context* for the
        # RequestNegotiator and target_language is dropped -- confirm.
        return translate(
            msgid, domain=domain, mapping=mapping, context=target_language,
            target_language=None, default=default)
    # No language to negotiate: fall back to the message default text.
    if isinstance(msgid, Message):
        default = msgid.default
        mapping = msgid.mapping
    if default is None:
        default = unicode(msgid)
    if not isinstance(default, basestring):
        return default
    # Substitute ${...} placeholders from the mapping.
    return interpolate(default, mapping)
class SmartI18nPageTemplateFile(template.PageTemplateFile):
    # XML page template that resolves messages through smart_translate.
    translate = staticmethod(smart_translate)

class SmartI18nPageTextTemplateFile(template.PageTextTemplateFile):
    # Text page template that resolves messages through smart_translate.
    translate = staticmethod(smart_translate)

# Make the template loader hand out the smart-translating template classes.
TemplateLoader.formats = {
    "xml" : SmartI18nPageTemplateFile,
    "text" : SmartI18nPageTextTemplateFile,
}
1692202 | <gh_stars>1-10
def xm_version():
    """Return the component version as a dict of version fields."""
    major, minor, alter, build = 2, 1, 5, 0
    return {"major": major, "minor": minor, "alter": alter, "build": build}
| StarcoderdataPython |
170515 | <gh_stars>0
def read_spreadsheet():
    """Read Data/day2.txt and return it as a list of rows of ints."""
    file_name = "Data/day2.txt"
    # Use a context manager so the file handle is closed (the original
    # left the handle open).
    with open(file_name) as file:
        return [list(map(int, line.split())) for line in file]
def checksum(spreadsheet):
    """Print the sum over rows of (max - min) — AoC 2017 day 2 part one."""
    total = sum(max(row) - min(row) for row in spreadsheet)
    print(f"Part one: {total}")
def divisible_checksum(spreadsheet):
    """Print the sum over rows of the quotient of each row's evenly
    dividing pair — AoC 2017 day 2 part two."""
    total = 0
    for row in spreadsheet:
        for index, value in enumerate(row):
            for other in row[index + 1:]:
                if value % other == 0:
                    total += value // other
                if other % value == 0:
                    total += other // value
    print(f"Part two: {total}")
# Entry point: load the puzzle input and print both checksum answers.
if __name__ == "__main__":
    spreadsheet = read_spreadsheet()
    checksum(spreadsheet)
    divisible_checksum(spreadsheet)
| StarcoderdataPython |
3256058 | <filename>kokobot/cogs/random.py
import asyncio
import logging
import random as rng
import typing
import discord
from discord.ext import commands
from discord.ext.commands.errors import BadArgument
logger = logging.getLogger('discord.kokobot.random')

# Unicode codepoints for the reaction emoji used by the mixer UI.
emoji_bank = {
    ':regional_indicator_j:': '\U0001F1EF',
    ':regional_indicator_o:': '\U0001F1F4',
    ':regional_indicator_i:': '\U0001F1EE',
    ':regional_indicator_n:': '\U0001F1F3',
    ':twisted_rightwards_arrows:': '\U0001F500',
    ':octagonal_sign:': '\U0001F6D1',
}
# Cog exposing pseudo-random utilities: a reaction-driven group mixer and
# random-number commands. Mixer state lives in self.messages, keyed by the
# Discord id of the mixer's embed message.
class Random(commands.Cog):
    def __init__(self, bot):
        # config
        self.config = {
            'max_groups': 5,
        }
        self.bot = bot
        self.owner = self.bot.get_user(self.bot.owner_id)
        rng.seed()
        # message id -> {'message', 'people', 'groups', 'groups_list',
        #                'owner', 'future'} for each active mixer.
        self.messages = {}
        self.bot.add_listener(self.on_ready, 'on_ready')
        self.bot.add_listener(self.react, 'on_reaction_add')
        self.bot.add_listener(self.unreact, 'on_reaction_remove')

    def __str__(self):
        return 'kokobot.cogs.Random'

    async def on_ready(self):
        # Re-resolve the owner once the bot's user cache is populated.
        self.owner = self.bot.get_user(self.bot.owner_id)

    async def clear_message(self, message, future):
        # Expire the mixer after 60 seconds unless *future* was cancelled
        # in the meantime (i.e. the display was refreshed or stopped).
        await asyncio.sleep(60)
        if not future.cancelled():
            if message.id in self.messages:
                self.messages.pop(message.id)
            await message.clear_reactions()
            embed = message.embeds[0]
            embed.description += "\n\nMixer has stopped."
            await message.edit(embed=embed)

    async def react(self, reaction, user):
        # Dispatch a reaction added to an active mixer message.
        if (user == self.bot.user
                or not reaction.message.id in self.messages):
            return
        message = reaction.message
        if reaction.emoji == emoji_bank[':twisted_rightwards_arrows:']:
            # Shuffle
            await reaction.remove(user)
            if len(self.messages[message.id]['people']) == 0:
                self.messages[message.id]['groups_list'] = None
            else:
                self.messages[message.id]['groups_list'] = []
                for _ in range(self.messages[message.id]['groups']):
                    self.messages[message.id]['groups_list'].append([])
                # Round-robin the shuffled participants into the groups.
                shuffle = list(self.messages[message.id]['people'])
                rng.shuffle(shuffle)
                for i, p in enumerate(shuffle):
                    g = i % self.messages[message.id]['groups']
                    self.messages[message.id]['groups_list'][g].append(p)
            await self.mixer_display(self.messages[message.id]['message'],
                                     self.messages[message.id]['people'],
                                     self.messages[message.id]['groups'],
                                     self.messages[message.id]['groups_list'])
        elif reaction.emoji == emoji_bank[':octagonal_sign:']:
            # Stop
            await reaction.remove(user)
            # Only the mixer's owner may stop it.
            if user == self.messages[message.id]['owner']:
                if 'future' in self.messages[message.id]:
                    self.messages[message.id]['future'].cancel()
                if message.id in self.messages:
                    self.messages.pop(message.id)
                await message.clear_reactions()
                embed = message.embeds[0]
                embed.description += "\n\nMixer has stopped."
                await message.edit(embed=embed)
        else:
            # Add user
            if not user in self.messages[message.id]['people']:
                self.messages[message.id]['people'].add(user)
                await self.mixer_display(self.messages[message.id]['message'],
                                         self.messages[message.id]['people'],
                                         self.messages[message.id]['groups'],
                                         self.messages[message.id]['groups_list'])

    async def unreact(self, reaction, user):
        # Remove participants whose join reactions were withdrawn.
        if (user == self.bot.user
                or not reaction.message.id in self.messages):
            return
        if (reaction.emoji != emoji_bank[':twisted_rightwards_arrows:']
                and reaction.emoji != emoji_bank[':octagonal_sign:']):
            # Rebuild the set of users still reacting to this message.
            users = set()
            message = reaction.message
            for reaction in message.reactions:
                async for user in reaction.users():
                    if user != self.bot.user:
                        users.add(user)
            remove = []
            for p in self.messages[message.id]['people']:
                if not p in users:
                    remove.append(p)
            for p in remove:
                if p in self.messages[message.id]['people']:
                    self.messages[message.id]['people'].remove(p)
            if remove:
                await self.mixer_display(self.messages[message.id]['message'],
                                         self.messages[message.id]['people'],
                                         self.messages[message.id]['groups'],
                                         self.messages[message.id]['groups_list'])

    @commands.group()
    async def random(self, ctx):
        """ -- Random
        Anything that's random. Well, it's actually pseudo-random but okay.
        Use $help random <command> for more information.
        """
        if ctx.invoked_subcommand is None:
            await ctx.send('Use `$help random` for more information')

    @random.command()
    async def mixer(self, ctx, groups=2):
        """ -- Randomize into groups
        Usage: $random mixer [groups]
        Example: $random mixer 3
        By default, mix into 2 groups.
        Only the owner of the mixer may shuffle and stop the mixer.
        """
        if groups < 2:
            await ctx.send('Invalid number of groups to mix.')
            return
        if groups > self.config['max_groups']:
            await ctx.send('Cannot mix more than {} groups.'.format(self.config['max_groups']))
            return
        # Create the embed
        title = f"Random Mixer for {groups} Groups"
        # Random embed accent colour packed as 0xRRBBGG-style integer.
        r = rng.randint(0, 255)
        b = rng.randint(0, 255)
        g = rng.randint(0, 255)
        colour = r*(16**4) + b*(16**2) + g
        embed = discord.Embed(title=title, description="Initializing...", colour=colour)
        embed.set_author(name=ctx.message.author, icon_url=ctx.message.author.avatar_url)
        message = await ctx.send(embed=embed)
        # Add reactions
        await message.add_reaction(emoji_bank[':regional_indicator_j:'])
        await message.add_reaction(emoji_bank[':regional_indicator_o:'])
        await message.add_reaction(emoji_bank[':regional_indicator_i:'])
        await message.add_reaction(emoji_bank[':regional_indicator_n:'])
        await message.add_reaction(emoji_bank[':twisted_rightwards_arrows:'])
        await message.add_reaction(emoji_bank[':octagonal_sign:'])
        # Add to messages
        self.messages[message.id] = {}
        self.messages[message.id]['message'] = message
        self.messages[message.id]['people'] = set()
        self.messages[message.id]['groups'] = groups
        self.messages[message.id]['groups_list'] = None
        self.messages[message.id]['owner'] = ctx.message.author
        logger.info("Created a random mixer for {}".format(ctx.message.author))
        # Display and enqueue future
        await self.mixer_display(self.messages[message.id]['message'],
                                 self.messages[message.id]['people'],
                                 self.messages[message.id]['groups'],
                                 self.messages[message.id]['groups_list'])

    async def mixer_display(self, message, people, groups, groups_list):
        # Cancel any pending expiry timer before re-rendering.
        if 'future' in self.messages[message.id]:
            self.messages[message.id]['future'].cancel()
        embed = message.embeds[0]
        desc = f"React on {emoji_bank[':regional_indicator_j:']} {emoji_bank[':regional_indicator_o:']} {emoji_bank[':regional_indicator_i:']} {emoji_bank[':regional_indicator_n:']} below to join, unreact to leave.\n"
        desc += f"Click {emoji_bank[':twisted_rightwards_arrows:']} to shuffle, {emoji_bank[':octagonal_sign:']} to stop.\n\n"
        # Add people
        if len(people) > 0:
            desc += "**People in this mixer:**\n"
            for p in people:
                desc += f"> {p}\n"
            desc += "\n"
        # Groups
        if not groups_list is None:
            for i in range(groups):
                if len(groups_list[i]) > 0:
                    desc += f"**Group {i+1}**\n"
                    for p in groups_list[i]:
                        desc += f"> {p}\n"
                    desc += "\n"
        embed.description = desc
        await message.edit(embed=embed)
        # Arm a fresh 60s expiry timer for this mixer.
        # NOTE(review): clear_message is awaited directly, so this call
        # does not return until the 60s sleep finishes -- confirm intended
        # (a task via loop.create_task would run it in the background).
        future = asyncio.Future()
        self.messages[message.id]['future'] = future
        await self.clear_message(self.messages[message.id]['message'],
                                 future)

    @random.command()
    async def number(self, ctx, from_num: int=0, to_num: int=100):
        """ -- Random number generator
        Usage: $random number [from_num] [to_num]
        Example: $random number 20 30
        Gives a number in the range [from_num, to_num], inclusive.
        Same as '$rng' command.
        """
        if from_num > to_num:
            await ctx.send('Invalid range.')
        else:
            await ctx.send('{}'.format(rng.randint(from_num, to_num)))

    @number.error
    async def number_error(self, ctx, error):
        if isinstance(error, BadArgument):
            await ctx.send('Invalid arguments for `$random number`. Use `$help random number` for more information.')
        else:
            logger.info('Random number got system error: {}'.format(error))
            await ctx.send('Bot error, {} pls fix!'.format(self.owner.mention))

    @commands.command()
    async def rng(self, ctx, from_num: int=0, to_num: int=100):
        """ -- Random number generator
        Usage: $rng [from_num] [to_num]
        Example: $rng 20 30
        Gives a number in the range [from_num, to_num], inclusive.
        Same as '$random number' command.
        """
        # Delegates to the $random number implementation.
        await self.number(ctx, from_num, to_num)

    @rng.error
    async def rng_error(self, ctx, error):
        if isinstance(error, BadArgument):
            await ctx.send('Invalid arguments for `$rng`. Use `$help rng` for more information.')
        else:
            logger.info('Random number got system error: {}'.format(error))
            await ctx.send('Bot error, {} pls fix!'.format(self.owner.mention))
| StarcoderdataPython |
3298892 | <reponame>LiteID/LiteID.github.io
import sys
import re
# Python 2 script: rewrite a Jekyll-style file in place, regenerating the
# "menu:" front-matter block from the file's markdown headings.
if len(sys.argv) != 2:
    print "Usage:\n\tpython \"navigation-menu-gen.py\" <filename>"
    exit(1)

f = open(sys.argv[1], 'r')
file = f.read()
f.close()
f = open(sys.argv[1], 'w')
try:
    # Keep everything up to the menu marker, then regenerate the menu.
    f.write(file.split('menu: | ')[0]+'menu: | \n')
    for m in re.finditer(r"\n[#]+[ ]*(.*?)[ ]*\n", file):
        # For link-style headings, extract the [text] part.
        if '[' in m.group(1):
            m = re.match(r"\[(.*?)\]", m.group(1))
        link = m.group(1).strip().replace(' ', '-').lower()
        text = m.group(1).strip()
        f.write("  <a href=\"#{}\" style=\"margin-bottom:1px\">{}</a><br>\n".format(link, text))
    # Re-append the document body following the second '---' delimiter.
    # NOTE(review): the success path never closes f -- confirm intended.
    f.write('---'+file.split('---', 2)[-1])
except:
    # On any failure, restore the original content before re-raising.
    f.close()
    f = open(sys.argv[1], 'w')
    f.write(file)
    f.close()
    raise
162499 | <gh_stars>1-10
import numpy
def is_positive_semidefinite(matrix: numpy.ndarray, tol: float = 1e-10) -> bool:
    """Check whether a symmetric matrix is positive semi-definite.

    The previous implementation attempted a Cholesky decomposition, which
    succeeds only for strictly positive *definite* matrices: a singular PSD
    matrix (e.g. the zero matrix) was incorrectly reported as not PSD.
    Instead, check that every eigenvalue is non-negative up to *tol*.

    Parameters
    ----------
    matrix : numpy.ndarray
        A symmetric (Hermitian) matrix; only the lower triangle is used.
    tol : float, optional
        Absolute tolerance below zero allowed for eigenvalues, to absorb
        floating-point round-off.

    Returns
    -------
    bool
        True if the matrix is positive semidefinite, else False.
    """
    try:
        eigenvalues = numpy.linalg.eigvalsh(matrix)
    except numpy.linalg.LinAlgError:
        return False
    return bool(numpy.min(eigenvalues) >= -tol)
| StarcoderdataPython |
14727 | import getpass
# Python 2 demo of the getpass module: prompt twice for a password without
# echoing the input, then report the login name.
print getpass.getpass()
print getpass.getpass(prompt="Custom Prompt:")
print "user login name:", getpass.getuser()
| StarcoderdataPython |
3372399 | """Модуль получения абстрактных моделей, содержащихся в уведомлениях приложения."""
from typing import Type
from django.apps import apps
from .mailing import AbstractMailing
from .notification import AbstractNotice, AbstractNotification
from ..settings import notifications_settings
class Notice(AbstractNotice):
    """Concrete model for storing notices."""

    class Meta(AbstractNotice.Meta):
        """Meta options for the notice storage model."""

        pass
def get_notice_model() -> Type[AbstractNotice]:
    """Return the notice model class configured in the settings."""
    return apps.get_model(notifications_settings.NOTICE_MODEL)
class Notification(AbstractNotification):
    """Concrete model for storing dispatched notifications."""

    class Meta(AbstractNotification.Meta):
        """Meta options for the dispatched-notification model."""

        pass
def get_notification_model() -> Type[AbstractNotification]:
    """Return the notification-dispatch model class configured in the settings."""
    return apps.get_model(notifications_settings.NOTIFICATION_MODEL)
class Mailing(AbstractMailing):
    """Concrete model for storing sent e-mails."""

    class Meta(AbstractMailing.Meta):
        """Meta options for the sent-e-mail model."""

        pass
def get_mailing_model() -> Type[AbstractMailing]:
    """Return the sent-e-mail model class configured in the settings."""
    return apps.get_model(notifications_settings.MAILING_MODEL)
| StarcoderdataPython |
Given an integer array nums, move all the even integers to the beginning of the array, followed by all the odd integers.
Return any array that satisfies this condition.
Example 1:
Input: nums = [3,1,2,4]
Output: [2,4,3,1]
Explanation: The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Example 2:
Input: nums = [0]
Output: [0]
Constraints:
1 <= nums.length <= 5000
0 <= nums[i] <= 5000
Solution:
class Solution:
    def sortArrayByParity(self, nums: List[int]) -> List[int]:
        """Return nums with every even value first, preserving the relative
        order within the even group and within the odd group."""
        evens = [value for value in nums if value % 2 == 0]
        odds = [value for value in nums if value % 2 != 0]
        return evens + odds
| StarcoderdataPython |
ALL = 'All servers'

def caller_check(servers=ALL):
    """Decorator factory meant to restrict which servers may invoke the
    wrapped function; currently a no-op pass-through."""
    def decorator(func):
        # TODO: To be implemented. Could get current_app and check it.
        return func
    return decorator
| StarcoderdataPython |
1750033 | <reponame>Gordonei/pyepd
# PyEPD
# <NAME> (<EMAIL>)
# November 2017
import numpy
from PIL import Image
from contextlib import contextmanager
@contextmanager
def acquire_and_normalise(filename, display_panel_controller, background_colour=-1, rotate_count=0):
    """
    Reads in input image, and converts to correct size. Adds a border if necessary.

    :param filename: path to image file to use
    :param display_panel_controller: display panel controller that is being used.
    :param background_colour: default background colour value to use (0 is black, 255 is white, -1 is median).
    :param rotate_count: rotate image by specified number of 90° rotations
    :return: PIL image object
    """
    x_res = display_panel_controller.x_res
    y_res = display_panel_controller.y_res
    size = (x_res, y_res)
    with Image.open(filename) as img:
        img = img.rotate(90 * rotate_count, expand=True)
        # Coping with images of a different size
        if img.size[0] != x_res or img.size[1] != y_res:
            # Resizing, preserving aspect ratio.
            img.thumbnail(size, Image.ANTIALIAS)
            # Finding the median background colour. Fixed: use equality
            # instead of `is -1`, which relies on CPython's small-int
            # interning and fails for e.g. numpy integers.
            if background_colour == -1:
                img_data = numpy.asarray(img)
                background_colours = tuple(
                    numpy.median(img_data, axis=(0, 1))
                    .astype(numpy.uint8)
                )
            else:
                background_colours = (background_colour, background_colour, background_colour)
            # Centering the image, and adding a border
            background = Image.new('RGB', size, background_colours)
            background.paste(img,
                             ((x_res - img.size[0]) // 2, (y_res - img.size[1]) // 2)
                             )
            img = background
        yield img
def convert(image):
    """Convert *image* to inverted 1-bit (dithered) pixel data.

    :param image: PIL Image object
    :return: numpy array of pixel values with black/white swapped
    """
    # PIL's mode "1" conversion dithers down to black (0) and white (255).
    monochrome = image.convert('1').getdata()
    pixels = numpy.asarray(monochrome)
    # XOR with 255 flips 0 <-> 255, inverting the image.
    return pixels ^ 255
| StarcoderdataPython |
106766 | <reponame>diCagri/content
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import os
import os.path
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# Remove proxy environment variables when the integration is configured not
# to use a proxy. Using .pop() with a default avoids the KeyError the
# original `del os.environ[...]` raised whenever a variable was unset.
if not demisto.params().get('proxy'):
    for proxy_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
        os.environ.pop(proxy_var, None)
''' GLOBALS/PARAMS '''
# Integration credentials and connection parameters, read once at load time.
USERNAME = demisto.params().get('credentials').get('identifier')
PASSWORD = demisto.params().get('credentials').get('password')
# Strip a trailing slash so BASE_PATH is always well-formed.
SERVER_URL = demisto.params().get('server')[:-1] if demisto.params().get('server').endswith('/') else \
    demisto.params().get('server')
FETCH_TIME = demisto.params().get('fetch_time', '3 days').strip()
FETCH_NOTIFICATIONS = demisto.params().get('fetch_notifications')
# NOTE(review): parameter key 'fetch_behviors' looks misspelled -- confirm
# it matches the key declared in the integration configuration.
FETCH_BEHAVIORS = demisto.params().get('fetch_behviors')
# Should we use SSL
USE_SSL = not demisto.params().get('unsecure', False)
# Service base URL
BASE_PATH = '{}/api/v2/'.format(SERVER_URL)
# Headers to be sent in requests
DEFAULT_HEADERS = {
    'Content-Type': 'application/json'
}
def http_request(method, suffix_url, headers=DEFAULT_HEADERS, body=None):
    """Issue an HTTP request to the CounterTack API and return the parsed
    JSON body.

    On any non-200 status the integration aborts via return_error with the
    parsed error message; a 200 response with a non-JSON body also aborts,
    reporting the raw content.
    """
    url = BASE_PATH + suffix_url
    response = requests.request(
        method,
        url,
        auth=(USERNAME, PASSWORD),
        headers=headers,
        verify=USE_SSL,
        data=body
    )
    # handle request failure
    if response.status_code not in {200}:
        message = parse_error_response(response)
        return_error('Error in API call to CounterTack with status code {}\n{}'.format(response.status_code, message))
    try:
        response = response.json()
    except Exception:
        return_error(response.content)
    return response
def parse_error_response(response):
    """Extract a human-readable error message from an HTTP error response.

    Appends the first detail entry when one carries a message; falls back
    to the raw response text when the body is not usable JSON.
    """
    try:
        payload = response.json()
        message = payload.get('message')
        details = payload.get('details')
        if details is not None and details[0].get('message') is not None:
            message = message + "\n" + json.dumps(details[0])
    except Exception:
        return response.text
    return message
"""
ENDPOINTS
"""
def get_endpoints_request():
    """GET /endpoints -- return the collection of all endpoints."""
    suffix_url = 'endpoints'
    response = http_request('GET', suffix_url)
    return response
def get_endpoints():
    """Command handler: list all endpoints.

    Builds a markdown summary table plus both the integration-specific
    context and the generic Demisto Endpoint context, then returns the
    entry via demisto.results.
    """
    data = []
    endpoint_standards = []
    endpoints = get_endpoints_request()
    for endpoint in endpoints:
        # Summary row for the human-readable table.
        data.append({
            'Id': endpoint.get('id'),
            'Name': endpoint.get('name'),
            'OS': endpoint.get('product_name'),
            'IP': endpoint.get('ips'),
            'Status': endpoint.get('status'),
            'Threat': endpoint.get('threat')
        })
        # Demisto standard Endpoint context fields.
        endpoint_standards.append({
            'Id': endpoint.get('id'),
            'IPAddress': endpoint.get('ips'),
            'Domain': endpoint.get('domain'),
            'MACAddress': endpoint.get('mac'),
            'OS': endpoint.get('product_name'),
            'OSVersion': endpoint.get('driver_version'),
            'Model': endpoint.get('current_profile'),
            'Memory': endpoint.get('memory'),
            'Processors': endpoint.get('num_cpus')
        })
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(endpoints,
                                                                           keyTransform=underscoreToCamelCase),
        'Endpoint': endpoint_standards
    }
    headers = ['OS', 'Name', 'Threat', 'Status', 'Id', 'IP']
    entry = {
        'Type': entryTypes['note'],
        'Contents': endpoints,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'CounterTack Endpoints', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def get_endpoint_request(endpoint_id):
    """GET /endpoints/<id> -- return a single endpoint's details."""
    suffix_url = 'endpoints/' + endpoint_id
    response = http_request('GET', suffix_url)
    return response
def get_endpoint():
    """Command handler: fetch one endpoint by ID and return its details.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint

    returns:
        The information about the specified endpoint via demisto.results.
    """
    endpoint_id = demisto.args().get('endpoint_id')
    response = get_endpoint_request(endpoint_id)
    content = {
        'OS': response.get('product_name'),
        'Domain': response.get('domain'),
        # NOTE(review): the list command reads 'ips'; singular 'ip' here --
        # confirm which key the API actually returns.
        'IP': response.get('ip'),
        'Threat': response.get('threat'),
        'MaxImpact': response.get('max_impact'),
        'TenantID': response.get('tenant'),
        'IsQuarantined': response.get('is_quarantined'),
        'Profile': response.get('current_profile'),
        'Cluster_hosts': response.get('cluster_hosts'),
        'Status': response.get('status'),
        'Tags': response.get('tags')
    }
    # Demisto standard Endpoint context fields.
    endpoint_standards = {
        'Id': response.get('id'),
        'IPAddress': response.get('ips'),
        'Domain': response.get('domain'),
        'MACAddress': response.get('mac'),
        'OS': response.get('product_name'),
        'OSVersion': response.get('driver_version'),
        'Model': response.get('current_profile'),
        'Memory': response.get('memory'),
        'Processors': response.get('num_cpus')
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
                                                                           keyTransform=underscoreToCamelCase),
        'Endpoint': endpoint_standards
    }
    headers = ['OS', 'Domain', 'IP', 'Threat', 'MaxImpact', 'TenantID', 'IsQuarantined',
               'Profile', 'Tags', 'Cluster_Hosts', 'Status']
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'CounterTack Endpoint information:', content, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
"""
ENDPOINTS TAGS
"""
def endpoint_tags_request(endpoint_id):
    """GET /endpoints/<id>/tags -- return the endpoint's tags."""
    suffix_url = 'endpoints/' + endpoint_id + '/tags'
    response = http_request('GET', suffix_url)
    return response
def get_endpoint_tags():
    """Command handler: fetch the tags of an endpoint and return them via
    demisto.results.

    demisto argument: (string) endpoint_id -- the unique ID of the endpoint.
    """
    endpoint_id = demisto.args().get('endpoint_id')
    response = endpoint_tags_request(endpoint_id)
    response = {
        'tags': response
    }
    # NOTE(review): 'tags' below wraps the already-wrapped response,
    # producing {'Id': ..., 'tags': {'tags': [...]}} in context -- confirm
    # the nesting is intended.
    tags_context = {
        'Id': endpoint_id,
        'tags': response
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(tags_context,
                                                                           keyTransform=underscoreToCamelCase)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('CounterTack tags for the specified endpoint:', response, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def add_tags_request(endpoint_id, body):
    """POST /endpoints/<id>/tags -- add the tags in *body* to the endpoint."""
    suffix_url = 'endpoints/' + endpoint_id + '/tags'
    response = http_request('POST', suffix_url, body=json.dumps(body))
    return response
def add_tags():
    """Command handler: add tags to an endpoint, then return the endpoint's
    current tags via demisto.results.

    demisto arguments: (string) endpoint_id, (array) tags.
    """
    endpoint_id = demisto.args().get('endpoint_id')
    body = argToList(demisto.args().get('tags'))
    # The add-call result is discarded: the tag list is re-fetched so the
    # output reflects the endpoint's state after the update.
    response = add_tags_request(endpoint_id, body)
    response = endpoint_tags_request(endpoint_id)
    response = {
        'tags': response,
        'Id': endpoint_id
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown("Endpoint tags were added successfully", response),
        'EntryContext': context
    }
    demisto.results(entry)
def delete_tags_request(endpoint_id, body):
    """
    Remove specific tags from the specified endpoint.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (array) body
        The tags to delete from the endpoint
    """
    return http_request('DELETE', 'endpoints/' + endpoint_id + '/tags', body=json.dumps(body))
def delete_tags():
    """
    Delete tags from the specified endpoint.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (array) tags
        The tags to delete from the endpoint
    """
    args = demisto.args()
    endpoint_id = args.get('endpoint_id')
    delete_tags_request(endpoint_id, argToList(args.get('tags')))
    # Re-read the endpoint tags so the entry shows the remaining set.
    remaining = endpoint_tags_request(endpoint_id)
    response = {
        'tags': remaining,
        'Id': endpoint_id
    }
    entry_context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'Endpoint tags were deleted successfully', response),
        'EntryContext': entry_context
    })
"""
ENDPOINTS COMMANDS
"""
def endpoint_quarantine_request(endpoint_id, body):
    """
    POST a quarantine-related command to the endpoint's commands collection.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) type
        The type of the command: quarantine / lift_quarantine
    """
    return http_request('POST', 'endpoints/' + endpoint_id + '/commands', body=json.dumps(body))
def endpoint_quarantine():
    """
    Prevents an endpoint(s) from any network communication, but maintains a connection to the Sentinel Cluster
    and addresses defined in the Global Whitelist.
    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) type
        The type of the command: quarantine
    """
    endpoint_id = demisto.args().get('endpoint_id')
    body = {
        'type': 'quarantine'
    }
    response = endpoint_quarantine_request(endpoint_id, body)
    # Re-fetch the endpoint so the context reflects the server-side quarantine flag.
    quarantine_response = get_endpoint_request(endpoint_id)
    quarantine_context = {
        'Id': endpoint_id,
        # NOTE(review): key is 'is_quarantine' while the API field is
        # 'is_quarantined' - kept for backward compatibility; confirm intent.
        'is_quarantine': quarantine_response.get('is_quarantined')
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
                                                                           keyTransform=underscoreToCamelCase)
    }
    # Human-readable summary describes the issued command, not the quarantine state.
    data = {
        'Id': response.get('id'),
        'user name': response.get('username'),
        'request time': response.get('request_time'),
        'endpoint ID': response.get('endpoint_ids'),
        'command name': response.get('command_name'),
        'status': response.get('status'),
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': quarantine_context,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def disable_quarantine():
    """
    Allows a previously quarantined endpoint to communicate with the network.
    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) type
        The type of the command: lift_quarantine
    """
    endpoint_id = demisto.args().get('endpoint_id')
    body = {
        'type': 'lift_quarantine'
    }
    response = endpoint_quarantine_request(endpoint_id, body)
    # Re-fetch the endpoint so the context reflects the server-side quarantine flag.
    quarantine_response = get_endpoint_request(endpoint_id)
    quarantine_context = {
        'Id': endpoint_id,
        # NOTE(review): key is 'is_quarantine' while the API field is
        # 'is_quarantined' - kept for backward compatibility; confirm intent.
        'is_quarantine': quarantine_response.get('is_quarantined')
    }
    # Human-readable summary describes the issued command, not the quarantine state.
    data = {
        'Id': response.get('id'),
        'user name': response.get('username'),
        'request time': response.get('request_time'),
        'endpoint ID': response.get('endpoint_ids'),
        'command name': response.get('command_name'),
        'status': response.get('status'),
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(quarantine_context,
                                                                           keyTransform=underscoreToCamelCase)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': quarantine_context,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('The command has been applied successfully:', data, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def file_extract_request(endpoint_id, body):
    """POST an extract_files command for the specified endpoint and return the API response."""
    return http_request('POST', 'endpoints/' + endpoint_id + '/commands', body=json.dumps(body))
def extract_file():
    """
    Extract a file (plus some file metadata) from the specified endpoint.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) file_path
        The path(s) of the file(s) to extract
    """
    args = demisto.args()
    endpoint_id = args.get('endpoint_id')
    command_body = {
        'type': 'extract_files',
        'paths': argToList(args.get('file_path'))
    }
    response = file_extract_request(endpoint_id, command_body)
    readable = {
        'Id': response.get('id'),
        'User Name': response.get('username'),
        'Request Time': response.get('request_time'),
        'Endpoint ID': response.get('endpoint_ids'),
        'Command Name': response.get('command_name'),
        'Command Arguments': response.get('command_arg'),
        'Status': response.get('status'),
    }
    entry_context = {
        'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    headers = ['Id', 'User Name', 'Request Time', 'Endpoint ID', 'Command Name', 'Command Arguments', 'Status']
    demisto.results({
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'The file has been extracted successfully:', readable, headers, removeNull=True),
        'EntryContext': entry_context
    })
def delete_file_request(endpoint_id, body):
    """POST a delete_file command for the specified endpoint and return the API response."""
    return http_request('POST', 'endpoints/' + endpoint_id + '/commands', body=json.dumps(body))
def delete_file():
    """
    Delete a file from the specified endpoint.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) file_path
        The path of the file to delete
    """
    args = demisto.args()
    delete_file_request(args.get('endpoint_id'), {
        'type': 'delete_file',
        'path': args.get('file_path')
    })
    demisto.results('The file has been deleted successfully')
def kill_process_request(endpoint_id, body):
    """
    Request to terminate all instances of the process identified in the command.
    """
    suffix_url = 'endpoints/' + endpoint_id + '/commands'
    response = http_request('POST', suffix_url, body=json.dumps(body))
    return response
def kill_process():
    """
    Terminates all instances of the process identified in the command.
    Processes can be identified by the PID or process name.
    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    demisto parameter: (string) process_id
        The ID of the process to terminate
    demisto parameter: (string) process_name
        The name of the process to terminate
    """
    endpoint_id = demisto.args().get('endpoint_id')
    pid = demisto.args().get('process_id')
    name = demisto.args().get('process_name')
    if not pid and not name:
        # return_error reports the problem and stops command execution.
        return_error('Please provide either process_id or process_name')
    # Both identifiers are sent; whichever was omitted is None.
    body = {
        'type': 'kill_process',
        'pid': pid,
        'name': name
    }
    response = kill_process_request(endpoint_id, body)
    data = {
        'Id': response.get('id'),
        'User Name': response.get('username'),
        'Request Time': response.get('request_time'),
        'Endpoint ID': response.get('endpoint_ids'),
        'Command Name': response.get('command_name'),
        'Status': response.get('status'),
    }
    context = {
        'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(response,
                                                                           keyTransform=underscoreToCamelCase,
                                                                           removeNull=True)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'The process has been terminated', data, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
"""
ENDPOINT FILES
"""
def file_request():
    """Retrieve all extracted files for all endpoints on the cluster."""
    return http_request('GET', 'endpoints/files')
def get_all_files():
    """Retrieve all extracted files for every endpoint on the cluster and post them as a Demisto entry."""
    data = []
    files_standards = []
    files = file_request()
    for file in files:
        # Summary rows for the markdown table (raw API field names as keys).
        data.append({
            'Id': file.get('id'),
            'user': file.get('user'),
            'endpoint_id': file.get('endpoint_id'),
            'path': file.get('path'),
            'extraction_time': file.get('extraction_time'),
            'Status': file.get('status')
        })
        # File indicator fields for the standard Demisto File context path.
        files_standards.append({
            'Size': file.get('size'),
            'MD5': file.get('md5'),
            'SHA256': file.get('sha256'),
            'SSDeep': file.get('ssdeep'),
            'Path': file.get('path')
        })
    context = {
        'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
        outputPaths['file']: files_standards
    }
    headers = ['Status', 'Id', 'path', 'endpoint_id', 'extraction_time', 'user']
    entry = {
        'Type': entryTypes['note'],
        'Contents': files,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'CounterTack Endpoints Files', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def endpoint_files_request(endpoint_id):
    """Return all extracted files of the specified endpoint."""
    return http_request('GET', 'endpoints/' + endpoint_id + '/files')
def get_endpoint_files():
    """
    Return the extracted files of a specific endpoint as a Demisto entry.

    demisto parameter: (string) endpoint_id
        The unique ID of the endpoint
    """
    endpoint_id = demisto.args().get('endpoint_id')
    data = []
    files_standards = []
    files = endpoint_files_request(endpoint_id)
    for file in files:
        # Summary rows for the markdown table.
        data.append({
            'Id': file.get('id'),
            'User': file.get('user'),
            'EndpointId': file.get('endpoint_id'),
            'Path': file.get('path'),
            'ExtractionTime': file.get('extraction_time'),
            'Status': file.get('status')
        })
        # File indicator fields for the standard Demisto File context path.
        files_standards.append({
            'Size': file.get('size'),
            'MD5': file.get('md5'),
            'SHA256': file.get('sha256'),
            'SSDeep': file.get('ssdeep'),
            'Path': file.get('path')
        })
    context = {
        'CounterTack.File(val.Id && val.Id === obj.Id)': createContext(files, keyTransform=underscoreToCamelCase),
        outputPaths['file']: files_standards
    }
    # Bug fix: headers previously used the raw API field names ('path',
    # 'endpoint_id', ...), which do not match the keys built above, so those
    # columns were silently dropped from the markdown table.
    headers = ['Status', 'Id', 'Path', 'EndpointId', 'ExtractionTime', 'User']
    entry = {
        'Type': entryTypes['note'],
        'Contents': data,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(
            'The extracted files from the endpoint:', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def file_information_request(file_id):
    """Fetch the metadata of a specific extracted file."""
    return http_request('GET', 'endpoints/files/' + file_id)
def get_file_information():
    """
    Get the information of a specific file.
    demisto parameter: (string) file_id
        The unique ID of the extracted file
    """
    context = {}
    files_standards = []
    file_id = demisto.args().get('file_id')
    response = file_information_request(file_id)
    # Summary fields for the markdown table (raw API field names as keys).
    data = {
        'endpoint_name': response.get('endpoint_name'),
        'path': response.get('path'),
        'size': response.get('size'),
        'extraction_time': response.get('extraction_time'),
        'status': response.get('status')
    }
    # File indicator fields for the standard Demisto File context path.
    files_standards.append({
        'Size': response.get('size'),
        'MD5': response.get('md5'),
        'SHA256': response.get('sha256'),
        'SSDeep': response.get('ssdeep'),
        'Path': response.get('path')
    })
    context['CounterTack.File(val.Id && val.Id === obj.Id)'] = createContext(response,
                                                                             keyTransform=underscoreToCamelCase)
    context[outputPaths['file']] = files_standards
    headers = ['endpoint_name', 'path', 'size', 'status', 'extraction_time']
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('CounterTack File Information:', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def download_file_request(file_id):
    """Download an extracted file and return the raw API response."""
    return http_request('GET', 'downloads/extractedfiles/' + file_id)
def download_file():
    """
    Download an extracted file in a ZIP format.
    demisto parameter: (string) file_id
        The unique ID of the extracted file
    """
    file_id = demisto.args().get('file_id')
    response = download_file_request(file_id)
    # NOTE(review): .content implies http_request returns the raw response
    # object for this endpoint (unlike the parsed JSON used elsewhere) - confirm.
    demisto.results(fileResult(file_id + '.zip', response.content))
"""
BEHAVIORS
"""
def get_behaviors_request():
    """Retrieve information on the full collection of behaviors."""
    return http_request('GET', 'behaviors')
def get_behaviors():
    """
    Retrieve information on a collection of behaviors and post them as a Demisto entry.
    """
    data = []
    behaviors = get_behaviors_request()
    for behavior in behaviors:
        # Summary rows for the markdown table.
        data.append({
            'Id': behavior.get('id'),
            'Name': behavior.get('name'),
            'Type': behavior.get('type'),
            'ImpactLevel': behavior.get('impact_level'),
            'lastReported': behavior.get('last_reported'),
            'EndpointId': behavior.get('endpoint_id')
        })
    context = {
        'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(behaviors,
                                                                           keyTransform=underscoreToCamelCase)
    }
    headers = ['Name', 'Id', 'Type', 'ImpactLevel', 'EndpointId', 'lastReported']
    entry = {
        'Type': entryTypes['note'],
        'Contents': behaviors,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('CounterTack Endpoints Behaviors', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
def get_behavior_request(behavior_id):
    """Fetch the specified behavior from the API."""
    return http_request('GET', 'behaviors/' + behavior_id)
def get_behavior():
    """
    Get behavior information.
    demisto parameter: (string) behavior_id
        The unique ID of the behavior
    """
    behavior_id = demisto.args().get('behavior_id')
    response = get_behavior_request(behavior_id)
    # Summary fields for the markdown table.
    data = {
        'Id': response.get('id'),
        'Name': response.get('name'),
        'ImpactLevel': response.get('impact_level'),
        'LastActive': response.get('last_active'),
        'EventCount': response.get('event_count'),
        'MaxImpact': response.get('max_impact'),
        'EndpointId': response.get('endpoint_id'),
        'Type': response.get('type'),
    }
    context = {
        'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    headers = ['Name', 'Id', 'ImpactLevel', 'MaxImpact', 'EventCount', 'Type', 'EndpointId', 'LastActive']
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('CounterTack Behavior information', data, headers, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
"""
BEHAVIORS TAGS
"""
def behaviour_add_tags_request(behaviour_id, body):
    """POST the given tag list to the specified behavior's tags collection."""
    return http_request('POST', 'behaviors/' + behaviour_id + '/tags', body=json.dumps(body))
def add_behavior_tags():
    """
    Add specific tags to the specified behavior.
    demisto parameter: (string) behaviour_id
        The unique ID of the behavior
    demisto parameter: (array) tags
        The tags to add to the behavior, comma separated
    """
    # NOTE(review): the argument key is the British spelling 'behaviour_id';
    # confirm it matches the command definition in the integration YAML.
    behaviour_id = demisto.args().get('behaviour_id')
    body = argToList(demisto.args().get('tags'))
    response = behaviour_add_tags_request(behaviour_id, body)
    # Re-read the behavior so the entry reflects the tags stored server-side.
    behavior_tags = get_behavior_request(behaviour_id)
    response = {
        'tags': behavior_tags.get('tags'),
        'Id': behaviour_id
    }
    context = {
        'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Behavior tags were added successfully', response),
        'EntryContext': context
    }
    demisto.results(entry)
def delete_tags_behavior_request(behaviour_id, body):
    """Remove the given tags from the specified behavior."""
    return http_request('DELETE', 'behaviors/' + behaviour_id + '/tags', body=json.dumps(body))
def delete_behavior_tags():
    """
    Delete specific tags from a behavior.
    demisto parameter: (string) behaviour_id
        The unique ID of the behavior
    demisto parameter: (array) tags
        The tags to delete from the behavior, comma separated
    """
    behaviour_id = demisto.args().get('behaviour_id')
    body = argToList(demisto.args().get('tags'))
    response = delete_tags_behavior_request(behaviour_id, body)
    # Re-read the behavior so the entry shows the remaining tags.
    response = get_behavior_request(behaviour_id)
    response = {
        'tags': response.get('tags'),
        'Id': behaviour_id
    }
    context = {
        'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(response, keyTransform=underscoreToCamelCase)
    }
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        # Bug fix: the title previously said 'Endpoint tags' although this
        # command operates on behavior tags.
        'HumanReadable': tableToMarkdown('Behavior tags were deleted successfully', response, removeNull=True),
        'EntryContext': context
    }
    demisto.results(entry)
"""
SEARCH
"""
def search_endpoints_request(exp):
    """Search endpoints with the given CQL expression (query-string fragment)."""
    return http_request('GET', 'search/endpoints' + exp)
def search_behaviors_request(exp):
    """Search behaviors with the given CQL expression (query-string fragment)."""
    return http_request('GET', 'search/behaviors' + exp)
def search_events_request(exp):
    """Search events with the given CQL expression (query-string fragment)."""
    return http_request('GET', 'search/events' + exp)
def search_events():
    """
    Search events using a CQL expression.

    demisto parameter: (string) expression
        The CQL expression to be used for the search
    """
    data = []
    expression = demisto.args().get('expression')
    exp = '?expression=' + expression
    events = search_events_request(exp)
    if events.get('results'):
        results = events.get('results')
        results_lst = list()
        # Strip the 'events.' prefix the search API prepends to every field name.
        for i in range(len(results)):
            results_lst.append({k.replace('events.', ''): v for k, v in results[i].items()})
        events['results'] = results_lst
        for event in events.get('results'):
            data.append({
                'Id': event.get('id'),
                'Events Action': event.get('action'),
                'Events Impact': event.get('impact'),
                'Events EndpointID': event.get('endpoint_id'),
                'Event Type': event.get('event_type'),
                'Collected time': event.get('time_stamp'),
                'Source process PID': event.get('source_process_pid'),
                'Source process name': event.get('source_process_name')
            })
        context = {
            'CounterTack.Event(val.Id && val.Id === obj.Id)': createContext(results_lst,
                                                                            keyTransform=underscoreToCamelCase,
                                                                            removeNull=True)
        }
        # Bug fix: the first header was 'ID' while the data key is 'Id', so the
        # Id column was always empty in the markdown table.
        headers = ['Id', 'Event Type', 'Events Action', 'Events EndpointID', 'Events Impact',
                   'Collected time', 'Source process PID', 'Source process name']
        entry = {
            'Type': entryTypes['note'],
            'Contents': results_lst,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Results of the events search', data, headers, removeNull=True),
            'EntryContext': context
        }
        demisto.results(entry)
    else:
        demisto.results('No results found')
def search_endpoints():
    """
    Search endpoints using a CQL expression.

    demisto parameter: (string) expression
        The CQL expression to be used for the search
    """
    data = []
    endpoint_standards = []
    expression = demisto.args().get('expression')
    exp = '?expression=' + expression
    endpoints = search_endpoints_request(exp)
    if endpoints.get('results'):
        results = endpoints.get('results')
        results_lst = list()
        # Strip the 'endpoints.' prefix the search API prepends to every field name.
        for i in range(len(results)):
            results_lst.append({k.replace('endpoints.', ''): v for k, v in results[i].items()})
        endpoints['results'] = results_lst
        for endpoint in endpoints.get('results'):
            data.append({
                'Id': endpoint.get('id'),
                'Name': endpoint.get('name'),
                'OS': endpoint.get('product_name'),
                'IP': endpoint.get('ips'),
                'Status': endpoint.get('status'),
                'Threat': endpoint.get('threat')
            })
            # Standard Demisto Endpoint context fields.
            endpoint_standards.append({
                'Id': endpoint.get('id'),
                'IPAddress': endpoint.get('ips'),
                'Domain': endpoint.get('domain'),
                'MACAddress': endpoint.get('mac'),
                'OS': endpoint.get('product_name'),
                'OSVersion': endpoint.get('driver_version'),
                'Model': endpoint.get('current_profile'),
                'Memory': endpoint.get('memory'),
                'Processors': endpoint.get('num_cpus')
            })
        context = {
            'CounterTack.Endpoint(val.Id && val.Id === obj.Id)': createContext(results_lst,
                                                                               keyTransform=underscoreToCamelCase,
                                                                               removeNull=True),
            'Endpoint': endpoint_standards
        }
        # Bug fix: 'Events Impact' (copied from the events search) is not a key
        # of the endpoint rows built above, so it only produced an empty column.
        headers = ['Status', 'Name', 'Id', 'OS', 'Threat', 'IP']
        entry = {
            'Type': entryTypes['note'],
            'Contents': results_lst,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Results of the endpoints search', data, headers, removeNull=True),
            'EntryContext': context
        }
        demisto.results(entry)
    else:
        demisto.results('No results found')
def search_behaviors():
    """
    Search behaviors using a CQL expression.
    demisto parameter: (string) expression
        The CQL expression to be used for the search
    """
    data = []
    expression = demisto.args().get('expression')
    exp = '?expression=' + expression
    behaviors = search_behaviors_request(exp)
    if behaviors.get('results'):
        results = behaviors.get('results')
        results_lst = list()
        # Strip the 'behaviors.' prefix the search API prepends to every field name.
        for i in range(len(results)):
            results_lst.append({k.replace('behaviors.', ''): v for k, v in results[i].items()})
        behaviors['results'] = results_lst
        for behavior in behaviors.get('results'):
            data.append({
                'Id': behavior.get('id'),
                'Name': behavior.get('name'),
                'Type': behavior.get('type'),
                'Impact_Level': behavior.get('impact_level'),
                'lastReported': behavior.get('last_reported'),
                'EndpointID': behavior.get('endpoint_id')
            })
        context = {
            'CounterTack.Behavior(val.Id && val.Id === obj.Id)': createContext(results_lst,
                                                                               keyTransform=underscoreToCamelCase,
                                                                               removeNull=True)
        }
        headers = ['Name', 'Type', 'Impact_Level', 'Id', 'EndpointID', 'lastReported']
        entry = {
            'Type': entryTypes['note'],
            'Contents': results_lst,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Results of the behaviors search', data, headers, removeNull=True),
            'EntryContext': context
        }
        demisto.results(entry)
    else:
        demisto.results('No results found')
def hashes_search_request(exp):
    """Search hashes with the given CQL expression (query-string fragment)."""
    return http_request('GET', 'search/hashes' + exp)
def search_hashes():
    """
    Search hashes using a CQL expression.
    demisto parameter: (string) expression
        The CQL expression to be used for the search
    """
    data = []
    file_standards = []
    expression = demisto.args().get('expression')
    exp = '?expression=' + expression
    hashes = hashes_search_request(exp)
    if hashes.get('results'):
        results = hashes.get('results')
        results_lst = list()
        # Strip the 'hashes.' prefix the search API prepends to every field name.
        for i in range(len(results)):
            results_lst.append({k.replace('hashes.', ''): v for k, v in results[i].items()})
        hashes['results'] = results_lst
        for hash_type in hashes.get('results'):
            # Hash type becomes the key ('MD5', 'SHA256', 'SSDeep'); the hash
            # value itself is the record id.
            file_hash_type = hash_type.get('type', '').upper()
            if file_hash_type == 'SSDEEP':
                file_hash_type = 'SSDeep'
            hash_id = hash_type.get('id')
            data.append({
                file_hash_type: hash_id,
                'Type': file_hash_type,
                'Impact': hash_type.get('impact'),
                'VT report location': hash_type.get('vt_report_location'),
                'AV Coverage': hash_type.get('av_coverage')
            })
            if file_hash_type:
                file_standards.append({
                    file_hash_type: hash_id
                })
        context = {
            'CounterTack.Hash(val.hash_id && val.hash_id === obj.hash_id)': createContext(data),
            outputPaths['file']: file_standards
        }
        entry = {
            'Type': entryTypes['note'],
            'Contents': results_lst,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Results of the hashes search:', data, removeNull=True),
            'EntryContext': context
        }
        demisto.results(entry)
    else:
        demisto.results('No results found')
"""
FETCH INCIDENTS
"""
def search_notifications_request(params=''):
    """Search notifications with the given CQL expression."""
    return http_request('GET', 'search/notifications?expression=' + params)
def fetch_behaviors_request(params=''):
    """Search behaviors with the given CQL expression (used by fetch-incidents)."""
    return http_request('GET', 'search/behaviors?expression=' + params)
def fetch_incidents():
    """Fetch behaviors and/or notifications newer than the last run and create Demisto incidents."""
    incidents = []
    last_run = demisto.getLastRun()
    # Bug fix: use .get() so a non-empty last-run dict without 'time_stamp'
    # does not raise KeyError.
    if last_run and last_run.get('time_stamp'):
        last_update_time = last_run['time_stamp']
    else:
        # In first run
        # NOTE(review): the [:-3] slices the *format string* (dropping '.%f'),
        # not the formatted timestamp - presumably intentional, but confirm.
        last_update_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%S.%f'[:-3])
    max_timestamp = last_update_time
    if FETCH_BEHAVIORS:
        params = 'behaviors.time_stamp>' + last_update_time
        behaviors = fetch_behaviors_request(params)
        for behavior in behaviors.get('results'):
            incident = behavior_to_incident(behavior)
            # Strip the trailing suffix so timestamps compare lexicographically
            # against last_update_time.
            time_stamp = behavior.get('behaviors.time_stamp')[:-5]
            if time_stamp > max_timestamp:
                max_timestamp = time_stamp
            incidents.append(incident)
    if FETCH_NOTIFICATIONS:
        params = 'notifications.time_stamp>' + last_update_time
        notifications = search_notifications_request(params)
        for notification in notifications.get('results'):
            incident = notifications_to_incidents(notification)
            time_stamp = notification.get('notifications.time_stamp')[:-5]
            if time_stamp > max_timestamp:
                max_timestamp = time_stamp
            incidents.append(incident)
    demisto.setLastRun({
        'time_stamp': max_timestamp
    })
    demisto.incidents(incidents)
def behavior_to_incident(behavior):
    """Map a raw behavior search result to a Demisto incident dict."""
    return {
        'name': 'CounterTack Behavior - ' + behavior.get('behaviors.name'),
        'rawJSON': json.dumps(behavior),
    }
def notifications_to_incidents(notification):
    """Map a raw notification search result to a Demisto incident dict."""
    return {
        'name': 'CounterTack Notification - ' + notification.get('notifications.message'),
        'rawJSON': json.dumps(notification),
    }
"""
EXECUTION
"""
# Command dispatch: route the invoked Demisto command to its handler.
command = demisto.command()
LOG('Running command "{}"'.format(command))
try:
    if command == 'test-module':
        get_endpoints_request()
        demisto.results('ok')
    elif command == 'fetch-incidents':
        fetch_incidents()
    elif command == 'countertack-get-endpoints':
        get_endpoints()
    elif command == 'countertack-get-endpoint':
        get_endpoint()
    elif command == 'countertack-get-endpoint-tags':
        get_endpoint_tags()
    elif command == 'countertack-add-tags':
        add_tags()
    elif command == 'countertack-delete-tags':
        delete_tags()
    elif command == 'countertack-endpoint-quarantine':
        endpoint_quarantine()
    elif command == 'countertack-disable-quarantine':
        disable_quarantine()
    elif command == 'countertack-extract-file':
        extract_file()
    elif command == 'countertack-delete-file':
        delete_file()
    elif command == 'countertack-get-all-files':
        get_all_files()
    elif command == 'countertack-get-endpoint-files':
        get_endpoint_files()
    elif command == 'countertack-get-file-information':
        get_file_information()
    elif command == 'countertack-download-file':
        download_file()
    elif command == 'countertack-get-behaviors':
        get_behaviors()
    elif command == 'countertack-get-behavior':
        get_behavior()
    elif command == 'countertack-add-behavior-tags':
        add_behavior_tags()
    elif command == 'countertack-delete-behavior-tags':
        delete_behavior_tags()
    elif command == 'countertack-search-events':
        search_events()
    elif command == 'countertack-search-hashes':
        search_hashes()
    elif command == 'countertack-search-endpoints':
        search_endpoints()
    elif command == 'countertack-search-behaviors':
        search_behaviors()
    elif command == 'countertack-kill-process':
        kill_process()
except Exception as e:
    # Bug fix: LOG was unreachable after return_error (which terminates the
    # script), and e.message does not exist on Python 3 exceptions.
    LOG(e)
    return_error(str(e))
| StarcoderdataPython |
1709263 | <gh_stars>0
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for getting target pools."""
from googlecloudsdk.compute.lib import base_classes
class GetTargetPools(base_classes.RegionalGetter):
  """Get target pools."""

  @staticmethod
  def Args(parser):
    # Register the standard regional-getter flags plus the fields flag
    # scoped to the targetPools collection.
    base_classes.RegionalGetter.Args(parser)
    base_classes.AddFieldsFlag(parser, 'targetPools')

  @property
  def service(self):
    # API service used to fetch the resources.
    return self.context['compute'].targetPools

  @property
  def print_resource_type(self):
    return 'targetPools'
# Help text surfaced by gcloud for this command (brief line + long description).
GetTargetPools.detailed_help = {
'brief': 'Get Google Compute Engine target pools',
'DESCRIPTION': """\
*{command}* displays all data associated with Google Compute
Engine target pools in a project.
By default, target pools from all regions are fetched. The results can
be narrowed down by providing ``--region''.
""",
}
| StarcoderdataPython |
1762884 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Main dialog to welcome users."""
import json
import os.path
from typing import List
from botbuilder.dialogs import Dialog
from botbuilder.core import (
TurnContext,
ConversationState,
UserState,
BotTelemetryClient,
)
from botbuilder.schema import Activity, Attachment, ChannelAccount
from helpers.activity_helper import create_activity_reply
from .dialog_bot import DialogBot
class DialogAndWelcomeBot(DialogBot):
    """Main dialog to welcome users."""
    def __init__(
        self,
        conversation_state: ConversationState,
        user_state: UserState,
        dialog: Dialog,
        telemetry_client: BotTelemetryClient,
    ):
        super(DialogAndWelcomeBot, self).__init__(
            conversation_state, user_state, dialog, telemetry_client
        )
        # NOTE(review): likely redundant if DialogBot.__init__ already stores
        # telemetry_client - confirm before removing.
        self.telemetry_client = telemetry_client
    async def on_members_added_activity(
        self, members_added: List[ChannelAccount], turn_context: TurnContext
    ):
        """Send the welcome card to every member newly added to the conversation."""
        for member in members_added:
            # Greet anyone that was not the target (recipient) of this message.
            # To learn more about Adaptive Cards, see https://aka.ms/msbot-adaptivecards
            # for more details.
            if member.id != turn_context.activity.recipient.id:
                welcome_card = self.create_adaptive_card_attachment()
                response = self.create_response(turn_context.activity, welcome_card)
                await turn_context.send_activity(response)
    def create_response(self, activity: Activity, attachment: Attachment):
        """Create an attachment message response."""
        response = create_activity_reply(activity)
        response.attachments = [attachment]
        return response
    # Load attachment from file.
    def create_adaptive_card_attachment(self) -> Attachment:
        """Create an adaptive card from resources/welcomeCard.json (relative to this module)."""
        relative_path = os.path.abspath(os.path.dirname(__file__))
        path = os.path.join(relative_path, "resources/welcomeCard.json")
        with open(path) as card_file:
            card = json.load(card_file)
        return Attachment(
            content_type="application/vnd.microsoft.card.adaptive", content=card
        )
| StarcoderdataPython |
1627004 | <gh_stars>0
# -*- coding: utf-8 -*-
import torch
import matplotlib.pyplot as plt
import os.path as osp
def visualize_batch(
    batch, labels=None, save_dir='', figname='figure', ncols=4, figsize=(4, 4)
):
    """Visualise a training batch as a grid of images saved to disk.

    Args:
        batch (torch.Tensor): images of shape (B, C, H, W); un-normalized via
            ``unnormalize`` (ImageNet mean/std) before display.
        labels: optional sequence indexable by image position, used as subplot
            titles.
        save_dir (str): directory the figure is written to.
        figname (str): file name without extension; saved as JPEG.
        ncols (int): number of grid columns.
        figsize (tuple): per-subplot (width, height) in inches.

    Note: requires at least two full rows and two columns (the asserts below),
    and any trailing images beyond nrows*ncols are silently dropped.
    """
    nrows = batch.size(0) // ncols
    assert nrows > 1
    assert ncols > 1
    fig, ax = plt.subplots(
        nrows=nrows, ncols=ncols, figsize=(ncols * figsize[0], nrows * figsize[1])
    )
    images = batch.detach().cpu()
    images = unnormalize(images, use_gpu=False)
    # (B, C, H, W) -> (B, H, W, C) for matplotlib's imshow.
    images = images.numpy().transpose((0, 2, 3, 1))
    for r in range(nrows):
        for c in range(ncols):
            index = r * ncols + c
            image = images[index]
            ax[r, c].imshow(image)
            if labels is not None:
                ax[r, c].set_title('{}'.format(labels[index]))
            ax[r, c].axis('off')
    # Save figure
    fig_path = osp.join(save_dir, figname + '.jpg')
    fig.savefig(fig_path, format='jpg', dpi=100, bbox_inches='tight', facecolor='w')
    plt.close(fig)
def unnormalize(
    batch_image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), use_gpu=False
):
    """Reverse the channel-wise normalization applied to a batch of images.

    Args:
        batch_image (torch.Tensor): normalized images of shape (B, 3, H, W).
        mean (sequence of float): per-channel mean used during normalization.
        std (sequence of float): per-channel std used during normalization.
        use_gpu (bool): move the mean/std tensors to CUDA before applying.

    Returns:
        torch.Tensor: batch with normalization undone (``x * std + mean``).
    """
    # Bug fix (B006): mutable list defaults replaced with tuples. (1, 3, 1, 1)
    # tensors broadcast over batch and spatial dims, replacing the explicit
    # view/expand dance of the previous implementation with identical results.
    t_mean = torch.FloatTensor(mean).view(1, 3, 1, 1)
    t_std = torch.FloatTensor(std).view(1, 3, 1, 1)
    if use_gpu:
        t_mean = t_mean.cuda()
        t_std = t_std.cuda()
    return batch_image * t_std + t_mean
| StarcoderdataPython |
192320 | import os
import shutil
import numpy as np
from util import log_util
log = log_util.get_logger("file process")
def create_blank_file(file_name):
    '''
    Create (or truncate) ``file_name`` as an empty file.
    :param file_name: path of the file to create
    :return:
    '''
    with open(file_name, 'w') as handle:
        handle.write("")
    log.debug("blank file %s created.." % file_name)
def read_file_list_from_path(path, file_type=None, if_recursive=False):
    '''
    Collect directory entries under ``path``, sorted by full path.

    In recursive mode only regular files are returned and sub-directories
    are descended into; in flat mode every entry whose name matches is
    returned. ``file_type`` (e.g. ``".txt"``) filters by suffix when given.
    :param path: directory to scan
    :param file_type: optional suffix filter
    :param if_recursive: descend into sub-directories when True
    :return: sorted list of matching paths
    '''
    collected = []
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if if_recursive:
            if os.path.isdir(full_path):
                collected += read_file_list_from_path(full_path, file_type,
                                                      if_recursive)
            elif os.path.isfile(full_path):
                if not file_type or str(full_path).endswith(file_type):
                    collected.append(full_path)
        else:
            if file_type is None or entry.endswith(file_type):
                collected.append(full_path)
    collected.sort()
    return collected
def read_file_by_line(filename):
    '''
    Read a text file and return its non-blank lines.
    :param filename: file to read
    :return: list of stripped, non-empty line contents, in file order
    '''
    with open(filename, 'r') as handle:
        stripped = (raw.strip() for raw in handle.readlines())
        return [content for content in stripped if content]
def write2file(content, save_file):
    '''
    Persist ``content`` to ``save_file``.

    A list is written one element at a time, each terminated by a
    carriage return; any other content is written verbatim.
    :param content: string or list of strings to write
    :param save_file: destination file path
    :return:
    '''
    with open(save_file, "w") as handle:
        if not isinstance(content, list):
            handle.write(content)
        else:
            for item in content:
                handle.write(item + "\r")
    log.debug(" write content to %s " % save_file)
def copy_filepath(src_path, target_path):
    '''
    Recursively copy the directory tree at ``src_path`` to ``target_path``.
    :param src_path: source directory
    :param target_path: destination directory (shutil.copytree requires
                        that it not already exist)
    :return:
    '''
    shutil.copytree(src_path, target_path)
    log.debug("copy directory from %s to %s finished.." % (src_path, target_path))
def del_path_list(path_list):
    '''
    Delete every file or directory named in ``path_list``.

    Entries that do not exist are skipped; directories are removed
    recursively (errors ignored), plain files are unlinked.
    :param path_list: file path (or file) list to be deleted
    :return:
    '''
    for target in path_list:
        if not os.path.exists(target):
            continue
        if os.path.isdir(target):
            shutil.rmtree(target, ignore_errors=True)
        elif os.path.isfile(target):
            os.remove(target)
        log.debug(" file path %s was deleted" % target)
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
    """Build ``file_dir/<id><extension>`` paths for every given file ID.

    When ``new_dir_switch`` is True the directory is created first if it
    does not yet exist. Paths are joined with a literal '/' to preserve
    the historical output format.
    """
    if new_dir_switch and not os.path.exists(file_dir):
        os.makedirs(file_dir)
    return ['{0}/{1}{2}'.format(file_dir, file_id, file_extension)
            for file_id in file_id_list]
def create_path_list(path_list):
    '''
    Create every directory named in ``path_list`` that does not yet exist.
    :param path_list: file path to be created in list
    :return:
    '''
    for target in path_list:
        if os.path.exists(target):
            continue
        os.mkdir(target)
        log.debug(" file path %s created " % target)
def read_binfile(filename, dim=60, dtype=np.float64):
    '''
    Read a binary file into a numpy array.

    :param filename: path of the binary file.
    :param dim: number of columns per frame; the element count in the
                file must be a multiple of ``dim``.
    :param dtype: element type stored on disk.
    :return: array reshaped to (-1, dim), cast to float64 and squeezed.
    :raises ValueError: if the element count is not a multiple of ``dim``.
    '''
    # Context manager guarantees the handle is closed even if reading or
    # validation fails (the original leaked the handle on the ValueError
    # path because close() was called before validation only by luck of
    # ordering, and never on a read exception).
    with open(filename, 'rb') as fid:
        v_data = np.fromfile(fid, dtype=dtype)
    if np.mod(v_data.size, dim) != 0:
        raise ValueError('Dimension provided not compatible with file size.')
    # Cast keeps compatibility with numpy's default dtype downstream.
    m_data = v_data.reshape((-1, dim)).astype('float64')
    return np.squeeze(m_data)
def write_binfile(m_data, filename, dtype=np.float64):
    '''
    Write a numpy array into a binary file.

    :param m_data: array-like data to persist (converted to ``dtype``).
    :param filename: destination path.
    :param dtype: element type used on disk.
    :return:
    '''
    m_data = np.array(m_data, dtype)
    # 'with' closes the handle even if tofile() raises; the original
    # leaked the descriptor on a write error.
    with open(filename, 'wb') as fid:
        m_data.tofile(fid)
    return
def array_to_binary_file(self, data, output_file_name):
    """Write ``data`` to ``output_file_name`` as raw float32 binary.

    NOTE(review): defined at module level yet takes ``self`` (unused) —
    it looks like a method pasted out of a class; the parameter is kept
    for interface compatibility.

    :param data: array-like data to persist.
    :param output_file_name: destination path.
    """
    data = np.array(data, 'float32')
    # 'with' closes the handle even if tofile() raises (the original
    # leaked it on a write error).
    with open(output_file_name, 'wb') as fid:
        data.tofile(fid)
def load_binary_file_frame(self, file_name, dimension):
    """Load a float32 binary file and reshape it into frames.

    NOTE(review): defined at module level yet takes ``self`` (unused) —
    apparently pasted out of a class; kept for interface compatibility.

    :param file_name: path of the float32 binary file.
    :param dimension: number of features per frame.
    :return: (features, frame_number) where features has shape
             (frame_number, dimension).
    """
    with open(file_name, 'rb') as fid_lab:
        features = np.fromfile(fid_lab, dtype=np.float32)
    assert features.size % float(dimension) == 0.0, 'specified dimension not compatible with data'
    # BUGFIX: use integer division. Under Python 3 the original
    # 'features.size / dimension' produced a float, so the slice
    # 'features[:(dimension * frame_number)]' raised on the non-integer
    # index.
    frame_number = features.size // dimension
    features = features[:(dimension * frame_number)]
    features = features.reshape((-1, dimension))
    return features, frame_number
if __name__ == "__main__":
    # Demo: recursively list everything under D:/test. The variable was
    # renamed from 'list' to avoid shadowing the builtin.
    file_list = read_file_list_from_path("D:/test", if_recursive=True)
    print(file_list)
| StarcoderdataPython |
1778495 | #!/usr/bin/env python
"""
##############################################
Testing Package Reliability Growth Data Module
##############################################
"""
# -*- coding: utf-8 -*-
#
# rtk.testing.growth.Growth.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Import modules for localization support.
import gettext
import locale
# Import modules for mathematics.
from math import exp, log, sqrt
import numpy as np
from scipy.optimize import fsolve
from scipy.stats import chi2 # pylint: disable=E0611
# Import other RTK modules.
try:
import Configuration
import Utilities
import analyses.statistics.Bounds as Bounds
import analyses.statistics.growth.CrowAMSAA as CrowAMSAA
import analyses.statistics.growth.SPLAN as SPLAN
from testing.Testing import Model as Testing
from testing.Testing import Testing as dtcTesting
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
import rtk.analyses.statistics.Bounds as Bounds
import rtk.analyses.statistics.growth.CrowAMSAA as CrowAMSAA
import rtk.analyses.statistics.growth.SPLAN as SPLAN
from rtk.testing.Testing import Model as Testing
from rtk.testing.Testing import Testing as dtcTesting
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "Weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error: # pragma: no cover
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
def _gr(gr, mi, mf, ttt, t1):
    """
    Zero-finding helper used to solve for the growth rate that makes the
    ideal growth curve end at the final MTBF, and for the optimum growth
    rate of a test phase. Evaluates
    (ttt / t1)**gr + (mf / mi) * (gr - 1.0); the sought growth rate is a
    root of this expression.
    """
    time_ratio_term = (ttt / t1) ** gr
    mtbf_ratio_term = (mf / mi) * (gr - 1.0)
    return time_ratio_term + mtbf_ratio_term
def running_sum(values):
    """
    Generator yielding the running (cumulative) sum of a list of values.

    :param list values: the values to accumulate.
    :yield: the partial sum after each element.
    """
    accumulated = 0
    for value in values:
        accumulated += value
        yield accumulated
class Model(Testing): # pylint: disable=R0902, R0904
"""
The Reliability Growth data model contains the attributes and methods for
planning and assessing a reliability growth test. The attributes of a
Reliability Growth model are:
:ivar dict dic_test_data: dictionary containing the test data for the
Growth data model. Key is an integer from 0 to
n records. Values are a list: [record_id,
failure date, left interval, right interval,
quantity]
:ivar list lst_p_growth_rate: list of planned growth rates per test phase.
:ivar list lst_p_ms: list of planned management strategies per test phase.
:ivar list lst_p_fef: list of planned fix effectiveness factors per test
phase.
:ivar list lst_p_prob: list of planned probabilities of observing a failure
per test phase.
:ivar list lst_p_mtbfi: list of planned initial MTBF per test phase.
:ivar list lst_p_mtbff: list of planned final MTBF per test phase.
:ivar list lst_p_mtbfa: list of planned average MTBF per test phase.
:ivar list lst_p_test_time: list of planned test times per test phase.
:ivar list lst_p_n_failures: list of planned number of failures per test
phase.
:ivar list lst_p_start_date: list of planned start dates per test phase.
:ivar list lst_p_end_date: list of planned end dates per test phase.
:ivar list lst_p_weeks: list of planned number of weeks per test phase.
:ivar list lst_p_n_test_units: list of planned number of test units per
test phase.
:ivar list lst_p_tpu: list of planned average test time per unit per test
phase.
:ivar list lst_p_tpupw: list of planned average test time per unit per week
per test phase.
:ivar list lst_o_growth_rate: list of observed growth rates per test phase.
:ivar list lst_o_ms: list of observed management strategies per test phase.
:ivar list lst_o_fef: list of observed fix effectiveness factors per test
phase.
:ivar list lst_o_mtbfi: list of observed initial MTBF per test phase.
:ivar list lst_o_mtbff: list of observed final MTBF per test phase.
:ivar list lst_o_mtbfa: list of observed average MTBF per test phase.
:ivar list lst_o_test_time: list of observed test times per test phase.
:ivar list alpha_hat: list of scale parameters estimated from the test data
[lower bound, point, upper bound].
:ivar list beta_hat: list of shape parameters estimated from the test data
[lower bound, point, upper bound].
:ivar list cum_mean: list of cumulative MTBF estimated from the test data
[lower bound, point, upper bound].
:ivar list instantaneous_mean: list of instantaneous MTBF estimated from
the test data
[lower bound, point, upper bound].
:ivar int rg_plan_model: the index in the list of reliability growth
planning models.
:ivar int rg_assess_model: the index in the list of reliability assessment
and projection models.
:ivar float alpha_hat: the point estimate of the scale parameter.
:ivar float beta_hat: the point estimate of the shape parameter.
:ivar float cum_mean: the point estimate of the cumulative MTBF.
:ivar float instantaneous_mean: the point estimate of the instantaneous
MTBF.
:ivar float se_scale: the estimated standard error of the scale parameter.
:ivar float se_shape: the estimated standard error of the shape parameter.
:ivar float se_cum_mean: the estimated standard error of the cumulative
MTBF.
:ivar float se_inst_mean: the estimated standard error of the instantaneous
MTBF.
:ivar float cramer_vonmises: the Cramer-von Mises test statistic.
:ivar float chi_square: the chi-square test statistic.
"""
def __init__(self, n_phases=1):
    """
    Method to initialize a Reliability Growth Test data model instance.

    :param int n_phases: the number of growth phases associated with the
                         Growth test.
    """
    super(Model, self).__init__()

    # Initialize private dict attributes.

    # Initialize private list attributes.

    # Initialize private scalar attributes.

    # Initialize public dict attributes.
    self.dic_test_data = {}

    # Initialize public list attributes.
    # The following lists are used for holding ideal growth data for each
    # test phase.
    self.lst_i_mtbfi = [0.0] * n_phases  # Initial phase MTBF.
    self.lst_i_mtbff = [0.0] * n_phases  # Final phase MTBF.
    self.lst_i_mtbfa = [0.0] * n_phases  # Average phase MTBF.
    self.lst_i_n_failures = [0] * n_phases  # Expected number of failures.

    # The following lists are used for holding planned growth data for each
    # test phase.
    self.lst_p_growth_rate = [0.0] * n_phases  # Planned growth rate.
    self.lst_p_ms = [0.0] * n_phases  # Planned management strategy.
    self.lst_p_fef = [0.0] * n_phases  # Planned fix effectiveness factor.
    self.lst_p_prob = [0.0] * n_phases  # Probability of seeing a failure.
    self.lst_p_mtbfi = [0.0] * n_phases  # Initial phase MTBF.
    self.lst_p_mtbff = [0.0] * n_phases  # Final phase MTBF.
    self.lst_p_mtbfa = [0.0] * n_phases  # Average phase MTBF.
    self.lst_p_test_time = [0.0] * n_phases  # Planned test time.
    self.lst_p_n_failures = [0] * n_phases  # Expected number of failures.
    self.lst_p_start_date = [0] * n_phases  # Phase start date.
    self.lst_p_end_date = [0] * n_phases  # Phase end date.
    self.lst_p_weeks = [0.0] * n_phases  # Weeks in the phase.
    self.lst_p_n_test_units = [0] * n_phases  # Units under test.
    self.lst_p_tpu = [0.0] * n_phases  # Test time per unit.
    self.lst_p_tpupw = [0.0] * n_phases  # Test time per unit per week.

    # The following lists are used for holding observed growth data for
    # each test phase.
    # NOTE(review): lst_o_growth_rate is fixed at three entries — it holds
    # [lower, point, upper] bounds (see assess_growth_rate), not one value
    # per phase like its siblings.
    self.lst_o_growth_rate = [0.0, 0.0, 0.0]
    self.lst_o_ms = [0.0] * n_phases
    self.lst_o_fef = [0.0] * n_phases
    self.lst_o_mtbfi = [0.0] * n_phases
    self.lst_o_mtbff = [0.0] * n_phases
    self.lst_o_mtbfa = [0.0] * n_phases
    self.lst_o_test_time = [0.0] * n_phases  # Actual test time.
    self.lst_o_n_failures = [0] * n_phases  # Observed number of failures.
    self.lst_fixed_values = [True, True, True, True, True, True, True,
                             True]

    # The following lists are used for holding model parameter estimates.
    # The format is [lower bound, point estimate, upper bound].
    self.alpha_hat = [0.0, 0.0, 0.0]
    self.beta_hat = [0.0, 0.0, 0.0]
    self.cum_mean = [[0.0, 0.0, 0.0]]
    self.instantaneous_mean = [[0.0, 0.0, 0.0]]
    self.growth_rate = [0.0, 0.0, 0.0]
    self.chi2_critical_value = [0.0, 0.0]

    # Initialize public scalar attributes.
    self.rg_plan_model = 0  # Index of the RG planning model.
    self.rg_assess_model = 0  # Index of the RG assessment model.
    self.tr = 0.0  # Program technical requirement MTBF.
    self.mtbfg = 0.0  # Program goal MTBF.
    self.mtbfgp = 0.0  # Growth potential MTBF.
    self.n_phases = n_phases
    self.ttt = 0.0  # Total time on test.
    self.avg_growth = 0.0  # Average growth rate across all test phases.
    self.avg_ms = 0.75  # Average management strategy across all test phases.
    self.avg_fef = 0.7  # Average fix effectiveness factor across all test phases.
    self.probability = 0.75  # Probability of observing a failure.
    self.ttff = 0.0  # Time to first fix.
    self.grouped = 0  # 0 = individual failure times, else grouped data.
    self.group_interval = 0.0
    self.se_scale = 0.0  # Standard error of the scale parameter.
    self.se_shape = 0.0  # Standard error of the shape parameter.
    self.se_cum_mean = 0.0
    self.se_inst_mean = 0.0
    self.cramer_vonmises = 0.0  # Cramer-von Mises test statistic.
    self.chi_square = 0.0  # Chi-square test statistic.
    self.cvm_critical_value = 0.0
def calculate_idealized_growth_curve(self, mtbf=True):
    """
    Method to calculate the values for the idealized growth curve.

    Any of the five defining inputs (first-phase average MTBF, goal
    MTBF, total time on test, first-phase test time, average growth
    rate) that is not positive is first back-calculated from the other
    four via the Crow-AMSAA relationships. As a side effect the
    per-phase ideal MTBF and expected-failure lists are refreshed.

    :keyword bool mtbf: indicates whether to calculate MTBF (default) or
                        failure intensity values.
    :return: _ideal
    :rtype: list of floats
    """
    # WARNING: Refactor calculate_idealized_growth_curve; current McCabe Complexity metric=17.
    _ideal = []

    # Verify the first phase average MTBF is greater than zero. If not,
    # attempt to calculate the average MTBF.
    if self.lst_i_mtbfa[0] <= 0.0:
        _mtbfa = CrowAMSAA.calculate_initial_mtbf(self.avg_growth,
                                                  self.mtbfg, self.ttt,
                                                  self.lst_p_test_time[0])
        self.lst_i_mtbfa[0] = _mtbfa

    # Verify the program final (goal) MTBF is greater than zero. If not,
    # attempt to calculate the final MTBF.
    if self.mtbfg <= 0.0:
        _mtbfg = CrowAMSAA.calculate_final_mtbf(self.avg_growth,
                                                self.lst_i_mtbfa[0],
                                                self.ttt,
                                                self.lst_p_test_time[0])
        self.mtbfg = _mtbfg

    # Verify the program total time on test is greater than zero. If not,
    # attempt to calculate the total time on test.
    if self.ttt <= 0.0:
        self.ttt = CrowAMSAA.calculate_total_time(self.avg_growth,
                                                  self.lst_i_mtbfa[0],
                                                  self.mtbfg,
                                                  self.lst_p_test_time[0])

    # Verify the first phase test time is greater than zero. If not,
    # attempt to calculate the first phase test time.
    if self.lst_p_test_time[0] <= 0.0:
        _time = CrowAMSAA.calculate_t1(self.avg_growth,
                                       self.lst_i_mtbfa[0],
                                       self.mtbfg, self.ttt)
        self.lst_p_test_time[0] = _time

    # Verify the program average growth rate is greater than zero. If not,
    # attempt to calculate the program average growth rate.
    if self.avg_growth <= 0.0:
        _alpha = CrowAMSAA.calculate_growth_rate(self.lst_i_mtbfa[0],
                                                 self.mtbfg, self.ttt,
                                                 self.lst_p_test_time[0])
        self.avg_growth = _alpha

    # Build the idealized curve. If the time is less than the time to
    # first fix, the idealized value is the initial MTBF. If the time
    # is equal to the time to first fix, the idealized value is set to
    # numpy's not a number to force a jump in the plot. If the time is
    # greater than the time to first failure, the idealized value is
    # calculated from the inputs read above.
    if(self.lst_i_mtbfa[0] > 0.0 and self.lst_p_test_time[0] > 0.0 and
       self.mtbfg > 0.0 and self.ttt > 0.0 and self.avg_growth > 0.0):
        for _time in range(int(self.ttt)):
            if _time < int(self.lst_p_test_time[0]):
                _ideal.append(self.lst_i_mtbfa[0])
            elif _time == int(self.lst_p_test_time[0]):
                _ideal.append(np.nan)
            else:
                _ideal.append((self.lst_i_mtbfa[0] *
                               (float(_time) /
                                self.lst_p_test_time[0])**self.avg_growth) /
                              (1.0 - self.avg_growth))

    # Convert to failure intensity if that has been called for.
    if not mtbf:
        _ideal = [1.0 / _mtbf for _mtbf in _ideal]

    # Calculate the initial MTBF, final MTBF, average MTBF, and
    # expected number of failures for each phase.
    _t1 = self.lst_p_test_time[0]
    _mtbfa = self.lst_i_mtbfa[0]
    self.lst_i_n_failures = [0.0] * self.n_phases
    for _index in range(self.n_phases):
        _time = sum(self.lst_p_test_time[:_index + 1])
        _mtbf = CrowAMSAA.calculate_final_mtbf(self.avg_growth,
                                               _mtbfa, _time, _t1)
        # The MTBF at the end of one phase is the initial MTBF of the
        # next phase.
        if _index < self.n_phases - 1:
            self.lst_i_mtbfi[_index + 1] = _mtbf
        if _index > 0:
            self.lst_i_mtbff[_index] = _mtbf
        _cum_fails = sum(self.lst_i_n_failures[:_index + 1])
        _n_failures = CrowAMSAA.calculate_n_failures(self.avg_growth,
                                                     _mtbfa, _time,
                                                     _t1, _cum_fails)
        self.lst_i_n_failures[_index] = _n_failures

    # With the phase boundary MTBFs known, refresh each phase's average
    # MTBF.
    for _index in range(self.n_phases):
        _time = self.lst_p_test_time[_index]
        _n_failures = self.lst_i_n_failures[_index]
        _mtbfi = self.lst_i_mtbfi[_index]
        _mtbff = self.lst_i_mtbff[_index]
        _mtbfa = CrowAMSAA.calculate_average_mtbf(_time, _n_failures,
                                                  _mtbfi, _mtbff)
        self.lst_i_mtbfa[_index] = _mtbfa

    return _ideal
def calculate_planned_growth_curve(self):
    """
    Method to calculate the necessary values for each reliability growth
    test phase. These are the start and end points of the planned growth
    curve.

    Any phase value (final, average or initial MTBF, growth rate) that
    is not positive is back-filled from the others; the fill-in order
    matters since each later calculation may use a value produced just
    above it.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # All phases are anchored to the first phase's average MTBF and test
    # time.
    _mtbf1 = self.lst_p_mtbfa[0]
    _t1 = self.lst_p_test_time[0]

    for i in range(self.n_phases):
        _alpha = self.lst_p_growth_rate[i]
        _mtbfa = self.lst_p_mtbfa[i]
        _mtbfi = self.lst_p_mtbfi[i]
        _mtbff = self.lst_p_mtbff[i]
        _time = self.lst_p_test_time[i]
        _cum_time = sum(self.lst_p_test_time[:i + 1])

        if _mtbff <= 0.0:
            _mtbff = CrowAMSAA.calculate_final_mtbf(_alpha, _mtbf1,
                                                    _cum_time, _t1)
            self.lst_p_mtbff[i] = _mtbff
        if _mtbfa <= 0.0:
            _mtbfa = CrowAMSAA.calculate_average_mtbf(0.0, 0, _mtbfi,
                                                      _mtbff)
            self.lst_p_mtbfa[i] = _mtbfa
        if _mtbfi <= 0.0:
            # Inverse of mtbfa = (mtbfi + mtbff) / 2.
            _mtbfi = 2.0 * _mtbfa - _mtbff
            self.lst_p_mtbfi[i] = _mtbfi
        if _alpha <= 0.0:
            _alpha = CrowAMSAA.calculate_growth_rate(_mtbfi, _mtbff,
                                                     _time, _t1)
            self.lst_p_growth_rate[i] = _alpha

    return False
def create_planned_values(self, mtbf=True):
    """
    Method to build the y-values used to plot the planned growth curve.

    Each phase contributes a run of constant values (the phase average
    MTBF, or its reciprocal when plotting failure rates) followed by a
    NaN separator, so the phases draw as disconnected horizontal
    segments.

    :keyword boolean mtbf: indicates whether to calculate MTBF or failure
                           rates.
    :return: _plan
    :rtype: list
    """
    _plan = []
    for _phase in range(self.n_phases):
        # One point per whole time unit up to (phase test time - 1).
        _elapsed = 0.0
        while _elapsed < (self.lst_p_test_time[_phase] - 1.0):
            if mtbf:
                _plan.append(self.lst_p_mtbfa[_phase])
            else:
                _plan.append(1.0 / self.lst_p_mtbfa[_phase])
            _elapsed += 1.0
        # NaN breaks the plotted line between consecutive phases.
        _plan.append(np.nan)  # pylint: disable=E1101
    return _plan
def assess_plan_feasibility(self):
    """
    Method to assess the feasibility of a test plan. The assessment
    criteria come from MIL-HDBK-189C, section 5.1.5 and section 5.1.6.

    The criteria and acceptable ranges are:

    - Initial MTBF / Goal MTBF              0.15 - 0.47
    - Fix Effectiveness Factor              0.55 - 0.85
    - Goal MTBF / Growth Potential MTBF     0.60 - 0.80
    - Growth Rate                           0.23 - 0.64

    Also back-fills any phase management strategy, fix effectiveness
    factor, or failure-observation probability that is outside its valid
    range.

    :return: _results list [MTBFI/MTBFG ratio, MTBFG/MTBFGP ratio, index
             of a phase with zero test units (or -1), index of a phase
             with zero weeks (or -1)]
    :rtype: list
    """
    _results = [0.0, 0.0, -1, -1]

    # Initial MTBF to goal MTBF ratio is high enough. Too low means growth
    # testing is probably being started too early.
    try:
        _results[0] = self.lst_p_mtbfi[0] / self.mtbfg
    except ZeroDivisionError:
        _results[0] = 0.0

    # Goal MTBF to growth potential MTBF ratio is high enough. Too
    # high means there is a low probability of achieving the goal MTBF.
    # Too low means the system may be over designed.
    try:
        _results[1] = self.mtbfg / self.mtbfgp
    except ZeroDivisionError:
        _results[1] = 0.0

    # Calculate the test time per test unit and test time per test unit
    # per week.
    for _phase in range(self.n_phases):
        # Assess logistics of test plan.
        _weeks = (self.lst_p_end_date[_phase] -
                  self.lst_p_start_date[_phase]) / 7.0
        try:
            self.lst_p_tpu[_phase] = self.lst_p_test_time[_phase] / \
                self.lst_p_n_test_units[_phase]
        except ZeroDivisionError:
            _results[2] = _phase
            self.lst_p_tpu[_phase] = 0.0
        try:
            self.lst_p_tpupw[_phase] = self.lst_p_tpu[_phase] / _weeks
        except ZeroDivisionError:
            _results[3] = _phase
            self.lst_p_tpupw[_phase] = 0.0

        # Assess engineering effort and quality of test plan.
        if self.lst_p_ms[_phase] <= 0.0 or self.lst_p_ms[_phase] > 1.0:
            _fef = self.lst_p_fef[_phase]
            _mtbfa = self.lst_p_mtbfa[_phase]
            _ms = SPLAN.calculate_management_strategy(_fef, _mtbfa,
                                                      self.mtbfgp)
            self.lst_p_ms[_phase] = _ms

        # BUGFIX: the original condition tested "> 0.0", which is true
        # for every valid FEF and therefore unconditionally overwrote
        # the planned value. A FEF is a fraction, so only recompute it
        # when it is outside (0.0, 1.0], mirroring the MS check above.
        if self.lst_p_fef[_phase] <= 0.0 or self.lst_p_fef[_phase] > 1.0:
            _ms = self.lst_p_ms[_phase]
            _mtbfa = self.lst_p_mtbfa[_phase]
            _fef = SPLAN.calculate_fef(_ms, _mtbfa, self.mtbfgp)
            self.lst_p_fef[_phase] = _fef

        if self.lst_p_prob[_phase] <= 0.0 or self.lst_p_prob[_phase] > 1.0:
            _time = self.lst_p_test_time[_phase]
            _ms = self.lst_p_ms[_phase]
            _mtbfi = self.lst_p_mtbfi[_phase]
            _prob = SPLAN.calculate_probability(_time, _ms, _mtbfi)
            self.lst_p_prob[_phase] = _prob

    return _results
def estimate_crow_amsaa(self):
    """
    Method to estimate the parameters of the Crow-AMSAA reliability growth
    model.

    Fits the scale (alpha) and shape (beta) point estimates from the
    accumulated test records and brackets each with bounds at
    ``self.confidence``.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Record layout: [record_id, failure date, left, right, quantity]
    # (see _request_test_data), so x[3] is the cumulative failure time
    # and x[4] the failure count at that time.
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]

    # NOTE(review): assumes records are ordered by right interval so the
    # last entry holds the total cumulative test time — the loader's
    # ORDER BY fld_right_interval provides this.
    self.cum_time = _times[-1]
    self.cum_failures = sum(_failures)

    (self.alpha_hat[1],
     self.beta_hat[1]) = CrowAMSAA.calculate_crow_amsaa_parameters(
         _failures, _times, 0.0, self.grouped)
    # NOTE(review): the trailing argument to calculate_crow_bounds
    # apparently selects which parameter is bounded (1 here for the
    # shape, 2 below for the scale) — confirm against the Bounds module.
    (self.beta_hat[0],
     self.beta_hat[2]) = Bounds.calculate_crow_bounds(
         sum(_failures), _times[-1], self.alpha_hat[1],
         self.beta_hat[1], self.confidence, 1)
    (self.alpha_hat[0],
     self.alpha_hat[2]) = Bounds.calculate_crow_bounds(
         self.cum_failures, self.cum_time, self.alpha_hat[1],
         self.beta_hat[1], self.confidence, 2)

    return False
def calculate_crow_amsaa_mean(self):
    """
    Method to calculate the cumulative and instantaneous mean from the
    Crow-AMSAA reliability growth model.

    Rebuilds ``self.cum_mean`` and ``self.instantaneous_mean`` with one
    [lower, point, upper] triple per recorded failure time.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Record layout: [record_id, failure date, left, right, quantity].
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    _n_fail_times = len(_times)

    self.cum_mean = []
    self.instantaneous_mean = []
    for i in range(_n_fail_times):
        (_cum_mean,
         _instantaneous_mean) = CrowAMSAA.calculate_crow_amsaa_mean(
             _times[i], self.alpha_hat[1], self.beta_hat[1])
        (_lower, _upper) = Bounds.calculate_crow_bounds(
            sum(_failures[:i + 1]), _times[i], self.alpha_hat[1],
            self.beta_hat[1], self.confidence, 3)
        # Inverting the returned bounds swaps which end is the lower and
        # which the upper MTBF bound.
        _cum_mean_ll = 1.0 / _upper
        _cum_mean_ul = 1.0 / _lower
        # Instantaneous-mean bounds are built from the parameter bounds
        # evaluated at the last recorded time; the upper parameter bounds
        # give the lower MTBF bound and vice versa.
        _i_mean_ll = 1.0 / (self.alpha_hat[2] * self.beta_hat[2] *
                            _times[-1]**(self.beta_hat[2] - 1.0))
        _i_mean_ul = 1.0 / (self.alpha_hat[0] * self.beta_hat[0] *
                            _times[-1]**(self.beta_hat[0] - 1.0))
        self.cum_mean.append([_cum_mean_ll, _cum_mean, _cum_mean_ul])
        self.instantaneous_mean.append([_i_mean_ll, _instantaneous_mean,
                                        _i_mean_ul])
    return False
def calculate_cramer_vonmises(self, t_star=0.0, type2=True):
    """
    Method to calculate the Cramer-von Mises test statistic from the
    observed reliability growth data, along with its critical value.

    Tests the hypothesis that the data fit the Crow-AMSAA model:

        Ho: the data fit the Crow-AMSAA model
        Ha: the data do not fit the Crow-AMSAA model

    Reject Ho if the statistic exceeds the critical value.

    :param float t_star: termination time for Type I tests.
    :param bool type2: whether the test is time terminated (Type I) or
                       failure terminated (Type II).
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Record layout: [record_id, failure date, left, right, quantity].
    _fail_times = [_record[3] for _record in self.dic_test_data.values()]
    _fail_counts = [_record[4] for _record in self.dic_test_data.values()]

    self.cramer_vonmises = CrowAMSAA.calculate_cramer_vonmises(
        _fail_counts, _fail_times, self.beta_hat[1], t_star, type2)
    self.cvm_critical_value = CrowAMSAA.cramer_vonmises_critical_value(
        self.cum_failures, self.confidence)

    return False
def calculate_chi_square(self):
    """
    Method to calculate the chi-square test statistic from the observed
    reliability growth data.

    Test the hypothesis that the data fits the Crow-AMSAA model.

        Ho: the data fits the Crow-AMSAA model
        Ha: the data does not fit the Crow-AMSAA model

    Reject Ho if _chi2 exceeds the critical values. Side effects: sets
    ``self.cum_failures``, ``self.chi_square`` and
    ``self.chi2_critical_value`` (and may rescale ``self.confidence``).

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # Ensure the confidence level is a fraction.
    if self.confidence > 1.0:
        self.confidence = self.confidence / 100.0

    # Record layout: [record_id, failure date, left, right, quantity].
    _times = [x[3] for x in self.dic_test_data.values()]
    _failures = [x[4] for x in self.dic_test_data.values()]
    self.cum_failures = sum(_failures)

    self.chi_square = CrowAMSAA.calculate_crow_amsaa_chi_square(
        _failures, _times, self.beta_hat[1], _times[-1], self.grouped)

    _alpha_half = (1.0 - self.confidence) / 2.0
    if self.grouped == 0:  # Individual failure times.
        # Degrees of freedom depend on whether the test stopped at a
        # fixed time (time truncated) or at a fixed failure count.
        if self.test_termination_time > 0.0:  # Time truncated test.
            _df = 2.0 * self.cum_failures
        else:  # Failure truncated test.
            _df = 2.0 * (self.cum_failures - 1)
        # Two-sided critical region for individual times.
        _upper = _alpha_half
        _lower = self.confidence + _alpha_half
    else:  # Grouped failure times.
        _df = len(_failures) - 1
        _upper = self.confidence
        _lower = 1.0 - self.confidence

    self.chi2_critical_value[0] = chi2.ppf(_lower, _df)
    self.chi2_critical_value[1] = chi2.ppf(_upper, _df)

    return False
def assess_growth_rate(self):
    """
    Method to assess the growth rate actually occurring during a Growth
    Test. The observed growth rate is 1 - beta; since a larger shape
    parameter means less growth, the bound order reverses.

    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    # beta_hat holds [lower, point, upper]; mapping through 1 - beta
    # flips the ordering, so the upper beta bound yields the lower
    # growth-rate bound.
    for _position, _beta in enumerate(reversed(self.beta_hat)):
        self.lst_o_growth_rate[_position] = 1.0 - _beta

    return False
class Growth(dtcTesting):
"""
The Reliability Growth data controller provides an interface between the
Reliability Growth data model and an RTK view model. A single Growth
controller can manage one or more Growth data models. The attributes of a
Growth data controller are:
:ivar _dao: the Data Access Object to use when communicating with the RTK
Project database.
:ivar dicTests: Dictionary of the Growth data models managed. Key is the
Test ID; value is a pointer to the Growth data model
instance.
"""
def __init__(self):  # pylint: disable=E1002
    """
    Method to initialize a Growth data controller instance.
    """
    super(Growth, self).__init__()

    # Initialize private scalar attributes.
    # Both are populated on the first call to request_tests()/add_test():
    # _dao is the Data Access Object for the RTK Project database and
    # _last_id caches the most recently assigned test ID.
    self._dao = None
    self._last_id = None
def request_tests(self, dao, growth_test):
    """
    Method to read the RTK Project database and load the Growth Test
    described by ``growth_test``. For each Growth Test:

    #. Retrieve the inputs from the RTK Project database.
    #. Create a Growth data model instance.
    #. Set the attributes of the data model instance from the returned
       results.
    #. Add the instance to the dictionary of Growth Tests being managed
       by this controller.

    :param rtk.DAO dao: the Data Access object to use for communicating
                        with the RTK Project database.
    :param tuple growth_test: the Growth test attributes from the RTK
                              Project database.
    :return: (_phases, _error_code)
    :rtype: tuple
    """
    self._dao = dao
    self._last_id = self._dao.get_last_id('rtk_tests')[0]

    # Create an instance of a Growth data model, set it's attributes, and
    # add it to the dictionary of Growth tests controlled by this data
    # controller.
    # NOTE(review): growth_test[17] is passed as the number of phases and
    # growth_test[2] is used as the test ID below — confirm against the
    # column order of the query that produced this tuple.
    _test = Model(growth_test[17])
    _test.set_attributes(growth_test)
    self.dicTests[_test.test_id] = _test

    # Gather the Growth model phase attributes.
    _query = "SELECT * FROM rtk_growth_testing \
              WHERE fld_test_id={0:d}".format(growth_test[2])
    (_phases,
     _error_code, __) = self._dao.execute(_query, commit=False)
    try:
        _n_phases = len(_phases)
    except TypeError:
        # execute() returned no iterable result set; treat as no phases.
        _n_phases = 0

    for j in range(_n_phases):
        _test.set_phase_attributes(_phases[j], j)

    # Load the test's failure records as well.
    self._request_test_data(_test.test_id)

    return(_phases, _error_code)
def _request_test_data(self, test_id):
    """
    Method to read the RTK Project database and retrieve all the test
    records associated with the selected Test.

    Rebuilds the test's ``dic_test_data`` with one entry per record:
    [record_id, failure date, left interval, right interval, quantity],
    keyed 0..n-1 in right-interval order.

    :param int test_id: the Growth Test ID to select data for.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    _test = self.dicTests[test_id]

    # NOTE(review): fld_source=1 apparently tags survival-data records
    # belonging to growth tests (the same filter is used when inserting
    # in add_test_record) — confirm against the schema.
    _query = "SELECT fld_record_id, fld_failure_date, \
              fld_left_interval, fld_right_interval, \
              fld_quantity \
              FROM rtk_survival_data \
              WHERE fld_dataset_id={0:d} \
              AND fld_source=1 \
              ORDER BY fld_right_interval".format(test_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=False)

    _test.dic_test_data = {}
    try:
        _n_records = len(_results)
    except TypeError:
        # execute() returned no iterable result set; treat as no records.
        _n_records = 0

    for i in range(_n_records):
        _test.dic_test_data[i] = [_results[i][0], _results[i][1],
                                  _results[i][2], _results[i][3],
                                  _results[i][4]]

    return(_results, _error_code)
def add_test(self, revision_id, assembly_id):
    """
    Method to add a new Test to the RTK Project for the selected Revision.

    :param int revision_id: the Revision ID to add the new Test.
    :param int assembly_id: the Assembly ID to add the new Test.
    :return: (_test, _error_code)
    :rtype: tuple
    """
    # NOTE(review): fld_test_type=4 appears to denote a reliability
    # growth test (the same value is passed to set_attributes below).
    _query = "INSERT INTO rtk_tests \
              (fld_revision_id, fld_assembly_id, fld_name, fld_test_type) \
              VALUES ({0:d}, {1:d}, 'Test Plan', 4)".format(revision_id,
                                                            assembly_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    # If the new test was added successfully to the RTK Project database:
    #   1. Retrieve the ID of the newly inserted test.
    #   2. Add a single growth phase to the growth testing table.
    #   3. Create a new Testing model instance.
    #   4. Set the attributes of the new Testing model instance.
    #   5. Add the new Testing model to the controller dictionary.
    # NOTE(review): if the INSERT failed, _test is never bound and the
    # return statement raises NameError — consider initializing
    # _test = None before this branch.
    if _results:
        self._last_id = self._dao.get_last_id('rtk_tests')[0]
        (_results, _error_code) = self.add_test_phase(self._last_id)

        _test = Model()
        _test.set_attributes((revision_id, assembly_id, self._last_id, '',
                              '', 4, '', 0.0, 0.0, 0.75, 0.0, 0.0))
        self.dicTests[_test.test_id] = _test

    return(_test, _error_code)
def add_test_phase(self, test_id, phase_id=0):
    """
    Method to add a new test phase to the RTK Project for the selected
    Reliability Growth test.

    :param int test_id: the Test ID to add the new phase to.
    :param int phase_id: the Phase ID of the new phase to add.
    :return: (_results, _error_code)
    :rtype: tuple
    """
    # Refresh the cached last test ID before inserting the phase row.
    self._last_id = self._dao.get_last_id('rtk_tests')[0]

    _query = "INSERT INTO rtk_growth_testing \
              (fld_test_id, fld_phase_id) \
              VALUES ({0:d}, {1:d})".format(test_id, phase_id)
    (_results, _error_code, __) = self._dao.execute(_query, commit=True)

    return (_results, _error_code)
    def add_test_record(self, test_id, date, time, n_failures,
                        additional=False):
        """
        Method to add a new record to the selected Reliability Growth test.

        :param int test_id: the ID of the test to add the record to.
        :param int date: the ordinal date of the failure(s).
        :param float time: the operating time at failure.
        :param int n_failures: the number of failures occurring at time.
        :keyword bool additional: if True, `time` is an increment that is
                                  added to the latest cumulative time on
                                  record; if False, `time` is already
                                  cumulative.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        _test = self.dicTests[test_id]
        # Find the current highest record ID and latest failure time for this
        # test's records (fld_source=1 marks Reliability Growth test data).
        _query = "SELECT MAX(fld_record_id), MAX(fld_right_interval) \
                  FROM rtk_survival_data \
                  WHERE fld_dataset_id={0:d} \
                  AND fld_source=1".format(test_id)
        (_results, _error_code, __) = self._dao.execute(_query, commit=False)
        # An empty table yields NULL maxima; start numbering records at 0.
        if _results[0][0] is None or _results[0][0] == '':
            _last_id = 0
        else:
            _last_id = _results[0][0] + 1
        if _results[0][1] is None or _results[0][1] == '':
            _last_time = 0.0
        else:
            _last_time = float(_results[0][1])
        # Convert an incremental time to a cumulative operating time before
        # it is stored.
        if additional:
            time = time + _last_time
        _query = "INSERT INTO rtk_survival_data \
                  (fld_record_id, fld_dataset_id, fld_left_interval, \
                   fld_right_interval, fld_quantity, fld_mode_type, \
                   fld_failure_date, fld_source) \
                  VALUES ({0:d}, {1:d}, {2:f}, {3:f}, {4:d}, {5:d}, \
                          {6:d}, 1)".format(_last_id, test_id, 0.0,
                                            time, n_failures, 0, date)
        (_results, _error_code, __) = self._dao.execute(_query, commit=True)
        # Mirror the new record in the in-memory model, keyed by the next
        # free dictionary index (max() raises ValueError on an empty dict).
        try:
            _id = max(_test.dic_test_data.keys()) + 1
        except ValueError:
            _id = 0
        _test.dic_test_data[_id] = [_last_id, date, 0.0, time, n_failures]
        return(_results, _error_code)
def delete_test(self, test_id):
"""
Deletes a Testing input from the RTK Project.
:param int test_id: the Test ID to delete the phase from.
:return: (_results, _error_code)
:rtype: tuple
"""
# Delete the phase information.
_query = "DELETE FROM rtk_growth_testing \
WHERE fld_test_id={0:d}".format(test_id)
(_results, _error_code, __) = self._dao.execute(_query, commit=True)
# Then delete the growth test itself.
_query = "DELETE FROM rtk_tests \
WHERE fld_test_id={0:d}".format(test_id)
(_results, _error_code, __) = self._dao.execute(_query, commit=True)
self.dicTests.pop(test_id)
return(_results, _error_code)
def delete_test_phase(self, test_id, phase_id):
"""
Deletes the selected test phase from the RTK Project database.
:param int test_id: the Test ID to add the new phase.
:param int phase_id: the Phase ID to delete from the test.
:return: (_results, _error_code)
:rtype: tuple
"""
_query = "DELETE FROM rtk_growth_testing \
WHERE fld_test_id={0:d} \
AND fld_phase_id={1:d}".format(test_id, phase_id)
(_results, _error_code, __) = self._dao.execute(_query, commit=True)
return(_results, _error_code)
def delete_test_record(self, record_id, dataset_id):
"""
Method to delete a test record from the RTK Program database.
:param int record_id: the ID of the record to delete.
:param int dataset_id: the ID of the dataset to delete the record from.
:return: (_results, _error_code)
:rtype: tuple
"""
_query = "DELETE FROM rtk_survival_data \
WHERE fld_record_id={0:d} \
AND fld_dataset_id={1:d} \
AND fld_source=1".format(record_id, dataset_id)
(_results, _error_code, __) = self._dao.execute(_query, commit=True)
return(_results, _error_code)
def request_calculate(self, test_id, mtbf=True):
"""
Method to request the various calculate methods of the Reliability
Growth test data model.
:param int test_id: the ID of the test to calculate.
:keyword bool mtbf: indicates whether to calculate MTBF or failure
intensity values.
:return: (_ideal, _plan)
:rtype: tuple
"""
_test = self.dicTests[test_id]
_ideal = _test.calculate_idealized_growth_curve()
if not _test.calculate_planned_growth_curve():
_plan = _test.create_planned_values(mtbf)
return(_ideal, _plan)
def request_assessment(self, test_id):
"""
Method to request the various methods to assess actual test data for
the Reliability Growth test data model.
:param int test_id: the ID of the test to assess.
:keyword bool mtbf: indicates whether to calculate MTBF or failure
intensity values.
:return: False if successful or True if an error is encountered
:rtype: bool
"""
_test = self.dicTests[test_id]
if len(_test.dic_test_data.values()) > 0:
_test.estimate_crow_amsaa()
_test.calculate_crow_amsaa_mean()
_test.assess_growth_rate()
_test.calculate_chi_square()
_test.calculate_cramer_vonmises()
return False
    def save_test(self, test_id):
        """
        Method to save the Reliability Growth Test attributes to the RTK
        Project database.

        :param int test_id: the ID of the Test to save.
        :return: (_results, _error_code) from the last executed UPDATE.
        :rtype: tuple
        """
        _test = self.dicTests[test_id]
        # Ensure confidence is stored as a fractional value.
        if _test.confidence > 1.0:
            _test.confidence = _test.confidence / 100.0
        # Persist the test-level attributes.  Positional index 0 is the test
        # ID in the WHERE clause; indices 1-41 map in order onto the argument
        # list passed to format() below.  The alpha_hat/beta_hat/cum_mean/
        # instantaneous_mean sequences carry (lower, point, upper) triples
        # matched to the *_ll / plain / *_ul column names.
        _query = "UPDATE rtk_tests \
                  SET fld_name='{1:s}', fld_description='{2:s}', \
                      fld_test_type={3:d}, fld_attachment='{4:s}', \
                      fld_cum_time={5:f}, fld_cum_failures={6:d}, \
                      fld_confidence={7:f}, fld_consumer_risk={8:f}, \
                      fld_producer_risk={9:f}, fld_plan_model={10:d}, \
                      fld_assess_model={11:d}, fld_tr={12:f}, fld_mg={13:f}, \
                      fld_mgp={14:f}, fld_num_phases={15:d}, fld_ttt={16:f}, \
                      fld_avg_growth={17:f}, fld_avg_ms={18:f}, \
                      fld_avg_fef={19:f}, fld_prob={20:f}, fld_ttff={21:f}, \
                      fld_grouped={22:d}, fld_group_interval={23:f}, \
                      fld_se_scale={24:f}, fld_se_shape={25:f}, \
                      fld_se_cum_mean={26:f}, fld_se_inst_mean={27:f}, \
                      fld_cramer_vonmises={28:f}, fld_chi_square={29:f}, \
                      fld_scale_ll={30:f}, fld_scale={31:f}, \
                      fld_scale_ul={32:f}, fld_shape_ll={33:f}, \
                      fld_shape={34:f}, fld_shape_ul={35:f}, \
                      fld_cum_mean_ll={36:f}, fld_cum_mean={37:f}, \
                      fld_cum_mean_ul={38:f}, fld_inst_mean_ll={39:f}, \
                      fld_inst_mean={40:f}, fld_inst_mean_ul={41:f} \
                  WHERE fld_test_id={0:d}".format(
                      _test.test_id, _test.name, _test.description,
                      _test.test_type, _test.attachment, _test.cum_time,
                      _test.cum_failures, _test.confidence,
                      _test.consumer_risk, _test.producer_risk,
                      _test.rg_plan_model, _test.rg_assess_model, _test.tr,
                      _test.mtbfg, _test.mtbfgp, _test.n_phases, _test.ttt,
                      _test.avg_growth, _test.avg_ms, _test.avg_fef,
                      _test.probability, _test.ttff, _test.grouped,
                      _test.group_interval, _test.se_scale, _test.se_shape,
                      _test.se_cum_mean, _test.se_inst_mean,
                      _test.cramer_vonmises, _test.chi_square,
                      _test.alpha_hat[0], _test.alpha_hat[1],
                      _test.alpha_hat[2], _test.beta_hat[0],
                      _test.beta_hat[1], _test.beta_hat[2],
                      _test.cum_mean[-1][0], _test.cum_mean[-1][1],
                      _test.cum_mean[-1][2], _test.instantaneous_mean[-1][0],
                      _test.instantaneous_mean[-1][1],
                      _test.instantaneous_mean[-1][2])
        (_results, _error_code, __) = self._dao.execute(_query, commit=True)
        # Save the phase-specific information.  Planned values use the
        # fld_p_* columns, observed values fld_o_*.  NOTE(review): index 22
        # reuses the test-level ttff for every phase -- confirm intended.
        for i in range(_test.n_phases):
            _query = "UPDATE rtk_growth_testing \
                      SET fld_p_growth_rate={2:f}, fld_p_ms={3:f}, \
                          fld_p_fef_avg={4:f}, fld_p_prob={5:f}, \
                          fld_p_mi={6:f}, fld_p_mf={7:f}, fld_p_ma={8:f}, \
                          fld_p_test_time={9:f}, fld_p_num_fails={10:d}, \
                          fld_p_start_date={11:d}, fld_p_end_date={12:d}, \
                          fld_p_weeks={13:f}, fld_p_test_units={14:d}, \
                          fld_p_tpu={15:f}, fld_p_tpupw={16:f}, \
                          fld_o_ms={17:f}, fld_o_fef_avg={18:f}, \
                          fld_o_mi={19:f}, fld_o_mf={20:f}, fld_o_ma={21:f}, \
                          fld_o_ttff={22:f}, fld_i_mi={23:f}, \
                          fld_i_mf={24:f}, fld_i_ma={25:f}, \
                          fld_i_num_fails={26:d} \
                      WHERE fld_test_id={0:d} \
                      AND fld_phase_id={1:d}".format(
                          _test.test_id, i, _test.lst_p_growth_rate[i],
                          _test.lst_p_ms[i], _test.lst_p_fef[i],
                          _test.lst_p_prob[i], _test.lst_p_mtbfi[i],
                          _test.lst_p_mtbff[i], _test.lst_p_mtbfa[i],
                          _test.lst_p_test_time[i],
                          int(_test.lst_p_n_failures[i]),
                          _test.lst_p_start_date[i], _test.lst_p_end_date[i],
                          _test.lst_p_weeks[i], _test.lst_p_n_test_units[i],
                          _test.lst_p_tpu[i], _test.lst_p_tpupw[i],
                          _test.lst_o_ms[i], _test.lst_o_fef[i],
                          _test.lst_o_mtbfi[i], _test.lst_o_mtbff[i],
                          _test.lst_o_mtbfa[i], _test.ttff,
                          _test.lst_i_mtbfi[i], _test.lst_i_mtbff[i],
                          _test.lst_i_mtbfa[i], int(_test.lst_i_n_failures[i]))
            (_results, _error_code, __) = self._dao.execute(_query,
                                                            commit=True)
        return(_results, _error_code)
    def save_test_data(self, test_id):
        """
        Method to save the test data.

        :param int test_id: the ID of the Test to save.
        :return: (_results, _error_code) from the last executed UPDATE, or
                 (False, 0) if the test has no records.
        :rtype: tuple
        """
        # Defaults returned when the test has no records to persist.
        _results = False
        _error_code = 0
        _test = self.dicTests[test_id]
        # Save the actual test data.  Each dic_test_data entry holds
        # [record_id, failure_date, left_interval, right_interval, quantity]
        # (see add_test_record); fld_source=1 restricts the UPDATE to
        # Reliability Growth test rows.
        for _key in _test.dic_test_data.keys():
            _query = "UPDATE rtk_survival_data \
                      SET fld_failure_date={2:d}, fld_left_interval={3:f}, \
                          fld_right_interval={4:f}, fld_quantity={5:d} \
                      WHERE fld_dataset_id={0:d} \
                      AND fld_record_id={1:d} \
                      AND fld_source=1".format(
                          _test.test_id, _test.dic_test_data[_key][0],
                          _test.dic_test_data[_key][1],
                          _test.dic_test_data[_key][2],
                          _test.dic_test_data[_key][3],
                          _test.dic_test_data[_key][4])
            (_results, _error_code, __) = self._dao.execute(_query,
                                                            commit=True)
        return(_results, _error_code)
def save_all_tests(self):
"""
Method to save all Testing data models managed by the controller.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
for _test in self.dicTests.values():
(_results, _error_code) = self.save_test(_test.test_id)
(_results, _error_code) = self.save_test_data(_test.test_id)
return False
| StarcoderdataPython |
164305 | <gh_stars>0
def dibujo(base, altura):
    """Print a hollow rectangle of 'x' characters.

    :param int base: width of the rectangle in characters.
    :param int altura: number of hollow rows between top and bottom edges.
    """
    # Top edge, 'altura' hollow rows, then the bottom edge.  The original
    # assigned print()'s None return value to a local that shadowed the
    # function name; those useless assignments are dropped.
    print("x" * base)
    for _ in range(altura):
        print("x" + " " * (base - 2) + "x")
    print("x" * base)
dibujo(7,5)
| StarcoderdataPython |
92752 | """Fractal definitions
Credit: https://elc.github.io/posts/plotting-fractals-step-by-step-with-python
"""
from dataclasses import dataclass
@dataclass
class Params:
    """Holds Fractal definitions suitable for L-System construction"""
    name: str  # identifier used to label the fractal
    axiom: str  # starting string of the L-system
    rules: dict  # production rules mapping symbol -> replacement string
    iterations: int  # default number of rewriting iterations
    angle: int  # turning angle in degrees
    size: int = 10  # presumably the drawing step length -- confirm in renderer
    min_iter: int = 8  # smallest sensible iteration count for this fractal
    max_iter: int = 12  # largest sensible iteration count for this fractal
class Fractals:
    """A collection of Fractal parameters.

    Each class attribute is a ready-made :class:`Params` bundle (axiom,
    production rules, turning angle and iteration bounds) for a well-known
    L-system fractal.  The ``# --`` markers are purely cosmetic separators.
    """
    dragon = Params(
        # --
        name="dragon",
        axiom="FX",
        rules={"X": "X+YF+", "Y": "-FX-Y"},
        iterations=8,
        angle=90,
        max_iter=15,
    )
    three_dragon = Params(
        # --
        name="three_dragon",
        axiom="FX+FX+FX",
        rules={"X": "X+YF+", "Y": "-FX-Y"},
        iterations=9,
        angle=90,
    )
    twin_dragon = Params(
        # --
        name="twin_dragon",
        axiom="FX+FX",
        rules={"X": "X+YF+", "Y": "-FX-Y"},
        iterations=9,
        angle=90,
        max_iter=10,
    )
    ter_dragon = Params(
        # --
        name="ter_dragon",
        axiom="F",
        rules={"F": "F-F+F"},
        iterations=8,
        angle=120,
        size=10,
        max_iter=10,
    )
    koch_snowflake = Params(
        # --
        name="koch_snowflake",
        axiom="F--F--F",
        rules={"F": "F+F--F+F"},
        iterations=4,
        angle=60,
        min_iter=3,
        max_iter=5,
    )
    koch_island = Params(
        # --
        name="koch_island",
        axiom="F+F+F+F",
        rules={"F": "F-F+F+FFF-F-F+F"},
        iterations=2,
        angle=90,
        min_iter=2,
        max_iter=4,
    )
    triangle = Params(
        # --
        name="triangle",
        axiom="F+F+F",
        rules={"F": "F-F+F"},
        iterations=6,
        angle=120,
        size=14,
    )
    crystal = Params(
        # --
        name="crystal",
        axiom="F+F+F+F",
        rules={"F": "FF+F++F+F"},
        iterations=3,
        angle=90,
        min_iter=3,
        max_iter=6,
    )
    box = Params(
        # --
        name="box",
        axiom="F-F-F-F",
        rules={"F": "F-F+F+F-F"},
        iterations=4,  # TOP: 6
        angle=90,
        min_iter=3,
        max_iter=6,
    )
    levy_c = Params(
        # --
        name="levy_c",
        axiom="F",
        rules={"F": "+F--F+"},
        iterations=10,
        angle=45,
        max_iter=16,
    )
    sierpinski = Params(
        # --
        name="sierpinski",
        axiom="F+XF+F+XF",
        rules={"X": "XF-F+F-XF+F+XF-F+F-X"},
        iterations=4,
        angle=90,
        min_iter=3,
        max_iter=8,
    )
    sierpinski_arrowhead = Params(
        # --
        name="sierpinski_arrowhead",
        axiom="YF",
        rules={"X": "YF+XF+Y", "Y": "XF-YF-X"},
        iterations=4,
        angle=60,
        min_iter=3,
        max_iter=10,
    )
    # NOTE: this one is slooow
    sierpinski_sieve = Params(
        # --
        name="sierpinski_sieve",
        axiom="FXF--FF--FF",
        rules={"F": "FF", "X": "--FXF++FXF++FXF--"},
        iterations=5,
        angle=60,
        min_iter=3,
        max_iter=7,
    )
    board = Params(
        # --
        name="board",
        axiom="F+F+F+F",
        rules={"F": "FF+F+F+F+FF"},
        iterations=3,
        angle=90,
        min_iter=3,
        max_iter=5,
    )
    tiles = Params(
        # --
        name="tiles",
        axiom="F+F+F+F",
        rules={"F": "FF+F-F+F+FF"},
        iterations=3,
        angle=90,
        min_iter=2,
        max_iter=4,
    )
    rings = Params(
        # --
        name="rings",
        axiom="F+F+F+F",
        rules={"F": "FF+F+F+F+F+F-F"},
        iterations=2,
        angle=90,
        min_iter=2,
        max_iter=4,
    )
    cross = Params(
        # --
        name="cross",
        axiom="F+F+F+F",
        rules={"F": "F+FF++F+F"},
        iterations=3,
        angle=90,
        min_iter=2,
        max_iter=6,
    )
    cross2 = Params(
        # --
        name="cross2",
        axiom="F+F+F+F",
        rules={"F": "F+F-F+F+F"},
        iterations=3,
        angle=90,
        min_iter=2,
        max_iter=6,
    )
    pentaplexity = Params(
        # --
        name="pentaplexity",
        axiom="F++F++F++F++F",
        rules={"F": "F++F++F+++++F-F++F"},
        iterations=1,
        angle=36,
        min_iter=2,
        max_iter=5,
    )
    # NOTE: this one is slooooow
    segment_curve = Params(
        # --
        name="segment_curve",
        axiom="F+F+F+F",
        rules={"F": "-F+F-F-F+F+FF-F+F+FF+F-F-FF+FF-FF+F+F-FF-F-F+FF-F-F+F+F-F+"},
        iterations=2,
        angle=90,
        min_iter=2,
        max_iter=3,
    )
    peano_gosper = Params(
        # --
        name="peano_gosper",
        axiom="FX",
        rules={"X": "X+YF++YF-FX--FXFX-YF+", "Y": "-FX+YFYF++YF+FX--FX-Y"},
        iterations=4,
        angle=60,
        min_iter=2,
        max_iter=5,
    )
    krishna_anklets = Params(
        # --
        name="krishna_anklets",
        axiom=" -X--X",
        rules={"X": "XFX--XFX"},
        iterations=3,
        angle=45,
        min_iter=2,
        max_iter=9,
    )
    # quad_gosper = Params(
    #     # --
    #     name="quad_gosper",
    #     axiom="YF",
    #     rules={
    #         "X": "XFX-YF-YF+FX+FX-YF-YFFX+YF+FXFXYF-FX+YF+FXFX+YF-FXYF-YF-FX+FX+YFYF-",
    #         "Y": "+FXFX-YF-YF+FX+FXYF+FX-YFYF-FX-YF+FXYFYF-FX-YFFX+FX+YF-YF-FX+FX+YFY",
    #     },
    #     iterations=2,
    #     angle=90,
    #     min_iter=2,
    #     max_iter=3,
    # )
    moore = Params(
        # --
        name="moore",
        axiom="LFL-F-LFL",
        rules={"L": "+RF-LFL-FR+", "R": "-LF+RFR+FL-"},
        iterations=2,
        angle=90,
        min_iter=2,
        max_iter=8,
    )
    hilberts = Params(
        # --
        name="hilberts",
        axiom="L",
        rules={"L": "+RF-LFL-FR+", "R": "-LF+RFR+FL-"},
        iterations=4,
        angle=90,
        min_iter=2,
        max_iter=7,
    )
    hilbert2 = Params(
        # --
        name="hilbert2",
        axiom="X",
        rules={"X": "XFYFX+F+YFXFY-F-XFYFX", "Y": "YFXFY-F-XFYFX+F+YFXFY"},
        iterations=4,
        angle=90,
        min_iter=2,
        max_iter=6,
    )
    peano = Params(
        # --
        name="peano",
        axiom="F",
        rules={"F": "F+F-F-F-F+F+F+F-F"},
        iterations=4,
        angle=90,
        min_iter=3,
        max_iter=5,
    )
| StarcoderdataPython |
1731805 | from datetime import datetime, timedelta
from cal_setup import get_calendar_service
def main(color='1'):
    """
    Mark the entire current day as an all-day event on the user's 'Trend'
    calendar.

    :param color: Google Calendar colorId to apply to the event.  Defaults
        to '1' so the module-level entry point can call main() without
        arguments (the original signature required it and raised TypeError).
    """
    service = get_calendar_service()

    # An all-day event spans [today, tomorrow) expressed as bare dates.
    d = datetime.now().date()
    tmr = d + timedelta(days=1)
    start = d.isoformat()
    end = datetime(tmr.year, tmr.month, tmr.day).date().isoformat()

    # Look up the calendar named 'Trend'.  NOTE(review): if it does not
    # exist, calID stays '' and the insert below will fail -- confirm that
    # is the desired behaviour.
    calendars_result = service.calendarList().list().execute()
    calendars = calendars_result.get('items', [])
    calID = ''
    for calendar in calendars:
        if calendar['summary'] == 'Trend':
            calID = calendar['id']
            break

    print(start)
    print(end)

    event = {
        'summary': '',
        'start': {
            'date': start,
            'timeZone': 'America/Los_Angeles'
        },
        'end': {
            'date': end,
            'timeZone': 'America/Los_Angeles'
        },
        'colorId': color,
    }

    service.events().insert(calendarId=calID, body=event).execute()
if __name__ == '__main__':
    # main() requires a colorId; pass '1' explicitly (the first standard
    # Google Calendar event colour) so the script runs instead of raising
    # TypeError.
    main('1')
1792581 | #!/usr/bin/env python
import socket
try:
from cStringIO import StringIO
except ImportError, e:
from StringIO import StringIO
from struct import unpack
from __init__ import dumps, loads
def _bintoint(data):
return unpack("<i", data)[0]
def _sendobj(self, obj):
    """
    Atomically send a BSON message.

    Serialises *obj* and pushes the entire encoded document onto the socket
    with sendall(), so the message goes out as one logical write.
    """
    self.sendall(dumps(obj))
def _recvobj(self):
    """
    Atomic read of a BSON message.

    This function either returns a dict, None, or raises a socket error.

    If the return value is None, it means the socket is closed by the other side.
    """
    # A BSON document starts with a 4-byte little-endian total length that
    # includes the length field itself.
    sock_buf = self.recvbytes(4)
    if sock_buf is None:
        return None

    message_length = _bintoint(sock_buf.getvalue())
    # Read the remainder of the document into the same buffer so the whole
    # message can be decoded in one pass.
    sock_buf = self.recvbytes(message_length - 4, sock_buf)
    if sock_buf is None:
        return None

    retval = loads(sock_buf.getvalue())
    return retval
def _recvbytes(self, bytes_needed, sock_buf = None):
    """
    Atomic read of bytes_needed bytes.

    This function either returns exactly the number of bytes requested in a
    StringIO buffer, None, or raises a socket error.

    If the return value is None, it means the socket is closed by the other side.

    NOTE: this module is Python 2 style code -- StringIO here is used as a
    byte buffer (cStringIO when available, see the imports above).
    """
    if sock_buf is None:
        sock_buf = StringIO()
    bytes_count = 0
    while bytes_count < bytes_needed:
        # recv() may legally return fewer bytes than asked for; cap each
        # request at 32 KiB and loop until the full amount has arrived.
        chunk = self.recv(min(bytes_needed - bytes_count, 32768))
        part_count = len(chunk)
        if part_count < 1:
            # Zero-length read: the peer performed an orderly shutdown.
            return None
        bytes_count += part_count
        sock_buf.write(chunk)
    return sock_buf
| StarcoderdataPython |
1605891 | <reponame>drawjk705/us-pls
from dataclasses import dataclass, field
from us_pls._logger.configure_logger import DEFAULT_LOG_FILE
# Default directory name used to cache downloaded data files.
DEFAULT_DATA_DIR = "data"
@dataclass
class Config:
    """Settings bag for a US Public Library Survey data pull."""
    year: int  # survey year to fetch -- presumably; confirm against callers
    data_dir: str = field(default=DEFAULT_DATA_DIR)  # where data files are stored
    log_file: str = field(default=DEFAULT_LOG_FILE)  # destination for log output
    should_overwrite_cached_urls: bool = field(default=False)  # overwrite previously cached URLs when True
    should_overwrite_existing_cache: bool = field(default=False)  # overwrite previously cached data when True
| StarcoderdataPython |
154744 | <filename>deep-learning-for-image-processing-master/tensorflow_classification/Test2_alexnet/read_pth.py
import torch
import numpy as np
import tensorflow as tf
def rename_var(pth_path, new_ckpt_path, num_classes):
    """Convert pretrained PyTorch AlexNet weights into a TensorFlow checkpoint.

    Tensors are renamed from their torch ``state_dict`` keys to the Keras
    layer variable names, transposed into TF layout, and a fresh
    classification head (``dense_2``) is initialised for ``num_classes``
    outputs.  Keys listed in the module-level ``except_list`` are skipped.

    :param pth_path: path to the PyTorch ``.pth`` state-dict file.
    :param new_ckpt_path: path the TensorFlow checkpoint is written to.
    :param num_classes: number of output classes for the new dense_2 head.
    """
    pytorch_dict = torch.load(pth_path)
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as sess:
        new_var_list = []
        for key, value in pytorch_dict.items():
            # Skip the original classifier head -- it is rebuilt below with
            # the requested number of classes.
            if key in except_list:
                continue
            new_name = key
            value = value.detach().numpy()
            if 'features.0' in new_name:
                new_name = new_name.replace("features.0.weight", "conv2d/kernel")
                new_name = new_name.replace("features.0.bias", "conv2d/bias")
            if 'features.3' in new_name:
                new_name = new_name.replace("features.3.weight", "conv2d_1/kernel")
                new_name = new_name.replace("features.3.bias", "conv2d_1/bias")
            if 'features.6' in new_name:
                new_name = new_name.replace("features.6.weight", "conv2d_2/kernel")
                new_name = new_name.replace("features.6.bias", "conv2d_2/bias")
            if 'features.8' in new_name:
                new_name = new_name.replace("features.8.weight", "conv2d_3/kernel")
                new_name = new_name.replace("features.8.bias", "conv2d_3/bias")
            if 'features.10' in new_name:
                new_name = new_name.replace("features.10.weight", "conv2d_4/kernel")
                new_name = new_name.replace("features.10.bias", "conv2d_4/bias")
            if 'classifier.1' in new_name:
                new_name = new_name.replace("classifier.1.weight", "dense/kernel")
                new_name = new_name.replace("classifier.1.bias", "dense/bias")
            if 'classifier.4' in new_name:
                new_name = new_name.replace("classifier.4.weight", "dense_1/kernel")
                new_name = new_name.replace("classifier.4.bias", "dense_1/bias")
            if 'conv2d' in new_name and 'kernel' in new_name:
                # Torch conv kernels are (out_ch, in_ch, kH, kW); TF expects
                # (kH, kW, in_ch, out_ch).
                value = np.transpose(value, (2, 3, 1, 0)).astype(np.float32)
            else:
                # Dense weights: torch stores (out, in); TF wants (in, out).
                # (Biases are 1-D, so the transpose is a no-op for them.)
                value = np.transpose(value).astype(np.float32)
            re_var = tf.Variable(value, name=new_name)
            new_var_list.append(re_var)
        # Re-initialise the classification head for num_classes outputs.
        re_var = tf.Variable(tf.keras.initializers.he_uniform()([4096, num_classes]), name="dense_2/kernel")
        new_var_list.append(re_var)
        re_var = tf.Variable(tf.keras.initializers.he_uniform()([num_classes]), name="dense_2/bias")
        new_var_list.append(re_var)
        saver = tf.compat.v1.train.Saver(new_var_list)
        sess.run(tf.compat.v1.global_variables_initializer())
        saver.save(sess, save_path=new_ckpt_path, write_meta_graph=False, write_state=False)
# Torch tensors to drop during conversion: the original 1000-class output
# layer, which rename_var() rebuilds with the requested number of classes.
except_list = ['classifier.6.weight', 'classifier.6.bias']
# https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth
pth_path = './alexnet-owt-4df8aa71.pth'
new_ckpt_path = './pretrain_weights.ckpt'
num_classes = 5
# NOTE(review): runs the conversion on import -- there is no __main__ guard.
rename_var(pth_path, new_ckpt_path, num_classes)
3028 | import pytest
from plenum.server.view_change.view_changer import ViewChanger
from stp_core.common.log import getlogger
from plenum.test.pool_transactions.helper import start_not_added_node, add_started_node
logger = getlogger()
@pytest.fixture(scope="module", autouse=True)
def tconf(tconf):
    """Module-scoped override: force VIEW_CHANGE_TIMEOUT to 10 seconds for
    every test in this module, restoring the previous value afterwards."""
    saved_timeout = tconf.VIEW_CHANGE_TIMEOUT
    tconf.VIEW_CHANGE_TIMEOUT = 10
    yield tconf
    tconf.VIEW_CHANGE_TIMEOUT = saved_timeout
def test_no_instance_change_on_primary_disconnection_for_not_ready_node(
        looper, txnPoolNodeSet, tdir, tconf,
        allPluginsPath, sdk_pool_handle, sdk_wallet_steward):
    """
    Test steps:
    1. create a new node, but don't add it to the pool (so not send NODE txn), so that the node is not ready.
    2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
    3. make sure no InstanceChange sent by the new node
    4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
    5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
    6. make sure no InstanceChange sent by the new node
    """
    # 1. create a new node, but don't add it to the pool (so not send NODE txn), so that the node is not ready.
    sigseed, bls_key, new_node, node_ha, client_ha = \
        start_not_added_node(looper,
                             tdir, tconf, allPluginsPath,
                             "TestTheta")

    # 2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
    # (+2 seconds of slack so the timeout has definitely elapsed)
    looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)

    # 3. make sure no InstanceChange sent by the new node
    # (the spy log records every call to ViewChanger.sendInstanceChange)
    assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)

    logger.info("Start added node {}".format(new_node))

    # 4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
    add_started_node(looper,
                     new_node,
                     node_ha,
                     client_ha,
                     txnPoolNodeSet,
                     sdk_pool_handle,
                     sdk_wallet_steward,
                     bls_key)

    # 5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
    looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)

    # 6. make sure no InstanceChange sent by the new node
    assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.