id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
1745885
def test_fill(dbs, TestModelA):
    """fill() followed by commit() must persist the new attribute value."""
    record = dbs.create(TestModelA, title="Remember")
    record.fill(title="lorem ipsum")
    dbs.commit()
    reloaded = dbs.first(TestModelA)
    assert reloaded.title == "lorem ipsum"
StarcoderdataPython
1788964
# stdlib
import json

# third party
from aiortc import RTCSessionDescription
from aiortc.contrib.signaling import object_from_string
from nacl.signing import SigningKey
import nest_asyncio
import pytest

# syft absolute
from syft.core.node.common.service.repr_service import ReprMessage
from syft.core.node.domain.domain import Domain
from syft.grid.connections.webrtc import WebRTCConnection


def get_signing_key() -> SigningKey:
    # Return the signing key used to sign the get_signed_message_bytes fixture.
    key = "<KEY>"
    return SigningKey(bytes.fromhex(key))


@pytest.mark.asyncio
async def test_init_without_event_loop() -> None:
    """A WebRTCConnection must be constructible outside a running event loop."""
    nest_asyncio.apply()
    domain = Domain(name="test")
    connection = WebRTCConnection(node=domain)
    assert connection is not None


@pytest.mark.slow
@pytest.mark.asyncio
async def test_signaling_process() -> None:
    """Full offer/answer SDP exchange between two WebRTC connections."""
    nest_asyncio.apply()
    domain = Domain(name="test")
    offerer = WebRTCConnection(node=domain)

    offer_payload = await offerer._set_offer()
    offer_dict = json.loads(offer_payload)
    offer_session = object_from_string(offer_payload)

    assert "sdp" in offer_dict
    assert "type" in offer_dict
    assert offer_dict["type"] == "offer"
    assert isinstance(offer_session, RTCSessionDescription)

    answerer = WebRTCConnection(node=domain)
    answer_payload = await answerer._set_answer(payload=offer_payload)
    answer_dict = json.loads(answer_payload)
    answer_session = object_from_string(answer_payload)

    assert "sdp" in answer_dict
    assert "type" in answer_dict
    assert answer_dict["type"] == "answer"
    assert isinstance(answer_session, RTCSessionDescription)

    response = await offerer._process_answer(payload=answer_payload)
    assert response is None


@pytest.mark.asyncio
async def test_consumer_request() -> None:
    """consumer() must accept a root-signed serialized message."""
    nest_asyncio.apply()
    test_domain = Domain(name="test")
    webrtc_node = WebRTCConnection(node=test_domain)

    msg = ReprMessage(address=test_domain.address)
    signing_key = SigningKey.generate()
    test_domain.root_verify_key = signing_key.verify_key
    signed_msg = msg.sign(signing_key=signing_key)
    msg_bin = signed_msg.to_bytes()

    await webrtc_node.consumer(msg=msg_bin)
StarcoderdataPython
19082
# PyFeatureTrack — good-feature selection (KLT), module prelude.
from __future__ import print_function
import math, numpy as np
from PIL import Image
from klt import *
from error import *
from convolve import *
from klt_util import *
import goodFeaturesUtils


class selectionMode:
    # Selection strategies for _KLTSelectGoodFeatures.
    SELECTING_ALL = 1
    REPLACING_SOME = 2

KLT_verbose = 1

#*********************************************************************

def _fillFeaturemap(x, y, featuremap, mindist, ncols, nrows):
    """Mark every in-bounds pixel within a (2*mindist+1) square centred on
    (x, y) as claimed by a feature, and return the updated map."""
    for iy in range(y - mindist, y + mindist + 1):
        for ix in range(x - mindist, x + mindist + 1):
            if 0 <= ix < ncols and 0 <= iy < nrows:
                featuremap[iy * ncols + ix] = True
    return featuremap
#*********************************************************************
#* _enforceMinimumDistance
#*
#* Removes features that are within close proximity to better features,
#* writing -1's / KLT_NOT_FOUND into the unused feature slots.
#*

def _resetAffineState(feat):
    # Clear a feature's affine-tracking state back to the identity warp.
    feat.aff_img = None
    feat.aff_img_gradx = None
    feat.aff_img_grady = None
    feat.aff_x = -1.0
    feat.aff_y = -1.0
    feat.aff_Axx = 1.0
    feat.aff_Ayx = 0.0
    feat.aff_Axy = 0.0
    feat.aff_Ayy = 1.0


def _enforceMinimumDistance(pointlist, featurelist, ncols, nrows, mindist, min_eigenvalue, overwriteAllFeatures):
    # Cannot add features with an eigenvalue less than one.
    if min_eigenvalue < 1:
        min_eigenvalue = 1

    # Boolean map recording which pixels are already claimed by a feature.
    featuremap = [False] * (ncols * nrows)

    # Necessary because the fill routine works with (mindist - 1).
    mindist = mindist - 1

    # If we are keeping all old good features, add them to the feature map.
    if not overwriteAllFeatures:
        for feat in featurelist:
            if feat.val >= 0:
                featuremap = _fillFeaturemap(int(feat.x), int(feat.y), featuremap, mindist, ncols, nrows)

    # For each candidate point, in descending order of importance...
    feat_idx = 0
    point_idx = 0
    while True:
        # Once the candidates are exhausted, mark the remaining slots unused.
        if point_idx >= len(pointlist):
            while feat_idx < len(featurelist):
                if overwriteAllFeatures and featurelist[feat_idx].val < 0:
                    featurelist[feat_idx].x = -1
                    featurelist[feat_idx].y = -1
                    featurelist[feat_idx].val = kltState.KLT_NOT_FOUND
                    _resetAffineState(featurelist[feat_idx])
                feat_idx = feat_idx + 1
            break

        # pointlist entries are (trackability, x, y) tuples.
        val, x, y = pointlist[point_idx]
        point_idx += 1

        # Ensure that the candidate is in-bounds.
        assert 0 <= x < ncols
        assert 0 <= y < nrows

        # Skip past slots already holding good features we must keep.
        while not overwriteAllFeatures and feat_idx < len(featurelist) and featurelist[feat_idx].val >= 0:
            feat_idx = feat_idx + 1

        if feat_idx >= len(featurelist):
            break

        # If no neighbour has claimed this pixel and the minimum eigenvalue
        # is large enough, accept the candidate into the current slot.
        if not featuremap[y * ncols + x] and val >= min_eigenvalue:
            featurelist[feat_idx].x = x
            featurelist[feat_idx].y = y
            featurelist[feat_idx].val = int(val)
            _resetAffineState(featurelist[feat_idx])
            feat_idx = feat_idx + 1

            # Reserve the surrounding region of the feature map (in-bounds only).
            featuremap = _fillFeaturemap(x, y, featuremap, mindist, ncols, nrows)

    return featurelist


#*********************************************************************

def _KLTSelectGoodFeatures(tc, img, nFeatures, mode):
    """Select the nFeatures most trackable points in img, honouring the
    tracking-context window size, borders and minimum feature spacing."""
    featurelist = [KLT_Feature() for _ in range(nFeatures)]
    overwriteAllFeatures = (mode == selectionMode.SELECTING_ALL)
    floatimages_created = False
    ncols, nrows = img.size

    # Check window size (and correct if necessary).
    if tc.window_width % 2 != 1:
        tc.window_width = tc.window_width + 1
        KLTWarning("Tracking context's window width must be odd. Changing to {0}.\n".format(tc.window_width))
    if tc.window_height % 2 != 1:
        tc.window_height = tc.window_height + 1
        KLTWarning("Tracking context's window height must be odd. Changing to {0}.\n".format(tc.window_height))
    if tc.window_width < 3:
        tc.window_width = 3
        KLTWarning("Tracking context's window width must be at least three. \nChanging to %d.\n".format(tc.window_width))
    if tc.window_height < 3:
        tc.window_height = 3
        KLTWarning("Tracking context's window height must be at least three. \nChanging to %d.\n".format(tc.window_height))
    # NOTE(review): true division (Python 3) yields fractional half-windows;
    # kept as in the original — confirm integer halves are not required.
    window_hw = tc.window_width / 2
    window_hh = tc.window_height / 2

    # Create temporary images, etc.
    if mode == selectionMode.REPLACING_SOME and tc.sequentialMode and tc.pyramid_last != None:
        floatimg = tc.pyramid_last.img[0]
        gradx = tc.pyramid_last_gradx.img[0]
        grady = tc.pyramid_last_grady.img[0]
        assert gradx != None
        assert grady != None
    else:
        floatimages_created = True
        floatimg = Image.new("F", img.size)
        gradx = Image.new("F", img.size)
        grady = Image.new("F", img.size)
        if tc.smoothBeforeSelecting:
            tmpimg = np.array(img.convert("F"))
            floatimg = KLTComputeSmoothedImage(tmpimg, KLTComputeSmoothSigma(tc))
        else:
            floatimg = np.array(img.convert("F"))

    # Compute gradient of image in x and y direction.
    gradx, grady = KLTComputeGradients(floatimg, tc.grad_sigma)

    # Write internal images.
    if tc.writeInternalImages:
        floatimg.save("kltimg_sgfrlf.pgm")
        gradx.save("kltimg_sgfrlf_gx.pgm")
        grady.save("kltimg_sgfrlf_gy.pgm")

    # Compute trackability of each image pixel as the minimum
    # of the two eigenvalues of the Z matrix.
    borderx = tc.borderx  # Must not touch cols
    bordery = tc.bordery  # lost by convolution
    if borderx < window_hw:
        borderx = window_hw
    if bordery < window_hh:
        bordery = window_hh

    pointlistx, pointlisty, pointlistval = goodFeaturesUtils.ScanImageForGoodFeatures(
        gradx, grady, borderx, bordery, window_hw, window_hh, tc.nSkippedPixels)

    # Sort the candidate features, best first.  (Equal tuples make the
    # sort+reverse of the original identical to reverse-sorting.)
    pointlist = sorted(zip(pointlistval, pointlistx, pointlisty), reverse=True)

    # Check tc.mindist.
    if tc.mindist < 0:
        KLTWarning("(_KLTSelectGoodFeatures) Tracking context field tc.mindist is negative ({0}); setting to zero".format(tc.mindist))
        tc.mindist = 0

    # Enforce minimum distance between features.
    _enforceMinimumDistance(pointlist,
                            featurelist,
                            ncols, nrows,
                            tc.mindist,
                            tc.min_eigenvalue,
                            overwriteAllFeatures)

    return featurelist


#*********************************************************************
#* KLTSelectGoodFeatures
#*
#* Main routine, visible to the outside.  Finds the good features in
#* an image.
#*

def KLTSelectGoodFeatures(tc, img, nFeatures):
    # tc:  tracking context (window size, min distance, sigmas, ...)
    # img: PIL image to scan
    # Returns the list of selected features.
    ncols, nrows = img.size
    if KLT_verbose >= 1:
        print("(KLT) Selecting the {0} best features from a {1} by {2} image... ".format(nFeatures, ncols, nrows))

    fl = _KLTSelectGoodFeatures(tc, img, nFeatures, selectionMode.SELECTING_ALL)

    if KLT_verbose >= 1:
        print("\n\t{0} features found.\n".format(KLTCountRemainingFeatures(fl)))
        if tc.writeInternalImages:
            print("\tWrote images to 'kltimg_sgfrlf*.pgm'.\n")

    return fl
StarcoderdataPython
74296
# src/yaabook/action_controller_search.py
import logging

import npyscreen


class ActionControllerSearch(npyscreen.ActionControllerSimple):
    """Action controller that wires '/pattern' commands to a database search."""

    def create(self):
        # Register '/...' as a live-updating search command.
        self.add_action('^/.*', self.set_search, True)

    def set_search(self, command_line, widget_proxy, live):
        query = command_line[1:]
        logging.debug('searching for %s' % query)
        self.parent.value.set_filter(query)
        self.parent.wMain.values = self.parent.parentApp.myDatabase.search(query)
        self.parent.wMain.display()
StarcoderdataPython
3260
# -*- coding: utf-8 -*-
import pickle

import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem, DataStructs


def get_classes(path):
    """Load a {class: count} pickle and return its items sorted by count, descending."""
    with open(path, 'rb') as handle:
        dict_ = pickle.load(handle)
    ordered = sorted(dict_.items(), key=lambda d: d[1], reverse=True)
    return [(name, count) for name, count in ordered]


def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True):
    """Build a concatenated (product fp, product-minus-reactant fp) Morgan
    fingerprint from separate reactant and product SMILES.

    Returns None (implicitly) whenever RDKit fails on either molecule.
    """
    rsmi = rsmi.encode('utf-8')
    psmi = psmi.encode('utf-8')

    try:
        mol = Chem.MolFromSmiles(rsmi)
    except Exception as e:
        print(e)
        return
    try:
        fp_bit = AllChem.GetMorganFingerprintAsBitVect(
            mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)
        fp = np.empty(rxnfpsize, dtype='float32')
        DataStructs.ConvertToNumpyArray(fp_bit, fp)
    except Exception as e:
        print("Cannot build reactant fp due to {}".format(e))
        return
    rfp = fp

    try:
        mol = Chem.MolFromSmiles(psmi)
    except Exception as e:
        return
    try:
        fp_bit = AllChem.GetMorganFingerprintAsBitVect(
            mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)
        fp = np.empty(pfpsize, dtype='float32')
        DataStructs.ConvertToNumpyArray(fp_bit, fp)
    except Exception as e:
        print("Cannot build product fp due to {}".format(e))
        return
    pfp = fp

    # Reaction fingerprint = product - reactant; concatenate with product fp.
    rxn_fp = pfp - rfp
    final_fp = np.concatenate((pfp, rxn_fp))
    return final_fp
StarcoderdataPython
3264083
import redis
from prometheus_client import Counter, Histogram
from dotenv import load_dotenv

from pipeline import ProcessorSettings, Processor, Command, CommandActions, Definition
from apihub.utils import Result, Status, RedisSettings, DEFINITION
from apihub import __worker__, __version__

load_dotenv()


class ResultWriter(Processor):
    """ResultWriter collects results from API service worker, and store
    these results in Redis
    """

    # Per-(api, user, status) request counter exposed to Prometheus.
    api_counter = Counter(
        "api_requests_total",
        "API requests",
        ["api", "user", "status"],
    )
    # Declared for per-api processing-time observation.
    # NOTE(review): never observe()d anywhere in this class — confirm whether
    # timing should be recorded in process().
    request_duration = Histogram(
        "api_process_time_seconds",
        "Processing time (seconds)",
        labelnames=["api"],
    )

    def __init__(self) -> None:
        settings = ProcessorSettings(
            name=__worker__ + " ResultWriter",
            version=__version__,
            description="write results to redis",
            debug=True,
            monitoring=True,
        )
        super().__init__(settings, input_class=dict, output_class=None)

    def setup(self) -> None:
        # Connect to Redis once per worker.
        settings = RedisSettings()
        self.redis = redis.Redis.from_url(settings.redis)

    def process_command(self, command: Command) -> None:
        """Handle a pipeline command; Define commands register an API definition."""
        self.logger.info("Processing COMMAND")
        if command.action == CommandActions.Define:
            definition = Definition.parse_obj(command.content)
            self.logger.info(definition)
            self.redis.hset(DEFINITION, definition.source.topic, definition.json())
            self.logger.info(
                f"{definition.source.topic} definition:\n{definition.json()}"
            )

    def process(self, message_content, message_id):
        """Store one worker result in Redis under message_id (24h TTL)."""
        self.logger.info("Processing MESSAGE")
        result = Result.parse_obj(message_content)
        if result.status == Status.PROCESSED:
            # Keep only the fields the last pipeline step reported as updated.
            result.result = {
                k: message_content.get(k) for k in self.message.logs[-1].updated
            }
        # BUGFIX: Counter.labels() only selects the labelled child; without
        # .inc() the metric never advances.
        self.api_counter.labels(api=result.api, user=result.user, status=result.status).inc()
        if self.redis.get(message_id) is not None:
            self.logger.warning("Found result with key %s, overwriting...", message_id)
        self.redis.set(message_id, result.json(), ex=86400)
        return None


def main():
    writer = ResultWriter()
    writer.parse_args()
    writer.start()


if __name__ == "__main__":
    main()
StarcoderdataPython
3274365
# Four players each roll a die with a random result; the results are kept in
# a dict and printed in descending order — the winner rolled the highest number.
from random import randint
from time import sleep
from operator import itemgetter

cores = {'limpa': '\033[m',
         'bverde': '\033[1;32m',
         'bvermelho': '\033[1;31m',
         'pretoebranco': '\033[7:30m'}

print('-=-' * 10)
print(cores['pretoebranco'] + '_____INICIO_____' + cores['limpa'])
print('-=-' * 10)

# One independent roll per player.
jogo = {'jogador1': randint(1, 6),
        'jogador2': randint(1, 6),
        'jogador3': randint(1, 6),
        'jogador4': randint(1, 6)}

print(f'{"VALORES SORTEADOS":=^40}')
ranking = list()
for player, roll in jogo.items():
    print(f'O {player} tirou {roll} no dado. ')
    sleep(1)

print(f'{"RANKING DOS JOGADORES":=^40}')
ranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)
for place, entry in enumerate(ranking):
    print(f'O {place+1}º lugar {entry[0]} com {entry[1]}.')

print('')
print('-=-' * 10)
print(cores['pretoebranco'] + '______FIM_______' + cores['limpa'])
print(cores['pretoebranco'] + '_Code by Rafael_' + cores['limpa'])
print('-=-' * 10)
StarcoderdataPython
138187
# Implemente uma aplicação que utilize uma pilha para conversão de expressões da notação tradicional (infixa), para a notação polonesa reversa (pós-fixada).
StarcoderdataPython
108132
# Project Euler 48: last ten digits of 1^1 + 2^2 + ... + 1000^1000.
total = sum(i ** i for i in range(1, 1001))
res = str(total)
print(res[len(res) - 10:])
StarcoderdataPython
3324524
import numpy as np


def standardize(x_test, x_train):
    """ standardizes the train and test data matrices column by column
    input: x_test: matrix which contains test data
           x_train: matrix which contains train data
    return: standardized matrices x_test, x_train
    """
    for col in range(x_test.shape[1]):
        x_test[:, col], x_train[:, col] = standardize_col(x_test[:, col], x_train[:, col])
    return x_test, x_train


def standardize_col(x1, x2):
    """ standardizes one column of test (x1) and train (x2) data jointly;
    -999 entries are treated as missing: excluded from the statistics and
    replaced afterwards.
    """
    missing1 = np.where(x1 == -999)
    missing2 = np.where(x2 == -999)
    clean1 = np.delete(x1, missing1)
    clean2 = np.delete(x2, missing2)
    combined = np.append(clean1, clean2)

    mean = np.mean(combined, axis=0)
    x1 = x1 - mean
    x2 = x2 - mean
    # NOTE(review): missing entries are set to the (uncentred) mean, leaving
    # them at +mean in centred space; the commented-out original used 0 —
    # confirm this offset is intentional.
    x1[missing1] = mean
    x2[missing2] = mean

    # Shared sample standard deviation over both (already centred) columns.
    std = np.std(np.append(x1, x2), ddof=1)
    return x1 / std, x2 / std


def remove_outliers(x_train, ys_train):
    """ discards data points containing outliers, i.e. rows whose largest
    absolute value exceeds the hard-coded threshold.
    input: x_train: matrix which contains train data
           ys_train: array which contains labels
    return: train and label data without outliers
    """
    threshold = 8.5
    rows_to_drop = [row for row in range(x_train.shape[0])
                    if np.amax(np.abs(x_train[row, :])) > threshold]
    x_train = np.delete(x_train, rows_to_drop, 0)
    ys_train = np.delete(ys_train, rows_to_drop, 0)
    return x_train, ys_train
StarcoderdataPython
1658252
from libfuturize.fixes.fix_raise import FixRaise
StarcoderdataPython
191850
# solver.py — exact cover via Knuth's Algorithm X over dict-of-sets.

def exact_cover(X, Y):
    """Build the column index for Algorithm X: X becomes {element: set(row ids)}."""
    index = {element: set() for element in X}
    for row_id, elements in Y.items():
        for element in elements:
            index[element].add(row_id)
    return index, Y


def select(X, Y, r):
    """Cover row r: remove its columns (and every conflicting row) from X.

    Returns the removed column sets so deselect() can undo the operation.
    """
    removed_columns = []
    for element in Y[r]:
        for other_row in X[element]:
            for other_element in Y[other_row]:
                if other_element != element:
                    X[other_element].remove(other_row)
        removed_columns.append(X.pop(element))
    return removed_columns


def deselect(X, Y, r, cols):
    """Undo select(): restore columns and row memberships in reverse order."""
    for element in reversed(Y[r]):
        X[element] = cols.pop()
        for other_row in X[element]:
            for other_element in Y[other_row]:
                if other_element != element:
                    X[other_element].add(other_row)


def solve(X, Y, solution):
    """Yield every exact cover as a list of row ids (Knuth's Algorithm X)."""
    if not X:
        yield list(solution)
        return
    # Branch on the most constrained column.
    column = min(X, key=lambda candidate: len(X[candidate]))
    for row in list(X[column]):
        solution.append(row)
        removed = select(X, Y, row)
        yield from solve(X, Y, solution)
        deselect(X, Y, row, removed)
        solution.pop()
StarcoderdataPython
169972
# cos-cvae evaluation: sample captions for the COCO mRNN test split and
# dump consensus hypotheses to consensus_hypothesis.npy.
from __future__ import print_function
import os
import random
import numpy as np
import torch
import argparse
from torch.utils.data import Dataset, DataLoader
import itertools
from tqdm import tqdm
import pickle
import json
import sys
sys.path.insert(0, './data')
from build_vocab_coco import Vocabulary
sys.path.insert(0, './')
from modules.data_loader import COCO_Dataset
from modules.textmodules import TextDecoder, GaussPrior
from modules.utils import masked_mean

torch.multiprocessing.set_sharing_strategy('file_system')


def loglikehihoods_to_str(loglikelihood_seq, length):
    """Map a sequence of vocabulary indices to caption words, keeping '.' as a
    terminator, stopping at '<end>' and skipping special tokens."""
    words = [vocab_wordlist[int(p)] for p in loglikelihood_seq][0:length]
    caption = []
    for word in words:
        if word == ".":
            caption += [word]
            break
        elif word == "<end>":
            break
        elif word not in ("<start>", "<end>", "<pad>", "<ukn>"):
            caption += [word]
    return caption


def load_dataset(data_path, vocab_path, coco_class, maxlength):
    """Build the COCO val2014 dataset restricted to the mRNN test indices."""
    image_data_path = str(data_path) + '/coco_val_2014_adaptive_withclasses.h5'
    text_data_path = str(data_path) + '/annotations/captions_val2014.json'
    coco_test_idxs_files = [str(data_path) + '/coco_test_mRNN.txt']
    return COCO_Dataset(text_data_path, image_data_path, vocab_path, coco_class,
                        coco_test_idxs_files, True, maxlength)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='params', help='Experiment settings.')
    args = parser.parse_args()
    config_setting = args.config
    config = json.loads(open('./params.json', 'r').read())
    config = config[config_setting]

    data_path = config['pathToData']
    vocab_path = config['vocab_path']
    coco_class = config['coco_class']
    batch_size = 1000  # int(config['batch_size'])
    maxlength = int(config['maxlength'])
    latent_dim_tx = int(config['latent_dim_tx'])
    meanimfeats_size = int(config['meanimfeats_size'])
    word_dim = int(config['word_dim'])
    mask_hidden_size = int(config['mask_hidden_size'])
    max_iterations = int(config['max_iterations'])

    # Vocabulary: index -> word list used by loglikehihoods_to_str.
    vocab_class = pickle.load(open(str(vocab_path), 'rb'))
    vocab = vocab_class.idx2word
    vocab_w2i = vocab_class.word2idx
    vocab_word2vec = []
    vocab_wordlist = []
    for w_id, word in vocab.items():
        vocab_wordlist.append(word)
    vocab_size = len(vocab_wordlist)

    device = torch.device("cuda:0")
    coco_dataset = load_dataset(data_path, vocab_path, coco_class, maxlength)

    # Restrict evaluation to the first 1000 test images.
    imkeyfile = str(data_path) + '/coco_test.txt'
    imkeyfile_ = open(imkeyfile, 'r').read().split('\n')[:1000]
    imkeylist = []
    for images in imkeyfile_:
        imagekey = os.path.splitext(os.path.basename(images))[0]
        imagekey = int(imagekey.split('_')[-1])
        imkeylist.append(imagekey)

    coco_dataloader = torch.utils.data.DataLoader(coco_dataset, batch_size=batch_size,
                                                  shuffle=False, num_workers=4,
                                                  drop_last=True)

    texDec = TextDecoder(batch_size, 2 * latent_dim_tx, vocab_size).cuda()
    g_prior = GaussPrior(meanimfeats_size=meanimfeats_size,
                         vocab_size=vocab_size,
                         sent_emd_size=word_dim,
                         sent_enc_size=latent_dim_tx,
                         max_length=maxlength).cuda()

    test_samples = 100
    beam_width = 2

    try:
        model = torch.load('./ckpts/model_checkpoint_release.pt')
        g_prior_st = model['g_prior_sd']
        g_prior.load_my_state_dict(g_prior_st)
        g_prior = g_prior.cuda()
        txtDecoder_st = model['txtDecoder_sd']
        texDec.load_my_state_dict(txtDecoder_st)
        texDec = texDec.cuda()
        file_load_success = True
    except Exception:
        print('ckpt not found')
        sys.exit(0)

    results = []
    for i, data in tqdm(enumerate(coco_dataloader)):
        with torch.no_grad():
            caps = data[0]
            bottom_up_features = data[3]
            seq_lengths = data[4]
            im_idx = data[-1].cpu().numpy()

            # Five ground-truth captions per image.
            seq = torch.reshape(caps[0], (batch_size, 5, maxlength))
            seq_in = None
            seq_lengths = maxlength

            mask_dec_in = bottom_up_features.float()
            mask_dec_in = mask_dec_in[:, :, :2048]
            obj_enc_mask = torch.sum(torch.abs(mask_dec_in), dim=-1) > 0
            meanimfeats = masked_mean(mask_dec_in, obj_enc_mask.unsqueeze(-1), dim=1)

            # Draw test_samples prior samples, beam_width beams each.
            all_preds_inf_latent = []
            for t_idx in range(test_samples):
                preds, _ = texDec(seq_in, None, None, mask_dec_in.cuda(), seq_lengths,
                                  prior_model=g_prior, train=False)
                for b in range(beam_width):
                    all_preds_inf_latent.append(preds[:, :, b])
            all_preds_inf_latent = np.array(all_preds_inf_latent)
            all_preds_inf_latent = np.transpose(all_preds_inf_latent, (1, 0, 2))

            count = 0
            for j in range(batch_size):
                if im_idx[j] in imkeylist:
                    true_strs = []
                    for true_caption in seq[j, :, :]:
                        true_str = loglikehihoods_to_str(true_caption.cpu().numpy(), len(true_caption))
                        true_strs.append(true_str)

                    all_test_samples = []
                    for test_sample_idx in range(beam_width * test_samples):
                        curr_pred_str = loglikehihoods_to_str(all_preds_inf_latent[j, test_sample_idx], maxlength - 1)
                        curr_pred_str = ' '.join(curr_pred_str)
                        # NOTE(review): the joined prediction is never appended
                        # to all_test_samples, so 'gen_beam_search_10' stays
                        # empty — confirm against the original repository.
                    temp = {'file_name': 'img' + str(int(im_idx[j])),
                            'file_path': './val2014/',
                            'gen_beam_search_10': all_test_samples,
                            'sentences': true_strs,
                            'id': int(im_idx[j]),
                            'height': 224, 'width': 224,
                            'url': 'www.coco.com',
                            'date_captured': '10.10.2013',
                            'license': 'apache'}
                    results.append(temp)

    np.save('./consensus_hypothesis.npy', results)
StarcoderdataPython
1720025
# pydelta/mutators_boolean.py — boolean-logic mutators for delta debugging.
from . import options
from .semantics import *

NAME = 'boolean'
MUTATORS = ['de-morgan', 'double-negations', 'eliminate-false-eq', 'eliminate-implications', 'negate-quant']


def is_quantifier(node):
    """Return True if node is an 'exists' or 'forall' application."""
    return has_name(node) and get_name(node) in ['exists', 'forall']


class DeMorgan:
    """Uses de Morgans rules to push negations inside."""
    def filter(self, node):
        return is_not(node) and has_name(node[1])

    def mutations(self, node):
        operator = get_name(node[1])
        if operator == 'and':
            negated = [['not', child] for child in node[1][1:]]
            return [['or', *negated]]
        if operator == 'or':
            negated = [['not', child] for child in node[1][1:]]
            return [['and', *negated]]
        return []

    def __str__(self):
        return 'push negation inside'


class DoubleNegation:
    """Elimination double negations."""
    def filter(self, node):
        return is_not(node) and is_not(node[1])

    def mutations(self, node):
        return [node[1][1]]

    def __str__(self):
        return 'eliminate double negation'


class EliminateFalseEquality:
    """Replaces an equality with :code:`false` by a negation."""
    def filter(self, node):
        return not is_leaf(node) and len(node) == 3 and has_name(node) and get_name(node) == '=' and node[1] == 'false'

    def mutations(self, node):
        return [['not', node[2]]]

    def __str__(self):
        return 'replace equality with false by negation'


class EliminateImplications:
    """Replaces implications by disjunctions."""
    def filter(self, node):
        return has_name(node) and get_name(node) == '=>' and len(node) == 3

    def mutations(self, node):
        return [['or', ['not', node[1]], node[2]]]

    def __str__(self):
        return 'eliminate implication'


class NegatedQuantifiers:
    """Pushes negation inside quantifiers."""
    def filter(self, node):
        return is_not(node) and is_quantifier(node[1])

    def mutations(self, node):
        quantifier = get_name(node[1])
        if quantifier == 'exists':
            return [['forall', node[1][1], ['not', node[1][2]]]]
        if quantifier == 'forall':
            return [['exists', node[1][1], ['not', node[1][2]]]]
        return []

    def __str__(self):
        return 'push negation inside of quantifier'


class XORRemoveConstants:
    """Eliminates constant children from :code:`xor`."""
    def filter(self, node):
        return has_name(node) and get_name(node) == 'xor'

    def mutations(self, node):
        res = []
        if 'false' in node:
            # xor with false is the identity: drop the constant.
            res.append([c for c in node if c != 'false'])
        if 'true' in node:
            # xor with true flips the result: drop it, with and without negation.
            res.append([c for c in node if c != 'true'])
            res.append(['not', [c for c in node if c != 'true']])
        return res

    def __str__(self):
        return 'remove constants from xor'


class XOREliminateBinary:
    """Eliminates binary :code:`xor` by :code:`distinct`."""
    def filter(self, node):
        return has_name(node) and get_name(node) == 'xor' and len(node) == 3

    def mutations(self, node):
        return [['distinct', node[1], node[2]]]

    def __str__(self):
        return 'eliminate binary xor'


def collect_mutator_options(argparser):
    """Register command-line switches for every boolean mutator."""
    options.add_mutator_argument(argparser, NAME, True, 'boolean mutators')
    options.add_mutator_argument(argparser, 'de-morgan', True, 'apply de Morgan to push negations inside')
    options.add_mutator_argument(argparser, 'double-negations', True, 'eliminate double negations')
    options.add_mutator_argument(argparser, 'eliminate-binary-xor', True, 'eliminate binary xor')
    options.add_mutator_argument(argparser, 'eliminate-false-eq', True, 'eliminate equalities with false')
    options.add_mutator_argument(argparser, 'eliminate-implications', True, 'eliminate implications')
    options.add_mutator_argument(argparser, 'negated-quant', True, 'push negations inside quantifiers')
    options.add_mutator_argument(argparser, 'remove-xor-constants', True, 'remove constants from xor')


def collect_mutators(args):
    """Instantiate the mutators enabled by the parsed arguments."""
    res = []
    if args.mutator_boolean:
        if args.mutator_de_morgan:
            res.append(DeMorgan())
        if args.mutator_double_negations:
            res.append(DoubleNegation())
        if args.mutator_eliminate_binary_xor:
            res.append(XOREliminateBinary())
        if args.mutator_eliminate_false_eq:
            res.append(EliminateFalseEquality())
        if args.mutator_eliminate_implications:
            res.append(EliminateImplications())
        if args.mutator_negated_quant:
            res.append(NegatedQuantifiers())
        if args.mutator_remove_xor_constants:
            res.append(XORRemoveConstants())
    return res
StarcoderdataPython
161652
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
import time
import hashlib
import requests
from src import plugins
from lib.serialize import Json
from lib.log import Logger
from config import settings
from concurrent.futures import ThreadPoolExecutor


class AutoBase(object):
    """Base class for asset-collection clients (agent / ssh / salt modes).

    Subclasses implement process(); this class provides authenticated
    GET/POST access to the asset API and the common post callback.
    """

    def __init__(self):
        self.asset_api = settings.ASSET_API
        self.key = settings.KEY
        self.key_name = settings.AUTH_KEY_NAME

    def auth_key(self):
        """
        API authentication: build {auth_key_name: "md5(key|ts)|ts"}.
        :return: header dict
        """
        ha = hashlib.md5(self.key.encode('utf-8'))
        # Salt the digest with the current timestamp.
        time_span = time.time()
        ha.update(bytes("%s|%f" % (self.key, time_span), encoding='utf-8'))
        encryption = ha.hexdigest()
        result = "%s|%f" % (encryption, time_span)
        # e.g. {auth-key: 173b81be05cd997eeac31e2fa99eff1c|1492395689.336}
        return {self.key_name: result}

    def get_asset(self):
        """
        GET the list of not-yet-collected assets from the API.
        :return: e.g. {"data": [{"hostname": "c1.com"}, {"hostname": "c2.com"}],
                       "error": null, "message": null, "status": true}
        """
        try:
            headers = {}
            headers.update(self.auth_key())
            response = requests.get(
                url=self.asset_api,
                headers=headers
            )
        except Exception as e:
            # NOTE(review): on a network failure the exception object is kept
            # and .json() below will fail — consider handling it explicitly.
            response = e
        return response.json()

    def post_asset(self, msg, callback=None):
        """
        POST collected asset information to the API.
        :param msg: JSON-serialized asset payload
        :param callback: called as callback(status, response) when given
        :return: None
        """
        status = True
        try:
            headers = {}
            headers.update(self.auth_key())
            response = requests.post(
                url=self.asset_api,
                headers=headers,
                json=msg
            )
        except Exception as e:
            response = e
            status = False
        if callback:
            callback(status, response)

    def process(self):
        """
        Entry point; derived classes must override this method.
        :return: None
        """
        raise NotImplementedError('you must implement process method')

    def callback(self, status, response):
        """
        Callback invoked after posting an asset.
        :param status: whether the request succeeded
        :param response: response object on success, exception object on error
        :return: None
        """
        if not status:
            Logger().log(str(response), False)
            return
        ret = json.loads(response.text)
        if ret['code'] == 1000:
            Logger().log(ret['message'], True)
        else:
            Logger().log(ret['message'], False)


class AutoAgent(AutoBase):
    """Agent mode: collect the local server's info and report it."""

    def __init__(self):
        self.cert_file_path = settings.CERT_FILE_PATH
        super(AutoAgent, self).__init__()

    def load_local_cert(self):
        """
        Read the locally stored unique host identifier.
        :return: the identifier string, or None if absent/empty
        """
        if not os.path.exists(self.cert_file_path):
            return None
        with open(self.cert_file_path, mode='r') as f:
            data = f.read()
        if not data:
            return None
        cert = data.strip()
        return cert

    def write_local_cert(self, cert):
        """
        Persist the unique host identifier to the local cert file.
        :param cert: identifier to store
        :return: None
        """
        # BUGFIX: ensure the parent *directory* exists before writing;
        # the original called os.makedirs(os.path.basename(...)), which
        # creates a directory named after the file itself.
        cert_dir = os.path.dirname(self.cert_file_path)
        if cert_dir and not os.path.exists(cert_dir):
            os.makedirs(cert_dir)
        with open(settings.CERT_FILE_PATH, mode='w') as f:
            f.write(cert)

    def process(self):
        """
        Collect the current host's asset info.
        1. Take the hostname from the collected asset (cert_new).
        2. Take the hostname from the local cert file (cert_old).
           - If the cert file is empty this is a new asset: write cert_new
             into the file and send the data to the server.
           - If the two names differ, the local cert is the stable unique ID
             and overrides the collected hostname.
        :return: None
        """
        server_info = plugins.get_server_info()
        if not server_info.status:
            return
        local_cert = self.load_local_cert()
        if local_cert:
            if local_cert == server_info.data['hostname']:
                pass
            else:
                server_info.data['hostname'] = local_cert
        else:
            self.write_local_cert(server_info.data['hostname'])
        server_json = Json.dumps(server_info.data)
        self.post_asset(server_json, self.callback)


class AutoSSH(AutoBase):
    """SSH mode: fetch pending hostnames and collect each over SSH."""

    def process(self):
        """
        Fetch asset info for each pending hostname and send it to the API.
        :return: None
        """
        task = self.get_asset()
        if not task['status']:
            Logger().log(task['message'], False)
        pool = ThreadPoolExecutor(10)
        for item in task['data']:
            hostname = item['hostname']
            pool.submit(self.run, hostname)
        pool.shutdown(wait=True)

    def run(self, hostname):
        # Collect one host's info and post it.
        server_info = plugins.get_server_info(hostname)
        server_json = Json.dumps(server_info.data)
        self.post_asset(server_json, self.callback)


class AutoSalt(AutoBase):
    """Salt mode: fetch pending hostnames and collect each via salt."""

    def process(self):
        """
        Fetch asset info for each pending hostname and send it to the API.
        :return: e.g. {"data": [{"hostname": "c1.com"}, {"hostname": "c2.com"}],
                       "error": null, "message": null, "status": true}
        """
        task = self.get_asset()
        if not task['status']:
            Logger().log(task['message'], False)
        # Thread pool: at most 10 concurrent collections.
        pool = ThreadPoolExecutor(10)
        for item in task['data']:
            hostname = item['hostname']
            pool.submit(self.run, hostname)
        pool.shutdown(wait=True)

    def run(self, hostname):
        # Collect the named host's asset info, serialize it and post it.
        server_info = plugins.get_server_info(hostname)
        server_json = Json.dumps(server_info.data)
        self.post_asset(server_json, self.callback)
StarcoderdataPython
3294597
#!/opt/anaconda3/bin/python3.4
import sys
import re
import numpy as np

if len(sys.argv) != 2:
    print("exec <phylip>")
    sys.exit(0)

file = sys.argv[1]


def compute_dinuc(puz):
    """Count codon-position dinucleotide frequencies in a phylip alignment.

    Returns (dinuc, dinuc12, dinuc23, dinuc31, n_species, n_sites): three 4x4
    A/C/G/T frequency tables per codon-position pair plus their sum.
    """
    nuc = {"A": 0, "C": 1, "G": 2, "T": 3}
    with open(puz, "r") as handle:
        lines = handle.readlines()

    # First line holds the site count; every other line ends with a sequence.
    lines_clean = []
    Nsite_nuc = 0
    for index, line in enumerate(lines):
        if index != 0:
            lines_clean.append(line.split(" ")[-1])
        else:
            Nsite_nuc = line.split(" ")[-1].strip()

    Nsp = 0
    dinuc = np.zeros((4, 4))
    dinuc12 = np.zeros((4, 4))
    dinuc23 = np.zeros((4, 4))
    dinuc31 = np.zeros((4, 4))
    for line_clean in lines_clean:
        seq = line_clean.strip()
        Nsp += 1
        # Codon positions 1-2.
        for a, b in zip([seq[k] for k in range(0, len(seq), 3)],
                        [seq[k] for k in range(1, len(seq), 3)]):
            if a.upper() in nuc and b.upper() in nuc:
                dinuc12[nuc[a.upper()], nuc[b.upper()]] += 1
        # Codon positions 2-3.
        for a, b in zip([seq[k] for k in range(1, len(seq), 3)],
                        [seq[k] for k in range(2, len(seq), 3)]):
            if a.upper() in nuc and b.upper() in nuc:
                dinuc23[nuc[a.upper()], nuc[b.upper()]] += 1
        # Codon positions 3-1 (across the codon boundary).
        for a, b in zip([seq[k] for k in range(2, len(seq), 3)],
                        [seq[k] for k in range(3, len(seq), 3)]):
            if a.upper() in nuc and b.upper() in nuc:
                dinuc31[nuc[a.upper()], nuc[b.upper()]] += 1

    dinuc = dinuc12 + dinuc23 + dinuc31
    dinuc /= dinuc.sum()
    dinuc12 /= dinuc12.sum()
    dinuc23 /= dinuc23.sum()
    dinuc31 /= dinuc31.sum()
    return (dinuc, dinuc12, dinuc23, dinuc31, Nsp, Nsite_nuc)


def printdinuc(dinuc):
    """Print the 4x4 dinucleotide table rounded to three decimals."""
    nuc = ["A", "C", "G", "T"]
    for i in range(len(nuc)):
        for j in range(len(nuc)):
            print(nuc[i], nuc[j], round(dinuc[i, j], 3))


tup = compute_dinuc(file)
print("Ntaxa ", tup[4], " Nsite_nuc ", tup[5])
print("dinuc")
printdinuc(tup[0])
print("dinuc12")
printdinuc(tup[1])
print("dinuc21")
printdinuc(tup[2])
print("dinuc31")
printdinuc(tup[3])
StarcoderdataPython
3233498
<gh_stars>0 #!/usr/bin/env python from __future__ import print_function import argparse import os from six.moves import cPickle from six import text_type import tensorflow as tf from model import Model def sample(args, prime): with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f: saved_args = cPickle.load(f) with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f: chars, vocab = cPickle.load(f) # Use most frequent char if no prime is given if prime == '': prime = chars[0] model = Model(saved_args, training=False) with tf.Session() as sess: tf.global_variables_initializer().run() saver = tf.train.Saver(tf.global_variables()) ckpt = tf.train.get_checkpoint_state(args.save_dir) if ckpt and ckpt.model_checkpoint_path: saver.restore(sess, ckpt.model_checkpoint_path) with open('output/output.txt', 'w') as f: f.write(str(model.sample(sess, chars, vocab, args.n, prime, args.sample).encode('utf-8'))) # must be called from python file if __name__ == '__main__': # sample(args) print("Sample Initialized")
StarcoderdataPython
68442
import concurrent.futures import os import os.path as osp import pathlib import shutil import urllib.parse from .connection import download_image from .data import PageContent from .utils import escape_path OUTPUT_PATH = pathlib.Path('data/') OUTPUT_TEXT_FILENAME = 'article.txt' def save_text(post_text: str, output_file: pathlib.Path) -> None: with output_file.open('w') as file: file.write(post_text) def save_image(link: str, output_folder: pathlib.Path) -> None: image_file = download_image(link) url = urllib.parse.urlparse(link) output_file = output_folder / osp.basename(url.path) with output_file.open('wb') as file: shutil.copyfileobj(image_file, file) def export_content( executor: concurrent.futures.Executor, content: PageContent ) -> list[concurrent.futures.Future[None]]: content_folder = (OUTPUT_PATH / escape_path(content.title)).absolute() os.makedirs(content_folder, exist_ok=True) save_text_future = executor.submit( save_text, content.text, content_folder / OUTPUT_TEXT_FILENAME ) save_image_futures = [] for image in content.images: save_image_futures.append(executor.submit(save_image, image, content_folder)) return [save_text_future] + save_image_futures
StarcoderdataPython
1764765
<filename>models/trigger.py<gh_stars>1-10 from plugins.splunk.includes import splunk from core.models import trigger from core import logging, auth, db, helpers class _splunkSearch(trigger._trigger): splunkJob = str() splunkHost = str() splunkPort = int() splunkUsername = str() splunkPassword = str() insecure = bool() searchQuery = str() ca = str() def check(self): password = auth.getPasswordFromENC(self.splunkPassword) secure = not self.insecure s = splunk.splunkClass(self.splunkHost,self.splunkPort,self.splunkUsername,password,secure=secure,ca=self.ca) if not s: if logging.debugEnabled: logging.debug("Unable to authenticate to Splunk instance. actionID={0}".format(self._id),1) return jobID = s.startJob(self.searchQuery) if s.waitJob(jobID): pollResult = s.getJob(jobID) self.result["events"] = pollResult["results"] def setAttribute(self,attr,value,sessionData=None): if attr == "splunkPassword" and not value.startswith("ENC "): if db.fieldACLAccess(sessionData,self.acl,attr,accessType="write"): self.splunkPassword = "<PASSWORD>}".format(auth.getENCFromPassword(value)) return True return False return super(_splunkSearch, self).setAttribute(attr,value,sessionData)
StarcoderdataPython
1604035
<filename>xcalc/interpreter.py from ufl.corealg.traversal import traverse_unique_terminals from ufl.conditional import (LT, GT, LE, GE, EQ, NE, AndCondition, OrCondition, NotCondition) from dolfin import (Function, VectorFunctionSpace, interpolate, Expression, as_vector, Constant, as_matrix) import numpy as np import ufl from itertools import imap, repeat, izip import operator import timeseries import operators from clement import clement_interpolate from utils import * def Eval(expr): ''' This intepreter translates expr into a function object or a number. Expr is defined via a subset of UFL language. Letting f, g be functions in V Eval(op(f, g)) is a function in V with coefs given by (op(coefs(f), coef(g))). ''' return Interpreter.eval(expr) class Interpreter(object): ''' This intepreter translates expr into a function object or a number. Expr is defined via a subset of UFL language. Letting f, g be functions in V Eval(op(f, g)) is a function in V with coefs given by (op(coefs(f), coef(g))). 
''' # Expression which when evaluated end up in the same space as the arguments # or require no reshaping of arraysbefore numpy is applied no_reshape_type = { ufl.algebra.Sum: np.add, ufl.algebra.Abs: np.abs, ufl.algebra.Division: np.divide, ufl.algebra.Product: np.multiply, ufl.algebra.Power: np.power, ufl.mathfunctions.Sin: np.sin, ufl.mathfunctions.Cos: np.cos, ufl.mathfunctions.Sqrt: np.sqrt, ufl.mathfunctions.Exp: np.exp, ufl.mathfunctions.Ln: np.log, ufl.mathfunctions.Tan: np.tan, ufl.mathfunctions.Sinh: np.sinh, ufl.mathfunctions.Cosh: np.cosh, ufl.mathfunctions.Tanh: np.tanh, ufl.mathfunctions.Asin: np.arcsin, ufl.mathfunctions.Acos: np.arccos, ufl.mathfunctions.Atan: np.arctan, ufl.mathfunctions.Atan2: np.arctan2, ufl.operators.MinValue: np.minimum, ufl.operators.MaxValue: np.maximum, LT: lambda x, y: np.array(x < y, dtype=float), GT: lambda x, y: np.array(x > y, dtype=float), LE: lambda x, y: np.array(x <= y, dtype=float), GE: lambda x, y: np.array(x >= y, dtype=float), EQ: lambda x, y: np.array(x == y, dtype=float), NE: lambda x, y: np.array(x != y, dtype=float), AndCondition: lambda x, y: np.array(np.logical_and(x, y), dtype=float), OrCondition: lambda x, y: np.array(np.logical_or(x, y), dtype=float), NotCondition: lambda x: np.array(np.logical_not(x), dtype=float), ufl.operators.Conditional: lambda pred, true, false: np.where(pred, true, false) } # Expression which when evaluated end up in general in different space than # the arguments/require manipulations before numpy is applied reshape_type = { ufl.tensoralgebra.Inverse: np.linalg.inv, ufl.tensoralgebra.Transposed: np.transpose, ufl.tensoralgebra.Sym: lambda A: 0.5*(A + A.T), ufl.tensoralgebra.Skew: lambda A: 0.5*(A - A.T), ufl.tensoralgebra.Deviatoric: lambda A: A - np.trace(A)*np.eye(len(A))*(1./len(A)), ufl.tensoralgebra.Cofactor: lambda A: np.linalg.det(A)*(np.linalg.inv(A)).T, ufl.tensoralgebra.Determinant: np.linalg.det, ufl.tensoralgebra.Trace: np.trace, ufl.tensoralgebra.Dot: np.dot, 
ufl.tensoralgebra.Cross: np.cross, ufl.tensoralgebra.Outer: np.outer, ufl.tensoralgebra.Inner: np.inner, ufl.tensors.ListTensor: lambda *x: np.hstack(x) } # FIXME: ListTensor(foo, indices=None) <= we have no support for indices # Node to be handled by Clement interpolation diff_type = (ufl.differentiation.Grad, ufl.differentiation.Div, ufl.differentiation.Curl, ufl.differentiation.NablaGrad, ufl.differentiation.NablaDiv) # Other's where Eval works terminal_type = (Function, int, float) value_type = (ufl.algebra.ScalarValue, ufl.algebra.IntValue) index_type = (ufl.indexed.Indexed, ) compose_type = (ufl.tensors.ComponentTensor, ) @staticmethod def eval(expr): # Guys with their own logic for collapsing into functions # Okay we combine 2 design patters, LazyNodes do it themselves # series rely on the interpreter if isinstance(expr, operators.LazyNode): return expr.evaluate() # For series we eval each node and make a series of functions # NOTE: intersept here because TempSeries is a terminal type if isinstance(expr, timeseries.TempSeries): return timeseries.TempSeries(zip(map(Interpreter.eval, expr), expr.times)) # Terminals/base cases (also TempSeries) -> identity if isinstance(expr, Interpreter.terminal_type): return expr # To number if isinstance(expr, Interpreter.value_type): return expr.value() # To number if isinstance(expr, Constant): return float(expr) # To number if isinstance(expr, ufl.constantvalue.Zero): return 0 # Recast spatial coordinate as CG1 functions if isinstance(expr, ufl.geometry.SpatialCoordinate): mesh = expr.ufl_domain().ufl_cargo() r = Expression(('x[0]', 'x[1]', 'x[2]')[:mesh.geometry().dim()], degree=1) return interpolate(r, VectorFunctionSpace(mesh, 'CG', 1)) # Okay: now we have expr with arguments. 
If this expression involves # times series then all the non number arguments should be compatible # time series terminals = filter(lambda t: isinstance(t, Function), traverse_unique_terminals(expr)) # Don't mix function and terminals series = filter(lambda t: isinstance(t, timeseries.TempSeries), terminals) assert len(series) == len(terminals) or len(series) == 0, map(type, terminals) # For series, we apply op to functions and make new series if series: return series_rule(expr) expr_type = type(expr) # Require reshaping and all args are functions if expr_type in Interpreter.reshape_type: return numpy_reshaped(expr, op=Interpreter.reshape_type[expr_type]) # Clement if expr_type in Interpreter.diff_type: # NOTE: Clement is its own thing-it does not use this interpreter # for subexpression evaluation return clement_interpolate(expr) # Define tensor by componenents if isinstance(expr, Interpreter.compose_type): return component_tensor_rule(expr) # A indexed by FixedIndex or Index if isinstance(expr, Interpreter.index_type): return indexed_rule(expr) # No reshaping neeed op = Interpreter.no_reshape_type[expr_type] # Throw if we don't support this args = map(Interpreter.eval, expr.ufl_operands) # Manipulate coefs of arguments to get coefs of the expression coefs = map(coefs_of, args) V_coefs = op(*coefs) # Make that function V = space_of(args) return make_function(V, V_coefs) def numpy_reshaped(expr, op): '''Get the coefs by applying the numpy op to reshaped argument coefficients''' args = map(Interpreter.eval, expr.ufl_operands) # Exception to the rules are some ops with scalar args if isinstance(expr, (ufl.tensoralgebra.Inner, ufl.tensoralgebra.Dot)): if all(arg.ufl_shape == () for arg in args): return Interpreter.eval(args[0]*args[1]) # Construct by numpy with op applied args of expr and reshaping as shape_res return numpy_op_foo(args, op=op, shape_res=expr.ufl_shape) def indexed_rule(expr): '''Function representing f[index] so we end up with scalar''' shape_res = 
expr.ufl_shape assert isinstance(expr, ufl.indexed.Indexed) f, index = expr.ufl_operands # What to index f = Interpreter.eval(f) # How to index shape = f.ufl_shape indices = tuple(int(index) if isinstance(index, ufl.indexed.FixedIndex) else slice(l) for l, index in zip(shape, index.indices())) # This could be implemented more efficiently (see earilier commits) # However, below is a more ideas which is that op is just a getitem op = lambda A, i=indices: A[i] return numpy_op_foo((f, ), op=op, shape_res=shape_res) def series_rule(expr): '''Eval expression where the terminals are time series''' foos = filter(lambda f: isinstance(f, Function), traverse_unique_terminals(expr)) # Make first sure that the series are compatible in the sense of having same time # interval times = timeseries.common_interval(foos) assert len(times) # Compatibility of spaces common_sub_element([f.function_space() for f in foos]) # The idea now is to propagate the expression by which I mean that # we grow the expr using nodes in the series def unpack(expr): '''expr -> iterable of expr''' return (apply(type(expr), sum(args, ())) for args in expand(expr.ufl_operands)) def expand(operands): iterators = [] for o in operands: if isinstance(o, timeseries.TempSeries): iterators.append(((f, ) for f in o)) # Nonseries terminal elif not o.ufl_operands: iterators.append(((f, ) for f in repeat(o))) # An expression else: iterators.append(((f, ) for f in unpack(o))) return izip(*iterators) nodes = unpack(expr) # A series of new nodes -> series of functions return Interpreter.eval(timeseries.TempSeries(zip(nodes, times))) def component_tensor_rule(expr): '''Tensors whose components are given by computation of some sort.''' f, free_indices = expr.ufl_operands # Want to build vectors or matrices assert len(free_indices) == 1 or len(free_indices) == 2 # Simple rules where the eval node is obtained just by substitution if not isinstance(f, ufl.indexsum.IndexSum): # Vector from 2*Constant((1, 2)) if 
len(free_indices) == 1: index = free_indices[0] f = tuple(replace(f, index, FixedIndex(i)) for i in range(expr.ufl_shape[0])) return Interpreter.eval(as_vector(f)) # Matrix from 2*Costant(((1, 2), (3, 4))) if len(free_indices) == 2: mat = [] for i in range(expr.ufl_shape[0]): f_i = replace(f, free_indices[0], FixedIndex(i)) row = [] for j in range(expr.ufl_shape[1]): row.append(replace(f_i, free_indices[1], FixedIndex(j))) mat.append(row) return Interpreter.eval(as_matrix(mat)) # The idea now is to to build the expression which represents the sum # needed to compute the component, i.e. explicit transformation of the # IndexSum node. Computing with scalars this way is not very efficient -> # FIXME: drop to numpy? assert isinstance(f, ufl.indexsum.IndexSum) summand, sum_indices = f.ufl_operands assert len(sum_indices) == 1 # FIXME: is this necessary # Be explicit about the sum - have free indices left to be fill # in by that component sum_expr = sum(replace(summand, sum_indices[0], FixedIndex(j)) for j in range(f.dimension())) # Now build the components if len(free_indices) == 1: # Sub for the free_i expr = as_vector(tuple(replace(sum_expr, free_indices[0], FixedIndex(i)) for i in range(f.ufl_index_dimensions[0]))) return Interpreter.eval(expr) mat = [] for i in range(f.ufl_index_dimensions[0]): # Sub i sub_i = replace(sum_expr, free_indices[0], FixedIndex(i)) row = [] for j in range(f.ufl_index_dimensions[1]): # Sub j row.append(replace(sub_i, free_indices[1], FixedIndex(j))) mat.append(row) expr = as_matrix(mat) return Interpreter.eval(expr)
StarcoderdataPython
3302597
<filename>tests/tensorflow2/utils.py<gh_stars>0 # Standard Library # Third Party import tensorflow.compat.v2 as tf from packaging import version def is_tf_2_2(): """ TF 2.0 returns ['accuracy', 'batch', 'size'] as metric collections. where 'batch' is the batch number and size is the batch size. But TF 2.2 returns ['accuracy', 'batch'] in eager mode, reducing the total number of tensor_names emitted by 1. :return: bool """ if version.parse(tf.__version__) >= version.parse("2.2.0"): return True return False def is_tf_2_3(): if version.parse(tf.__version__) == version.parse("2.3.0"): return True return False
StarcoderdataPython
71322
import logging import os import re import sys from enum import Enum import click from Bio import SeqIO from Bio.Seq import Seq from tqdm import tqdm tqdm.pandas() import pandas as pd import numpy as np logger = logging.getLogger(__name__) sys.path.append("..") from utils.clustering_utils import ClusteringUtils class SimilarityComputationMethod(Enum): CDHIT = 0 MSA = 1 PAIRWISE = 2 def clean_sequence_data_from_outliers( record: pd.Series, input_path: str, output_path: str ): """ :param record: pandas row representative of a cluster of species sequences :param input_path: path to the aligned sequences that include outliers :param output_path: path to create in aligned sequences without the outliers (without re-aligning - just removing outliers and then cleainnig the induced alignment from only gap positions) :return: """ if pd.notna(record.relevant_genome_accessions): selected_accessions = record.relevant_genome_accessions.split(";;") input_sequences = list(SeqIO.parse(input_path, format="fasta")) relevant_sequences = [ seq for seq in input_sequences if seq.id in selected_accessions ] # filter out all gap positions with trimal SeqIO.write(relevant_sequences, output_path, format="fasta") cmd = f"trimal -in {output_path} -out {output_path} -noallgaps" res = os.system(cmd) if res != 0: logger.error(f"trimal execution on {output_path} failed and so only-gap positions are apparent in the data") def compute_sequence_similarities_across_species( species_info: pd.DataFrame, seq_data_dir: str, output_path: str, use_sequence_directly: bool = True, ): """ :param species_info: data with the names of viruses corresponding to each viral species and the number of available sequences :param seq_data_dir: directory holding fasta files of collected sequences per species to compute similarity based on :param output_path: path to write the output dataframe to :param use_sequence_directly: indicator weather outliers should be removed based on the sequence data directly or based on their 
pairwise distances :return: """ relevant_species_info = species_info.loc[ species_info.virus_species_name.isin( species_info.virus_species_name.unique() ) ] if ( relevant_species_info.shape[0] > 0 and relevant_species_info["#sequences"].values[0] > 0 ): logger.info( f"computing sequence similarities across {len(species_info.virus_species_name)} species" ) intermediate_output_path = output_path.replace(".", "_intermediate.") if ( os.path.exists(intermediate_output_path) and relevant_species_info["#sequences"].values[0] > 2 ): relevant_species_info = pd.read_csv(intermediate_output_path) else: if relevant_species_info.shape[0] > 0: logger.info( f"computing sequence similarity value for species {','.join(relevant_species_info.virus_species_name.unique())}" ) relevant_species_info = compute_entries_sequence_similarities( df=relevant_species_info, seq_data_dir=seq_data_dir, output_path=output_path.replace(".", "_intermediate."), ) if ( "relevant_genome_accessions" not in relevant_species_info.columns or "#relevant_sequences" not in relevant_species_info.columns ) or ( relevant_species_info.loc[ relevant_species_info.relevant_genome_accessions.isna() ].shape[0] > 0 ): logger.info( f"computing outlier sequences for species {relevant_species_info.virus_species_name.unique()}" ) relevant_species_info = remove_outliers( df=relevant_species_info, similarities_data_dir=seq_data_dir, output_path=output_path.replace(".", "_intermediate."), use_sequence_directly=use_sequence_directly, ) # create new alignments without the outliers new_seq_data_dir = f"{seq_data_dir}/no_outliers/" os.makedirs(new_seq_data_dir, exist_ok=True) relevant_species_info.loc[relevant_species_info["#sequences"] > 1].apply( lambda record: clean_sequence_data_from_outliers( record=record, input_path=f"{seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', record.virus_species_name)}_aligned.fasta", output_path=f"{new_seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', record.virus_species_name)}_aligned.fasta", ), axis=1, ) 
sequence_similarity_fields = [ "#sequences", "mean_sequence_similarity", "min_sequence_similarity", "max_sequence_similarity", "med_sequence_similarity", "relevant_genome_accessions", "#relevant_sequences", ] relevant_species_info["#relevant_sequences"] = relevant_species_info[ "relevant_genome_accessions" ].apply(lambda x: x.count(";;") + 1 if pd.notna(x) else np.nan) species_info.set_index("virus_species_name", inplace=True) for field in sequence_similarity_fields: species_info[field] = np.nan species_info[field].fillna( value=relevant_species_info.set_index("virus_species_name")[ field ].to_dict(), inplace=True, ) species_info.reset_index(inplace=True) else: species_info["#sequences"] = 0 species_info["#relevant_sequences"] = 0 species_info.to_csv(output_path, index=False) logger.info(f"wrote associations data clustered by virus species to {output_path}") def compute_entries_sequence_similarities( df: pd.DataFrame, seq_data_dir: str, output_path: str, similarity_computation_method: SimilarityComputationMethod = SimilarityComputationMethod.MSA, ) -> pd.DataFrame: """ :param df: dataframe with association entries :param seq_data_dir: directory with fasta file corresponding ot each species with its corresponding collected sequences :param output_path: path to write the intermediate result to :param similarity_computation_method: indicator of the method that should be employed to compute the similarity values :param mem_limit: RAM in MB that should be allocated to cdhit :return: """ pid = os.getpid() tqdm.pandas(desc="worker #{}".format(pid), position=pid) new_df = df new_df[ [ "mean_sequence_similarity", "min_sequence_similarity", "max_sequence_similarity", "med_sequence_similarity", ] ] = np.nan if new_df.shape[0] > 0: logger.info( f"computing sequence similarities for #species {len(new_df.virus_species_name.values)} that consists of {new_df['#sequences'].values} sequences respectively" ) func = ( ClusteringUtils.get_sequences_similarity_with_pairwise_alignments 
if similarity_computation_method == SimilarityComputationMethod.PAIRWISE else ( ClusteringUtils.get_sequences_similarity_with_cdhit if similarity_computation_method == SimilarityComputationMethod.CDHIT else ClusteringUtils.get_sequence_similarity_with_multiple_alignment ) ) new_df[ [ "mean_sequence_similarity", "min_sequence_similarity", "max_sequence_similarity", "med_sequence_similarity", ] ] = new_df.progress_apply( lambda x: [1, 1, 1, 1] if x["#sequences"] == 1 else func( sequence_data_path=f"{seq_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', x.virus_species_name)}.fasta", ), axis=1, result_type="expand", ) new_df.to_csv(output_path, index=False) return new_df def remove_outliers( df: pd.DataFrame, similarities_data_dir: str, output_path: str, use_sequence_directly: bool = False, ) -> pd.DataFrame: """ :param df: dataframe with association entries :param similarities_data_dir: directory with similarity dataframes corresponding ot each species with its corresponding collected sequences :param output_path: path to write the intermediate result to :param use_sequence_directly: indicator weather outlier detection should use the sequence data directly or use the pairwise distances etween sequences as features :return: """ pid = os.getpid() tqdm.pandas(desc="worker #{}".format(pid), position=pid) if not os.path.exists(output_path) or ( os.path.exists(output_path) and "relevant_genome_accessions" not in pd.read_csv(output_path).columns ): new_df = df new_df["relevant_genome_accessions"] = np.nan if new_df.shape[0] > 0: logger.info( f"computing sequence outliers for for species {list(new_df.virus_species_name.unique())[0]} that consists of {','.join(list([str(i) for i in new_df['#sequences'].values]))} sequences respectively" ) func = ( ClusteringUtils.get_relevant_accessions_using_sequence_data_directly if use_sequence_directly else ClusteringUtils.get_relevant_accessions_using_pairwise_distances ) input_path_suffix = ( "_aligned.fasta" if use_sequence_directly else 
"_similarity_values.csv" ) new_df.loc[ new_df["#sequences"] > 1, "relevant_genome_accessions" ] = new_df.loc[ new_df["#sequences"] > 1, "virus_species_name" ].progress_apply( lambda x: func( data_path=f"{similarities_data_dir}/{re.sub('[^0-9a-zA-Z]+', '_', x)}{input_path_suffix}" ) ) new_df["#relevant_sequences"] = new_df["relevant_genome_accessions"].apply( lambda x: x.count(";;") + 1 if pd.notna(x) else np.nan ) new_df.to_csv(output_path, index=False) else: new_df = pd.read_csv(output_path) return new_df @click.command() @click.option( "--species_info_path", type=click.Path(exists=True, file_okay=True, readable=True), help="path to dataframe holding the names of taxa under each viral species", ) @click.option( "--sequence_data_dir", type=click.Path(exists=False, file_okay=True, readable=True), help="directory holding sequence data files per species with their collected sequences", ) @click.option( "--log_path", type=click.Path(exists=False, file_okay=True, readable=True), help="path holding the logging of the script", ) @click.option( "--df_output_path", type=click.Path(exists=False, file_okay=True, readable=True), help="path holding the output dataframe to write", ) @click.option( "--use_sequence_directly", type=click.BOOL, help="indicator weather outliers should be removed based on sequence data directly or based on pairwise distances", required=False, default=False, ) def compute_seq_similarities( species_info_path: click.Path, sequence_data_dir: click.Path, log_path: click.Path, df_output_path: click.Path, use_sequence_directly: bool, ): # initialize the logger logging.basicConfig( level=logging.INFO, format="%(asctime)s module: %(module)s function: %(funcName)s line: %(lineno)d %(message)s", handlers=[ logging.StreamHandler(sys.stdout), logging.FileHandler(str(log_path)), ], force=True, # run over root logger settings to enable simultaneous writing to both stdout and file handler ) # process input data species_info = pd.read_csv(species_info_path) # compute 
sequence similarities compute_sequence_similarities_across_species( species_info=species_info, seq_data_dir=str(sequence_data_dir), output_path=str(df_output_path), use_sequence_directly=use_sequence_directly, ) if __name__ == "__main__": compute_seq_similarities()
StarcoderdataPython
3269352
<filename>products/migrations/0004_auto_20151124_1628.py<gh_stars>0 # -*- coding: utf-8 -*- # Generated by Django 1.9c1 on 2015-11-24 16:28 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0003_auto_20151124_1625'), ] operations = [ migrations.RemoveField( model_name='tooth', name='diameter', ), migrations.AddField( model_name='indicator', name='diameter', field=models.FloatField(default=0.0, verbose_name='Durchmesser'), ), migrations.AddField( model_name='indicator', name='hss', field=models.BooleanField(default=False, verbose_name='HSS?'), ), migrations.AlterField( model_name='indicator', name='teeth', field=models.ManyToManyField(blank=True, to='products.Tooth'), ), ]
StarcoderdataPython
55328
from asgard.app import app from asgard.handlers import http app.run()
StarcoderdataPython
1689080
""" Blink1 class/lib for blink1-tool and device see tests.py for examples """ import os import time class Blink1(): """Blink1""" blink1_tool_file_path = 'lib/blink1-tool' quite_mode = True command_output = '' def blink(self, number_of_blinks, rgb_color=None, hex_color=None): if rgb_color or hex_color: if hex_color: rgb_color = self._hex_to_rgb(str(hex_color)) rgb_string = self._construct_rgb_string(rgb_color) self._call_blink1_tool('--rgb', rgb_string, '--blink', str(number_of_blinks)) else: self._call_blink1_tool('--blink', str(number_of_blinks)) def random(self, number_of_blinks): self._call_blink1_tool('--random', str(number_of_blinks)) def rgb(self, rgb_color=None, hex_color=None): ''' rgb_color should be a tuple or list of strings or ints i.e. ('0xff', '0', '00') or [255,'0', '255] hex_color should be a string i.e. '#FF0000' ''' if hex_color: rgb_color = self._hex_to_rgb(str(hex_color)) if rgb_color: rgb_string = self._construct_rgb_string(rgb_color) self._call_blink1_tool('--rgb', rgb_string) def on(self): self._call_blink1_tool('--on') def off(self): self._call_blink1_tool('--off') def red(self): self._call_blink1_tool('--red') def green(self): self._call_blink1_tool('--green') def blue(self): self._call_blink1_tool('--blue') def orange(self): self.rgb((255, 150, 0)) def delay(self, delay=5): time.sleep(delay) def _call_blink1_tool(self, *args): if all(isinstance(item, basestring) for item in args): arg_list = list(args) arg_list.insert(0, self.blink1_tool_file_path) if self.quite_mode: arg_list.append('-q') arg_string = '' for arg in arg_list: arg_string += arg + ' ' child = os.popen(arg_string) self.command_output = child.read() err = child.close() if err: raise RuntimeError('%s failed with exit code %d' % (arg_string, err)) else: raise TypeError('arguments must be strings') def _hex_to_rgb(self, value): value = value.lstrip('#') lv = len(value) return tuple(int(value[i:i + lv / 3], 16) for i in range(0, lv, lv / 3)) def _construct_rgb_string(self, 
rgb_color): if len(rgb_color) != 3: raise Exception('rgb_color is a tuple of three ints or strings') return '%s,%s,%s' % (rgb_color[0], rgb_color[1], rgb_color[2]) def _color_scale(self, value, domain, color_range): '''like d3.scale.linear().domain([30,90]).range(['red', 'white', 'green']) domain: tuple or list of length 2 color_range: any list of discrete vales to be mapped to linearly does not interpolate colors, just bins like a histogram ''' if value >= domain[1]: return color_range[-1] if value <= domain[0]: return color_range[0] bin_width = (domain[1]-domain[0])/len(color_range) for i in range(len(color_range)): if value < domain[0] + (i+1) * bin_width: return color_range[i]
StarcoderdataPython
1693730
<reponame>RDC4Smart-Mobility/UniSim # -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import, unicode_literals from unisim.RESTserver import app, Flask_DB from datetime import datetime import sys if __name__ == "__main__": Flask_DB.dbpath = sys.argv[1] Flask_DB.connect() app.run(host='0.0.0.0') # host='0.0.0.0': allow connecting from external client
StarcoderdataPython
3364965
<reponame>sammylev/Capstone from flask_script import Manager from flask_migrate import Migrate, MigrateCommand from app import APP from models import db migrate = Migrate(APP, db) manager = Manager(APP) manager.add_command('db', MigrateCommand) if __name__ == '__main__': manager.run()
StarcoderdataPython
136425
<filename>osrefl/theory/BAGISANS.py from numpy import * import numpy as np class bornWavefunction: def __init__(self, kz, SLDArray, sigmax = 1e6, sigmaz=1e3): if not isinstance(kz, ndarray): kz = array([kz], dtype=complex) #kz = array([kz]).flatten().astype(complex) self.kz = kz self.sigmax = sigmax self.sigmaz = sigmaz kzlen = kz.shape sldlen = len(SLDArray) self.SLDArray = SLDArray self.r = zeros(kzlen, dtype=complex) self.r = self.calc_r(self.kz) def calc_r(self, kz): qz = kz*2.0 sld = self.SLDArray r = zeros_like(kz) zs = cumsum(sld, axis=0) for i in range(len(sld)-1): for j in range(len(sld)-1): zi = zs[i][1] # interface i location zj = zs[j][1] dsldi = sld[i+1][0] - sld[i][0] dsldj = sld[j+1][0] - sld[j][0] r += 16*pi**2/qz**4*dsldi*dsldj*cos(qz*(zj-zi))*exp(-(zj-zi)**2/(self.sigmaz**2)) return sqrt(r) def calc_r_refract(self, kz): qz = kz*2.0 sld = self.SLDArray rho = sld[:,0] #qz_l_sq = 4.0*((kz**2) - 4*pi*rho[:,None]) qz_l = sqrt(4.0*((kz**2) - 4*pi*rho[:,None])) #print "qz_l_sq shape:", qz_l_sq.shape r = zeros_like(kz) zs = cumsum(sld, axis=0) #qzs = sqrt(qz_l_sq) * (sld[:,1,None]) qzs = qz_l * (sld[:,1,None]) qzs = cumsum(qzs, axis=0) for i in range(len(sld)-1): for j in range(len(sld)-1): qzi = qzs[i] qzj = qzs[j] zi = zs[i][1] # interface i location zj = zs[j][1] #dsldi = sld[i+1][0]/qz_l_sq[i+1] - sld[i][0]/qz_l_sq[i] #dsldj = sld[j+1][0]/qz_l_sq[j+1] - sld[j][0]/qz_l_sq[j] dsldi = sld[i+1][0]/qz_l[i+1] - sld[i][0]/qz_l[i] dsldj = sld[j+1][0]/qz_l[j+1] - sld[j][0]/qz_l[j] #print dsldi.shape, qzj.shape, zj.shape #print i,j,qzi-qzj, qz*(zi-zj) r += 16*pi**2/qz**2*dsldi*dsldj*cos(qzj-qzi)*exp(-(zj-zi)**2/(self.sigmaz**2)) return sqrt(r)
StarcoderdataPython
1686430
""" Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. The Universal Permissive License (UPL), Version 1.0 """ import os from shutil import copy2 import unittest from java.io import File from java.lang import String from java.lang import System from oracle.weblogic.deploy.encrypt import EncryptionUtils from wlsdeploy.util.cla_utils import CommandLineArgUtil import encrypt import wlsdeploy.util.variables as variables_helper from wlsdeploy.util.model_translator import FileToPython class EncryptionTestCase(unittest.TestCase): _execution_dir = '../../unit-tests/' _resources_dir = '../../test-classes/' _oracle_home = None _src_model_file_wo_variables = os.path.join(_resources_dir, 'encryption-test.yaml') _src_model_file_w_variables = os.path.join(_resources_dir, 'encryption-test-variables.yaml') _src_variable_file = os.path.join(_resources_dir, 'encryption-test-variables.properties') _target_model_test1 = os.path.join(_execution_dir, 'model-test1.yaml') _target_model_test2 = os.path.join(_execution_dir, 'model-test2.yaml') _target_model_test3 = os.path.join(_execution_dir, 'model-test3.yaml') _target_variables_test3 = os.path.join(_execution_dir, 'variables-test3.properties') _passphrase = '<PASSWORD>' _unencrypted_password = '<PASSWORD>' def setUp(self): if not os.path.exists(self._execution_dir): os.makedirs(self._execution_dir) wlst_dir = File(System.getProperty('unit-test-wlst-dir')) self._oracle_home = wlst_dir.getParentFile().getParentFile().getParentFile().getCanonicalPath() return def testDirectEncryption(self): copy2(self._src_model_file_wo_variables, self._target_model_test1) args = list() args.append('encrypt') # dummy arg for args[0] to get arg padding right args.append(CommandLineArgUtil.ORACLE_HOME_SWITCH) args.append(self._oracle_home) args.append(CommandLineArgUtil.MODEL_FILE_SWITCH) args.append(self._target_model_test1) args.append(CommandLineArgUtil.PASSPHRASE_SWITCH) args.append(self._passphrase) exit_code = 
encrypt._process_request(args) self.assertEquals(exit_code, 0) model = FileToPython(self._target_model_test1).parse() passphrase_array = String(self._passphrase).toCharArray() admin_pass = model['domainInfo']['AdminPassword'] self.assertEquals(admin_pass.startswith('{AES}'), True) _decrypted_admin_pass = EncryptionUtils.decryptString(admin_pass, passphrase_array) self.assertEquals(str(String(_decrypted_admin_pass)), self._unencrypted_password) nm_pass = model['topology']['SecurityConfiguration']['NodeManagerPasswordEncrypted'] self.assertEquals(nm_pass.startswith('{AES}'), True) _decrypted_nm_pass = EncryptionUtils.decryptString(nm_pass, passphrase_array) self.assertEquals(str(String(_decrypted_nm_pass)), self._unencrypted_password) ds1_pass = model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds1_pass.startswith('{AES}'), True) _decrypted_ds1_pass = EncryptionUtils.decryptString(ds1_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds1_pass)), self._unencrypted_password) ons_pass = \ model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCOracleParams']['OnsWalletPasswordEncrypted'] self.assertEquals(ons_pass.startswith('{AES}'), True) _decrypted_ons_pass = EncryptionUtils.decryptString(ons_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ons_pass)), self._unencrypted_password) ds2_pass = model['resources']['JDBCSystemResource']['Generic2']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds2_pass.startswith('{AES}'), True) _decrypted_ds2_pass = EncryptionUtils.decryptString(ds2_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds2_pass)), self._unencrypted_password) return def testDirectEncryptionVariablesNoOverwrite(self): copy2(self._src_model_file_w_variables, self._target_model_test2) args = list() args.append('encrypt') # dummy arg for args[0] to get arg padding right 
args.append(CommandLineArgUtil.ORACLE_HOME_SWITCH) args.append(self._oracle_home) args.append(CommandLineArgUtil.MODEL_FILE_SWITCH) args.append(self._target_model_test2) args.append(CommandLineArgUtil.PASSPHRASE_SWITCH) args.append(self._passphrase) exit_code = encrypt._process_request(args) self.assertEquals(exit_code, 0) model = FileToPython(self._target_model_test2).parse() passphrase_array = String(self._passphrase).toCharArray() admin_pass = model['domainInfo']['AdminPassword'] self.assertNotEquals(admin_pass.startswith('{AES}'), True) nm_pass = model['topology']['SecurityConfiguration']['NodeManagerPasswordEncrypted'] self.assertNotEquals(nm_pass.startswith('{AES}'), True) ds1_pass = model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds1_pass.startswith('{AES}'), True) _decrypted_ds1_pass = EncryptionUtils.decryptString(ds1_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds1_pass)), self._unencrypted_password) ons_pass = \ model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCOracleParams']['OnsWalletPasswordEncrypted'] self.assertNotEquals(ons_pass.startswith('{AES}'), True) ds2_pass = model['resources']['JDBCSystemResource']['Generic2']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds2_pass.startswith('{AES}'), True) _decrypted_ds2_pass = EncryptionUtils.decryptString(ds2_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds2_pass)), self._unencrypted_password) return def testIndirectEncryptionVariables(self): copy2(self._src_model_file_w_variables, self._target_model_test3) copy2(self._src_variable_file, self._target_variables_test3) args = list() args.append('encrypt') # dummy arg for args[0] to get arg padding right args.append(CommandLineArgUtil.ORACLE_HOME_SWITCH) args.append(self._oracle_home) args.append(CommandLineArgUtil.MODEL_FILE_SWITCH) args.append(self._target_model_test3) 
args.append(CommandLineArgUtil.VARIABLE_FILE_SWITCH) args.append(self._target_variables_test3) args.append(CommandLineArgUtil.PASSPHRASE_SWITCH) args.append(self._passphrase) exit_code = encrypt._process_request(args) self.assertEquals(exit_code, 0) model = FileToPython(self._target_model_test3).parse() variables = variables_helper.load_variables(self._target_variables_test3) passphrase_array = String(self._passphrase).toCharArray() admin_pass = model['domainInfo']['AdminPassword'] self.assertNotEquals(admin_pass.startswith('{AES}'), True) admin_pass = variables['admin.password'] self.assertEquals(admin_pass.startswith('{AES}'), True) _decrypted_admin_pass = EncryptionUtils.decryptString(admin_pass, passphrase_array) self.assertEquals(str(String(_decrypted_admin_pass)), self._unencrypted_password) nm_pass = model['topology']['SecurityConfiguration']['NodeManagerPasswordEncrypted'] self.assertNotEquals(nm_pass.startswith('{AES}'), True) nm_pass = variables['nm.password'] self.assertEquals(nm_pass.startswith('{AES}'), True) _decrypted_nm_pass = EncryptionUtils.decryptString(nm_pass, passphrase_array) self.assertEquals(str(String(_decrypted_nm_pass)), self._unencrypted_password) ds1_pass = model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds1_pass.startswith('{AES}'), True) _decrypted_ds1_pass = EncryptionUtils.decryptString(ds1_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds1_pass)), self._unencrypted_password) ons_pass = \ model['resources']['JDBCSystemResource']['Generic1']['JdbcResource']['JDBCOracleParams']['OnsWalletPasswordEncrypted'] self.assertNotEquals(ons_pass.startswith('{AES}'), True) ons_pass = variables['slc05til.ons.pass'] self.assertEquals(ons_pass.startswith('{AES}'), True) _decrypted_ons_pass = EncryptionUtils.decryptString(ons_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ons_pass)), self._unencrypted_password) ds2_pass = 
model['resources']['JDBCSystemResource']['Generic2']['JdbcResource']['JDBCDriverParams']['PasswordEncrypted'] self.assertEquals(ds2_pass.startswith('{AES}'), True) _decrypted_ds2_pass = EncryptionUtils.decryptString(ds2_pass, passphrase_array) self.assertEquals(str(String(_decrypted_ds2_pass)), self._unencrypted_password) return
StarcoderdataPython
3358407
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   1_request_method.py
@Time    :   2021-02-23
@Author  :   EvilRecluse
@Contact :   https://github.com/RecluseXU
@Desc    :   Demonstrates the common HTTP verbs: GET, POST, PUT, DELETE, HEAD, OPTIONS
'''

# here put the import lib
import httpx

# Fire one request per HTTP verb against httpbin.org.  Each call returns an
# httpx.Response; the earlier responses are deliberately discarded because this
# file is a usage demo, not a functional client.
response = httpx.get('https://httpbin.org/get')
response = httpx.post('https://httpbin.org/post', data={'key': 'value'})
response = httpx.put('https://httpbin.org/put', data={'key': 'value'})
response = httpx.delete('https://httpbin.org/delete')
response = httpx.head('https://httpbin.org/get')
response = httpx.options('https://httpbin.org/get')

# Custom request headers are supplied through the ``headers`` keyword.
headers = {'user-agent': 'my-app/0.0.1'}
response = httpx.get('http://httpbin.org/headers', headers=headers)
print(response.json())
StarcoderdataPython
1642576
<filename>osdria/views/data_input_dialog_view_ui.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'data_input_dialog_view.ui',
# licensing of 'data_input_dialog_view.ui' applies.
#
# Created: Fri Feb 15 10:17:15 2019
#      by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!

from PySide2 import QtCore, QtGui, QtWidgets


class Ui_DataInputDialog(object):
    """
    Auto-generated UI scaffold for the data-input dialog: a name label,
    a value line-edit, a table view, and Cancel/OK buttons.

    Do not hand-edit; regenerate with pyside2-uic from the .ui file instead.
    """

    def setupUi(self, DataInputDialog):
        """Instantiate and lay out all widgets on the given dialog."""
        DataInputDialog.setObjectName("DataInputDialog")
        DataInputDialog.resize(830, 450)
        self.verticalLayout = QtWidgets.QVBoxLayout(DataInputDialog)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.dialog_frame = QtWidgets.QFrame(DataInputDialog)
        self.dialog_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.dialog_frame.setFrameShadow(QtWidgets.QFrame.Plain)
        self.dialog_frame.setLineWidth(0)
        self.dialog_frame.setObjectName("dialog_frame")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.dialog_frame)
        self.verticalLayout_3.setSpacing(10)
        self.verticalLayout_3.setContentsMargins(10, 10, 10, 10)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Header label above the value field.
        self.name = QtWidgets.QLabel(self.dialog_frame)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(18)
        self.name.setFont(font)
        self.name.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.Germany))
        self.name.setObjectName("name")
        self.verticalLayout_2.addWidget(self.name)
        # Single-line edit for the value being entered.
        self.value = QtWidgets.QLineEdit(self.dialog_frame)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(22)
        self.value.setFont(font)
        self.value.setObjectName("value")
        self.verticalLayout_2.addWidget(self.value)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        # Central table for tabular data entry/display.
        self.data_table = QtWidgets.QTableView(self.dialog_frame)
        self.data_table.setObjectName("data_table")
        self.data_table.horizontalHeader().setCascadingSectionResizes(False)
        self.verticalLayout_3.addWidget(self.data_table)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(10)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        # Bottom-right action buttons.
        self.cancel_button = QtWidgets.QPushButton(self.dialog_frame)
        self.cancel_button.setFlat(True)
        self.cancel_button.setObjectName("cancel_button")
        self.horizontalLayout_2.addWidget(self.cancel_button)
        self.apply_button = QtWidgets.QPushButton(self.dialog_frame)
        self.apply_button.setFlat(True)
        self.apply_button.setObjectName("apply_button")
        self.horizontalLayout_2.addWidget(self.apply_button)
        self.verticalLayout_3.addLayout(self.horizontalLayout_2)
        self.verticalLayout.addWidget(self.dialog_frame)

        self.retranslateUi(DataInputDialog)
        QtCore.QMetaObject.connectSlotsByName(DataInputDialog)

    def retranslateUi(self, DataInputDialog):
        """Apply the (translatable) display strings to the widgets."""
        DataInputDialog.setWindowTitle(QtWidgets.QApplication.translate("DataInputDialog", "Dialog", None, -1))
        self.name.setText(QtWidgets.QApplication.translate("DataInputDialog", "Name", None, -1))
        self.value.setText(QtWidgets.QApplication.translate("DataInputDialog", "Value", None, -1))
        self.cancel_button.setText(QtWidgets.QApplication.translate("DataInputDialog", "Cancel", None, -1))
        self.apply_button.setText(QtWidgets.QApplication.translate("DataInputDialog", "OK", None, -1))
StarcoderdataPython
3205529
<gh_stars>0
'''Train CIFAR10 with PyTorch

Evaluates benign accuracy and robust (PGD) accuracy of per-epoch WideResNet
checkpoints and logs the mean accuracies to TensorBoard so their evolution
over training epochs can be plotted.
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms

import os
import argparse

from models import *
from utils import progress_bar
import cifar10my2
import cifar10my3
from sklearn import datasets
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
from time import time
import numpy as np
import matplotlib.pyplot as plt
from models.wideresnet import WideResNet
from torch.autograd import Variable
from time import time
from torch.utils.tensorboard import SummaryWriter
import numpy

parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--gpu', default='0,1,2', type=str, help='GPUs id')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
# Model factors
parser.add_argument('--depth', type=int, default=34, metavar='N', help='model depth (default: 34)')
parser.add_argument('--widen_factor', type=int, default=10, metavar='N', help='model widen_factor (default: 10)')
parser.add_argument('--droprate', type=float, default=0.0, metavar='N', help='model droprate (default: 0.0)')
# draw imgs
parser.add_argument('--factors', type=str, default='widen_factor', metavar='N', help='tensorboard draw img factors')
# PGD attack parameters
# NOTE(review): --num-steps and --step-size have no type=; overriding them on
# the command line would yield strings -- confirm before relying on overrides.
parser.add_argument('--epsilon', default=0.031, type=float, help='perturbation')
parser.add_argument('--num-steps', default=20, help='perturb number of steps')
parser.add_argument('--step-size', default=0.003, help='perturb step size')
parser.add_argument('--random', default=True, help='random initialization for PGD')
args = parser.parse_args()
print(args)

# Select which GPUs are visible to this process.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
transform_test = transforms.Compose([
    transforms.ToTensor(),
    # Normalization is commented out for models provided by TRADES.
    # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# bs = 20
bs = 1000
testset = cifar10my3.CIFAR10MY(
    root='./data', train=False, download=True, transform=transform_test, args=args)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=bs, shuffle=False, num_workers=2)
# set up data loader
# kwargs = {'num_workers': 1, 'pin_memory': True}
# transform_test = transforms.Compose([transforms.ToTensor(),])
# testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
# testloader = torch.utils.data.DataLoader(testset, batch_size=bs, shuffle=False, **kwargs)
cudnn.benchmark = True


def loadmodel(model_name, i):
    """Load the epoch-i WideResNet checkpoint for model_name and return it in eval mode."""
    # Model
    print('==> Building model..')
    # ckpt = '/hot-data/niuzh/Mycode/pytorch-cifar-master/checkpoint/model_cifar_wrn.pt'
    # ckpt = '/hot-data/niuzh/Mycode/TRADES-master/model-cifar-wideResNet/ST/model-wideres-epoch87.pt'
    ckpt = '/hot-data/niuzh/Mycode/TRADES-master/model-cifar-wideResNet/AT/' + model_name +'/'
    ckpt += 'model-wideres-epoch' + str(i)+'.pt'
    # net = WideResNet(depth=args.depth, widen_factor=args.widen_factor, dropRate=args.droprate).cuda()
    net = nn.DataParallel(WideResNet(depth=args.depth, widen_factor=args.widen_factor, dropRate=args.droprate)).cuda()
    # net.load_state_dict(torch.load(path + ckpt))
    net.load_state_dict(torch.load(ckpt))
    net.eval()
    print(ckpt)
    return net


# PGD Attack
def _pgd_whitebox(model, X, y, epsilon=args.epsilon, num_steps=args.num_steps, step_size=args.step_size):
    """
    White-box PGD attack; returns (natural error count, robust error count)
    over the batch.  model(X) is assumed to return a (features, logits)
    pair -- the second element is used as logits (TODO confirm model API).
    """
    # def _pgd_whitebox(model, X, y, epsilon=args.epsilon, num_steps=args.num_steps, step_size=args.step_size):
    _, out = model(X)
    err = (out.data.max(1)[1] != y.data).float().sum()
    X_pgd = Variable(X.data, requires_grad=True)
    if args.random:
        # Random start inside the epsilon-ball.
        random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).cuda()
        X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)

    for _ in range(num_steps):
        opt = optim.SGD([X_pgd], lr=1e-3)
        opt.zero_grad()
        # enable_grad() re-enables autograd even if the caller wrapped the
        # attack in torch.no_grad().
        with torch.enable_grad():
            loss = nn.CrossEntropyLoss()(model(X_pgd)[1], y)
        loss.backward()
        # Signed-gradient step, then project back into the epsilon-ball and [0, 1].
        eta = step_size * X_pgd.grad.data.sign()
        X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
        eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
        X_pgd = Variable(X.data + eta, requires_grad=True)
        X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
    err_pgd = (model(X_pgd)[1].data.max(1)[1] != y.data).float().sum()
    # print('err pgd (white-box): ', err_pgd)
    return err, err_pgd


# input: tensorboard writer, model, model_name, ith epoch
def test(writer, net, model_name, i):
    """
    Evaluate natural and PGD-robust accuracy on the test loader and log the
    mean accuracies to TensorBoard under the given model name and epoch i.
    The test loader is assumed to yield samples sorted by class in blocks of
    1000, so each 1000-sample window is one class -- TODO confirm cifar10my3.
    """
    global best_acc
    global best_epoch
    accs_batch = []
    acc_robust_label = []
    acc_natural_label = []
    count = 0
    robust_err_total_label = 0
    natural_err_total_label = 0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs, targets = inputs.cuda(), targets.cuda()
            X, y = Variable(inputs, requires_grad=True), Variable(targets)
            err_natural, err_robust = _pgd_whitebox(net, X, y)
            robust_err_total_label += err_robust
            natural_err_total_label += err_natural
            count = bs + count
            # Per-class accuracy: every 1000 samples closes one class window.
            if count % 1000 == 0:
                label_index = count/1000-1
                robust_acc = (1-robust_err_total_label/1000).cpu().numpy()
                natural_acc = (1-natural_err_total_label/1000).cpu().numpy()
                # print('robust_acc: {:3f}'.format(robust_acc))
                # print('natural_acc: {:3f}'.format(natural_acc))
                acc_robust_label.append(robust_acc)
                acc_natural_label.append(natural_acc)
                robust_err_total_label = 0
                natural_err_total_label = 0
    # Plot the mean accuracies over all classes.
    robust_acc_mean = numpy.mean(acc_robust_label)
    natural_acc_mean = numpy.mean(acc_natural_label)
    graph_name = 'test/' + model_name + '/robust_acc_mean'
    writer.add_scalars(graph_name, {'robust_acc': robust_acc_mean}, i)
    graph_name = 'test/' + model_name + '/benign_acc_mean'
    writer.add_scalars(graph_name, {'natural_acc': natural_acc_mean}, i)
    # Print per-label accuracies (disabled).
    # print('acc_natural_label:')
    # for i in acc_natural_label:
    #     print('{:3f}'.format(i))
    #
    # print('acc_robust_label:')
    # for i in acc_robust_label:
    #     print('{:3f}'.format(i))
    #
    return 0


def main():
    """Evaluate checkpoints for epochs 1..100 and log accuracies to TensorBoard."""
    start = time()
    writer = SummaryWriter(comment='test_comment', filename_suffix="test_suffix")
    # load model
    # model_name = 'e' + str(args.epsilon) + '_depth' + str(args.depth) + '_' + 'widen' + str(
    #     args.widen_factor) + '_' + 'drop' + str(args.droprate)
    # if args.factors == 'widen_factor':
    if True:
        # widen_factors = [4, 6, 8, 10, 12]
        for i in range(100):
            t0 = time()
            model_name = 'e' + str(args.epsilon) + '_depth' + str(args.depth) + '_' + \
                         'widen' + str(args.widen_factor) + '_' + 'drop' + str(args.droprate)
            print("Test " + model_name)
            # load epoch ith model
            net = loadmodel(model_name, i+1)
            # test robust acc & benign acc
            test(writer, net, model_name, i+1)
            t1 = time()
            print('时间:{:3f}'.format((t1 - t0) / 60))
    writer.close()
    end = time()
    print('时间:{:3f}'.format((end-start)/60))


if __name__ == '__main__':
    main()
StarcoderdataPython
1779794
<reponame>peteeckel/netbox-dns<filename>netbox_dns/forms/nameserver.py
from django.forms import CharField

from netbox.forms import (
    NetBoxModelBulkEditForm,
    NetBoxModelFilterSetForm,
    NetBoxModelCSVForm,
    NetBoxModelForm,
)
from utilities.forms import TagFilterField

from netbox_dns.models import NameServer


class NameServerForm(NetBoxModelForm):
    """Form for creating a new NameServer object."""

    class Meta:
        model = NameServer
        fields = ("name", "tags")


class NameServerFilterForm(NetBoxModelFilterSetForm):
    """Form for filtering NameServer instances."""

    # Free-text filter on the name server's name.
    name = CharField(
        required=False,
        label="Name",
    )
    # Standard NetBox tag filter widget.
    tag = TagFilterField(NameServer)

    model = NameServer


class NameServerCSVForm(NetBoxModelCSVForm):
    """Form for bulk-importing NameServer objects from CSV."""

    class Meta:
        model = NameServer
        fields = ("name",)


class NameServerBulkEditForm(NetBoxModelBulkEditForm):
    """Form for bulk-editing NameServer objects (no editable fields declared)."""

    model = NameServer
StarcoderdataPython
3333317
#!/usr/bin/env python
"""Unit tests for ssl_announcer.listener."""

import unittest

from ssl_announcer import listener


class TestListener(unittest.TestCase):
    """Tests covering the listener module's arithmetic helpers."""

    def test_add(self):
        # add_two(1) is expected to produce 3.
        result = listener.add_two(1)
        self.assertEqual(3, result)


if __name__ == "__main__":
    unittest.main()
StarcoderdataPython
4842938
# Telegram bot UI text for NUS Timetable Reminders.
# All strings below are sent verbatim to users (Telegram Markdown formatting
# with * for bold); do not reflow or "fix" their contents.

# Labels for the inline option keyboard.
option_button = ['About', 'Eligible Modules', 'Exam Info', 'Details', 'Go back']

# Random sign-off messages; one is picked when the user leaves.
# NOTE(review): 'See you later!' appears twice -- possibly intentional
# weighting of the random choice; confirm before deduplicating.
goodbye = ['See you soon!', 'Have a nice day :)', 'Have a great day!', 'See you later!', 'Goodbye for now!', 'See you later!', 'Goodbye :)']

# Greeting shown on /start.  Backslash-newlines continue the string literal;
# the literal \n sequences are the line breaks Telegram renders.
welcome_message = "Welcome to NUS Timetable Reminders!* 📆\
\n\n\n*To create your reminders:*\
\n\n1. */add* and send me your timetable URL 📎\
\n\n2. */activate* and select a timing ⏰\
\n\n3. Your reminders will be generated! ✅\
\n\n\n*Here's what else I can do:*\
\n\n📷 Send in a photo/URL of your timetable for *module info*\n\n🔍 *Search* modules from NUSMods\
\n\n📚 Get a *weekly overview* of your timetable\
\n\nClick on the *menu* button on the bottom left to explore more features!\
\n\nIf you need any help, use the /help command :)"

# Full help text shown on /help.
help_message = "*Help Menu*\n\n1. *How to add my timetable? 📚*\n\nUse the /add command and send in the link to your timetable from NUSMods.\
\n\nUpon successfully saving your timetable, you will be able to use the rest of the features.\n\n\n2. *How to obtain weekly overview? 📆*\n\nUse the *📆 Weekly Overview* button to get a summary of your classes for the current week.\
\n\nOnly upcoming classes are reflected in the weekly summary.\n\n\n3. *How do I set my reminders? ⏰*\n\nWith the /activate command, you can set how early in advance you want to be notified.\nIf you do not wish to receive reminders, use the /deactivate command.\
\n\n\n4. *Where can I view my timetable? 🧾*\n\nAfter adding your timetable, you will be able to view class information such as venues and lesson times by using the *📚 My Classes* button.\
\n\n\n5. *How to retrieve module information? ℹ️*\n\nYou may use the following commands to obtain information about specific modules.\n\n*🔍 Search*\
\nUse this command and enter module codes on a new line.\
\n\n*📷 Info*\nUse this to retrieve module information using either an image or a link to your NUSMods timetable.\
\n\n\n6. *How to use image recognition feature? 📸*\n\nUse *📷 Info* and send in a PNG/JPG file of your timetable from NUSMods to retrieve data for all your modules.\n\nAlternatively, you may also send in a link to your timetable.\
\n\n\n7. *How do I delete my timetable? 🗑*\n\nUse the /remove command to delete all saved timetable information. This feature also automatically deactivates any active reminders.\
\n\n\n8. *Where do I report bugs? 🐞*\n\nShould you encounter any bugs while using the bot, please enter /bugs and report the issue in the next message.\
\n\n\nThank you for using NUS Timetable Reminders bot. Hope it has helped you to attend your classes on time and make more informed decisions when choosing modules! Stay tuned for more features :)"
StarcoderdataPython
1611908
"""Logging and Profiling """ from . import settings from datetime import datetime from time import time as get_time from platform import python_version _VERBOSITY_LEVELS_FROM_STRINGS = {'error': 0, 'warn': 1, 'info': 2, 'hint': 3} def info(*args, **kwargs): return msg(*args, v='info', **kwargs) def error(*args, **kwargs): args = ('Error:',) + args return msg(*args, v='error', **kwargs) def warn(*args, **kwargs): args = ('WARNING:',) + args return msg(*args, v='warn', **kwargs) def hint(*args, **kwargs): return msg(*args, v='hint', **kwargs) def _settings_verbosity_greater_or_equal_than(v): if isinstance(settings.verbosity, str): settings_v = _VERBOSITY_LEVELS_FROM_STRINGS[settings.verbosity] else: settings_v = settings.verbosity return settings_v >= v def msg(*msg, v=4, time=False, memory=False, reset=False, end='\n', no_indent=False, t=None, m=None, r=None): """Write message to logging output. Log output defaults to standard output but can be set to a file by setting `sc.settings.log_file = 'mylogfile.txt'`. v : {'error', 'warn', 'info', 'hint'} or int, (default: 4) 0/'error', 1/'warn', 2/'info', 3/'hint', 4, 5, 6... time, t : bool, optional (default: False) Print timing information; restart the clock. memory, m : bool, optional (default: Faulse) Print memory information. reset, r : bool, optional (default: False) Reset timing and memory measurement. Is automatically reset when passing one of ``time`` or ``memory``. end : str (default: '\n') Same meaning as in builtin ``print()`` function. no_indent : bool (default: False) Do not indent for ``v >= 4``. 
""" # variable shortcuts if t is not None: time = t if m is not None: memory = m if r is not None: reset = r if isinstance(v, str): v = _VERBOSITY_LEVELS_FROM_STRINGS[v] if v == 3: # insert "--> " before hints msg = ('-->',) + msg if v >= 4 and not no_indent: msg = (' ',) + msg if _settings_verbosity_greater_or_equal_than(v): if not time and not memory and len(msg) > 0: _write_log(*msg, end=end) if reset: try: settings._previous_memory_usage, _ = get_memory_usage() except: pass settings._previous_time = get_time() if time: elapsed = get_passed_time() msg = msg + ('({})'.format(_sec_to_str(elapsed)),) _write_log(*msg, end=end) if memory: _write_log(get_memory_usage(), end=end) m = msg def _write_log(*msg, end='\n'): """Write message to log output, ignoring the verbosity level. This is the most basic function. Parameters ---------- *msg : One or more arguments to be formatted as string. Same behavior as print function. """ from .settings import logfile if logfile == '': print(*msg, end=end) else: out = '' for s in msg: out += str(s) + ' ' with open(logfile, 'a') as f: f.write(out + end) def _sec_to_str(t): """Format time in seconds. Parameters ---------- t : int Time in seconds. """ from functools import reduce return "%d:%02d:%02d.%02d" % \ reduce(lambda ll, b: divmod(ll[0], b) + ll[1:], [(t*100,), 100, 60, 60]) def get_passed_time(): now = get_time() elapsed = now - settings._previous_time settings._previous_time = now return elapsed def print_passed_time(): now = get_time() elapsed = now - settings._previous_time settings._previous_time = now from functools import reduce elapsed = "%d:%02d:%02d.%02d" % reduce(lambda ll, b: divmod(ll[0], b) + ll[1:], [(elapsed*100,), 100, 60, 60]) return elapsed def print_version(): from . 
import __version__ _write_log('Running scvelo', __version__, '(python ' + python_version() + ')', 'on {}.'.format(get_date_string())) def print_versions(): for mod in ['scvelo', 'scanpy', 'anndata', 'loompy', 'numpy', 'scipy', 'matplotlib', 'sklearn', 'pandas']: mod_name = mod[0] if isinstance(mod, tuple) else mod mod_install = mod[1] if isinstance(mod, tuple) else mod try: print('{}=={}'.format(mod_install, __import__(mod_name).__version__), end=' ') except (ImportError, AttributeError): pass print() def get_date_string(): return datetime.now().strftime("%Y-%m-%d %H:%M") from anndata.logging import print_memory_usage from anndata.logging import get_memory_usage from sys import stdout class ProgressReporter: def __init__(self, total, interval=3): self.count = 0 self.total = total self.timestamp = get_time() self.interval = interval def update(self): self.count += 1 if settings.verbosity > 1 and (get_time() - self.timestamp > self.interval or self.count == self.total): self.timestamp = get_time() percent = int(self.count * 100 / self.total) stdout.write('\r' + '... %d%%' % percent) stdout.flush() def finish(self): if settings.verbosity > 1: stdout.write('\r') stdout.flush()
StarcoderdataPython
1705876
<reponame>projectcalico/layer-etcd-proxy<filename>lib/etcdctl.py
from charmhelpers.core.hookenv import log
from subprocess import CalledProcessError
from shlex import split
from subprocess import check_output
import os


class EtcdCtl:
    '''
    etcdctl modeled as a python class. This python wrapper consumes and exposes
    some of the commands contained in etcdctl. Related to unit registration,
    cluster health, and other operations
    '''

    def register(self, cluster_data):
        ''' Perform self registration against the etcd leader and returns the
        raw output response.

        @params cluster_data - a dict of data to fill out the request to
        push our registration to the leader
        requires keys: leader_address, port, unit_name, private_address,
        management_port

        Returns a dict with keys 'cluster_unit_id' and 'cluster' parsed from
        the etcdctl output, or None if registration failed.
        '''
        # Build a connection string for the cluster data.
        connection = get_connection_string([cluster_data['private_address']],
                                           cluster_data['management_port'])

        # Create a https url to the leader unit name on the private address.
        command = "etcdctl -C {0} member add {1} " \
                  "{2}".format(cluster_data['leader_address'],
                               cluster_data['unit_name'],
                               connection)
        try:
            result = self.run(command)
        except CalledProcessError:
            # Best effort: caller gets None and can retry later.
            log('Notice: Unit failed self registration', 'WARNING')
            return

        # Expected output shape:
        # ['Added member named etcd12 with ID b9ab5b5a2e4baec5 to cluster',
        #  '', 'ETCD_NAME="etcd12"',
        #  'ETCD_INITIAL_CLUSTER="etcd11=https://10.113.96.26:2380,etcd12=https://10.113.96.206:2380"',
        #  'ETCD_INITIAL_CLUSTER_STATE="existing"', '']
        reg = {}
        for line in result.split('\n'):
            if 'Added member' in line:
                reg['cluster_unit_id'] = line.split('ID')[-1].strip(' ').split(' ')[0]  # noqa
            if 'ETCD_INITIAL_CLUSTER=' in line:
                reg['cluster'] = line.split('="')[-1].rstrip('"')
        return reg

    def unregister(self, unit_id):
        ''' Remove a member from the etcd cluster during unit teardown.

        @params unit_id - the etcd member ID of the unit to remove; it can be
        obtained from the EtcdDatabag dict or from member_list().

        Returns the raw etcdctl output.
        '''
        command = "etcdctl member remove {}".format(unit_id)
        return self.run(command)

    def member_list(self, leader_address=None):
        ''' Returns the output from `etcdctl member list` as a python dict
        organized by unit_name, containing all the data-points in the
        resulting response.

        @params leader_address - optional endpoint URL to query instead of the
        local etcd instance.
        '''
        members = {}
        if leader_address:
            cmd = "etcdctl --endpoint {} member list".format(leader_address)
            out = self.run(cmd)
        else:
            out = self.run("etcdctl member list")
        raw_member_list = out.strip('\n').split('\n')
        # Expect output like this:
        # 4f24ee16c889f6c1: name=etcd20 peerURLs=https://10.113.96.197:2380 clientURLs=https://10.113.96.197:2379  # noqa
        # edc04bb81479d7e8: name=etcd21 peerURLs=https://10.113.96.243:2380 clientURLs=https://10.113.96.243:2379  # noqa
        for unit in raw_member_list:
            # A member that has been added but not yet started shows as
            # '[unstarted]' and carries no name/urls.
            if '[unstarted]' in unit:
                members['unstarted'] = {}
                continue
            unit_guid = unit.split(':')[0]
            unit_name = unit.split(' ')[1].split("=")[-1]
            peer_urls = unit.split(' ')[2].split("=")[-1]
            client_urls = unit.split(' ')[3].split("=")[-1]
            members[unit_name] = {'unit_id': unit_guid,
                                  'name': unit_name,
                                  'peer_urls': peer_urls,
                                  'client_urls': client_urls}
        return members

    def member_update(self, unit_id, uri):
        ''' Update the etcd cluster member by unit_id with a new uri. This
        allows us to change protocol, address or port.

        @params unit_id: The string ID of the unit in the cluster.
        @params uri: The string universal resource indicator of where to
        contact the peer.

        Returns the raw etcdctl output, or '' on failure.
        '''
        out = ''
        try:
            command = 'etcdctl member update {0} {1}'.format(unit_id, uri)
            log(command)
            # Run the member update command for the existing unit_id.
            out = self.run(command)
        except CalledProcessError as cpe:
            # Failure is logged, not raised; caller receives ''.
            log('Failed to update member {0}'.format(unit_id), 'WARNING')
            log(cpe.output)
        return out

    def cluster_health(self):
        ''' Returns the output of etcdctl cluster-health as a python dict
        organized by topical information with detailed unit output:
        {'status': <summary line>, 'units': [<per-member lines>]}.
        '''
        health = {}
        try:
            out = self.run('etcdctl cluster-health')
            health_output = out.strip('\n').split('\n')
            # The last line is the overall summary; everything before the final
            # two lines is per-member detail.
            health['status'] = health_output[-1]
            health['units'] = health_output[0:-2]
        except CalledProcessError as cpe:
            log('Notice: Unit failed cluster-health check', 'WARNING')
            log(cpe.output)
            health['status'] = 'cluster is unhealthy see log file for details.'
            health['units'] = []
        return health

    def run(self, command):
        ''' Wrapper to subprocess calling output. This is a convenience
        method to clean up the calls to subprocess and append TLS data.

        Side effect: mutates this process's environment with the etcdctl TLS
        certificate paths before every invocation.
        '''
        os.environ['ETCDCTL_CA_FILE'] = '/etc/ssl/etcd/ca.pem'
        os.environ['ETCDCTL_CERT_FILE'] = '/etc/ssl/etcd/server.pem'
        os.environ['ETCDCTL_KEY_FILE'] = '/etc/ssl/etcd/server-key.pem'
        return check_output(split(command)).decode('ascii')


def get_connection_string(members, port, protocol='https'):
    ''' Return a comma-separated connection string for the list of member
    addresses using the provided port and protocol (defaults to https).
    '''
    connections = []
    for address in members:
        connections.append('{0}://{1}:{2}'.format(protocol, address, port))
    connection_string = ','.join(connections)
    return connection_string
StarcoderdataPython
1647681
<reponame>ajfar-bem/wisebldg<filename>DeviceAPI/API_SmartThings.py # -*- coding: utf-8 -*- ''' Copyright (c) 2016, Virginia Tech All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. This material was prepared as an account of work sponsored by an agency of the United States Government. 
Neither the United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their
employees, nor any jurisdiction or organization that has cooperated in the development of these materials, makes any
warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or
usefulness or any information, apparatus, product, software, or process disclosed, or represents that its use would not
infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name,
trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring
by the United States Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and
opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any
agency thereof.

VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE under Contract DE-EE0006352

#__author__ = "<NAME>"
#__credits__ = ""
#__version__ = "3.5"
#__maintainer__ = "BEMOSS Team"
#__email__ = "<EMAIL>"
#__website__ = "www.bemoss.org"
#__created__ = "2016-10-17 12:04:50"
#__lastUpdated__ = "2016-10-18 11:23:33"
'''

'''This API class is for an agent that want to discover/communicate/monitor Tumalow energy ingenuity battery storage'''
# NOTE(review): the docstring above mentions Tumalow battery storage, but every endpoint below talks to the
# SmartThings cloud (switches/dimmers/power meters) — the description looks copied from another API module; verify.

import time
import requests
import json
from pprint import pprint
from bemoss_lib.utils import db_helper
from BaseAPI import baseAPI
from bemoss_lib.utils.BEMOSS_ONTOLOGY import BEMOSS_ONTOLOGY

# Module-wide debug switch; only read here, never toggled by this module.
debug=True


class API(baseAPI):
    """BEMOSS device API for SmartThings-connected switches, power switches and dimmers.

    All device communication goes through the SmartThings cloud SmartApp endpoints
    obtained via OAuth (see getAppEndpoints). This module is written for Python 2
    (see the `print` statement in main()).
    """

    def __init__(self,**kwargs):
        # Optional kwargs: 'token' (OAuth access token — triggers an endpoint lookup),
        # 'nickname' (display name), 'mac_address' (SmartThings device id with dashes stripped).
        super(API, self).__init__(**kwargs)
        self.uri = None        # SmartApp endpoint URI, filled by getAppEndpoints()
        self.base_url = None   # SmartThings API base URL, filled by getAppEndpoints()
        if 'token' in kwargs.keys():
            self.access_token = kwargs['token']
            self.getAppEndpoints(kwargs['token'])
        if 'nickname' in kwargs.keys():
            self.nickname = kwargs['nickname']
        if 'mac_address' in kwargs.keys():
            self.mac_address = kwargs['mac_address']

    def API_info(self):
        # Static registry describing the three SmartThings device models this API supports;
        # consumed by the BEMOSS framework to register device types.
        return [{'device_model': 'STenableSwitch', 'vendor_name': 'SmartThings', 'communication': 'WiFi',
                 'support_oauth': True, 'device_type_id': 3, 'api_name': 'API_SmartThings',
                 'html_template': 'plugload/plugload.html', 'agent_type': 'BasicAgent', 'identifiable': False,
                 'authorizable': False, 'is_cloud_device': True, 'schedule_weekday_period': 4,
                 'schedule_weekend_period': 4, 'allow_schedule_period_delete': True,
                 'chart_template': 'charts/charts_plugload.html'},
                {'device_model': 'STenablePowerSwitch', 'vendor_name': 'SmartThings', 'communication': 'WiFi',
                 'support_oauth': True, 'device_type_id': 3, 'api_name': 'API_SmartThings',
                 'html_template': 'plugload/plugload.html', 'agent_type': 'BasicAgent', 'identifiable': False,
                 'authorizable': False, 'is_cloud_device': True, 'schedule_weekday_period': 4,
                 'schedule_weekend_period': 4, 'allow_schedule_period_delete': True,
                 'chart_template': 'charts/charts_wtplug.html'},
                {'device_model': 'STenableDimmer', 'vendor_name': 'SmartThings', 'communication': 'WiFi',
                 'support_oauth': True, 'device_type_id': 3, 'api_name': 'API_SmartThings',
                 'html_template': 'lighting/lighting.html', 'agent_type': 'BasicAgent', 'identifiable': False,
                 'authorizable': False, 'is_cloud_device': True, 'schedule_weekday_period': 4,
                 'schedule_weekend_period': 4, 'allow_schedule_period_delete': True,
                 'chart_template': 'charts/charts_lighting.html'}
                ]

    def dashboard_view(self):
        # Layout descriptor for the BEMOSS dashboard widget of this device.
        return {"top": None, "center": {"type": "image", "value": 'LightSW.png'},
                "bottom": BEMOSS_ONTOLOGY.STATUS.NAME, "image": "LightSW.png"}

    def ontology(self):
        # Maps the variable names used by this API onto BEMOSS ontology terms.
        return {"status": BEMOSS_ONTOLOGY.STATUS, "power": BEMOSS_ONTOLOGY.POWER,
                "brightness": BEMOSS_ONTOLOGY.BRIGHTNESS}

    def discover(self, username, password, token):
        """Query the SmartApp /discover endpoint and return a list of device dicts.

        username/password are unused (OAuth token only). Returns whatever was collected
        so far (possibly an empty list) on any HTTP or parsing failure.
        """
        self.getAppEndpoints(token)
        responses = list()
        header = {'Authorization': 'Bearer ' + token}
        url = self.uri + '/discover'
        r = requests.get(url, headers=header)
        try:
            if r.status_code == 200:
                # Bucket discovered devices by capability reported by the SmartApp.
                device = {'switch': [], 'powermeter': [], 'dimmer': []}
                for item in json.loads(r.content):
                    device[item['type']].append((item['id'], item['name']))
                # A physical device can appear under several capabilities; added_device
                # de-duplicates so each id is reported once, with the most specific model
                # winning (dimmer > powermeter > plain switch).
                added_device = []
                for dimmer in device['dimmer']:
                    added_device.append(dimmer[0])
                    responses.append({'address': None,
                                      'vendor': 'SmartThings',
                                      'mac': dimmer[0].replace('-', ''),
                                      'model': "STenableDimmer",
                                      'nickname': dimmer[1]
                                      })
                for powermeter in device['powermeter']:
                    if powermeter[0] not in added_device:
                        added_device.append(powermeter[0])
                        responses.append({'address': None,
                                          'vendor': 'SmartThings',
                                          'mac': powermeter[0].replace('-', ''),
                                          'model': "STenablePowerSwitch",
                                          'nickname': powermeter[1]
                                          })
                for switch in device['switch']:
                    if switch[0] not in added_device:
                        added_device.append(switch[0])
                        responses.append({'address': None,
                                          'vendor': 'SmartThings',
                                          'mac': switch[0].replace('-', ''),
                                          'model': "STenableSwitch",
                                          'nickname': switch[1]
                                          })
                return responses
            else:
                return responses
        # NOTE(review): bare except silently hides JSON/key errors; deliberate best-effort, kept as-is.
        except:
            return responses

    def renewConnection(self):
        # Cloud device: nothing to renew locally.
        pass

    def getAppEndpoints(self, access_token):
        """Resolve the installed SmartApp endpoint for this token and cache uri/base_url."""
        get_endpoint_url = 'https://graph.api.smartthings.com/api/smartapps/endpoints'
        headers = {'Authorization': 'Bearer ' + access_token}
        headers.update({'Content-Type': 'application/json'})
        response = requests.get(get_endpoint_url, headers=headers)
        # Assumes at least one SmartApp installation exists for this token — TODO confirm;
        # an empty list would raise IndexError here.
        endpoint_info = json.loads(response.content)[0]
        self.uri = endpoint_info['uri']
        # self.uri = 'https://graph.api.smartthings.com/api/smartapps/installations/468648eb-e879-45da-bcca-d7ce79280228'
        self.base_url = endpoint_info['base_url']

    def getDataFromDevice(self):
        """Poll switch/dimmer/power endpoints and return the values for this device's mac_address."""
        header = {'Authorization': 'Bearer ' + self.access_token}
        switch_url = self.uri + '/switches'
        power_url = self.uri + '/powermeters'
        dimmer_url = self.uri + '/dimmers'
        devicedata = dict()
        # Each endpoint returns a list for all devices; match on the dash-stripped id.
        r = requests.get(switch_url, headers=header)
        for item in json.loads(r.content):
            if item['id'].replace('-', '') == self.mac_address:
                status = BEMOSS_ONTOLOGY.STATUS.POSSIBLE_VALUES.ON if item['value'] == 'on' else BEMOSS_ONTOLOGY.STATUS.POSSIBLE_VALUES.OFF
                devicedata['status'] = status
                break
        r = requests.get(dimmer_url, headers=header)
        for item in json.loads(r.content):
            if item['id'].replace('-', '') == self.mac_address:
                devicedata['brightness'] = item['value']
                break
        r = requests.get(power_url, headers=header)
        for item in json.loads(r.content):
            if item['id'].replace('-', '') == self.mac_address:
                devicedata['power'] = item['value']
                break
        return devicedata

    def setDeviceData(self, postmsg):
        """Push an on/off (and optional brightness) command to the SmartApp.

        postmsg: dict keyed by BEMOSS ontology names (status, optionally brightness).
        Always returns True; HTTP errors propagate via the re-raise below.
        """
        setDeviceStatusResult = True
        # Round-trip through JSON to get a plain, detached copy of postmsg.
        _data = json.dumps(postmsg)
        _data = json.loads(_data)
        # Re-insert the dashes that discover() stripped, rebuilding the UUID form
        # 8-4-4-4-12 that the SmartApp expects in its control URLs.
        seriel_no = self.mac_address[0:8] + '-' + self.mac_address[8:12] + '-' \
                    + self.mac_address[12:16] + '-' + self.mac_address[16:20] + \
                    '-' + self.mac_address[20:]
        if _data[BEMOSS_ONTOLOGY.STATUS.NAME] == BEMOSS_ONTOLOGY.STATUS.POSSIBLE_VALUES.OFF:
            status = 'off'
        elif _data[BEMOSS_ONTOLOGY.STATUS.NAME] == BEMOSS_ONTOLOGY.STATUS.POSSIBLE_VALUES.ON:
            status = 'on'
        # NOTE(review): if the status value is neither ON nor OFF, `status` is never bound
        # and the next line raises NameError — confirm upstream always sends ON/OFF.
        control_url = self.uri+ '/switches/' + status + '_' + seriel_no
        header = {'Authorization': 'Bearer ' + self.access_token}
        try:
            r = requests.put(control_url, headers=header)
        except:
            raise
        # Brightness is only meaningful while the device is being turned on.
        if BEMOSS_ONTOLOGY.BRIGHTNESS.NAME in _data.keys() and status == 'on':
            brightness = int(_data[BEMOSS_ONTOLOGY.BRIGHTNESS.NAME])
            control_url = self.uri+ '/dimmers/' + str(brightness) + '_' + seriel_no
            header = {'Authorization': 'Bearer ' + self.access_token}
            try:
                r = requests.put(control_url, headers=header)
            except:
                raise
        return setDeviceStatusResult


# This main method will not be executed when this class is used as a module
def main():
    # create an object with initialized data from DeviceDiscovery Agent
    # requirements for instantiation1. model, 2.type, 3.api, 4. address
    SmartThings = API(agent_id='testagent', token = '<PASSWORD>')
    #BatteryStorage.getDeviceStatus()
    # SmartThings.getAppEndpoints('b3a26f5d-7d07-46df-9f12-e278e7db82db')
    # pprint(SmartThings.discover(None, None, 'b3a26f5d-7d07-46df-9f12-e278e7db82db'))
    # SmartThings.nickname = 'Switch2'
    SmartThings.mac_address = '98fa6201fff740339614970646337f5b'
    # Python 2 print statement — this module targets Python 2.
    print SmartThings.getDataFromDevice()
    # SmartThings.setDeviceData({'status':'OFF', 'brightness':60})

if __name__ == "__main__":
    main()
StarcoderdataPython
181354
import numpy as np
import h5py

debug = False

# Raw rollout logs recorded by the RL agent — one entry per environment timestep.
termination = h5py.File('./termination.h5', 'r').get('data')
reward = h5py.File('./reward.h5', 'r').get('data')
activations = h5py.File('./activations.h5', 'r').get('data')
actions = h5py.File('./actions.h5', 'r').get('data')
qvals = h5py.File('./qvals.h5', 'r').get('data')
states = h5py.File('./states.h5', 'r').get('data')

if debug:
    print('Shape of termination: \n', (np.array(termination)).shape)
    print('Shape of activations: \n', (np.array(activations)).shape)
    print('Shape of actions: \n', (np.array(actions)).shape)
    print('Shape of reward: \n', (np.array(reward)).shape)
    print('Shape of qvals: \n', (np.array(qvals)).shape)
    print('Shape of states: \n', (np.array(states)).shape)

startTrajectory = []  # first index in a trajectory that leads to success
endTrajectory = []  # last index in a trajectory that leads to success
initialIndex = 0
# Scan for episode boundaries: termination[i + 1] == 1 closes the episode ending
# at step i; reward[i] == 0 marks that episode as successful. Failed episodes
# only advance initialIndex past the terminal marker.
for i in range(len(termination) - 1):
    if debug:
        print(str(i) + ' , ' + str(termination[i]) + ' , ' + str(reward[i]))
    if termination[i + 1] == 1 and reward[i] == 0:
        startTrajectory.append(initialIndex)
        endTrajectory.append(i)
        if debug:
            print('Success: ' + str(initialIndex) + ' , ' + str(i))
        initialIndex = i + 2
    elif termination[i + 1] == 1:
        initialIndex = i + 2

# Copy only the timesteps belonging to successful trajectories.
rewardClean = []
activationsClean = []
qvalsClean = []
statesClean = []
actionsClean = []
totalStates = 0
for i in range(len(startTrajectory)):
    totalStates += endTrajectory[i] - startTrajectory[i] + 1
    # BUG FIX: the original loop used range(start, end), which excluded
    # endTrajectory[i] — the terminal step of every successful trajectory was
    # silently dropped even though totalStates counted it (end - start + 1).
    # Include the end index so the written data matches the reported count.
    for j in range(startTrajectory[i], endTrajectory[i] + 1):
        rewardClean.append(reward[j])
        activationsClean.append(activations[j, :])
        qvalsClean.append(qvals[j])
        actionsClean.append(actions[j])
        statesClean.append(states[j, :])

# Persist the filtered datasets alongside the raw ones.
rCleanFile = h5py.File('rewardClean.h5', 'w')
rCleanFile.create_dataset('data', data=rewardClean)
aCleanFile = h5py.File('actionsClean.h5', 'w')
aCleanFile.create_dataset('data', data=actionsClean)
actCleanFile = h5py.File('activationsClean.h5', 'w')
actCleanFile.create_dataset('data', data=activationsClean)
qCleanFile = h5py.File('qvalsClean.h5', 'w')
qCleanFile.create_dataset('data', data=qvalsClean)
sCleanFile = h5py.File('statesClean.h5', 'w')
sCleanFile.create_dataset('data', data=statesClean)

print('Done! Total states kept: ' + str(totalStates))
StarcoderdataPython
70880
import os
import sys
import numpy as np
import random
import scipy
import torch


class Policy(object):
    """Policy utilities for walking a category taxonomy tree with an RL agent.

    The agent expands nodes level by level; per-batch state records, for every
    visited node, the model's selection probabilities and the level at which it
    was visited. Rewards are computed from the root-to-leaf paths implied by
    that state.
    """

    def __init__(self, taxonomy_id, path_max_length, right_node_reward):
        # taxonomy_id: dict mapping node id -> {'children': [...], 'parent': id, ...}
        # path_max_length: maximum taxonomy depth to explore
        # right_node_reward: per-node reward scale for nodes on a correct path
        self.taxonomy_id = taxonomy_id
        self.right_node_reward = right_node_reward
        self.wrong_node_reward = -0.1
        self.path_max_length = path_max_length

    def init_state(self, current_node, mode):
        """Reset per-batch visited-node state for a new greedy or random rollout."""
        if mode == 'greedy':
            self.greedy_state = [
                {} for x in current_node
            ]
            self.state = self.greedy_state
        elif mode == 'random':
            self.random_state = [
                {} for x in current_node
            ]
            self.state = self.random_state
        self.level = 0

    def update_state(self, index, prob, current_node, batch_index):
        """Record the selected nodes' probabilities at the current level, then descend one level.

        index: tensor of positions (into current_node) chosen by the action.
        """
        length = index.size()[0]
        for i in range(length):
            node = current_node[index[i]]
            node_state = prob[index[i]]
            batch_num = batch_index[index[i]]
            self.state[int(batch_num)][int(node)] = {'prob': node_state, 'level': self.level}
        self.level += 1

    def select_current_node(self, index, text, current_node, batch_index):
        # Filter the three parallel sequences down to the selected positions.
        text_ret = [text[i] for i in index]
        current_node_ret = [current_node[i] for i in index]
        batch_index_ret = [batch_index[i] for i in index]
        return text_ret, np.asarray(current_node_ret), np.asarray(batch_index_ret)

    def get_children(self, text, current_node, batch_index):
        """Expand every current node into its taxonomy children, replicating text/batch entries."""
        new_text = []
        new_node = []
        new_batch_index = []
        for i, node in enumerate(current_node):
            children = self.taxonomy_id[node]['children']
            new_node.extend(children)
            # One copy of the parent's text per child (x is unused by design).
            new_text += [text[i] for x in children]
            new_batch_index += [batch_index[i]] * len(children)
        return new_text, np.asarray(new_node), np.asarray(new_batch_index)

    def get_greedy_action(self, prob):
        # Select every position where P(select) > P(skip); prob columns are [skip, select].
        greedy_index = torch.nonzero(prob[:, 1] > prob[:, 0], as_tuple=False)
        return greedy_index.squeeze(1)

    def get_random_action(self, prob, batch_index, batch_size, K):
        """Sample positions Bernoulli-style from prob, capped at K selections per batch element."""
        prob = prob.detach().cpu().numpy()
        action = []
        # Group candidate positions and their probabilities by batch element.
        batch_prob = [[] for i in range(batch_size)]
        batch_action = [[] for i in range(batch_size)]
        for i in range(len(prob)):
            index_ = batch_index[i]
            batch_prob[index_].append(prob[i])
            batch_action[index_].append(i)
        for i in range(batch_size):
            batch_selected_actions = []
            for j, one_weight in enumerate(batch_prob[i]):
                # one_weight = [P(skip), P(select)]; draw the binary action.
                one_action = np.random.choice([0, 1], p=one_weight)
                if one_action == 1:
                    batch_selected_actions.append(batch_action[i][j])
            # Enforce the per-batch budget by uniform down-sampling.
            if len(batch_selected_actions) > K:
                batch_selected_actions = random.sample(batch_selected_actions, K)
            action.extend(batch_selected_actions)
        action = sorted(action)
        return torch.Tensor(action).to(torch.int64)

    def get_path(self, state):
        """Reconstruct root-reaching paths from the visited-node state of one batch element.

        Returns (path, rest_node): path is a list of node-id lists ordered root->leaf;
        rest_node collects visited nodes that belong to no root-reaching path.
        """
        # Bucket visited nodes by the level they were visited at.
        node_level = {}
        for key, val in state.items():
            level = val['level']
            if level not in node_level:
                node_level[level] = []
            node_level[level].append(key)
        node_rec = {one_node: False for one_node in state.keys()}
        path = []
        # Walk from the deepest level upward so each leaf seeds at most one path.
        sorted_level = sorted(node_level.keys(), reverse=True)
        for level in sorted_level:
            level_node = node_level[level]
            for one_node in level_node:
                if node_rec[one_node]:
                    continue
                current_node = one_node
                one_path = []
                get_end = False
                # Follow parent pointers; a path only counts if it reaches node 0 (the root)
                # through visited nodes only.
                while True:
                    if current_node == 0:
                        get_end = True
                        break
                    if current_node not in state:
                        break
                    one_path.append(current_node)
                    current_node = self.taxonomy_id[current_node]['parent']
                if get_end:
                    one_path.reverse()
                    path.append(one_path)
                    for one_node in one_path:
                        node_rec[one_node] = True
        # Everything visited but not on any complete path.
        rest_node = []
        for level, level_node in node_level.items():
            for one_node in level_node:
                if not node_rec[one_node]:
                    rest_node.append(one_node)
        return path, rest_node

    def path_reward(self, state, category, path):
        """Score each batch element's predicted paths against the gold paths.

        Nodes on a gold path earn right_node_reward, nodes on a wrong path earn
        wrong_node_reward, and dangling visited nodes are penalized with weight -1;
        every term is scaled by the negated selection probability of the node.
        """
        def judge_path(path, all_path):
            # True if every node of `path` appears together in at least one gold path.
            for one_path in all_path:
                flag = True
                for node in path:
                    if node not in one_path:
                        flag = False
                        break
                if flag:
                    return True
                else:
                    continue
            return False
        reward = []
        for i, one_state in enumerate(state):
            predicted_path, rest_node = self.get_path(one_state)
            batch_reward = 0.
            for one_path in predicted_path:
                path_reward = 0.
                if judge_path(one_path, path[i]):
                    for one_node in one_path:
                        path_reward += self.right_node_reward * (-one_state[one_node]['prob'][1])
                else:
                    for one_node in one_path:
                        path_reward += self.wrong_node_reward * (-one_state[one_node]['prob'][1])
                batch_reward += path_reward
            for one_node in rest_node:
                batch_reward += -1 * (-one_state[one_node]['prob'][1])
            reward.append(batch_reward)
        return reward

    def get_reward(self, state, category, path):
        # Single reward scheme for now; category is unused here but kept for interface parity.
        return self.path_reward(state, category, path)
StarcoderdataPython
3230922
from autonetkit.design.utils import filters
from autonetkit.network_model.network_model import NetworkModel
from autonetkit.network_model.topology import Topology
from autonetkit.network_model.types import DeviceType


def _build_igp_base(network_model: NetworkModel, topology: Topology):
    """Populate `topology` with the routers and intra-AS router-router links shared by all IGPs.

    Routers with no qualifying links are removed — there is no IGP to configure on them.
    """
    t_l2_conn = network_model.get_topology("layer2_conn")
    routers = filters.routers(t_l2_conn)
    topology.add_nodes_from(routers)
    # Only router-to-router links within the same AS carry IGP adjacencies.
    links = [l for l in t_l2_conn.links()
             if l.n1.type == l.n2.type == DeviceType.ROUTER
             and l.n1.get("asn") == l.n2.get("asn")]
    topology.add_links_from(links)

    # remove the zero degree routers -> no IGP to configure
    single_routers = [n for n in topology.nodes() if n.degree() == 0]
    topology.remove_nodes_from(single_routers)


def build_ospf(network_model: NetworkModel):
    """Build the OSPF topology: IGP base plus per-node/per-port area assignment.

    @param network_model:
    """
    t_ospf = network_model.create_topology("ospf")
    t_in = network_model.get_topology("input")
    _build_igp_base(network_model, t_ospf)

    for node in t_ospf.nodes():
        input_node = t_in.get_node_by_id(node.id)
        # TODO: allow specifying the OSPF area on input topology
        node.set("area", input_node.get("ospf_area"))

        # Every physical port inherits the node-level area.
        node_area = node.get("area")
        for port in filters.physical_ports(node):
            port.set("area", node_area)


def build_isis(network_model: NetworkModel):
    """Build the IS-IS topology (currently just the shared IGP base).

    @param network_model:
    """
    t_isis = network_model.create_topology("isis")
    _build_igp_base(network_model, t_isis)


def build_ibgp(network_model: NetworkModel):
    """Build a full-mesh iBGP topology over loopback-zero interfaces within each AS.

    @param network_model:
    """
    t_ibgp = network_model.create_topology("ibgp")
    t_l2_conn = network_model.get_topology("layer2_conn")
    routers = filters.routers(t_l2_conn)
    t_ibgp.add_nodes_from(routers)

    # All ordered same-AS router pairs — note this creates both (a, b) and (b, a).
    pairs = [(s, t) for s in t_ibgp.nodes()
             for t in t_ibgp.nodes()
             if s.get("asn") == t.get("asn")
             if s != t]

    # get loopback
    for n1, n2 in pairs:
        p1 = n1.loopback_zero()
        p2 = n2.loopback_zero()

        t_ibgp.create_link(p1, p2)


def build_ebgp(network_model: NetworkModel):
    """Build the eBGP topology from all inter-AS layer-2 links.

    @param network_model:
    """
    t_ebgp = network_model.create_topology("ebgp")
    t_l2_conn = network_model.get_topology("layer2_conn")
    routers = filters.routers(t_l2_conn)
    t_ebgp.add_nodes_from(routers)

    # eBGP sessions run over links whose endpoints sit in different ASes.
    ebgp_links = [l for l in t_l2_conn.links()
                  if l.n1.get("asn") != l.n2.get("asn")]
    t_ebgp.add_links_from(ebgp_links)
StarcoderdataPython
169387
<filename>prediction.py<gh_stars>0 import numpy as np from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, LSTM, Dropout """ Original Code https://github.com/INVESTAR/StockAnalysisInPython/blob/master/09_Deep_Learning_Prediction/ch09_09_RNN_StockPrediction.py modified by s-jun """ def RNN(df): raw_df = df def MinMaxScaler(data): """최솟값, 최댓값 이용해 0~1 값으로 변환""" numerator = data - np.min(data, 0) denominator = np.max(data, 0) - np.min(data, 0) return numerator / (denominator + 1e-7) dfx = raw_df[['open', 'high', 'low', 'close', 'volume']] dfx = MinMaxScaler(dfx) dfy = dfx[['close']] x = dfx.values.tolist() y = dfy.values.tolist() data_x = [] data_y = [] window_size = 10 for i in range(len(y) - window_size): _x = x[i: i + window_size] _y = y[i + window_size] data_x.append(_x) data_y.append(_y) # 훈련용 데이터셋 train_size = int(len(data_y) * 0.7) train_x = np.array(data_x[0:train_size]) train_y = np.array(data_y[0:train_size]) # 테스트용 데이터셋 test_size = len(data_y) - train_size test_x = np.array(data_x[train_size:len(data_x)]) test_y = np.array(data_y[train_size:len(data_y)]) model = Sequential() model.add(LSTM(units=10, activation='relu', return_sequences=True, input_shape=(window_size, 5))) model.add(Dropout(0.1)) model.add(LSTM(units=10, activation='relu')) model.add(Dropout(0.1)) model.add(Dense(units=1)) model.summary() model.compile(optimizer='adam', loss='mean_squared_error') # 최적화도구: adam model.fit(train_x, train_y, epochs=50, batch_size=30) # epochs : 학습횟수, batch_size: 훈련데이터 갯수 pred_y = model.predict(test_x) predict_price_tommorow = (raw_df.close.iloc[-1] * pred_y[-1] / dfy.close.iloc[-1])[0] predict_price_today = (raw_df.close.iloc[-2] * pred_y[-2] / dfy.close.iloc[-2])[0] percentage = round(((predict_price_tommorow / predict_price_today - 1) * 100), 3) percentage = str(percentage) + '%' return str(raw_df.index[-1]), percentage
StarcoderdataPython
1714727
<filename>setuz/parsers/brand.py<gh_stars>1-10 from ..schemes.brand import BrandListSchema, BrandSchema def brand_parser(response) -> BrandListSchema: data = response.json() brands: list = [] for result in data['results']: brands.append(BrandSchema( id=result['id'], name=result['name'], tm=result['tm'] )) instance: BrandListSchema = BrandListSchema( count=data['count'], next=data['next'], prev=data['prev'], results=brands ) return instance
StarcoderdataPython
91473
# Course: EE551 Python for Engineer
# Author: <NAME>
# Date: 2021/05/04
# Version: 1.0
# Defines routes for the front-end
from flask import render_template, url_for, flash, redirect, request, send_from_directory, send_file
from image_processor.forms import ImageProcessForm
from image_processor import app
from image_processor.algorithms.edge_detection import gaussian, sobel_edge, nonMax
from image_processor.algorithms.line_detection import hessian, RANSAC
import os


# Performs the specified operation on an image
# Checks if required output already exists in cache
# Each algorithm writes a fixed-name PNG into static/images; the on-disk file
# acts as the cache, so the algorithm only runs when that file is missing.
# Inputs:
#   form_picture: file path of the image to be processed
#   form_process: the operation to be performed on the image
#                 (one of "gaussian", "sobel", "nonMax", "hessian", "ransac")
# Outputs:
#   processed_path: static URL of the resulting image (for the template)
#   filename: file name of the resulting image (for the download route)
def process_picture(form_picture, form_process):
    processed_path = ""
    filename = ""
    if form_process == "gaussian":
        processed_path = os.path.join(app.root_path, 'static/images/gaussian_img.png')
        filename = "gaussian_img.png"
        if not os.path.isfile(processed_path):
            gaussian(form_picture, 2)  # sigma = 2
        processed_path = url_for('static', filename='images/' + 'gaussian_img.png')
    if form_process == "sobel":
        processed_path = os.path.join(app.root_path, 'static/images/sobel_img.png')
        filename = "sobel_img.png"
        if not os.path.isfile(processed_path):
            sobel_edge(form_picture)
        processed_path = url_for('static', filename='images/' + 'sobel_img.png')
    if form_process == "nonMax":
        processed_path = os.path.join(app.root_path, 'static/images/non_max_img.png')
        filename = "non_max_img.png"
        if not os.path.isfile(processed_path):
            nonMax(form_picture)
        processed_path = url_for('static', filename='images/' + 'non_max_img.png')
    if form_process == 'hessian':
        processed_path = os.path.join(app.root_path, 'static/images/hessian_img.png')
        filename = "hessian_img.png"
        if not os.path.isfile(processed_path):
            hessian(form_picture)
        processed_path = url_for('static', filename='images/' + 'hessian_img.png')
    if form_process == 'ransac':
        processed_path = os.path.join(app.root_path, 'static/images/ransac_img.png')
        filename = "ransac_img.png"
        if not os.path.isfile(processed_path):
            # RANSAC line fitting runs on the Hessian keypoint image.
            hessian_img = hessian(form_picture)
            RANSAC(form_picture, hessian_img)
        processed_path = url_for('static', filename='images/' + 'ransac_img.png')
    return processed_path, filename


# Route for home page which has the main functionality of the website
# Contains a form for image upload and space for displaying result
# Also displays an optional download button after processing
# Methods:
#   GET: Called when user navigates to this route. Image displays are blank in this case.
#        (All cached images except the placeholder are purged.)
#   POST: Called when user submits the form. Image display areas show original image and
#         the result with selected filter applied
@app.route("/", methods=['GET', 'POST'])
def home():
    download = False
    form = ImageProcessForm()
    # Placeholder shown until the user submits an image.
    picture_path = url_for('static', filename='images/' + 'default_original.jpeg')
    processed_path = url_for('static', filename='images/' + 'default_original.jpeg')
    images_folder = os.path.join(app.root_path, 'static/images')
    if request.method == 'GET':
        # Fresh visit: clear every cached upload/result, keep only the placeholder.
        for image_file in os.listdir(images_folder):
            if image_file != "default_original.jpeg":
                os.remove(os.path.join(images_folder, image_file))
    if form.validate_on_submit():
        download = True
        picture_path = os.path.join(app.root_path, 'static/images/upload.png')
        if form.image.data is not None:
            # New upload: save it and invalidate previous results so process_picture
            # recomputes against the new image instead of hitting the file cache.
            form.image.data.save(picture_path)
            for image_file in os.listdir(images_folder):
                if image_file != "default_original.jpeg" and image_file != "upload.png":
                    os.remove(os.path.join(images_folder, image_file))
        processed_path, filename = process_picture(picture_path, form.algorithm.data)
        picture_path = url_for('static', filename='images/upload.png')
        return render_template('home.html', title='Image Processor', original_image=picture_path,
                               processed_image=processed_path, form=form, download=download, filename=filename)
    return render_template('home.html', title='Image Processor', original_image=picture_path,
                           processed_image=processed_path, form=form, download=download)


# Downloads an image from storage to user's device
# Inputs:
#   filename: file path of the image to be downloaded
@app.route('/download/<filename>')
def download(filename):
    file_path = os.path.join(app.root_path, 'static/images', filename)
    return send_file(file_path, as_attachment=True)


# Prevents image from being fetched by the browser from its cache
# This is necessary to avoid the issue of wrong image being displayed in result area if the page is not refreshed
# It is accomplished by modifying some header values in the response object
# Inputs:
#   response: the response object from the server
@app.after_request
def after_request(response):
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
    response.headers["Expires"] = 0
    response.headers["Pragma"] = "no-cache"
    return response
StarcoderdataPython
3285766
<reponame>MattiasFredriksson/py-c3d<gh_stars>0 ''' Classes used to represent the concept of a parameter in a .c3d file. ''' import struct import numpy as np from .utils import DEC_to_IEEE, DEC_to_IEEE_BYTES class ParamData(object): '''A class representing a single named parameter from a C3D file. Attributes ---------- name : str Name of this parameter. dtype: DataTypes Reference to the DataTypes object associated with the file. desc : str Brief description of this parameter. bytes_per_element : int, optional For array data, this describes the size of each element of data. For string data (including arrays of strings), this should be -1. dimensions : list of int For array data, this describes the dimensions of the array, stored in column-major (Fortran) order. For arrays of strings, the dimensions here will be the number of columns (length of each string) followed by the number of rows (number of strings). bytes : str Raw data for this parameter. ''' def __init__(self, name, dtype, desc='', bytes_per_element=1, dimensions=None, bytes=b'', handle=None): '''Set up a new parameter, only the name is required.''' self.name = name self.dtypes = dtype self.desc = desc self.bytes_per_element = bytes_per_element self.dimensions = dimensions or [] self.bytes = bytes if handle: self.read(handle) def __repr__(self): return '<Param: {}>'.format(self.desc) @property def num_elements(self) -> int: '''Return the number of elements in this parameter's array value.''' e = 1 for d in self.dimensions: e *= d return e @property def total_bytes(self) -> int: '''Return the number of bytes used for storing this parameter's data.''' return self.num_elements * abs(self.bytes_per_element) @property def binary_size(self) -> int: '''Return the number of bytes needed to store this parameter.''' return ( 1 + # group_id 2 + # next offset marker 1 + len(self.name.encode('utf-8')) + # size of name and name bytes 1 + # data size # size of dimensions and dimension bytes 1 + len(self.dimensions) + 
self.total_bytes + # data 1 + len(self.desc.encode('utf-8')) # size of desc and desc bytes ) def write(self, group_id, handle): '''Write binary data for this parameter to a file handle. Parameters ---------- group_id : int The numerical ID of the group that holds this parameter. handle : file handle An open, writable, binary file handle. ''' name = self.name.encode('utf-8') handle.write(struct.pack('bb', len(name), group_id)) handle.write(name) handle.write(struct.pack('<h', self.binary_size - 2 - len(name))) handle.write(struct.pack('b', self.bytes_per_element)) handle.write(struct.pack('B', len(self.dimensions))) handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions)) if self.bytes is not None and len(self.bytes) > 0: handle.write(self.bytes) desc = self.desc.encode('utf-8') handle.write(struct.pack('B', len(desc))) handle.write(desc) def read(self, handle): '''Read binary data for this parameter from a file handle. This reads exactly enough data from the current position in the file to initialize the parameter. 
''' self.bytes_per_element, = struct.unpack('b', handle.read(1)) dims, = struct.unpack('B', handle.read(1)) self.dimensions = [struct.unpack('B', handle.read(1))[ 0] for _ in range(dims)] self.bytes = b'' if self.total_bytes: self.bytes = handle.read(self.total_bytes) desc_size, = struct.unpack('B', handle.read(1)) self.desc = desc_size and self.dtypes.decode_string(handle.read(desc_size)) or '' def _as(self, dtype): '''Unpack the raw bytes of this param using the given struct format.''' return np.frombuffer(self.bytes, count=1, dtype=dtype)[0] def _as_array(self, dtype, copy=True): '''Unpack the raw bytes of this param using the given data format.''' if not self.dimensions: return [self._as(dtype)] elems = np.frombuffer(self.bytes, dtype=dtype) # Reverse shape as the shape is defined in fortran format view = elems.reshape(self.dimensions[::-1]) if copy: return view.copy() return view class ParamReadonly(object): ''' Wrapper exposing readonly attributes of a `c3d.parameter.ParamData` entry. ''' def __init__(self, data): self._data = data def __eq__(self, other): return self._data is other._data @property def name(self) -> str: ''' Get the parameter name. ''' return self._data.name @property def desc(self) -> str: ''' Get the parameter descriptor. ''' return self._data.desc @property def dtypes(self): ''' Convenience accessor to the `c3d.dtypes.DataTypes` instance associated with the parameter. ''' return self._data.dtypes @property def dimensions(self) -> (int, ...): ''' Shape of the parameter data (Fortran format). 
''' return self._data.dimensions @property def num_elements(self) -> int: '''Return the number of elements in this parameter's array value.''' return self._data.num_elements @property def bytes_per_element(self) -> int: '''Return the number of bytes used to store each data element.''' return self._data.bytes_per_element @property def total_bytes(self) -> int: '''Return the number of bytes used for storing this parameter's data.''' return self._data.total_bytes @property def binary_size(self) -> int: '''Return the number of bytes needed to store this parameter.''' return self._data.binary_size @property def int8_value(self): '''Get the parameter data as an 8-bit signed integer.''' return self._data._as(self.dtypes.int8) @property def uint8_value(self): '''Get the parameter data as an 8-bit unsigned integer.''' return self._data._as(self.dtypes.uint8) @property def int16_value(self): '''Get the parameter data as a 16-bit signed integer.''' return self._data._as(self.dtypes.int16) @property def uint16_value(self): '''Get the parameter data as a 16-bit unsigned integer.''' return self._data._as(self.dtypes.uint16) @property def int32_value(self): '''Get the parameter data as a 32-bit signed integer.''' return self._data._as(self.dtypes.int32) @property def uint32_value(self): '''Get the parameter data as a 32-bit unsigned integer.''' return self._data._as(self.dtypes.uint32) @property def uint_value(self): ''' Get the parameter data as a unsigned integer of appropriate type. ''' if self.bytes_per_element >= 4: return self.uint32_value elif self.bytes_per_element >= 2: return self.uint16_value else: return self.uint8_value @property def int_value(self): ''' Get the parameter data as a signed integer of appropriate type. 
''' if self.bytes_per_element >= 4: return self.int32_value elif self.bytes_per_element >= 2: return self.int16_value else: return self.int8_value @property def float_value(self): '''Get the parameter data as a floating point value of appropriate type.''' if self.bytes_per_element > 4: if self.dtypes.is_dec: raise AttributeError("64 bit DEC floating point is not supported.") # 64-bit floating point is not a standard return self._data._as(self.dtypes.float64) elif self.bytes_per_element == 4: if self.dtypes.is_dec: return DEC_to_IEEE(self._data._as(np.uint32)) else: # is_mips or is_ieee return self._data._as(self.dtypes.float32) else: raise AttributeError("Only 32 and 64 bit floating point is supported.") @property def bytes_value(self) -> bytes: '''Get the raw byte string.''' return self._data.bytes @property def string_value(self): '''Get the parameter data as a unicode string.''' return self.dtypes.decode_string(self._data.bytes) @property def int8_array(self): '''Get the parameter data as an array of 8-bit signed integers.''' return self._data._as_array(self.dtypes.int8) @property def uint8_array(self): '''Get the parameter data as an array of 8-bit unsigned integers.''' return self._data._as_array(self.dtypes.uint8) @property def int16_array(self): '''Get the parameter data as an array of 16-bit signed integers.''' return self._data._as_array(self.dtypes.int16) @property def uint16_array(self): '''Get the parameter data as an array of 16-bit unsigned integers.''' return self._data._as_array(self.dtypes.uint16) @property def int32_array(self): '''Get the parameter data as an array of 32-bit signed integers.''' return self._data._as_array(self.dtypes.int32) @property def uint32_array(self): '''Get the parameter data as an array of 32-bit unsigned integers.''' return self._data._as_array(self.dtypes.uint32) @property def int64_array(self): '''Get the parameter data as an array of 32-bit signed integers.''' return self._data._as_array(self.dtypes.int64) @property 
def uint64_array(self): '''Get the parameter data as an array of 32-bit unsigned integers.''' return self._data._as_array(self.dtypes.uint64) @property def float32_array(self): '''Get the parameter data as an array of 32-bit floats.''' # Convert float data if not IEEE processor if self.dtypes.is_dec: # _as_array but for DEC if not self.dimensions: return [self.float_value] return DEC_to_IEEE_BYTES(self._data.bytes).reshape(self.dimensions[::-1]) # Reverse fortran format else: # is_ieee or is_mips return self._data._as_array(self.dtypes.float32) @property def float64_array(self): '''Get the parameter data as an array of 64-bit floats.''' # Convert float data if not IEEE processor if self.dtypes.is_dec: raise ValueError('Unable to convert bytes encoded in a 64 bit floating point DEC format.') else: # is_ieee or is_mips return self._data._as_array(self.dtypes.float64) @property def float_array(self): '''Get the parameter data as an array of 32 or 64 bit floats.''' # Convert float data if not IEEE processor if self.bytes_per_element == 4: return self.float32_array elif self.bytes_per_element == 8: return self.float64_array else: raise TypeError("Parsing parameter bytes to an array with %i bit " % self.bytes_per_element + "floating-point precission is not unsupported.") @property def int_array(self): '''Get the parameter data as an array of integer values.''' # Convert float data if not IEEE processor if self.bytes_per_element == 1: return self.int8_array elif self.bytes_per_element == 2: return self.int16_array elif self.bytes_per_element == 4: return self.int32_array elif self.bytes_per_element == 8: return self.int64_array else: raise TypeError("Parsing parameter bytes to an array with %i bit integer values is not unsupported." 
% self.bytes_per_element) @property def uint_array(self): '''Get the parameter data as an array of integer values.''' # Convert float data if not IEEE processor if self.bytes_per_element == 1: return self.uint8_array elif self.bytes_per_element == 2: return self.uint16_array elif self.bytes_per_element == 4: return self.uint32_array elif self.bytes_per_element == 8: return self.uint64_array else: raise TypeError("Parsing parameter bytes to an array with %i bit integer values is not unsupported." % self.bytes_per_element) @property def bytes_array(self): '''Get the parameter data as an array of raw byte strings.''' # Decode different dimensions if len(self.dimensions) == 0: return np.array([]) elif len(self.dimensions) == 1: return np.array(self._data.bytes) else: # Convert Fortran shape (data in memory is identical, shape is transposed) word_len = self.dimensions[0] dims = self.dimensions[1:][::-1] # Identical to: [:0:-1] byte_steps = np.cumprod(self.dimensions[:-1])[::-1] # Generate mult-dimensional array and parse byte words byte_arr = np.empty(dims, dtype=object) for i in np.ndindex(*dims): # Calculate byte offset as sum of each array index times the byte step of each dimension. off = np.sum(np.multiply(i, byte_steps)) byte_arr[i] = self._data.bytes[off:off+word_len] return byte_arr @property def string_array(self): '''Get the parameter data as a python array of unicode strings.''' # Decode different dimensions if len(self.dimensions) == 0: return np.array([]) elif len(self.dimensions) == 1: return np.array([self.string_value]) else: # Parse byte sequences byte_arr = self.bytes_array # Decode sequences for i in np.ndindex(byte_arr.shape): byte_arr[i] = self.dtypes.decode_string(byte_arr[i]) return byte_arr @property def any_value(self): ''' Get the parameter data as a value of 'traditional type'. Traditional types are defined in the Parameter section in the [user manual]. 
Returns ------- value : int, float, or str Depending on the `bytes_per_element` field, a traditional type can be a either a signed byte, signed short, 32-bit float, or a string. [user manual]: https://www.c3d.org/docs/C3D_User_Guide.pdf ''' if self.bytes_per_element >= 4: return self.float_value elif self.bytes_per_element >= 2: return self.int16_value elif self.bytes_per_element == -1: return self.string_value else: return self.int8_value @property def any_array(self): ''' Get the parameter data as an array of 'traditional type'. Traditional types are defined in the Parameter section in the [user manual]. Returns ------- value : array Depending on the `bytes_per_element` field, a traditional type can be a either a signed byte, signed short, 32-bit float, or a string. [user manual]: https://www.c3d.org/docs/C3D_User_Guide.pdf ''' if self.bytes_per_element >= 4: return self.float_array elif self.bytes_per_element >= 2: return self.int16_array elif self.bytes_per_element == -1: return self.string_array else: return self.int8_array @property def _as_any_uint(self): ''' Attempt to parse the parameter data as any unsigned integer format. Checks if the integer is stored as a floating point value. Can be used to read 'POINT:FRAMES' or 'POINT:LONG_FRAMES' when not accessed through `c3d.manager.Manager.last_frame`. ''' if self.bytes_per_element >= 4: # Check if float value representation is an integer value = self.float_value if float(value).is_integer(): return int(value) return self.uint32_value elif self.bytes_per_element >= 2: return self.uint16_value else: return self.uint8_value class Param(ParamReadonly): ''' Wrapper exposing both readable and writable attributes of a `c3d.parameter.ParamData` entry. ''' def __init__(self, data): super(Param, self).__init__(data) def readonly(self): ''' Returns a readonly `c3d.parameter.ParamReadonly` instance. ''' return ParamReadonly(self._data) @property def bytes(self) -> bytes: ''' Get or set the parameter bytes. 
''' return self._data.bytes @bytes.setter def bytes(self, value): self._data.bytes = value
StarcoderdataPython
48326
# Restrict `from <module> import *` to the Database class only.
__all__ = ['Database']
StarcoderdataPython
3253053
from lib.modules.DetectionModule import *


class Module(DetectionModule):
    """Flags sandboxed samples that create a DarkComet-style mutex."""

    def __init__(self, name, event_json):
        super().__init__(name=name, event_json=event_json)

    def run(self):
        self.logger.debug('Running the {} detection module'.format(self.name))

        # DarkComet mutexes carry this prefix; compare case-insensitively.
        marker = 'dc_mutex-'

        # Loop over each sandboxed sample in the event.
        for sample in self.event_json['sandbox']:
            # Loop over all of the mutexes.
            for mutex in sample['mutexes']:
                if marker not in mutex.lower():
                    continue
                self.detections.append('Detected Dark Comet by the mutex: {}'.format(mutex))
                self.tags.append('darkcomet')
StarcoderdataPython
3226679
<filename>tests/nn/data_parallel/test_fsdp_overlap.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring

""" Test FSDP and ensure expected overlapping between all_gather and forward. """

from statistics import mean
import time
from unittest.mock import patch

import pytest
import torch
from torch.cuda import Event
import torch.multiprocessing as mp
import torch.nn as nn

from fairscale.nn import enable_wrap, wrap
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.utils.testing import (
    dist_init,
    get_cycles_per_ms,
    skip_if_single_gpu,
    teardown,
    temp_files_ctx,
    torch_version,
)


class Layer(nn.Module):
    """A dummy layer that burns `compute_cycles` on the GPU in forward().

    When `has_params` is True it owns a single scalar parameter so that
    FSDP has something to all-gather for this layer.
    """

    def __init__(self, compute_cycles, has_params: bool):
        super().__init__()
        self.sleep_cycles = compute_cycles
        self.optional_param = None
        if has_params:
            self.optional_param = nn.Parameter(torch.rand(1))

    def forward(self, x):
        # Get 2 events.
        self.e1 = Event(enable_timing=True)
        self.e2 = Event(enable_timing=True)

        # Record the fake forward compute time.
        self.e1.record()
        if self.sleep_cycles > 0:
            torch.cuda._sleep(self.sleep_cycles)
        if self.optional_param is not None:
            x = x + self.optional_param  # force the param to be part of the graph
        self.e2.record()
        return x

    def get_time(self):
        # return the recorded duration.
        return self.e1.elapsed_time(self.e2)


def _create_model(fsdp_config, compute_cycles, has_params: bool):
    """Build a 4-deep Sequential of FSDP-wrapped Layers on the current GPU."""
    with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
        model = wrap(
            nn.Sequential(
                wrap(Layer(compute_cycles, has_params)),
                wrap(Layer(compute_cycles, has_params)),
                wrap(Layer(compute_cycles, has_params)),
                wrap(Layer(compute_cycles, has_params)),
            )
        ).cuda()
    return model


class Min10:
    """Keeps the 10 smallest samples seen; avg() averages them.

    Used to reject timing outliers caused by nondeterministic system events.
    """

    def __init__(self):
        self.data = []

    def add(self, new_data):
        if len(self.data) < 10:
            self.data.append(new_data)
        else:
            self.data = sorted(self.data)
            if new_data < self.data[-1]:
                self.data[-1] = new_data

    def avg(self):
        return mean(self.data)


def _distributed_worker(
    gpu_id,
    world_size,
    fsdp_config,
    tempfile,
    tempfile_rpc,
):
    """Per-rank body spawned by test_forward_overlap.

    Times four scenarios (no work / only all-gather / only compute / both)
    and asserts that compute and all_gather overlap on the GPU.
    """
    torch.cuda.set_device(gpu_id)

    rank = gpu_id
    result = dist_init(rank, world_size, tempfile, tempfile_rpc)
    assert result, "Dist init failed"

    # Save the original torch.distributed.all_gather function since we will
    # patch it to include an artificial delay.
    orig_all_gather = torch.distributed.all_gather

    def run(compute_cycles, all_gather_cycles):
        has_params = all_gather_cycles > 0
        model = _create_model(fsdp_config, compute_cycles, has_params)

        # Get the input and sets the input's requires_grad to True because
        # we have a fake compute in the forward pass.
        batch = torch.rand(1).cuda()
        batch.requires_grad = True

        # We run 20 iterations but only collect timing data from the minimal 10
        # data points because nondeterministic system events can disturb the timing.
        cpu_iter = Min10()
        cpu_wait = Min10()
        gpu_compute = Min10()
        gpu_total = Min10()
        for _ in range(20):
            # Get two events for measuring the overall time.
            e1 = Event(enable_timing=True)
            e2 = Event(enable_timing=True)

            cpu_start = time.process_time()

            all_gather_called = False

            def _delayed_all_gather(*args, **kwargs):
                nonlocal all_gather_called
                all_gather_called = True
                torch.cuda._sleep(all_gather_cycles)
                return orig_all_gather(*args, **kwargs)

            # forward pass
            #
            # Even though both e1 & e2 are on the compute stream, since
            # compute depends on all_gather, e2-e1 includes all_gather time.
            e1.record()
            with patch("torch.distributed.all_gather", _delayed_all_gather):
                out = model(batch)
                if has_params and world_size > 1:
                    assert all_gather_called
                else:
                    assert not all_gather_called
            e2.record()

            # backward pass
            out.backward()
            if torch_version() >= (1, 7, 0):
                model.zero_grad(set_to_none=True)
            else:
                for p in model.parameters():
                    p.grad = None

            cpu_iter_time = time.process_time() - cpu_start

            # wait for gpu
            out.item()
            cpu_wait_for_gpu_time = time.process_time() - cpu_start - cpu_iter_time

            # get sum of the compute time
            times = []
            for mod in model.modules():
                if not isinstance(mod, Layer):
                    continue
                times.append(mod.get_time())

            # get gpu compute + all_gather time
            overall_gpu_time = e1.elapsed_time(e2)

            cpu_iter.add(cpu_iter_time)
            cpu_wait.add(cpu_wait_for_gpu_time)
            gpu_compute.add(sum(times))
            gpu_total.add(overall_gpu_time)

        del model
        return {
            "cpu_iter": cpu_iter.avg(),
            "cpu_wait": cpu_wait.avg(),
            "gpu_compute": gpu_compute.avg(),
            "gpu_total": gpu_total.avg(),
        }

    sleep_cycles = int(100 * get_cycles_per_ms())

    e1 = run(0, 0)  # no compute, no all-gather
    e2 = run(0, sleep_cycles)  # no compute, only all-gather
    e3 = run(sleep_cycles, 0)  # only compute, no all-gather
    e4 = run(sleep_cycles, sleep_cycles)  # both compute and all-gather
    debug_string = f"\nrank{rank}:\n e1: {e1}\n e2: {e2}\n e3: {e3}\n e4: {e4}"
    print(debug_string)

    # Check the cpu/gpu timing. CPU should run ahead of GPU. Therefore, cpu-gpu
    # wait should be long, except when there is no real work on GPU.
    #
    # If the assertions fail below, we likely have a cpu-gpu wait in the forward/backward pass.
    short = [e1["cpu_iter"], e2["cpu_iter"], e3["cpu_iter"], e4["cpu_iter"], e1["cpu_wait"]]
    long = [e3["cpu_wait"], e4["cpu_wait"]]
    if world_size == 1:
        short.append(e2["cpu_wait"])  # all gather should not be happening.
    else:
        long.append(e2["cpu_wait"])  # all gather should happen and prolong the cpu-gpu wait.
    for s in short:
        for l in long:
            # 10X longer is a safe margin, since the GPU work timing is around 100X more
            # of that of the CPU.
            assert s * 10 < l, f"{s} * 10 < {l} in " + debug_string

    # Check the GPU timing.
    short = [e1["gpu_compute"], e1["gpu_total"], e2["gpu_compute"]]
    long = [e3["gpu_compute"], e3["gpu_total"], e4["gpu_compute"], e4["gpu_total"]]
    if world_size == 1:
        short.append(e2["gpu_total"])  # all gather should not be happening.
    else:
        long.append(e2["gpu_total"])  # all gather should happen and prolong the cpu-gpu wait.
    for s in short:
        for l in long:
            # 10X longer is a safe margin, since the time is around 100X longer
            # when there is work on GPU vs. no work.
            assert s * 10 < l, f"{s} * 10 < {l} in " + debug_string

    # Check the GPU overlapping when there is all-gather.
    if world_size > 1:
        compute_only = e3["gpu_compute"]
        all_gather_only = e2["gpu_total"]
        both = e4["gpu_total"]
        assert compute_only + all_gather_only > 1.1 * both, (
            f"{compute_only} + {all_gather_only} > 1.1 * {both} in " + debug_string
        )

    teardown()


@skip_if_single_gpu
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
@pytest.mark.parametrize("mixed", ["mixed", "full"])
def test_forward_overlap(world_size, flatten, mixed):
    """Spawn `world_size` workers and verify forward/all_gather overlap."""
    fsdp_config = {
        "flatten_parameters": flatten == "flatten",
        "mixed_precision": mixed == "mixed",
    }
    with temp_files_ctx(2) as temp_files:
        mp.spawn(
            _distributed_worker,
            (world_size, fsdp_config, temp_files[0], temp_files[1]),
            nprocs=world_size,
        )
StarcoderdataPython
143016
class Response:
    """Thin read-only wrapper over a raw API response dict (`inst`)."""

    def __init__(self, inst):
        self.instance = inst

    @property
    def id(self):
        """The response's id field.

        Bug fix: this previously returned ``self.instance['status']`` — an
        apparent copy-paste from `status` below, which made `id` and `status`
        return the same value.
        """
        return self.instance['id']

    @property
    def status(self):
        """The response's status field."""
        return self.instance['status']
StarcoderdataPython
54636
<filename>src/run_sign.py #!/usr/bin/env python # SPDX-License-Identifier: Apache-2.0 # # The OpenSearch Contributors require contributions made to # this file be licensed under the Apache-2.0 license or a # compatible open source license. import sys from sign_workflow.sign_args import SignArgs from sign_workflow.sign_artifacts import SignArtifacts from sign_workflow.signer import Signer from system import console def main(): args = SignArgs() console.configure(level=args.logging_level) sign = SignArtifacts.from_path( path=args.target, components=args.components, artifact_type=args.type, signature_type=args.sigtype, signer=Signer() ) sign.sign() if __name__ == "__main__": sys.exit(main())
StarcoderdataPython
148430
<filename>hostel_project/hostel_webapp/migrations/0001_initial.py
# Generated by Django 2.2 on 2020-08-05 14:27
# NOTE: auto-generated migration — keep in the canonical form Django emits;
# schema changes belong in a new migration, not edits here.

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):
    # Initial schema: Student and Complaint tables for the hostel webapp.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('roll_number', models.CharField(max_length=15)),
                ('room_number', models.IntegerField()),
                # NOTE(review): upload_to=None leaves the storage location at
                # its default — confirm this matches the model definition.
                ('avatar', models.ImageField(upload_to=None)),
                ('is_secretary', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Complaint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=500)),
                ('title', models.CharField(max_length=100)),
                ('complaint_pic', models.ImageField(upload_to=None)),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                # Each complaint is owned by a Student; deleting the student
                # cascades to their complaints.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hostel_webapp.Student')),
            ],
        ),
    ]
StarcoderdataPython
3364728
<reponame>jmeppley/py-metagenomics<filename>screen_table.py<gh_stars>1-10
#!/usr/bin/env python
"""
Take list of reads (or any names) from file.
Remove these (or all but these) from each of a list of text tables
(e.g. m8 files). Table to screen can also be piped through STDIN.

To identify reads hitting specific sequences, use screenHitTable.
"""

import sys
import logging
from edl.util import *


def main():
    """Parse CLI options, load the screening list, and filter each table."""
    import argparse

    # set up CLI
    usage = "usage: %prog -l LIST [OPTIONS] TABLE(S)"
    description = __doc__
    parser = argparse.ArgumentParser(description=description)
    # add_screen_arguments / add_IO_arguments come from edl.util (star import)
    add_screen_arguments(parser, accs=True)
    add_IO_arguments(parser)
    parser.add_argument(
        "-d",
        "--delim",
        dest="delim",
        default="\t",
        help="Input table delimiter (tab is default). If set to 'None', "
        "split on any whitespace.",
        metavar="DELIM")
    parser.add_argument(
        "-c",
        "--col",
        dest="col",
        type=int,
        default=0,
        help="Column to screen (0 is default)",
        metavar="INDEX")
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    # allow funky characters in delim arguments
    # (unicode-escape turns a literal "\t" typed on the CLI into a real tab)
    if arguments.delim == 'None':
        arguments.delim = None
    elif arguments.delim != '\t':
        arguments.delim = bytes(
            arguments.delim, 'utf-8').decode('unicode-escape')
    logging.debug("Table delim: '%s'" % (arguments.delim))
    if arguments.listDelim is not None:
        arguments.listDelim = bytes(
            arguments.listDelim, 'utf-8').decode('unicode-escape')

    # get read list
    logging.debug("List file: '%s'\nList delim: '%s'" %
                  (arguments.listFile, arguments.listDelim))
    screen_set = get_screen_list(arguments, accs=arguments.accs)
    logging.debug("Got list of %d reads" % (len(screen_set)))
    if len(screen_set) > 0:
        logging.debug("For example: %s" % (next(iter(screen_set))))

    # Filter every input table (files or stdin) into its paired output handle.
    for (inhandle, outhandle) in inputIterator(arguments):
        scanFileForReads(
            screen_set,
            inhandle,
            arguments.keep,
            outhandle,
            arguments.delim,
            arguments.col,
            arguments.accs)

################
# Functions
################


def die(msg):
    """Print `msg` to stderr and exit with a non-zero status."""
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)


def scanFileForReads(reads, inhandle, keep, outhandle, delim, col, accs):
    """Copy lines from `inhandle` to `outhandle`, filtered by `reads`.

    The value in column `col` (split on `delim`) is looked up in the
    `reads` set; with `keep` True only matching lines are written,
    with `keep` False matching lines are dropped. When `accs` is set the
    column value is first passed through parseAcc (from edl.util —
    presumably normalizes accession IDs; confirm against edl.util).
    """
    lineCount = 0
    matchCount = 0
    if keep:
        logging.info("Keeping matched reads")
    else:
        logging.info("Discarding matched reads")
    for line in inhandle:
        lineCount += 1
        read = line.rstrip('\r\n').split(delim)[col]
        if accs:
            read = parseAcc(read)
        logging.debug("looking for %s in %s" % (read, line))
        match = read in reads
        if match == keep:
            # write line if either
            #  - read matches list AND keep is True
            #  - read not in list AND keep is False
            outhandle.write(line)
            matchCount += 1
    logging.info("Kept %d of %d lines" % (matchCount, lineCount))


if __name__ == "__main__":
    main()
StarcoderdataPython
12285
<reponame>Brownc03/python-api-challenge
# SECURITY NOTE(review): credentials are hard-coded and committed to source
# control. They should be loaded from environment variables (or an untracked
# config file), and any key already pushed to a public repo must be rotated.

# OpenWeatherMap API Key
weather_api_key = "ae41fcf95db0d612b74e2b509abe9684"

# Google API Key
g_key = "<KEY>"
StarcoderdataPython
3396776
<gh_stars>10-100 #! /usr/bin/env python # -*- coding: utf-8 -*- import pkgutil import six from nose.tools import ( assert_equal, assert_not_equal, assert_raises, assert_is_instance, raises) import url from url.url import StringURL, UnicodeURL def test_bad_port(): def test(example): assert_raises(ValueError, url.parse, example) examples = [ 'http://www.python.org:65536/', 'http://www.python.org:-20/', 'http://www.python.org:8589934592/', 'http://www.python.org:80hello/' ] for example in examples: yield test, example def test_deparam_sane(): def test(bad, good): assert_equal(url.parse(bad).strip().deparam(['c']).unicode, good) examples = [ ('?a=1&b=2&c=3&d=4', '?a=1&b=2&d=4'), # Maintains order ('?a=1&&&&&&b=2' , '?a=1&b=2' ), # Removes excess &'s (';a=1;b=2;c=3;d=4', ';a=1;b=2;d=4'), # Maintains order (';a=1;;;;;;b=2' , ';a=1;b=2' ), # Removes excess ;'s (';foo_c=2' , ';foo_c=2' ), # Not overzealous ('?foo_c=2' , '?foo_c=2' ), # ... ('????foo=2' , '?foo=2' ), # Removes leading ?'s (';foo' , ';foo' ), ('?foo' , '?foo' ), ('' , '' ) ] base = 'http://testing.com/page' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_deparam_case_insensitivity(): def test(bad, good): assert_equal(url.parse(bad).deparam(['HeLlO']).unicode, good) examples = [ ('?hELLo=2', ''), ('?HELLo=2', '') ] base = 'http://testing.com/page' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_filter_params(): def function(name, value): '''Only keep even-valued parameters.''' return int(value) % 2 def test(bad, good): assert_equal(url.parse(bad).filter_params(function).unicode, good) examples = [ ('?a=1&b=2', '?b=2'), (';a=1;b=2', ';b=2') ] base = 'http://testing.com/page' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_lower(): def test(bad, good): assert_equal(url.parse(bad).unicode, good) examples = [ ('www.TESTING.coM' , 'www.testing.com/' ), ('WWW.testing.com' , 
'www.testing.com/' ), ('WWW.testing.com/FOO', 'www.testing.com/FOO') ] for bad, good in examples: bad = 'http://' + bad good = 'http://' + good yield test, bad, good def test_abspath(): def test(bad, good): assert_equal(url.parse(bad).abspath().unicode, good) examples = [ ('howdy' , 'howdy' ), ('hello//how//are' , 'hello/how/are'), ('hello/../how/are', 'how/are' ), ('hello//..//how/' , 'how/' ), ('a/b/../../c' , 'c' ), ('../../../c' , 'c' ), ('./hello' , 'hello' ), ('./././hello' , 'hello' ), ('a/b/c/' , 'a/b/c/' ), ('a/b/c/..' , 'a/b/' ), ('a/b/.' , 'a/b/' ), ('a/b/./././' , 'a/b/' ), ('a/b/../' , 'a/' ), ('.' , '' ), ('../../..' , '' ), ('////foo' , 'foo' ), ('/foo/../whiz.' , 'whiz.' ), ('/foo/whiz./' , 'foo/whiz./' ), ('/foo/whiz./bar' , 'foo/whiz./bar') ] base = 'http://testing.com/' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_escape(): def test(bad, good): assert_equal(url.parse(bad).escape().unicode, good) # Escaping should also be idempotent assert_equal(url.parse(bad).escape().escape().unicode, good) examples = [ ('hello%20and%20how%20are%20you', 'hello%20and%20how%20are%20you'), ('danny\'s pub' , 'danny\'s%20pub' ), ('danny%27s pub' , 'danny\'s%20pub' ), ('danny\'s pub?foo=bar&yo' , 'danny\'s%20pub?foo=bar&yo' ), ('hello%2c world' , 'hello,%20world' ), ('%3f%23%5b%5d' , '%3F%23%5B%5D' ), # Thanks to @myronmarston for these test cases ('foo?bar none=foo bar' , 'foo?bar%20none=foo%20bar' ), ('foo;a=1;b=2?a=1&b=2' , 'foo;a=1;b=2?a=1&b=2' ), ('foo?bar=["hello","howdy"]' , 'foo?bar=%5B%22hello%22,%22howdy%22%5D'), # Example from the wild ('http://www.balset.com/DE3FJ4Yg/p:h=300&m=2011~07~25~2444705.png&ma=cb&or=1&w=400/2011/10/10/2923710.jpg', 'http://www.balset.com/DE3FJ4Yg/p:h=300&m=2011~07~25~2444705.png&ma=cb&or=1&w=400/2011/10/10/2923710.jpg'), # Example with userinfo ('http://user%3Apass@foo.com/', 'http://user:pass@foo.com/') ] base = 'http://testing.com/' for bad, good in examples: bad = base + bad good 
= base + good yield test, bad, good def test_strict_escape(): def test(bad, good): assert_equal(url.parse(bad).escape(strict=True).unicode, good) # Escaping should also be idempotent assert_equal( url.parse(bad).escape(strict=True).escape(strict=True).unicode, good) examples = [ ('http://testing.com/danny%27s pub', 'http://testing.com/danny%27s%20pub'), ('http://testing.com/this%5Fand%5Fthat', 'http://testing.com/this_and_that'), ('http://user:pass@foo.com', 'http://user:pass@foo.com/'), (u'http://José:no <EMAIL>', 'http://Jos%C3%A9:no%20<EMAIL>/'), ('http://oops!:don%27t@<EMAIL>.com', 'http://oops!:<EMAIL>%2<EMAIL>/'), (u'española,nm%2cusa.html?gunk=junk+glunk&foo=bar baz', 'espa%C3%B1ola,nm%2Cusa.html?gunk=junk+glunk&foo=bar%20baz'), ('http://foo.com/bar\nbaz.html\n', 'http://foo.com/bar%0Abaz.html%0A'), ('http://foo.com/bar.jsp?param=\n/value%2F', 'http://foo.com/bar.jsp?param=%0A/value%2F'), ('http://user%3apass@foo.com/', 'http://user%3Apass@foo.com/') ] for bad, good in examples: yield test, bad, good def test_userinfo(): def test(bad, good): assert_equal(url.parse(bad).unicode, good) examples = [ ('http://user:pass@foo.com', 'http://user:pass@foo.com'), ('http://just-a-name@foo.com', 'http://just-a-name@foo.com') ] suffix = '/page.html' for bad, good in examples: bad = bad + suffix good = good + suffix yield test, bad, good def test_not_equal(): def test(first, second): # None of these examples should evaluate as strictly equal assert_not_equal(url.parse(first), url.parse(second), 'URL(%s) should not equal URL(%s)' % (first, second)) # Using a string assert_not_equal(url.parse(first), second, 'URL(%s) should not equal %s' % (first, second)) # Symmetric assert_not_equal(url.parse(second), url.parse(first), 'URL(%s) should not equal URL(%s)' % (second, first)) # Using a string, symmetric assert_not_equal(url.parse(second), first, 'URL(%s) should not equal %s' % (second, first)) # Should equal self assert_equal(url.parse(first), first, 'URL(%s) should equal 
itself' % first) assert_equal(url.parse(second), second, 'URL(%s) should equal itself' % second) # These examples should not work. This includes all the examples from equivalence # test as well. examples = [ ('http://foo.com:80' , 'http://foo.com/' ), ('https://foo.com:443' , 'https://foo.com/' ), ('http://foo.com/?b=2&&&&a=1', 'http://foo.com/?a=1&b=2' ), ('http://foo.com/%A2%B3' , 'http://foo.com/%a2%b3' ), ('http://foo.com/a/../b/.' , 'http://foo.com/b/' ), (u'http://www.kündigen.de/' , 'http://www.xn--kndigen-n2a.de/'), (u'http://www.kündiGen.DE/' , 'http://www.xn--kndigen-n2a.de/'), ('http://foo.com:' , 'http://foo.co.uk/' ), ('http://foo.com:8080' , 'http://foo.com/' ), ('https://foo.com:4430' , 'https://foo.com/' ), ('http://foo.com?page&foo' , 'http://foo.com/?page' ), ('http://foo.com/?b=2&c&a=1' , 'http://foo.com/?a=1&b=2' ), ('http://foo.com/%A2%B3%C3' , 'http://foo.com/%a2%b3' ), (u'http://www.kündïgen.de/' , 'http://www.xn--kndigen-n2a.de/'), ('http://user:pass@foo.com/' , 'http://foo.com/' ), ('http://just-user@foo.com/' , 'http://foo.com/' ), ('http://user:pass@foo.com/' , 'http://pass:user@foo.com/' ) ] for first, second in examples: yield test, first, second def test_equiv(): def test(first, second): # Equiv with another URL object assert url.parse(first).equiv(url.parse(second)) # Equiv with a string assert url.parse(first).equiv(second) # Make sure it's also symmetric assert url.parse(second).equiv(url.parse(first)) # Symmetric with string arg assert url.parse(second).equiv(first) # Should be equivalent to self assert url.parse(first).equiv(first) assert url.parse(second).equiv(second) # Things to consider here are: # # - default ports (https://foo.com/ == https://foo.com:443/) # - capitalization of the hostname # - capitalization of the escaped characters in the path examples = [ ('http://foo.com:80' , 'http://foo.com/' ), ('https://foo.com:443' , 'https://foo.com/' ), ('http://foo.com/?b=2&&&&a=1', 'http://foo.com/?a=1&b=2' ), 
('http://foo.com/%A2%B3' , 'http://foo.com/%a2%b3' ), ('http://foo.com/a/../b/.' , 'http://foo.com/b/' ), (u'http://www.kündigen.de/' , 'http://www.xn--kndigen-n2a.de/'), (u'http://www.kündiGen.DE/' , 'http://www.xn--kndigen-n2a.de/'), ('http://user:pass@foo.com/' , 'http://foo.com/' ), ('http://just-user@foo.com/' , 'http://foo.com/' ) ] for first, second in examples: yield test, first, second def test_not_equiv(): def test(first, second): # Equiv with another URL object assert not url.parse(first).equiv(url.parse(second)) # Equiv with a string assert not url.parse(first).equiv(second) # Make sure it's also symmetric assert not url.parse(second).equiv(url.parse(first)) # Symmetric with string arg assert not url.parse(second).equiv(first) # Should be equivalent to self assert url.parse(first).equiv(first) assert url.parse(second).equiv(second) # None of these examples should evaluate as strictly equal assert_not_equal(url.parse(first), url.parse(second), 'URL(%s) should not equal URL(%s)' % (first, second)) # Using a string assert_not_equal(url.parse(first), second, 'URL(%s) should not equal %s' % (first, second)) # Symmetric assert_not_equal(url.parse(second), url.parse(first), 'URL(%s) should not equal URL(%s)' % (second, first)) # Using a string, symmetric assert_not_equal(url.parse(second), first, 'URL(%s) should not equal %s' % (second, first)) # Should equal self assert_equal(url.parse(first), first, 'URL(%s) should equal itself' % first) assert_equal(url.parse(second), second, 'URL(%s) should equal itself' % second) # Now some examples that should /not/ pass examples = [ ('http://foo.com:' , 'http://foo.co.uk/' ), ('http://foo.com:8080' , 'http://foo.com/' ), ('https://foo.com:4430' , 'https://foo.com/' ), ('http://foo.com?page&foo' , 'http://foo.com/?page' ), ('http://foo.com/?b=2&c&a=1' , 'http://foo.com/?a=1&b=2' ), ('http://foo.com/%A2%B3%C3' , 'http://foo.com/%a2%b3' ), (u'http://www.kündïgen.de/' , 'http://www.xn--kndigen-n2a.de/') ] for first, second 
in examples: yield test, first, second def test_str_repr(): def test(first, second): assert_equal(str(url.parse(toparse)), strng) assert_equal(repr(url.parse(toparse)), '<url.URL object "%s" >' % strng) examples = [ ('http://foo.com/', 'http://foo.com/'), ('http://FOO.com/', 'http://foo.com/') ] for toparse, strng in examples: yield test, toparse, strng def test_canonical(): def test(bad, good): assert_equal(url.parse(bad).canonical().unicode, good) examples = [ ('?b=2&a=1&c=3', '?a=1&b=2&c=3'), (';b=2;a=1;c=3', ';a=1;b=2;c=3') ] base = 'http://testing.com/' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_defrag(): def test(bad, good): assert_equal(url.parse(bad).defrag().unicode, good) examples = [ ('foo#bar', 'foo') ] base = 'http://testing.com/' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_deuserinfo(): def test(bad, good): assert_equal(url.parse(bad).deuserinfo().unicode, good) examples = [ ('http://user:pass@foo.com/', 'http://foo.com/'), ('http://just-user@foo.com/', 'http://foo.com/') ] for bad, good in examples: yield test, bad, good def test_punycode(): def test(uni, puny): assert_equal(url.parse(uni).escape().punycode().unicode, puny) # Also make sure punycode is idempotent assert_equal( url.parse(uni).escape().punycode().punycode().unicode, puny) # Make sure that we can reverse the procedure correctly assert_equal( url.parse(uni).escape().punycode().unpunycode().unescape(), uni) # And we get what we'd expect going the opposite direction assert_equal( url.parse(puny).unescape().unpunycode().unicode, uni) examples = [ (u'http://www.kündigen.de/', 'http://www.xn--kndigen-n2a.de/'), (u'http://россия.иком.museum/', 'http://xn--h1alffa9f.xn--h1aegh.museum/'), (u'https://t…/', 'https://xn--t-9hn/'), (u'http://россия.иком.museum/испытание.html', 'http://xn--h1alffa9f.xn--h1aegh.museum/%D0%B8%D1%81%D0%BF%D1%8B%D1%82%D0%B0%D0%BD%D0%B8%D0%B5.html') ] for uni, puny in 
examples: yield test, uni, puny def test_punycode_relative_urls(): def test(example): assert_equal(url.parse(example).escape().punycode().unicode, example) # Also make sure punycode is idempotent assert_equal( url.parse(example).escape().punycode().punycode().unicode, example) # Make sure that we can reverse the procedure correctly assert_equal( url.parse(example).escape().punycode().unpunycode().unescape(), example) # And we get what we'd expect going the opposite direction assert_equal( url.parse(example).unescape().unpunycode().unicode, example) # Make sure that we can't punycode or unpunycode relative urls examples = ['foo', '../foo', '/bar/foo'] for relative in examples: yield test, relative def test_punycode_encode_errors(): def test(example): assert_raises(ValueError, url.parse('http://' + example).punycode) # Taken from url-cpp examples = [ (('a' * 3855) + '\xF4\x8F\xBF\xBF'), (('a' * 8190) + '\xC2\x80\xF2\x80\x82\x80') ] for example in examples: yield test, example def test_segment_lengths(): def test(example): assert_raises(ValueError, url.parse(example).punycode) examples = [ 'http://foo..com/', 'http://foo../', 'http://this-is-a-very-long-segment-that-has-more-than-sixty-three-characters.com/', 'http://this-is-a-very-long-segment-that-has-more-than-sixty-three-characters/' ] for example in examples: yield test, example def test_punycode_decode_errors(): def test(example): assert_raises(ValueError, url.parse('http://xn--' + example).unpunycode) # Taken from url-cpp examples = [ 'd9juau41awczcz', '\xc3\xbc-', 's121kz41webp2qdk6492joxumu36', '999999b' ] for example in examples: yield test, example def test_relative(): def test(rel, absolute): assert_equal(base.relative(rel).unicode, absolute) base = url.parse('http://testing.com/a/b/c') examples = [ ('../foo' , 'http://testing.com/a/foo' ), ('./foo' , 'http://testing.com/a/b/foo' ), ('foo' , 'http://testing.com/a/b/foo' ), ('/foo' , 'http://testing.com/foo' ), ('http://foo.com/bar', 'http://foo.com/bar' ), 
('/foo' , 'http://testing.com/foo' ), (u'/\u200Bfoo' , u'http://testing.com/\u200Bfoo'), ('../../../../' , 'http://testing.com/' ), (u'http://www\u200B.tiagopriscostudio.com', u'http://www\u200B.tiagopriscostudio.com/') ] for rel, absolute in examples: yield test, rel, absolute def test_relative_javascript(): rel = 'javascript:console.log("hello")' base = 'http://foo.com/path' assert_equal(rel, url.parse(rel).relative_to(base).unicode) def test_sanitize(): def test(bad, good): assert_equal(url.parse(bad).sanitize().unicode, good) examples = [ ('../foo/bar none', 'foo/bar%20none') ] base = 'http://testing.com/' for bad, good in examples: bad = base + bad good = base + good yield test, bad, good def test_remove_default_port(): def test(query, result): assert_equal(url.parse(query).remove_default_port().unicode, result) examples = [ ('http://foo.com:80/' , 'http://foo.com/' ), ('https://foo.com:443/', 'https://foo.com/' ), ('http://foo.com:8080/', 'http://foo.com:8080/') ] for query, result in examples: yield test, query, result def test_absolute(): def test(query, result): assert_equal(url.parse(query).absolute, result) examples = [ ('http://foo.com/bar', True ), ('foo/' , False), ('http://foo.com' , True ), ('/foo/bar/../' , False) ] for query, result in examples: yield test, query, result def test_hostname(): def test(query, result): assert_equal(url.parse(query).hostname, result) examples = [ ('http://foo.com/bar', 'foo.com'), ('http://bar.foo.com/bar', 'bar.foo.com'), ('/foo', '') ] for query, result in examples: yield test, query, result def test_pld(): def test(query, result): assert_equal(url.parse(query).pld, result) examples = [ ('http://foo.com/bar' , 'foo.com'), ('http://bar.foo.com/bar' , 'foo.com'), ('/foo' , ''), ('http://com/bar' , ''), ('http://foo.გე' , 'foo.გე'), ('http://bar.foo.გე' , 'foo.გე'), ('http://foo.xn--node' , 'foo.xn--node'), ('http://bar.foo.xn--node', 'foo.xn--node'), ('http://foo.co.uk' , 'foo.co.uk') ] for query, result in examples: 
yield test, query, result def test_tld(): def test(query, result): assert_equal(url.parse(query).tld, result) examples = [ ('http://foo.com/bar' , 'com'), ('http://bar.foo.com/bar', 'com'), ('/foo' , ''), ('http://com/bar' , 'com'), ('http://foo.გე' , 'გე'), ('http://bar.foo.გე' , 'გე'), ('http://foo.xn--node' , 'xn--node'), ('http://bar.foo.xn--node', 'xn--node'), ('http://foo.co.uk' , 'co.uk') ] for query, result in examples: yield test, query, result def test_empty_hostname(): def test(example): # Equal to itself assert_equal(url.parse(example), example) # String representation equal to the provided example assert_equal(url.parse(example).unicode, example) examples = [ 'http:///path', 'http://userinfo@/path', 'http://:80/path', ] for example in examples: yield test, example def test_copy(): def test(example): original = url.parse(example) copy = original.copy() assert_equal(original, copy) assert_not_equal(id(original), id(copy)) examples = [ 'http://testing.com/danny%27s pub', 'http://testing.com/this%5Fand%5Fthat', 'http://user:pass@foo.com', u'http://José:no way@foo.com', 'http://oops!:don%27t@foo.com' u'española,nm%2cusa.html?gunk=junk+glunk&foo=bar baz', 'http://foo.com/bar\nbaz.html\n', 'http://foo.com/bar.jsp?param=\n/value%2F', 'http://user%3apass@foo.com/' ] for example in examples: yield test, example def test_set_psl(): '''Can set the PSL to use.''' def test(rules, example, pld, tld): try: url.set_psl(rules) assert_equal(url.parse(example).pld, pld) assert_equal(url.parse(example).tld, tld) finally: url.set_psl(pkgutil.get_data('url', 'psl/2016-08-16.psl')) examples = [ ('uk', 'http://foo.co.uk/', 'co.uk', 'uk' ), ('co.uk', 'http://foo.co.uk/', 'foo.co.uk', 'co.uk') ] for rules, example, pld, tld in examples: yield test, rules, example, pld, tld @raises(ValueError) def test_psl_exception(): '''Raises ValueError when PSL code throws.''' url.parse('http://empty..com').pld def test_tel(): '''Can parse tel links properly.''' parsed = 
url.parse('tel:0108202201') assert_equal(parsed.scheme, 'tel') assert_equal(parsed.path, '0108202201') def test_unknown_protocol(): '''Can parse unknown protocol links.''' parsed = url.parse('unknown:0108202201') assert_equal(parsed.scheme, '') assert_equal(parsed.path, 'unknown:0108202201') def test_component_assignment(): parsed = url.parse('http://user@example.com:80/path;params?query#fragment') parsed.scheme = 'https' parsed.userinfo = 'username' parsed.host = 'foo.example.com' parsed.port = 443 parsed.path = '/another/path' parsed.params = 'no-params' parsed.query = 'no-query' parsed.fragment = 'no-fragment' assert_equal( parsed.unicode, 'https://username@foo.example.com:443/another/path;no-params?no-query#no-fragment' ) def test_component_assignment_unicode(): parsed = url.parse('http://user@example.com:80/path;params?query#fragment') parsed.scheme = u'https' parsed.userinfo = u'username' parsed.host = u'foo.example.com' parsed.port = 443 parsed.path = u'/another/path' parsed.params = u'no-params' parsed.query = u'no-query' parsed.fragment = u'no-fragment' assert_equal( parsed.unicode, 'https://username@foo.example.com:443/another/path;no-params?no-query#no-fragment' ) def test_string_url(): parsed = StringURL.parse('http://user@example.com:80/path;params?query#fragment') properties = [ 'scheme', 'host', 'params', 'query', 'fragment', 'userinfo', 'pld', 'tld' ] for prop in properties: yield assert_is_instance, getattr(parsed, prop), six.binary_type def test_unicode_url(): parsed = UnicodeURL.parse('http://user@example.com:80/path;params?query#fragment') properties = [ 'scheme', 'host', 'params', 'query', 'fragment', 'userinfo', 'pld', 'tld' ] for prop in properties: yield assert_is_instance, getattr(parsed, prop), six.text_type
StarcoderdataPython
41689
import html
from enum import Enum

from .errors import HttpError, ApiError, InternalError, AppLaunchError, NoFocusedTextFieldError, \
    ErrorCode, get_error_message, EncryptionError
from .util import coalesce_none_or_empty


class AppFeature(Enum):
    '''
    Describes which features are supported by the current app.

    Attributes:
        UNKNOWN: The app feature was not recognized.
        TEXT_INPUT: The app has a text field focused.
        CURSOR_DISPLAY: The app has a cursor displayed.
        WEB_BROWSE: The app is using an embedded web browser.
    '''
    UNKNOWN = 0
    TEXT_INPUT = 1
    CURSOR_DISPLAY = 2
    WEB_BROWSE = 3


class AppControl(object):
    '''
    Provides functionality for interacting with applications on the target device.

    Args:
        bravia_client: The parent :class:`BraviaClient` instance.
        http_client: The :class:`Http` instance associated with the parent client.
    '''

    def __init__(self, bravia_client, http_client):
        self.bravia_client = bravia_client
        self.http_client = http_client

    def get_application_list(self, exclude_builtin=False):
        '''
        Retrieves a list of applications installed on the target device.

        Args:
            exclude_builtin (bool): If True, excludes built-in Sony applications which are not exposed on\
            the home screen.

        Raises:
            TypeError: One or more arguments is the incorrect type.
            ApiError: The request to the target device failed.

        Returns:
            list(dict): A list of dicts containing the following properties:

            * name (`str or None`): The display name of the application.
            * uri (`str or None`): The internal URI at which the application can be accessed, used when\
            referring to the app from other functions.
            * icon (`str or None`): A network URL pointing to the application's icon image.
        '''
        self.bravia_client.initialize()

        if not isinstance(exclude_builtin, bool):
            raise TypeError("exclude_builtin must be a boolean type")

        # Wrap the request so callers only ever see ApiError, as documented and
        # as every other method in this class already does.
        try:
            response = self.http_client.request(endpoint="appControl", method="getApplicationList", version="1.0")
        except HttpError as err:
            raise ApiError(get_error_message(err.error_code, str(err))) from None

        apps = []

        if response is None:
            return apps

        if not isinstance(response, list):
            raise ApiError("API returned unexpected response format for getApplicationList")

        for app_info in response:
            # html.unescape() raises TypeError on None, so only unescape when a
            # title is actually present.
            name = coalesce_none_or_empty(app_info.get("title"))
            if name is not None:
                name = html.unescape(name)

            app = {
                "name": name,
                "uri": coalesce_none_or_empty(app_info.get("uri")),
                "icon": coalesce_none_or_empty(app_info.get("icon"))
            }

            # Built-in Sony apps live under the com.sony.dtv.ceb namespace.
            if exclude_builtin and app["uri"] is not None and "com.sony.dtv.ceb" in app["uri"]:
                continue

            apps.append(app)

        return apps

    def get_application_feature_status(self):
        '''
        Determines which features are supported by the currently running application on the target device.

        Raises:
            ApiError: The request to the target device failed.

        Returns:
            dict: A dict keyed by :class:`AppFeature` member with boolean values:

            * AppFeature.TEXT_INPUT (`bool`): True if the application currently has a text input focused.
            * AppFeature.CURSOR_DISPLAY (`bool`): True if the application currently has an interactive cursor.
            * AppFeature.WEB_BROWSE (`bool`): True if the application currently has a web browser displayed.
        '''
        self.bravia_client.initialize()

        try:
            response = self.http_client.request(endpoint="appControl", method="getApplicationStatusList", version="1.0")
        except HttpError as err:
            raise ApiError(get_error_message(err.error_code, str(err))) from None

        if not isinstance(response, list):
            raise ApiError("API returned unexpected response format for getApplicationStatusList")

        supported_features = {
            "textInput": AppFeature.TEXT_INPUT,
            "cursorDisplay": AppFeature.CURSOR_DISPLAY,
            "webBrowse": AppFeature.WEB_BROWSE
        }

        # Every known feature defaults to disabled; only entries the device
        # reports as "on" flip to True below.
        enabled_features = {
            AppFeature.TEXT_INPUT: False,
            AppFeature.CURSOR_DISPLAY: False,
            AppFeature.WEB_BROWSE: False
        }

        for feature in response:
            feature_type = supported_features.get(feature.get("name"), AppFeature.UNKNOWN)

            # Skip unsupported features
            if feature_type == AppFeature.UNKNOWN:
                continue

            # .get() instead of [] so a malformed entry without "status" reads
            # as disabled rather than raising KeyError.
            enabled_features[feature_type] = feature.get("status") == "on"

        return enabled_features

    def get_text_form(self):
        '''
        Decrypts and returns the contents of the text field focused on the target device.

        Raises:
            InternalError: The target device was unable to encrypt the text.
            ApiError: The request to the target device failed.
            EncryptionError: The target device could not provide a valid encryption key.

        Returns:
            str or None: The text, or `None` if no text field is currently focused.
        '''
        self.bravia_client.initialize()

        encrypted_key = self.bravia_client.encryption.get_rsa_encrypted_common_key()

        if encrypted_key is None:
            raise EncryptionError(
                "This device does not support the appropriate encryption needed to access text fields."
            )

        try:
            response = self.http_client.request(
                endpoint="appControl",
                method="getTextForm",
                params={"encKey": encrypted_key},
                version="1.1"
            )
        except HttpError as err:
            # These errors likely indicate there is no focused text field, so return None
            if err.error_code in (ErrorCode.REQUEST_DUPLICATED.value, ErrorCode.ILLEGAL_STATE.value):
                return None
            elif err.error_code == ErrorCode.ENCRYPTION_ERROR.value:
                raise InternalError("Internal error: The target device rejected our encryption key") from None
            else:
                raise ApiError(get_error_message(err.error_code, str(err))) from None

        if "text" not in response:
            raise ApiError("API returned unexpected response format for getTextForm")

        decrypted_text = self.bravia_client.encryption.aes_decrypt_b64(response["text"])

        return decrypted_text

    def get_web_app_status(self):
        '''
        Returns information about the web application currently in use on the target device.

        Raises:
            ApiError: The request to the target device failed.

        Returns:
            dict: A dict containing the following keys:

            * active (`bool`): True if there is currently a web application running on the target device.
            * url (`str or None`): The URL of the application currently running, None if no such app is running.
        '''
        self.bravia_client.initialize()

        try:
            response = self.http_client.request(endpoint="appControl", method="getWebAppStatus", version="1.0")
        except HttpError as err:
            raise ApiError(get_error_message(err.error_code, str(err))) from None

        # The API reports "active" as the string "true"/"false", not a boolean.
        return {
            "active": response.get("active") == "true",
            "url": coalesce_none_or_empty(response.get("url"))
        }

    def set_active_app(self, uri):
        '''
        Opens the specified app on the target device.

        Args:
            uri (str): The URI of the application to open (acquired using :func:`get_application_list()`)

        Raises:
            TypeError: One or more arguments is the incorrect type.
            AppLaunchError: The application could not be opened.
            ApiError: The request to the target device failed.
        '''
        self.bravia_client.initialize()

        if not isinstance(uri, str):
            raise TypeError("uri must be a string type")

        try:
            self.http_client.request(
                endpoint="appControl",
                method="setActiveApp",
                params={"uri": uri},
                version="1.0"
            )
        except HttpError as err:
            if err.error_code == ErrorCode.ANOTHER_REQUEST_IN_PROGRESS.value:
                raise AppLaunchError(
                    "Another app is currently in the process of launching"
                ) from None
            elif err.error_code == ErrorCode.FAILED_TO_LAUNCH.value:
                raise AppLaunchError("The app failed to launch") from None
            elif err.error_code == ErrorCode.REQUEST_IN_PROGRESS.value:
                # This is actually a success message, so ignore it
                pass
            else:
                raise ApiError(get_error_message(err.error_code, str(err))) from None

    def set_text_form(self, text):
        '''
        Enters the specified text in the focused text field on the target device.
        Text is encrypted before being sent to the device.

        Args:
            text (str): The text to input.

        Raises:
            TypeError: One or more arguments is the incorrect type.
            ApiError: The request to the device failed.
            EncryptionError: The target device could not provide a valid encryption key.
            NoFocusedTextFieldError: There is no text field to input text to on the target device.
            InternalError: The target device failed to decrypt the text.
        '''
        self.bravia_client.initialize()

        if not isinstance(text, str):
            raise TypeError("text must be a string type")

        encrypted_key = self.bravia_client.encryption.get_rsa_encrypted_common_key()

        if encrypted_key is None:
            raise EncryptionError(
                "This device does not support the appropriate encryption needed to access text fields."
            )

        encrypted_text = self.bravia_client.encryption.aes_encrypt_b64(text)

        try:
            self.http_client.request(
                endpoint="appControl",
                method="setTextForm",
                params={"encKey": encrypted_key, "text": encrypted_text},
                version="1.1"
            )
        except HttpError as err:
            if err.error_code == ErrorCode.ILLEGAL_STATE.value:
                raise NoFocusedTextFieldError(
                    "The target device does not currently have a writable text field focused."
                ) from None
            elif err.error_code == ErrorCode.ENCRYPTION_FAILED.value:
                raise InternalError("Internal error: The target device rejected our encryption key. This is a bug.") from None
            else:
                raise ApiError(get_error_message(err.error_code, str(err))) from None

    def terminate_all_apps(self):
        '''
        Instructs the target device to terminate all running applications.

        Raises:
            ApiError: The request to the target device failed.
        '''
        self.bravia_client.initialize()

        try:
            self.http_client.request(endpoint="appControl", method="terminateApps", version="1.0")
        except HttpError as err:
            if err.error_code == ErrorCode.FAILED_TO_TERMINATE.value:
                # Some apps may not be allowed to be terminated. This is an expected response in that case.
                pass
            else:
                raise ApiError(get_error_message(err.error_code, str(err))) from None
StarcoderdataPython
3254105
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mmap

from . import SYSTEM_DICT_VERSION, USER_DICT_VERSION_1, USER_DICT_VERSION_2
from .dictionaryheader import DictionaryHeader
from .doublearraylexicon import DoubleArrayLexicon
from .grammar import Grammar


class BinaryDictionary(object):
    """A binary dictionary (header, grammar, lexicon) backed by a memory-mapped file."""

    def __init__(self, bytes_: mmap.mmap, grammar: Grammar, header: DictionaryHeader, lexicon: DoubleArrayLexicon):
        self._bytes = bytes_
        self._grammar = grammar
        self._header = header
        self._lexicon = lexicon

    @staticmethod
    def _read_dictionary(filename, access=mmap.ACCESS_READ):
        """Memory-map ``filename`` and parse its header, grammar, and lexicon.

        Args:
            filename: Path to the binary dictionary file.
            access: mmap access mode; ACCESS_READ for system dictionaries,
                ACCESS_COPY (private, copy-on-write) for user dictionaries.

        Returns:
            tuple: (mmap, grammar or None, header, lexicon).

        Raises:
            IOError: If the file carries an unknown dictionary version.
        """
        # A read-only mapping does not need the file opened for writing; the
        # previous unconditional 'r+b' made read-only dictionary files fail to
        # load.  ACCESS_COPY keeps the original 'r+b' behavior.
        mode = 'rb' if access == mmap.ACCESS_READ else 'r+b'
        with open(filename, mode) as system_dic:
            bytes_ = mmap.mmap(system_dic.fileno(), 0, access=access)
        offset = 0

        header = DictionaryHeader.from_bytes(bytes_, offset)
        offset += header.storage_size()
        if header.version not in (SYSTEM_DICT_VERSION, USER_DICT_VERSION_1, USER_DICT_VERSION_2):
            # IOError for consistency with from_system_dictionary/from_user_dictionary.
            raise IOError('invalid dictionary version')

        grammar = None
        if header.version != USER_DICT_VERSION_1:
            # Version-1 user dictionaries carry no grammar section.
            grammar = Grammar(bytes_, offset)
            offset += grammar.get_storage_size()

        lexicon = DoubleArrayLexicon(bytes_, offset)
        return bytes_, grammar, header, lexicon

    @classmethod
    def from_system_dictionary(cls, filename):
        """Load a system dictionary with a shared read-only mapping."""
        args = cls._read_dictionary(filename)
        version = args[2].version
        if version != SYSTEM_DICT_VERSION:
            raise IOError('invalid system dictionary')
        return cls(*args)

    @classmethod
    def from_user_dictionary(cls, filename):
        """Load a user dictionary with a private copy-on-write mapping."""
        args = cls._read_dictionary(filename, mmap.ACCESS_COPY)
        version = args[2].version
        if version not in (USER_DICT_VERSION_1, USER_DICT_VERSION_2):
            raise IOError('invalid user dictionary')
        return cls(*args)

    def close(self):
        """Drop the grammar/lexicon references and close the underlying mmap.

        The instance must not be used after calling this; the grammar and
        lexicon attributes are deleted.
        """
        del self._grammar
        del self._lexicon
        self._bytes.close()

    @property
    def grammar(self) -> Grammar:
        # None for version-1 user dictionaries, which have no grammar section.
        return self._grammar

    @property
    def header(self) -> DictionaryHeader:
        return self._header

    @property
    def lexicon(self) -> DoubleArrayLexicon:
        return self._lexicon
StarcoderdataPython
25067
# Generated by Django 3.0.10 on 2020-09-10 13:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_user_mobile_phone'), ] operations = [ migrations.AlterField( model_name='user', name='mobile_phone', field=models.CharField(blank=True, default='', max_length=255, verbose_name='Mobile phone number'), preserve_default=False, ), ]
StarcoderdataPython
1729606
<filename>bitwise.py #!/Applications/anaconda/envs/Python3/bin def main(): '''Bitwise Operators and Examples''' x, y, allOn = 0x55, 0xaa, 0xff print("x is: ", end="") bitPrint(x) print("y is: ", end="") bitPrint(y) print("allOn is: ", end="") bitPrint(allOn) # Bitwise OR: | print("x | y: ", end="") bitPrint(x | y) # Bitwise AND: & print("x & y: ", end="") bitPrint(x & y) # Bitwise XOR: ^ print("x ^ allOn: ", end="") bitPrint(x ^ allOn) # Bitwise Left Shift: << print("allOn << 4: ", end="") bitPrint(allOn << 4) # Bitwise Right Shift: >> print("allOn >> 4: ", end="") bitPrint(allOn >> 4) # Bitwise One's Complement: ~ print("One's complement of x (~x): ", end="") bitPrint(~x) return 0 def bitPrint(n): '''Prints a given number n in binary format to 8 places''' print('{:08b}'.format(n)) if __name__ == '__main__': main()
StarcoderdataPython
3363006
<filename>back-end/main.py<gh_stars>0 # uvicorn main:app --reload from products import delete_product, select_product, insert_product, product_model from users import delete_user, insert_user, select_user, user_model from start_app import * app = start_app() @app.get("/") async def root(): return "Api básica para consulta" @app.get("/products") async def root(): json = select_product.select_product_all() return json @app.get("/products/{id}") async def root(id): json = select_product.select_product(id) return json @app.post("/products") async def create_item(product: product_model.Product): insert_product.insert_product( product.name, product.price, product.description,) json = select_product.select_product_all() return json @app.delete("/products/{id}") async def delete_item(id): delete_product.delete_product(id) json = select_product.select_product_all() return json @app.get("/users") async def root(): json = select_user.select_user_all() return json @app.get("/users/{id}") async def root(id): json = select_user.select_user(id) return json @app.post("/users") async def create_item(user: user_model.User): insert_user.insert_user( user.name, user.surname, user.address,) json = select_user.select_user_all() return json @app.delete("/users/{id}") async def delete_item(id): delete_user.delete_user(id) json = select_user.select_user_all() return json
StarcoderdataPython
1692757
'''
Example of MBEANN in Python solving XOR.
'''

import multiprocessing
import os
import pickle
import random
import time  # only referenced in the commented-out time-based seed below

import numpy as np

from examples.xor.settings import SettingsEA, SettingsMBEANN
from mbeann.base import Individual, ToolboxMBEANN
from mbeann.visualize import visualizeIndividual


def evaluateIndividual(ind):
    # Evaluate one individual on the four XOR patterns and return the summed
    # absolute error between the network outputs and the XOR truth table
    # (lower is better; callers later read fit[0], so the result is indexable
    # -- presumably a length-1 numpy array, TODO confirm via calculateNetwork).

    # XOR settings
    # Third value in the inputsSet is for the bias.
    # inputsSet = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], [1.0, 0.0, 0.5], [1.0, 1.0, 0.5]])

    # XOR without bias inputs.
    inputsSet = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    outputsSet = np.array([[0.0], [1.0], [1.0], [0.0]])

    outputsFromNetwork = []
    for inputs in inputsSet:
        outputsFromNetwork += [ind.calculateNetwork(inputs)]

    fitness = 0.0
    for a, b in zip(outputsSet, outputsFromNetwork):
        fitness += np.abs(a - b)
    return fitness


if __name__ == '__main__':

    # Number of worker processes to run evolution.
    numProcesses = multiprocessing.cpu_count()

    # Evolutionary algorithm settings.
    popSize = SettingsEA.popSize
    maxGeneration = SettingsEA.maxGeneration
    isMaximizingFit = SettingsEA.isMaximizingFit
    eliteSize = SettingsEA.eliteSize
    tournamentSize = SettingsEA.tournamentSize
    tournamentBestN = SettingsEA.tournamentBestN

    # Fixed seed for reproducible runs; swap in int(time.time()) for a
    # different run each time.
    randomSeed = 0  # int(time.time())
    random.seed(randomSeed)
    st = random.getstate()

    # All outputs (random state, stats log, per-generation best individual and
    # its visualization) go into a seed-specific results directory.
    data_dir = os.path.join(os.path.dirname(__file__), 'results_xor_{}'.format(randomSeed))
    os.makedirs(data_dir, exist_ok=True)
    with open('{}/random_state.pkl'.format(data_dir), mode='wb') as out_pkl:
        # Saving the random state just in case.
        pickle.dump(st, out_pkl)

    # A single pool is created once and reused for every generation's
    # fitness evaluations.
    if numProcesses > 1:
        pool = multiprocessing.Pool(processes=numProcesses)

    # Initial population built entirely from the settings module.
    pop = [Individual(SettingsMBEANN.inSize, SettingsMBEANN.outSize, SettingsMBEANN.hidSize,
                      SettingsMBEANN.initialConnection,
                      SettingsMBEANN.maxWeight, SettingsMBEANN.minWeight,
                      SettingsMBEANN.initialWeightType,
                      SettingsMBEANN.initialWeighMean, SettingsMBEANN.initialWeightScale,
                      SettingsMBEANN.maxBias, SettingsMBEANN.minBias,
                      SettingsMBEANN.initialBiasType,
                      SettingsMBEANN.initialBiasMean, SettingsMBEANN.initialBiasScale,
                      SettingsMBEANN.isReccurent, SettingsMBEANN.activationFunc,
                      SettingsMBEANN.actFunc_Alpha, SettingsMBEANN.actFunc_Beta)
           for i in range(popSize)]

    # Mutation/selection toolbox configured from the settings module.
    tools = ToolboxMBEANN(SettingsMBEANN.p_addNode, SettingsMBEANN.p_addLink,
                          SettingsMBEANN.p_weight, SettingsMBEANN.p_bias,
                          SettingsMBEANN.weightMutationType,
                          SettingsMBEANN.weightMutationScale,
                          SettingsMBEANN.biasMutationType,
                          SettingsMBEANN.biasMutationScale,
                          SettingsMBEANN.addNodeWeightValue)

    # First record in the stats log is a header row; per-generation rows are
    # appended (mode='ab') inside the loop below.
    log_stats = ['Gen', 'Mean', 'Std', 'Max', 'Min']
    with open('{}/log_stats.pkl'.format(data_dir), mode='wb') as out_pkl:
        pickle.dump(log_stats, out_pkl)

    for gen in range(maxGeneration):
        print("------")
        print("Gen {}".format(gen))

        # Fitness evaluation: parallel via the pool when available,
        # otherwise a sequential fallback.
        if numProcesses > 1:
            fitnessValues = pool.map(evaluateIndividual, pop)
        else:
            fitnessValues = []
            for ind in pop:
                fitnessValues += [evaluateIndividual(ind)]

        for ind, fit in zip(pop, fitnessValues):
            ind.fitness = fit[0]

        log_stats = [gen, np.mean(fitnessValues), np.std(fitnessValues),
                     np.max(fitnessValues), np.min(fitnessValues)]
        with open('{}/log_stats.pkl'.format(data_dir), mode='ab') as out_pkl:
            pickle.dump(log_stats, out_pkl)

        print("Mean: " + str(np.mean(fitnessValues)) +
              "\tStd: " + str(np.std(fitnessValues)) +
              "\tMax: " + str(np.max(fitnessValues)) +
              "\tMin: " + str(np.min(fitnessValues)))

        # Save the best individual.
        # Note: sorting by fitness here also reorders `pop` in place before
        # selection below.
        with open('{}/data_ind_gen{:0>4}.pkl'.format(data_dir, gen), mode='wb') as out_pkl:
            pop.sort(key=lambda ind: ind.fitness, reverse=isMaximizingFit)
            pickle.dump(pop[0], out_pkl)
        visualizeIndividual(
            pop[0], '{}/mbeann_ind_gen{:0>4}.pdf'.format(data_dir, gen))

        # Selection: optionally preserve elites, then tournament-select the
        # rest of the next generation.
        tools.selectionSettings(pop, popSize, isMaximizingFit, eliteSize)
        if eliteSize > 0:
            elite = tools.preserveElite()
        # pop = tools.selectionRandom()
        pop = tools.selectionTournament(tournamentSize, tournamentBestN)

        # Apply all four mutation operators to every selected individual
        # (elites, re-added below, are not mutated).
        for i, ind in enumerate(pop):
            tools.mutateWeightValue(ind)
            tools.mutateBiasValue(ind)
            tools.mutateAddNode(ind)
            tools.mutateAddLink(ind)

        if eliteSize > 0:
            pop = elite + pop
StarcoderdataPython
1656067
"""Application entry point: build the Flask app from an external settings file."""
from os import path, getcwd

from classifier.application import create_app

# NOTE(review): the settings path is resolved against the *current working
# directory*, not this file's directory — confirm the app is always launched
# from the project root, otherwise settings.py will not be found.
settings_file = path.join(getcwd(), "settings.py")

# WSGI servers (gunicorn etc.) import this module-level `app` object.
app = create_app(settings_file)
StarcoderdataPython
3326225
# <reponame>odinn13/comb_spec_searcher-1
"""
The constructor class contains all the method, and logic, needed to get the
enumeration, generate objects, and sample objects.

The default constructors implemented are:
- CartesianProduct
- DisjointUnion
- Empty

Currently the constructors are implemented in one variable, namely 'n' which
is used throughout to denote size.
"""
import abc
from itertools import product
from random import randint
from typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional, Tuple

from sympy import Eq, Function

from ..combinatorial_class import CombinatorialClassType, CombinatorialObjectType

__all__ = ("Constructor", "CartesianProduct", "DisjointUnion")

# For each child: a mapping parameter-name -> the tuple of values of that
# parameter the child may be queried with.
RelianceProfile = Tuple[Dict[str, Tuple[int, ...]], ...]
SubGens = Tuple[Callable[..., Iterator[CombinatorialObjectType]], ...]
SubRecs = Tuple[Callable[..., int], ...]
SubSamplers = Tuple[Callable[..., CombinatorialObjectType], ...]


class Constructor(abc.ABC, Generic[CombinatorialClassType, CombinatorialObjectType]):
    """The constructor is akin to the 'counting function' in the comb exp paper."""

    @abc.abstractmethod
    def get_equation(self, lhs_func: Function, rhs_funcs: Tuple[Function, ...]) -> Eq:
        """
        Return the sympy.Eq in the form lhs_func = f(rhs_funcs).
        """

    @abc.abstractmethod
    def reliance_profile(self, n: int, **parameters: int) -> RelianceProfile:
        """
        Return the reliance profile. That is for the parameters given,
        which parameters of each individual subclass are required.
        """

    @abc.abstractmethod
    def get_recurrence(self, subrecs: SubRecs, n: int, **parameters: int) -> int:
        """
        Return the count for the given parameters, assuming the children are
        counted by the subrecs given.
        """

    @abc.abstractmethod
    def get_sub_objects(
        self, subgens: SubGens, n: int, **parameters: int
    ) -> Iterator[Tuple[CombinatorialObjectType, ...]]:
        """Return the subobjs/image of the bijection implied by the constructor."""

    @abc.abstractmethod
    def random_sample_sub_objects(
        self,
        parent_count: int,
        subsamplers: SubSamplers,
        subrecs: SubRecs,
        n: int,
        **parameters: int
    ):
        """Return a randomly sampled subobjs/image of the bijection implied
        by the constructor."""


class CartesianProduct(Constructor[CombinatorialClassType, CombinatorialObjectType]):
    """
    The CartesianProduct is initialised with the children of the rule that is
    being counted. These are needed in the reliance profile. In particular,
    the CombinatorialClass that you are counting must have implemented the
    methods 'is_atom', 'minimum_size_of_object', 'get_minimum_values' which
    are needed to ensure that the recursions are productive.

    This CartesianProduct constructor considers compositions all of the
    parameters including n. then these should be passed to one other factor.

    The details of how the parameters map forward must be given using
    'extra_parameters'. This is a tuple, where the ith dictionary tells the
    constructor for each parent variable maps to on the child. If it does not
    map to the child it should not be a key in the dictionary.
    """

    def __init__(
        self,
        parent: CombinatorialClassType,
        children: Iterable[CombinatorialClassType],
        extra_parameters: Optional[Tuple[Dict[str, str], ...]] = None,
    ):
        children = tuple(children)
        if extra_parameters is not None:
            self.extra_parameters = tuple(extra_parameters)
        else:
            self.extra_parameters = tuple(dict() for _ in children)

        # Minimum size of the parent for each parameter (used to keep the
        # recursion productive).
        self.minimum_sizes = {"n": parent.minimum_size_of_object()}
        for k in parent.extra_parameters:
            self.minimum_sizes[k] = parent.get_minimum_value(k)

        self.min_child_sizes = tuple(
            {"n": child.minimum_size_of_object()} for child in children
        )
        # Atoms have a fixed size, so their max equals their min.
        self.max_child_sizes = tuple(
            {"n": child.minimum_size_of_object()} if child.is_atom() else {}
            for child in children
        )
        self.parent_parameters = ("n",) + parent.extra_parameters

        for (idx, child), parameters in zip(enumerate(children), self.extra_parameters):
            for k in parent.extra_parameters:
                # A parent parameter not mapped to this child contributes 0.
                self.min_child_sizes[idx][k] = (
                    child.get_minimum_value(parameters[k]) if k in parameters else 0
                )
                if k not in parameters:
                    self.max_child_sizes[idx][k] = 0
                elif child.is_atom():
                    self.max_child_sizes[idx][k] = child.get_minimum_value(
                        parameters[k]
                    )

    def get_equation(self, lhs_func: Function, rhs_funcs: Tuple[Function, ...]) -> Eq:
        res = 1
        for extra_parameters, rhs_func in zip(self.extra_parameters, rhs_funcs):
            res *= rhs_func.subs(
                {child: parent for parent, child in extra_parameters.items()},
                simultaneous=True,
            )
        return Eq(lhs_func, res)

    def reliance_profile(self, n: int, **parameters: int) -> RelianceProfile:
        # TODO: consider when parameters are subsets of each other etc
        assert all(
            set(["n", *parameters]) == set(min_child_sizes)
            for min_child_sizes in self.min_child_sizes
        )
        parameters["n"] = n
        return tuple(
            {
                k: tuple(
                    range(
                        min_child_sizes[k],
                        min(
                            [
                                parameters[k]
                                - self.minimum_sizes[k]
                                + min_child_sizes[k]
                                + 1
                            ]
                            + (
                                [max_child_sizes[k] + 1]
                                if k in max_child_sizes
                                else []
                            ),
                        ),
                    )
                )
                for k in min_child_sizes
            }
            for min_child_sizes, max_child_sizes in zip(
                self.min_child_sizes, self.max_child_sizes
            )
        )

    def _valid_compositions(
        self, n: int, **parameters: int
    ) -> Iterator[Tuple[Dict[str, int], ...]]:
        # Yield one dict of parameter values per child such that, for every
        # parameter, the children's values sum to the parent's value.
        reliance_profile = self.reliance_profile(n, **parameters)

        def _helper(
            minmaxes: Tuple[Dict[str, Tuple[int, int]], ...], **parameters: int
        ):
            if len(minmaxes) == 1:
                minmax = minmaxes[0]
                if all(
                    minmax[k][0] <= parameters[k] <= minmax[k][1]
                    for k in self.parent_parameters
                ):
                    yield ({**parameters},)
                return
            # Bounds contributed by the remaining children, used to prune.
            still_to_come = {
                k: sum(minmax[k][0] for minmax in minmaxes[1:])
                for k in self.parent_parameters
            }
            max_available = {
                k: sum(minmax[k][1] for minmax in minmaxes[1:])
                for k in self.parent_parameters
            }
            minmax = minmaxes[0]
            for values in product(
                *[
                    range(
                        max(minmax[k][0], parameters[k] - max_available[k]),
                        min(minmax[k][1], parameters[k] - still_to_come[k]) + 1,
                    )
                    for k in self.parent_parameters
                ]
            ):
                params = dict(zip(self.parent_parameters, values))
                update_params = {
                    k: parameters[k] - params[k] for k in self.parent_parameters
                }
                for comp in _helper(minmaxes[1:], **update_params):
                    yield (params,) + comp

        if all(all(profile.values()) for profile in reliance_profile):
            minmaxes: Tuple[Dict[str, Tuple[int, int]], ...] = tuple(
                {k: (min(profile[k]), max(profile[k])) for k in self.parent_parameters}
                for profile in reliance_profile
            )
            parameters["n"] = n
            yield from _helper(minmaxes, **parameters)

    def get_extra_parameters(
        self, child_parameters: Tuple[Dict[str, int], ...]
    ) -> Optional[List[Dict[str, int]]]:
        """
        Will return the extra parameters dictionary based on the given child
        parameters given. If there is a contradiction, that is some child
        parameter is given two or more different values that do not match,
        then None will be returned indicating that there is a contradiction,
        and so there are no objects satisfying the child parameters.
        """
        res: List[Dict[str, int]] = []
        for params, map_params in zip(child_parameters, self.extra_parameters):
            assert all(
                params[k] == 0
                for k in self.parent_parameters
                if k not in map_params and k != "n"
            )
            extra_params: Dict[str, int] = {"n": params["n"]}
            for k in map_params:
                mapped_k = map_params[k]
                if mapped_k not in extra_params:
                    extra_params[mapped_k] = params[k]
                elif extra_params[mapped_k] != params[k]:
                    return None
            res.append(extra_params)
        return res

    def get_recurrence(self, subrecs: SubRecs, n: int, **parameters: int) -> int:
        # The extra parameters variable maps each of the parent parameter to
        # the unique child that it was mapped to.
        res = 0
        for child_parameters in self._valid_compositions(n, **parameters):
            tmp = 1
            extra_parameters = self.get_extra_parameters(child_parameters)
            if extra_parameters is None:
                continue
            for rec, extra_params in zip(subrecs, extra_parameters):
                tmp *= rec(n=extra_params.pop("n"), **extra_params)
                if tmp == 0:
                    break
            res += tmp
        return res

    def get_sub_objects(
        self, subgens: SubGens, n: int, **parameters: int
    ) -> Iterator[Tuple[CombinatorialObjectType, ...]]:
        assert len(parameters) == 0, "only implemented in one variable, namely 'n'"
        for comp in self._valid_compositions(n):
            for sub_objs in product(
                *tuple(subgen(n=d["n"]) for d, subgen in zip(comp, subgens))
            ):
                yield tuple(sub_objs)

    def random_sample_sub_objects(
        self,
        parent_count: int,
        subsamplers: SubSamplers,
        subrecs: SubRecs,
        n: int,
        **parameters: int
    ):
        assert not parameters, "only implemented in one variable"
        # Pick a uniformly random rank, then walk the compositions until the
        # cumulative count reaches it.
        random_choice = randint(1, parent_count)
        total = 0
        for comp in self._valid_compositions(n):
            tmp = 1
            for i, rec in enumerate(subrecs):
                tmp *= rec(n=comp[i]["n"])
                if tmp == 0:
                    break
            total += tmp
            if random_choice <= total:
                return tuple(
                    subsampler(d["n"]) for d, subsampler in zip(comp, subsamplers)
                )

    @staticmethod
    def get_eq_symbol() -> str:
        return "="

    @staticmethod
    def get_op_symbol() -> str:
        return "x"

    def __str__(self) -> str:
        return "Cartesian product"


class DisjointUnion(Constructor[CombinatorialClassType, CombinatorialObjectType]):
    """
    The DisjointUnion constructor takes as input the children. Each
    constructor is unique up to the length of the children being used to
    count.

    Extra parameters are passed on using the parameters dictionaries. Each
    dictionary's keys should be the extra variable of the child pointing the
    variable on the parent it came from. If a parents variable does not map
    to a child, then this variable must be 0 as the child contains no
    occurences.

    The fixed value dictionaries passed will be used ensure that the
    parameter of a child must take on the given value.
    """

    def __init__(
        self,
        parent: CombinatorialClassType,
        children: Tuple[CombinatorialClassType, ...],
        extra_parameters: Optional[Tuple[Dict[str, str], ...]] = None,
        fixed_values: Optional[Tuple[Dict[str, int], ...]] = None,
    ):
        self.number_of_children = len(children)
        if extra_parameters is not None:
            self.extra_parameters = extra_parameters
            assert len(extra_parameters) == len(children)
        else:
            assert not parent.extra_parameters
            self.extra_parameters = tuple(
                dict() for _ in range(self.number_of_children)
            )

        # Per child: the parent parameters with no image on that child; such
        # parameters must be 0 for the child to contribute.
        self.zeroes = tuple(
            frozenset(parent.extra_parameters) - frozenset(parameter.keys())
            for parameter in self.extra_parameters
        )
        if fixed_values is not None:
            self.fixed_values = fixed_values
            assert len(fixed_values) == len(children)
        else:
            self.fixed_values = tuple({} for _ in children)

    def get_equation(self, lhs_func: Function, rhs_funcs: Tuple[Function, ...]) -> Eq:
        res = 0
        for rhs_func, extra_parameters in zip(rhs_funcs, self.extra_parameters):
            res += rhs_func.subs(
                {child: parent for parent, child in extra_parameters.items()},
                simultaneous=True,
            )
        return Eq(lhs_func, res)

    def reliance_profile(self, n: int, **parameters: int) -> RelianceProfile:
        # TODO: implement in multiple variables and use in get_recurrence
        assert not parameters, "only implemented in one variable, namely 'n'"
        return tuple({"n": (n,)} for _ in range(self.number_of_children))

    def get_extra_parameters(
        self, n: int, **parameters: int
    ) -> List[Optional[Dict[str, int]]]:
        """
        Will return the extra parameters dictionary based on the parent's
        parameters. If there is a contradiction, that is some child
        parameter is given two or more different values that do not match,
        then None will be returned for that child, indicating that 0 objects
        on the child match the parents parameters.
        """
        res: List[Optional[Dict[str, int]]] = []
        for i, extra_parameters in enumerate(self.extra_parameters):
            update_params: Dict[str, int] = {**self.fixed_values[i]}
            for parent_var, child_var in extra_parameters.items():
                updated_value = parameters[parent_var]
                if child_var not in update_params:
                    update_params[child_var] = updated_value
                elif update_params[child_var] != updated_value:
                    # Contradictory requirement on this child's parameter.
                    break
            else:
                res.append(update_params)
                continue
            res.append(None)
        return res

    def get_recurrence(self, subrecs: SubRecs, n: int, **parameters: int) -> int:
        res = 0
        for (idx, rec), extra_params in zip(
            enumerate(subrecs), self.get_extra_parameters(n, **parameters)
        ):
            # if a parent parameter is not mapped to by some child parameter
            # then it is assumed that the value of the parent parameter must be 0
            if extra_params is None or any(
                val != 0 and k in self.zeroes[idx] for k, val in parameters.items()
            ):
                continue
            res += rec(n=n, **extra_params)
        return res

    @staticmethod
    def get_sub_objects(
        subgens: SubGens, n: int, **parameters: int
    ) -> Iterator[Tuple[CombinatorialObjectType, ...]]:
        assert len(parameters) == 0, "only implemented in one variable, namely 'n'"
        for i, subgen in enumerate(subgens):
            for gp in subgen(n, **parameters):
                # The object is placed in the slot of the child it came from;
                # all other slots are None.
                yield tuple(None for _ in range(i)) + (gp,) + tuple(
                    None for _ in range(len(subgens) - i - 1)
                )

    @staticmethod
    def random_sample_sub_objects(
        parent_count: int,
        subsamplers: SubSamplers,
        subrecs: SubRecs,
        n: int,
        **parameters: int
    ):
        random_choice = randint(1, parent_count)
        total = 0
        for idx, (subrec, subsampler) in enumerate(zip(subrecs, subsamplers)):
            total += subrec(n=n, **parameters)
            if random_choice <= total:
                obj = subsampler(n=n, **parameters)
                return (
                    tuple(None for _ in range(idx))
                    + (obj,)
                    + tuple(None for _ in range(len(subrecs) - idx - 1))
                )

    @staticmethod
    def get_eq_symbol() -> str:
        return "="

    @staticmethod
    def get_op_symbol() -> str:
        return "+"

    def __str__(self):
        return "disjoint union"
StarcoderdataPython
181491
# Generated by Django 2.2.7 on 2019-11-06 23:28 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Todo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('todo', models.CharField(help_text='Obrigatório preencher o Todo', max_length=100)), ('done', models.BooleanField(default=False)), ('created_at', models.DateTimeField(auto_now_add=True)), ('closed_at', models.DateTimeField()), ], ), ]
StarcoderdataPython
161821
import serial
import time
import sys
import os

#-------------------------------------------------------------------------------
# !!! START USER UPDATE !!!

COM_PORT = 'COM3'      # Windows COM port the Teensy is connected to
BAUD_RATE = 1000000

# !!! END USER UPDATE !!!
#-------------------------------------------------------------------------------

FILENAME_SIZE = 23   # CAN-Logger file names are 23 bytes long
FILESIZE_SIZE = 10   # CAN-Logger file size is uint32_t...10 bytes
BLOCK_SIZE = 512     # CAN-Logger logs and sends data in 512 byte blocks

START_CMD = str.encode("START") + b'\x00'  # CAN-Logger serial start command
SKIP_CMD = str.encode("SKIP ") + b'\x00'   # CAN-Logger serial skip command


#-------------------------------------------------------------------------------
# printProgress
#
# This function will print to the standard output with a progress bar. This
# code was copied from:
# stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
#
# Parameters
# ----------
# iteration: int    The numerator of the status percentage
# total      int    The denominator of the status percentage
#
# Returns
# -------
# none
#-------------------------------------------------------------------------------
def printProgress(iteration, total):
    prefix = 'Progress:'
    suffix = 'Complete'
    decimals = 1
    length = 50
    fill = '█'
    printEnd = "\r"
    # Fix: guard against a zero-length file so the percentage math cannot
    # raise ZeroDivisionError (previously crashed for total == 0).
    total = max(total, 1)
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
    if iteration == total:
        print()


#-------------------------------------------------------------------------------
# getFileFromTeensy
#
# This function will complete the file transfer request by getting the file
# size and then getting the file itself. The file size is 10 bytes and the
# file will be a multiple of the CAN-Logger block array size (16-bytes *
# kBlockSize), so all components of the data transfer are of known lengths.
# The calls to read the COM serial ports are all done by trying to read the
# expected amount of data. The COM serial read command will only pass if the
# correct amount of data has been read. If the read commands aren't getting
# all of the expected data, then the COM serial read command timeout may need
# to be increased.
#
# Relies on the module-level `ser` (open serial port) and `fileName` set by
# the main program below.
#
# Parameters
# ----------
#
# Returns
# -------
# rv    boolean     True = pass, False = fail
#-------------------------------------------------------------------------------
def getFileFromTeensy():
    rv = True
    fileSize = None

    ser.write(START_CMD)  # Request the file size
    buf = ser.read(FILESIZE_SIZE)
    if (len(buf) == FILESIZE_SIZE):
        fileSize = int(buf.decode())
        print("File Name: %s, File Size: %s" % (fileName, fileSize))
    elif (len(buf) > 0):
        # Fix: the message previously reported FILENAME_SIZE (23) although
        # FILESIZE_SIZE (10) bytes were requested.
        print("Expected filesize size: %s, received: %s " % (FILESIZE_SIZE, len(buf)))
        rv = False
        return rv
    else:
        print("Didn't receive file size for %s" % (fileName))
        rv = False
        return rv

    byteCnt = 0
    blocks = []

    ser.write(START_CMD)  # Request the file
    while (True):
        buf = ser.read(BLOCK_SIZE)
        if (len(buf) == BLOCK_SIZE):
            blocks.append(buf)
            byteCnt += BLOCK_SIZE
            printProgress(byteCnt, fileSize)
            if (byteCnt == fileSize):
                # Whole file received: write all buffered blocks to disk.
                with open(fileName, 'wb') as fid:
                    for block in blocks:
                        fid.write(block)
                break
        else:
            print("Expected buffer size: 512, received: %s " % (len(buf)))
            rv = False
            break
    return rv


#-------------------------------------------------------------------------------
# !!! MAIN PROGRAM !!!
#-------------------------------------------------------------------------------
retries = 5
while (True):
    # Setup the serial connection to the Teensy
    try:
        ser = serial.Serial(COM_PORT, BAUD_RATE, timeout=1)
        break
    # Fix: was a bare `except:` which also swallowed KeyboardInterrupt,
    # making the retry loop impossible to break with Ctrl-C.
    except Exception:
        print("Attempting to connect to the Teensy...")
        time.sleep(1)
        retries -= 1
        if (retries == 0):
            # After repeated failures let the user pick another COM port.
            portNum = -1
            while (portNum < 0 or portNum > 15):
                portNum = int(input("Using %s, verify the COM port number (0-15): " % (COM_PORT)))
            COM_PORT = 'COM' + str(portNum)
            retries = 5

print("Connected to the Teensy, requesting CANLogger files...")

while (True):  # File request loop
    fileName = None

    ser.write(START_CMD)  # Request the file name
    buf = ser.read(FILENAME_SIZE)
    if (len(buf) == FILENAME_SIZE):
        print(buf.decode())
        fileName = buf.decode()
    elif (len(buf) > 0):
        print(buf)
        print("Expected filename size: %s, received: %s " % (FILENAME_SIZE, len(buf)))
        break
    else:  # No response, assume no more files and exit
        print("No more files to transfer...")
        break

    if (os.path.isfile(fileName)):
        # Already downloaded on a previous run: tell the logger to skip it.
        print("File %s already exists, skipping..." % (fileName))
        ser.write(SKIP_CMD)
        time.sleep(2)
    else:
        if not getFileFromTeensy():  # Exit if something goes wrong
            break                    # reason is sent to standard out
StarcoderdataPython
124384
# <filename>ex47_tests.py
from nose.tools import *
from ex47.game import Room


def test_room():
    """A freshly constructed Room keeps its name and starts with no paths."""
    gold = Room("GoldRoom", """This room has gold in it you can grab. There's a door to the north.""")
    assert_equal(gold.name, "GoldRoom")
    assert_equal(gold.paths, {})


def test_room_paths():
    """add_paths() registers destination rooms under direction keys."""
    center = Room("Center", "Test room in the center.")
    north = Room("North", "Test room in the north.")
    south = Room("South", "Test room in the south.")

    # NOTE(review): no assertions follow this call — as written the test only
    # checks that add_paths() does not raise; consider asserting
    # center.go('north')/go('south') return the expected rooms.
    center.add_paths({'north': north, 'south': south})
StarcoderdataPython
1747462
#!/usr/bin/env python3 ################ mylis: Tiny Scheme Environment in Python 3.10 ## Additional runtime support by <NAME> for lis.py by ## <NAME> (c) 2010-18; See http://norvig.com/lispy.html import functools as ft import itertools as it import operator as op import math import readline # "unused" import to enable readline in input() import sys from collections.abc import Sequence, Iterator from typing import Any, Protocol, Callable, NoReturn import lis from exceptions import UndefinedSymbol, UnexpectedCloseParen, EvaluatorException ################ enhanced and new built-ins def s_expr(obj: object) -> str: "Convert Python object into Lisp s-expression." match obj: case True: return '#t' case False: return '#f' case list(obj): items = ' '.join(s_expr(x) for x in obj) return f'({items})' case lis.Symbol(x): return x case _: return repr(obj) def display(obj: object) -> str: output = s_expr(obj) print(output) def variadic_sub(first, *rest): if rest: return first - sum(rest) else: return -first def variadic_truediv(first, *rest): if rest: return first / math.prod(rest) else: return 1 / first def variadic_comparison(op, current, *rest): for arg in rest: if not op(current, arg): return False current = arg return True def standard_env() -> lis.Environment: env = lis.standard_env() env.update({ # enhancements '#f': False, '#t': True, '+': lambda *args: sum(args), '-': variadic_sub, '*': lambda *args: math.prod(args), '/': variadic_truediv, '=': lambda first, *rest: all(first == x for x in rest), '<': ft.partial(variadic_comparison, op.lt), '>': ft.partial(variadic_comparison, op.gt), '<=': ft.partial(variadic_comparison, op.le), '>=': ft.partial(variadic_comparison, op.ge), 'append': lambda *args: list(it.chain(*args)), # additional built-ins 'quotient': op.floordiv, 'display': display, 'filter': lambda *args: list(filter(*args)), }) return env ################ non-interactive execution def run_lines(source: str, env: lis.Environment | None = None) -> Iterator[Any]: 
global_env = lis.Environment({}, standard_env()) if env is not None: global_env.update(env) tokens = lis.tokenize(source) while tokens: exp = lis.read_from_tokens(tokens) yield lis.evaluate(exp, global_env) def run(source: str, **env: lis.Expression) -> Any: for result in run_lines(source, env): pass return result ############### multi-line REPL class QuitRequest(Exception): """Signal to quit multi-line input.""" ELLIPSIS = '\N{HORIZONTAL ELLIPSIS}' def raise_unexpected_paren(line: str) -> NoReturn: max_msg_len = 16 if len(line) < max_msg_len: msg = line else: msg = ELLIPSIS + line[-(max_msg_len-1):] raise UnexpectedCloseParen(msg) QUIT_COMMAND = '.q' InputFn = Callable[[str], str] def multiline_input(prompt1: str, prompt2: str, *, quit_cmd: str = QUIT_COMMAND, input_fn: InputFn = input) -> str: paren_cnt = 0 lines = [] prompt = prompt1 while True: line = input_fn(prompt).rstrip() if line == quit_cmd: raise QuitRequest() for char in line: if char == '(': paren_cnt += 1 elif char == ')': paren_cnt -= 1 if paren_cnt < 0: raise_unexpected_paren(line) lines.append(line) prompt = prompt2 if paren_cnt == 0: break return '\n'.join(lines) def multiline_repl(prompt1: str = '> ', prompt2: str = '... 
', error_mark: str = '***', *, quit_cmd: str = QUIT_COMMAND, input_fn: InputFn = input) -> None: """Read-Eval-Print-Loop""" global_env = lis.Environment({}, standard_env()) print(f'To exit type {QUIT_COMMAND}', file=sys.stderr) while True: # ___________________________________________ Read try: source = multiline_input(prompt1, prompt2, quit_cmd=quit_cmd, input_fn=input_fn) except (EOFError, QuitRequest): break except UnexpectedCloseParen as exc: print(error_mark, exc) continue if not source: continue # ___________________________________________ Eval current_exp = lis.parse(source) try: result = lis.evaluate(current_exp, global_env) except EvaluatorException as exc: print(error_mark, exc) continue # ___________________________________________ Print if result is not None: print(s_expr(result)) ############### command-line integration class TextReader(Protocol): def read(self) -> str: ... def run_file(source_file: TextReader, env: lis.Environment | None = None) -> Any: source = source_file.read() return run(source, **env) def env_from_args(args: Sequence[str]) -> lis.Environment: env = {} for arg in (a for a in args if '=' in a): parts = arg.split('=') if len(parts) != 2 or not all(parts): continue name, val = parts try: atom = lis.parse_atom(val) except ValueError: continue env[name] = atom return env ############### main PROMPT1 = '\N{WHITE RIGHT-POINTING TRIANGLE} ' PROMPT2 = '\N{MIDLINE HORIZONTAL ELLIPSIS} ' ERROR_MARK = '\N{POLICE CARS REVOLVING LIGHT} ' def repl(): multiline_repl(PROMPT1, PROMPT2, ERROR_MARK) def main(args: list[str]) -> None: if len(args) == 1: repl() else: arg_env = env_from_args(args[1:]) with open(args[1]) as source_file: try: run_file(source_file, arg_env) except UndefinedSymbol as exc: key = exc.args[0] print(f'{ERROR_MARK} {key!r} was not defined.') cmd = ' '.join(args) print(' You can define it as an option:') print(f' $ {cmd} {key}=<value>') if __name__ == '__main__': main(sys.argv)
StarcoderdataPython
198302
from flask import jsonify
from api import app
import logging as logger
from api.controllers import responseListOfCoinsSimulation as request_simulation_list_coins

# NOTE(review): the payload is computed once, at import time — confirm the
# simulation list is static for the life of the process.
response = request_simulation_list_coins.response_list()


@app.route('/api/v1/simulationlist/', methods=['GET'])
def simulation_list():
    """Serve the cached coin-simulation list as a JSON document."""
    logger.debug("Inside the get method of Simulation List")
    body = {'simulationlist': response}
    return jsonify(body), 200
StarcoderdataPython
4808007
<reponame>cognitiaclaeves/sam-python-slackapp-template<filename>src/app.py """ Slack chat-bot Lambda handler. Modified from: https://github.com/Beartime234/sam-python-slackapp-template """ # Module Imports import os import logging import json import time import hmac import hashlib import json import urllib.parse import base64 import threading # import multiprocessing # import requests from slack import WebClient as Slack_WebClient from http.client import UNSUPPORTED_MEDIA_TYPE, BAD_REQUEST from http.client import OK as OK_200 # Local imports import helpers from version import __version__ # Get Environment Variables # This is declared globally because as this is useful for tests etc. SECRETS_NAME = os.environ["SECRETS_NAME"] STAGE = os.environ["STAGE"] CUTOFF = os.environ.get('SLACK_LAMBDA_MASTER_CUTOFF') THREADED_LAMBDA_HEADER = 'X-Spawn-Lambda-Thread' UNIT_TEST_HEADER_FLAGS = 'X-Unit-Test-Flags' F_SKIP_THREAD_SPAWN = 'skip-thread-spawn' F_B64_STUB = 'b64stub' F_B64_RESP = 'b64response' # Set up logging here info so we should get the LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) # Ignore non important logs from botocore and boto3 cause they talk to much logging.getLogger('botocore').setLevel(logging.CRITICAL) logging.getLogger('boto3').setLevel(logging.CRITICAL) # Grab secrets for the application. 
SECRETS = json.loads(helpers.get_secrets(SECRETS_NAME)) SLACK_WEBCLIENT = Slack_WebClient(SECRETS["BOT_TOKEN"]) def obscure_dict(some_dict): ret = {} for each_key in some_dict: val = some_dict[each_key] ret[each_key] = '{}..{}..{}'.format( val[0:2], len(val), val[-2:] ) return ret def encode_b64_dict(response_dict): """ utility to take dictionary and return {'req_body_base64' : 'some64string=='} """ body_json = json.dumps(response_dict) body_bytes = body_json.encode('utf-8') body_base64 = base64.b64encode(body_bytes) ret = { 'req_body_base64': '{}'.format(body_base64.decode('utf-8')) } return ret # if "bot_id" in slack_body_dict: # logging.warning("Ignore bot event") # else: # # Get the text of the message the user sent to the bot, # # and reverse it. # ret = process_event(slack_event_dict) # slack_event_dict = {} # if slack_event_dict: # # Get the ID of the channel where the message was posted. # channel_id = slack_event_dict["channel"] # response = slack_client.chat_postMessage( # channel=channel_id, # text=ret # ) # LOGGER.debug('Response: {}'.format(response)) def process_not_implemented(**kwargs): """ default process function - return not implemented """ ret_dict = { 'function_return': 'not-implemented', 'slack_response': {} } return ret_dict def process_event(**kwargs): """ process slack event """ slack_event_dict = kwargs['body']['event'] ret_dict = process_not_implemented() if 'bot_id' in slack_event_dict: LOGGER.debug('Ignoring event ({}) caused by bot chatter.'.format(slack_event_dict["type"])) ret_dict = { 'function_return': 'ignored bot chatter', 'slack_response': {} } else: LOGGER.debug('Will process event: {}'.format(slack_event_dict["type"])) if slack_event_dict["type"] == "message": text = slack_event_dict.get("text") if UNIT_TEST_HEADER_FLAGS in kwargs['headers']: # If unit-test flag is detected, reverse the text if text: reversed_text = text[::-1] channel_id = slack_event_dict["channel"] response = SLACK_WEBCLIENT.chat_postMessage( 
channel=channel_id, text=reversed_text ) ret_dict = { 'function_return': reversed_text, 'slack_response': response } return ret_dict def process_shortcut(**kwargs): """ process slack shortcut (message / global) """ ret_dict = process_not_implemented() return ret_dict def process_slash_cmd(**kwargs): """ process slack slash command """ ret_dict = process_not_implemented() return ret_dict def lambda_handler(api_event, api_context): """Handle an incoming HTTP request from a Slack chat-bot. """ if type(SECRETS) is not dict: raise TypeError("Secrets response must be a dictionary.") if CUTOFF == True or CUTOFF == '1': LOGGER.warning("Master cutoff switch is on. Exiting lambda.") return helpers.form_response(OK_200, {'CUTOFF ERROR': 'Master cutoff switch is engaged. Exiting.'}) LOGGER.info(f" -- Startup Information Version: {__version__}") LOGGER.debug(f"Secret Information: {obscure_dict(SECRETS)}") apievent_ContentType = (api_event.get('headers') or {}).get('Content-Type') or 'null' request_headers = api_event["headers"] # First and foremost, process challenge event, if sent: # This is to appease the slack challenge event that is sent # when subscribing to the slack event API. 
You can read more # here https://api.slack.com/events/url_verification if apievent_ContentType == 'application/json': apievent_body_ = json.loads(api_event.get('body') or {}) if is_challenge(apievent_body_): challenge_response_body = { "challenge": apievent_body_["challenge"] } LOGGER.info('Responding to challenge event.') return helpers.form_response(OK_200, challenge_response_body) # *** DO NOT DELETE BELOW, REVISIT IN FUTURE *** # Responding immediately is best practice recommended by Slack # If not challenge, then immediately return OK # when customer header THREADED_LAMBDA_HEADER is not present # If SKIP_THREAD_SPAWN is not sent in custom header UNIT_TEST_HEADER_FLAGS, # then spawn a new thread with the payload # Otherwise, if THREADED_LAMBDA_HEADER is not present, then process payload # To test behavior after the spawn, include THREADED_LAMBDA_HEADER # To test immediate response, do not include THREADED_LAMBDA_HEADER # if THREADED_LAMBDA_HEADER not in request_headers: # # Skip creating new thread if UNIT_TEST_HEADER_FLAGS indicates to not do so # if F_SKIP_THREAD_SPAWN not in (request_headers.get(UNIT_TEST_HEADER_FLAGS) or ''): # # Spawn new thread with special thread header # api_event['headers'][THREADED_LAMBDA_HEADER] = 'respawned-to-self-for-async-response' # LOGGER.info('Launching separate thread for lambda to process request!') # # p = multiprocessing.Process(target=lambda_handler, args=(api_event, api_context, )) # # p.start() # # p.join() # t = threading.Thread(target=lambda_handler, args=(api_event, api_context), daemon=False ) # t.start() # # I couldn't get this to work like I wanted it to. # # I wanted to spawn an autonomous thread that would finish running after this thread returns (dies) # # But I wasn't able to figure out if this was even possible to do. 
# # Since it currently executes "fast enough", I'm just going to wait for the processing to finish # t.join() # # https://stackoverflow.com/questions/53386968/multithreading-in-aws-lambda-using-python3 # LOGGER.info('Returning 200 OK to slack') # return helpers.form_response(OK_200, {}) # to test a long-running process doesn't die before the 'parent' thread: # for i in range(0, 29000000): # pass # *** DO NOT DELETE ABOVE, REVISIT IN FUTURE *** LOGGER.info(f'Detected Content-Type: {apievent_ContentType}') # At this stage, this could be multiple things (see notes below), so log entire dictionary as json LOGGER.debug('api_event: {}'.format(json.dumps(api_event))) # Set default processing function process_function = process_not_implemented # load dict with payload, set processing function to match body contents # Note: Being a little sloppy with this initially; # It is possible I will need to be more specific about different slack calls later # This may manifest with slack calls being processed with the wrong function if apievent_ContentType in ('application/x-www-form-urlencoded'): apievent_body_ = urllib.parse.parse_qs(api_event.get('body')) apievent_body_['slack_event_type'] = 'slash-command' process_function = process_event if 'payload' in apievent_body_: new_apievent_body_ = { 'payload' : [] } LOGGER.debug('apievent_body_: {}'.format(apievent_body_)) for each_el in apievent_body_.get('payload') or {}: new_apievent_body_['payload'].append( json.loads(each_el) ) apievent_body_ = new_apievent_body_ apievent_body_['slack_event_type'] = 'shortcut' process_function = process_shortcut LOGGER.debug('payload based apievent_body: {}'.format(apievent_body_)) elif apievent_ContentType in ('application/json'): apievent_body_ = api_event.get('body') try: apievent_body_ = json.loads(apievent_body_) apievent_body_['slack_event_type'] = 'json-string' except TypeError: pass if 'slack_event_type' not in apievent_body_: apievent_body_['slack_event_type'] = 'json' else: 
LOGGER.error(f'Content-Type unexpected: {apievent_ContentType}') return helpers.form_response(UNSUPPORTED_MEDIA_TYPE, {"Error": f"Unexpected Content-Type ({apievent_ContentType})"}) LOGGER.debug('body({}): {}'.format(apievent_ContentType, json.dumps(apievent_body_))) slack_event_dict = apievent_body_.get("event") or {} LOGGER.debug('event dict: {}'.format(json.dumps(slack_event_dict))) # Grab relevant information form the api_event # slack_body_raw = api_event["body"] # slack_body_dict = json.loads(slack_body_raw) slack_body_dict = apievent_body_ if F_B64_STUB in (request_headers.get(UNIT_TEST_HEADER_FLAGS) or ''): # If F_B64_STUB is present in request header UNIT_TEST_HEADER_FLAGS, # return the body dict as b64 encoded json to test exepected data structure stub_return = encode_b64_dict(apievent_body_) return helpers.form_response(OK_200, stub_return) # If the stage is production make sure that we are receiving events from slack otherwise we don't care if STAGE is "prod": LOGGER.debug(f"We are in production. So we are going to verify the request.") if not verify_request(request_headers["X-Slack-Signature"], request_headers["X-Slack-Request-Timestamp"], slack_body_raw, SECRETS["SIGNING_SECRET"]): return helpers.form_response(BAD_REQUEST, {"Error": "Bad Request Signature"}) # If cutoff is half-engaged, terminate execution here # if CUTOFF == '.5': # LOGGER.debug("Master cutoff switch is half-engaged. Exiting except for unit tests.") # return helpers.form_response(OK_200, {'CUTOFF ERROR': 'Master cutoff switch is half-engaged. Exiting except for unit tests.'}) # There are different types of payloads. # - slash-commands # - message-shortcuts # - challenge # - events # See unit tests for more information. 
# call appropriate processing function for slack call if CUTOFF: skip_slack_call = True else: skip_slack_call = False ret = process_function( body=apievent_body_, skip_slack=skip_slack_call ) # TODO - Remove # b64_body = (json.loads(ret_body or {})).get('req_body_base64') # if b64_body: # b64_bytes = b64_body.encode('utf-8') # msg_bytes = base64.b64decode(b64_bytes) # msg = msg_bytes.decode('utf-8') # body_dict = json.loads(msg) # else: # body_dict = json.loads(ret_body) # return body_dict # def b64_encode_dict() # if F_B64_RESP in (request_headers.get(UNIT_TEST_HEADER_FLAGS) or ''): # # If F_B64_RESP is present in request header UNIT_TEST_HEADER_FLAGS, # # return a response dict as b64 encoded json to test exepected behavior # lambda_response = encode_b64_dict( ret ) # return helpers.form_response(OK_200, lambda_response) # # If there is a request header indicating a unit test, # # then return the body dict as b64 encoded json # # to test if data is exactly as expected # body_json = json.dumps(apievent_body_) # body_bytes = body_json.encode('utf-8') # body_base64 = base64.b64encode(body_bytes) # stub_return = { # 'req_body_base64': '{}'.format(body_base64.decode('utf-8')) # } # return helpers.form_response(OK_200, stub_return) # This parses the slack body dict to get the event JSON # this will hold information about the text and # the user who did it. # Build the slack client. This allows us make slack API calls # read up on the python-slack-client here. We get this from # AWS secrets manager. https://github.com/slackapi/python-slackclient # slack_client = Slack_WebClient(secrets["BOT_TOKEN"]) # We need to discriminate between events generated by # the users, which we want to process and handle, # and those generated by the bot. # if "bot_id" in slack_body_dict: # logging.warning("Ignore bot event") # else: # # Get the text of the message the user sent to the bot, # # and reverse it. 
    # ret = process_event(slack_event_dict)
    # slack_event_dict = {}
    # if slack_event_dict:
    #     # Get the ID of the channel where the message was posted.
    #     channel_id = slack_event_dict["channel"]
    #     response = slack_client.chat_postMessage(
    #         channel=channel_id,
    #         text=ret
    #     )
    #     LOGGER.debug('Response: {}'.format(response))

    # Everything went fine return a good response.
    if CUTOFF == '.5':
        # Half-engaged cutoff: request was processed above, but we surface a
        # marker body so unit tests can detect this mode.
        LOGGER.warning("Master cutoff switch is half-engaged. Exiting except for unit tests.")
        return helpers.form_response(OK_200, {'CUTOFF ERROR': 'Master cutoff switch is half-engaged. Exiting except for unit tests.'})
    else:
        return helpers.form_response(OK_200, {})


def is_challenge(slack_event_body: dict) -> bool:
    """Is the event a challenge from slack? If yes return the correct response to slack

    Args:
        slack_event_body (dict): The slack event JSON

    Returns:
        returns True if it is a slack challenge event
        returns False otherwise
    """
    # Slack's URL-verification handshake posts a body containing a
    # "challenge" key; presence of the key alone marks the event.
    if "challenge" in slack_event_body:
        LOGGER.info("Challenge Data: {}".format(slack_event_body['challenge']))
        return True
    return False


def verify_request(slack_signature: str, slack_timestamp: str, slack_event_body: str, app_signing_secret) -> bool:
    """Does the header sent in the request match the secret token.

    If it doesn't it may be an insecure request from someone trying to pose as your
    application. You can read more about the url-verification and why this is necessary
    here https://api.slack.com/docs/verifying-requests-from-slack

    Args:
        app_signing_secret (str): The apps local signing secret that is given
        by slack to compare with formulated
        slack_signature (str): The header of the http_request from slack X-Slack-Signature
        slack_timestamp (str): The header of the http_request from slack X-Slack-Request-Timestamp
        slack_event_body (str): The slack event body that must be formulated as a string

    Returns:
        A boolean. If True the request was valid if False request was not valid.
    """
    if abs(time.time() - float(slack_timestamp)) > 60 * 5:
        # The request is older than 5 minutes; reject to limit replay attacks.
        LOGGER.warning(f"Request verification failed. Timestamp was over 5 mins old for the request")
        return False

    # Recompute Slack's v0 signature: HMAC-SHA256 over "v0:<timestamp>:<body>"
    # keyed with the app's signing secret.
    sig_basestring = f"v0:{slack_timestamp}:{slack_event_body}".encode('utf-8')
    slack_signing_secret = bytes(app_signing_secret, 'utf-8')
    my_signature = 'v0=' + hmac.new(slack_signing_secret, sig_basestring, hashlib.sha256).hexdigest()

    # Constant-time comparison avoids leaking signature bytes via timing.
    if hmac.compare_digest(my_signature, slack_signature):
        return True
    else:
        LOGGER.warning(f"Verification failed. my_signature: {my_signature} slack_signature: {slack_signature}")
        return False
StarcoderdataPython
2799
from math import pi

from numpy import array, ndarray, divide, sqrt, argsort, sort, diag, trace
from numpy.linalg import eig, norm


class HartreeFock():
    """Toy restricted Hartree-Fock (RHF) SCF solver in a fixed 1s STO basis.

    The four basis-function exponents in ``zeta`` are hard-coded, so this
    models a single atom; electrons are spin-paired, one pair per occupied
    molecular orbital.
    """

    # Slater-type-orbital exponents for the fixed atomic-orbital basis.
    zeta = array([38.474970, 5.782948, 1.242567, 0.298073])
    num_aos = len(zeta)
    num_mos = 0

    # SCF convergence thresholds for the total energy and the density matrix.
    energy_tolerance = 0.0001
    density_tolerance = 0.001

    def __init__(self, num_elec):
        """Set up an RHF calculation for `num_elec` (paired) electrons.

        Raises:
            Exception: if `num_elec` is odd, or if more molecular orbitals
                would be needed than there are atomic orbitals.
        """
        # Make sure we can pair electrons.
        if num_elec % 2 != 0:
            raise Exception("Can't do a RHF with", num_elec, "electrons.")
        print("Restricted Hartree-Fock with", num_elec, "electron(s).")

        # We're RHF, so pair up spins in each molecular orbital.
        self.num_mos = num_elec // 2
        if self.num_mos > self.num_aos:
            raise Exception("Can't create", self.num_mos,
                            "molecular orbital(s) from", self.num_aos,
                            "atomic orbital(s).")
        print(self.num_aos, "atomic orbital(s) and", self.num_mos,
              "molecular orbital(s).")
        print("Zeta: ", self.zeta)

        # SCF state is per-instance now; the original kept `prev_energy` and a
        # mutable `prev_density = []` at class level, shared across instances.
        self.prev_energy = 0
        self.prev_density = ndarray(
            shape=(self.num_aos, self.num_aos), dtype=float, order='C')

    def one_electron_integrals(self):
        """Build the one-electron integral matrices over the 1s STO basis.

        Returns:
            (overlap, kinetic, nucattr) ndarrays, each (num_aos, num_aos).
        """
        def overlap_kernel(zeta_i, zeta_j):
            return pow(pi / (zeta_i + zeta_j), 1.5)

        def kinetic_kernel(zeta_i, zeta_j):
            return 3 * pow(pi, 1.5) * (zeta_i * zeta_j) / pow(zeta_i + zeta_j, 2.5)

        def nucattr_kernel(zeta_i, zeta_j):
            return (-4 * pi) / (zeta_i + zeta_j)

        # Initialise our matrices.
        overlap = ndarray(shape=(self.num_aos, self.num_aos), dtype=float, order='C')
        kinetic = ndarray(shape=(self.num_aos, self.num_aos), dtype=float, order='C')
        nucattr = ndarray(shape=(self.num_aos, self.num_aos), dtype=float, order='C')

        for i_ao in range(self.num_aos):
            for j_ao in range(self.num_aos):
                overlap[i_ao, j_ao] = overlap_kernel(self.zeta[i_ao], self.zeta[j_ao])
                kinetic[i_ao, j_ao] = kinetic_kernel(self.zeta[i_ao], self.zeta[j_ao])
                nucattr[i_ao, j_ao] = nucattr_kernel(self.zeta[i_ao], self.zeta[j_ao])

        return overlap, kinetic, nucattr

    def two_electron_integrals(self):
        """Build the rank-4 tensor of two-electron repulsion integrals.

        Returns:
            ndarray of shape (num_aos, num_aos, num_aos, num_aos).
        """
        def tei_kernel(zeta_i, zeta_j, zeta_k, zeta_l):
            temp_1 = (zeta_i + zeta_j) * (zeta_k + zeta_l)
            temp_2 = sqrt(zeta_i + zeta_j + zeta_k + zeta_l)
            return 2 * pow(pi, 2.5) / (temp_1 * temp_2)

        teis = ndarray(
            shape=(self.num_aos, self.num_aos, self.num_aos, self.num_aos),
            dtype=float, order='C')
        for i_ao in range(self.num_aos):
            for j_ao in range(self.num_aos):
                for k_ao in range(self.num_aos):
                    for l_ao in range(self.num_aos):
                        teis[i_ao, j_ao, k_ao, l_ao] = tei_kernel(
                            self.zeta[i_ao], self.zeta[j_ao],
                            self.zeta[k_ao], self.zeta[l_ao])
        return teis

    def basis_transformation_matrix(self, overlap):
        """Return S^(-1/2), the symmetric-orthogonalisation transform.

        Raises:
            Exception: if the overlap matrix is not positive definite.
        """
        # Get the eigenvalues and eigenvectors of the overlap matrix.
        overlap_evals, overlap_evecs = eig(overlap)

        # BUG FIX: numpy's sqrt/divide return nan (with a RuntimeWarning) for
        # non-positive eigenvalues instead of raising, so the original bare
        # try/except around this computation could never trigger. Check the
        # eigenvalues explicitly instead.
        if (overlap_evals <= 0).any():
            raise Exception("Overlap matrix is not positive definite.")

        # Diagonal matrix with entries 1/sqrt(eigenvalue).
        inv_sqrt_evals = diag(divide(1., sqrt(overlap_evals)))

        # Construct the basis transformation matrix and return it.
        return overlap_evecs @ inv_sqrt_evals @ overlap_evecs.T

    def fock_matrix(self, core_hamiltonian, teis, density):
        """Assemble the Fock matrix: core Hamiltonian + Coulomb - 1/2 exchange."""
        fock = ndarray(shape=density.shape, dtype=float, order='C')
        for i_ao in range(self.num_aos):
            for j_ao in range(self.num_aos):
                fock[i_ao, j_ao] = core_hamiltonian[i_ao, j_ao]
                for k_ao in range(self.num_aos):
                    for l_ao in range(self.num_aos):
                        coulomb = teis[i_ao, k_ao, j_ao, l_ao]
                        exchange = teis[i_ao, k_ao, l_ao, j_ao]
                        fock[i_ao, j_ao] += density[k_ao, l_ao] * (coulomb - 0.5 * exchange)
        return fock

    def density_matrix(self, overlap, basis_transform, fock):
        """Diagonalise `fock` and build the closed-shell density matrix.

        Returns:
            (fock_evecs, density): MO coefficients (one MO per column,
            normalised against `overlap`) and the density matrix with double
            occupancy of the lowest `num_mos` orbitals.
        """
        def ordered_eigensystem(matrix):
            # Eigenpairs sorted by ascending eigenvalue; the rows of the basis
            # transform are permuted with the same index order so we can map
            # the eigenvectors back to the original basis afterwards.
            evals, evecs = eig(matrix)
            ordered_indices = argsort(evals)
            ordered_evals = sort(evals)
            ordered_evecs = ndarray(shape=evecs.shape, dtype=float, order='C')
            ordered_transform = ndarray(shape=evecs.shape, dtype=float, order='C')
            for i_evec in range(len(ordered_evals)):
                ordered_evecs[:, i_evec] = evecs[:, ordered_indices[i_evec]]
                ordered_transform[i_evec, :] = basis_transform[ordered_indices[i_evec], :]
            return ordered_evals, ordered_evecs, ordered_transform

        # Transform the Fock matrix to the orthogonal basis, diagonalise, and
        # transform the eigenvectors back to the original basis.
        fock = basis_transform.T @ fock @ basis_transform
        fock_evals, fock_evecs, new_transform = ordered_eigensystem(fock)
        fock_evecs = new_transform @ fock_evecs

        # Normalise the molecular orbitals against the overlap matrix.
        for i_mo in range(self.num_aos):
            ao_coeffs = fock_evecs[:, i_mo]
            # Renamed from `norm` so we don't shadow numpy.linalg.norm, which
            # check_convergence relies on.
            mo_norm = ao_coeffs.T @ overlap @ ao_coeffs
            fock_evecs[:, i_mo] /= sqrt(mo_norm)

        # Accumulate the density over occupied MOs only (factor 2 for the two
        # paired spins); virtual orbitals are computed but discarded here.
        density = ndarray(shape=overlap.shape, dtype=float, order='C')
        for i_ao in range(self.num_aos):
            for j_ao in range(self.num_aos):
                density[i_ao, j_ao] = 0.0
                for i_mo in range(self.num_mos):
                    density[i_ao, j_ao] += 2 * fock_evecs[i_ao, i_mo] * fock_evecs[j_ao, i_mo]

        return fock_evecs, density

    def scf_energy(self, density, core_hamiltonian, fock):
        """Electronic SCF energy: (1/2) * Tr[D (H_core + F)]."""
        energy = 0.0
        for i_ao in range(self.num_aos):
            for j_ao in range(self.num_aos):
                energy += 0.5 * density[i_ao, j_ao] * (
                    core_hamiltonian[i_ao, j_ao] + fock[i_ao, j_ao])
        return energy

    def check_convergence(self, energy, density):
        """Compare energy/density against the previous iteration's values.

        Returns:
            (energy_converged, density_converged) booleans.
        """
        energy_converged = abs(energy - self.prev_energy) < self.energy_tolerance
        self.prev_energy = energy

        density_converged = norm(density - self.prev_density) < self.density_tolerance
        self.prev_density = density

        return energy_converged, density_converged

    def mulliken(self, overlap, density):
        """Mulliken population: Tr[D S] (total electron count)."""
        return trace(density @ overlap)

    def run(self, num_cycles):
        """Drive the SCF loop for at most `num_cycles` iterations."""
        print("Hartree-Fock will run for a maximum of", num_cycles,
              "SCF iteration(s).")

        overlap, kinetic, nucattr = self.one_electron_integrals()
        core_hamiltonian = kinetic + nucattr
        teis = self.two_electron_integrals()
        basis_transform = self.basis_transformation_matrix(overlap)

        # Initial guess: diagonalise the core Hamiltonian, i.e. a Fock matrix
        # with the electron-electron interaction switched off.
        _, density = self.density_matrix(overlap, basis_transform,
                                         core_hamiltonian)
        energy = self.scf_energy(density, core_hamiltonian, core_hamiltonian)

        for i in range(num_cycles):
            fock = self.fock_matrix(core_hamiltonian, teis, density)
            fock_evecs, density = self.density_matrix(overlap, basis_transform,
                                                      fock)
            energy = self.scf_energy(density, core_hamiltonian, fock)
            print("Iteration", i, "SCF Energy:", energy)

            energy_converged, density_converged = self.check_convergence(
                energy, density)
            if energy_converged and density_converged:
                print("SCF has converged!")
                for i_mo in range(self.num_mos):
                    print("Molecular Orbital", i_mo, "Coefficients :",
                          fock_evecs[:, i_mo])
                print("Mulliken charge:", self.mulliken(overlap, density))
                break

            if i == num_cycles - 1:
                print("SCF failed to converge.")
                print("Energy Convergence Check:", energy_converged)
                print("Density Convergence Check:", density_converged)

        # NOTE(review): the index pairing below ([i_ao, j_mo] * [j_ao, i_mo])
        # looks transposed relative to the conventional MO-basis transform
        # C.T @ F @ C; for a converged (near-diagonal) Fock matrix the result
        # is the same, so behavior is preserved — confirm before relying on
        # off-diagonal elements.
        fock_mo_basis = ndarray(shape=(self.num_mos, self.num_mos),
                                dtype=float, order='C')
        for i_mo in range(self.num_mos):
            for j_mo in range(self.num_mos):
                fock_mo_basis[i_mo, j_mo] = 0.0
                for i_ao in range(self.num_aos):
                    for j_ao in range(self.num_aos):
                        fock_mo_basis[i_mo, j_mo] += (
                            fock_evecs[i_ao, j_mo] * fock_evecs[j_ao, i_mo] *
                            fock[i_ao, j_ao])
        print(fock_mo_basis)


if __name__ == "__main__":
    hf = HartreeFock(4)
    hf.run(2000)
StarcoderdataPython
1717210
# coding=utf-8
# Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides bank-like features and other functions for HypeCoins."""

# pylint: disable=broad-except

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import collections
import math
import numbers
import random
import re
import threading

from absl import logging
from hypebot.core import schedule_lib
from hypebot.core import util_lib
from hypebot.data import messages
from hypebot.protos import bank_pb2
from hypebot.protos import bet_pb2
from hypebot.protos import user_pb2
import six

# pylint: disable=line-too-long
# pylint: enable=line-too-long
from google.protobuf import json_format

# "Accounts" where various transactions end up
BOOKIE_ACCOUNT = user_pb2.User(user_id='_hypebank', display_name='HypeBank')
FEE_ACCOUNT = BOOKIE_ACCOUNT
MINT_ACCOUNT = BOOKIE_ACCOUNT
SCHOLARSHIP_ACCOUNT = user_pb2.User(
    user_id='_hypescholarship', display_name='HypeScholarship')
SUBSCRIPTION_ACCOUNT = BOOKIE_ACCOUNT

# System-owned account ids (used e.g. by Thievery to protect them from Rob).
# pyformat: disable
HYPECENTS = frozenset([
    BOOKIE_ACCOUNT.user_id,
    FEE_ACCOUNT.user_id,
    MINT_ACCOUNT.user_id,
    SCHOLARSHIP_ACCOUNT.user_id,
    SUBSCRIPTION_ACCOUNT.user_id,
])
# pyformat: enable


class Thievery(object):
  """Allows nefarious behavior.

  The more you steal, the more you get caught. The more you are a victim, the
  more you catch peeps.

  We keep a score which is an exponential decay of the sum of past successful
  theft amounts for victims and thiefs. Your percent of the total score
  impacts future theft chances. Hypebot has a fixed large number in each pool
  to prevent solitary thefts from overloading the system.

  Periodically, all scores are reduced except hypebot's.
  """

  # Rate to decay scores. I.e., score_t+1 = score_t * DECAY_RATE
  _DECAY_RATE = 0.75
  # Arrow object specifying when decay should occur.
  _DECAY_TIME = util_lib.ArrowTime(2)
  # Baseline percentage of victim balance that can be stolen half of the time.
  _BASE_BALANCE_PERCENT = 0.02
  # Fixed thief / victim score for hypebot.
  _HYPEBOT_SCORE = 1000

  def __init__(self, store, bank, bot_name, timezone):
    # store/bank are injected dependencies; the daily decay is registered on
    # construction via the scheduler.
    self._store = store
    self._bank = bank
    self._bot_name = bot_name
    self._protected_peeps = [self._bot_name] + list(HYPECENTS)

    self._scheduler = schedule_lib.HypeScheduler(timezone)
    self._scheduler.DailyCallback(
        # Ensures we schedule this event at 2am local time instead of UTC.
        self._DECAY_TIME.to(timezone),
        self._store.RunInTransaction,
        self._DecayAllScores)

  def Rob(self, thief, victim, amount, msg_fn):
    """Attempt a robbery."""
    if amount < 0:
      msg_fn(None, 'Did you mean !hc gift?')
      return

    # Robbing a system account backfires: the thief is fined instead.
    if victim.user_id in self._protected_peeps:
      msg_fn(None, 'The Godfather protects his family.')
      self._bank.ProcessPayment(
          thief,
          user_pb2.User(user_id=self._bot_name, display_name=self._bot_name),
          500, 'In Soviet Russia, %s steals from you.' % self._bot_name,
          msg_fn)
      return

    victim_balance = self._bank.GetBalance(victim)
    if victim_balance <= 0:
      msg_fn(None, 'You cannot milk a dead cow.')
      return

    # Failure probability is a logistic function of the fraction of the
    # victim's balance requested, shifted by both parties' alert levels.
    thief_alert = self._GetPDF('thief')[thief.user_id]
    victim_alert = self._GetPDF('victim')[victim.user_id]
    offset = self._BASE_BALANCE_PERCENT * (1 - thief_alert - victim_alert)
    failure_chance = self._Sigmoid(amount / victim_balance, offset)

    rob_attempt_score = random.random()

    logging.info('(%s: %0.2f, %s: %0.2f) %s of %s attempt %0.2f >? %0.2f',
                 thief, thief_alert, victim, victim_alert, amount,
                 victim_balance, rob_attempt_score, failure_chance)

    if rob_attempt_score < failure_chance:
      # Failed attempt: thief forfeits up to `amount` into the scholarship
      # fund, which is immediately redistributed to past victims.
      self._bank.ProcessPayment(thief, SCHOLARSHIP_ACCOUNT,
                                min(self._bank.GetBalance(thief), amount),
                                'Victim scholarship fund', msg_fn)
      self._DistributeToPastVictims(msg_fn)
      if (rob_attempt_score <
          failure_chance * thief_alert / (thief_alert + victim_alert + 1e-6)):
        msg_fn(None, '%s is a known thief and was caught.' % thief.display_name)
      else:
        msg_fn(
            None, '%s is on high alert and caught %s.' %
            (victim.display_name, thief.display_name))
      return

    # TODO: Fold ProcessPayment into the UpdateScores tx.
    # We don't worry about the victim having insufficient funds since there is a
    # 0% chance of stealing 100% of someone's money.
    if self._bank.ProcessPayment(victim, thief, amount, 'Highway robbery',
                                 msg_fn):
      self._store.RunInTransaction(self._UpdateScores, thief, victim, amount)
      formatted_amount = util_lib.FormatHypecoins(amount)
      msg_fn(
          None, '%s stole %s from %s' %
          (thief.display_name, formatted_amount, victim.display_name))
      # We privmsg the victim to make sure they know who stole their hypecoins.
      msg_fn(
          victim, 'You\'ve been robbed! %s stole %s' %
          (thief.display_name, formatted_amount))

  def _Sigmoid(self, value, offset, scale=200.0):
    # Logistic curve centered at `offset`; the large default scale makes it
    # behave nearly like a step function around the offset.
    return 1 / (1 + math.exp(-scale * (value - offset)))

  def _GetScores(self, collection, tx=None):
    """Gets scores for collection.

    Args:
      collection: {string} which set of scores to get.
      tx: {storage_lib.HypeTransaction} an optional transaction to pass along
        to GetJsonValue.

    Returns:
      {dict<string, float>} scores keyed by name.
    """
    scores = self._store.GetJsonValue(self._bot_name, 'scores:%s' % collection,
                                      tx)
    # Default to hypebot's fixed score so fresh stores behave sensibly.
    return collections.defaultdict(
        int, scores or {self._bot_name: self._HYPEBOT_SCORE})

  def _GetPDF(self, collection):
    """Gets probability density function of scores for collection."""
    scores = self._GetScores(collection)
    total_score = sum(scores.values())
    pdf = {peep: score / total_score for peep, score in scores.items()}
    return collections.defaultdict(float, pdf)

  def _AddToScore(self, collection, name, amount, tx=None):
    """Add {amount} to {names}'s score in {collection}."""
    scores = self._GetScores(collection, tx)
    scores[name] += amount
    logging.info('Updating %s scores: %s', collection, scores)
    self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores,
                             tx)

  def _UpdateScores(self, thief, victim, amount, tx=None):
    """Record a successful theft against both score pools (run in a tx)."""
    self._AddToScore('thief', thief.user_id, amount, tx)
    self._AddToScore('victim', victim.user_id, amount, tx)
    return True

  def _DecayAllScores(self, tx=None):
    """Daily decay of both thief and victim scores (run in a tx)."""
    self._DecayScores('thief', tx)
    self._DecayScores('victim', tx)
    return True

  def _DecayScores(self, collection, tx=None):
    """Decay scores for {collection}."""
    # Hypebot's score is pinned back to the fixed constant after decay.
    scores = {
        peep: int(score * self._DECAY_RATE)
        for peep, score in self._GetScores(collection, tx).items()
        if score > 0
    }
    scores[self._bot_name] = self._HYPEBOT_SCORE
    logging.info('Updating %s scores: %s', collection, scores)
    self._store.SetJsonValue(self._bot_name, 'scores:%s' % collection, scores,
                             tx)

  def _DistributeToPastVictims(self, msg_fn):
    """Distribute funds in scholarship account to past victims."""
    victim_scores = self._GetPDF('victim')
    scholarship_balance = self._bank.GetBalance(SCHOLARSHIP_ACCOUNT)
    self._bank.ProcessPayment(
        SCHOLARSHIP_ACCOUNT,
        [user_pb2.User(user_id=v) for v in victim_scores.keys()],
        scholarship_balance,
        'Victim scholarship fund',
        msg_fn,
        merchant_weights=victim_scores.values())


class Bookie(object):
  """Class for managing a betting ledger.

  The data-model used by Bookie is rows mapping to dicts serialized as
  strings.
""" _BET_SUBKEY = 'bets' _ledger_lock = threading.RLock() def __init__(self, store, bank, inventory): self._store = store self._bank = bank self._inventory = inventory def LookupBets(self, game, user: user_pb2.User = None, resolver=None): """Returns bets for game, optionally filtered by user or resolver.""" with self._ledger_lock: bets = self._GetBets(game) # Filtering is done slightly strangely, but it ensures that the same # structure is kept regardless of filtering and that if a filter was given # but the game has no matches for that filter, we return an empty dict if user: user_id = user.user_id bets = {user_id: bets[user_id]} if user_id in bets else {} if resolver: bets = { user_id: [bet for bet in user_bets if bet.resolver == resolver ] for user_id, user_bets in bets.items() } bets = collections.defaultdict(list, bets) return bets # TODO: PlaceBet needs to be fixed to throw on error. def PlaceBet(self, game, bet, msg_fn, more=False): """Places a bet for game on behalf of user. PlaceBet will withdraw funds from the bank to fund the bet. Args: game: The game this bet is for. bet: Bet proto describing what bet to place. msg_fn: {callable(channel, msg)} function to send messages. more: A boolean that decides if the bet amount should be added to any current bets. Returns: {boolean} whether bet placing was successful or not. 
""" return self._store.RunInTransaction(self._PlaceBet, game, bet, more, msg_fn) def _PlaceBet(self, game, bet, more, msg_fn, *unused_args, **kwargs): """Internal version of PlaceBet to be run with a transaction.""" bet.game = game.name with self._ledger_lock: tx = kwargs.get('tx') if not tx: logging.error('_PlaceBet can only be called with a transaction.') return bets = self._GetBets(game.name, tx=tx) prior_bet = None for b in bets[bet.user.user_id]: if bet.target == b.target: prior_bet = b logging.info('%s has a prior_bet for %s:%s => %s', bet.user, game.name, bet.target, prior_bet) break if more and prior_bet: bet.amount += prior_bet.amount # Special handling to ensure we don't go overboard for lottery. if game.name == 'lottery': bet.amount = game.CapBet(bet.user, bet.amount, bet.resolver) net_amount = bet.amount - (prior_bet.amount if prior_bet else 0) if net_amount < 0: msg_fn(bet.user, 'Money on the table is not yours. Try a higher amount.') return False if prior_bet: details = 'Bet updated. Replaced %s with %s' % ( game.FormatBet(prior_bet), game.FormatBet(bet)) else: details = 'Bet placed. %s' % game.FormatBet(bet) if not self._bank.ProcessPayment(bet.user, BOOKIE_ACCOUNT, net_amount, details, msg_fn): return False # We do this after the payment processing so that we don't delete bets if # we can't correctly update them if prior_bet: bets[bet.user.user_id].remove(prior_bet) bets[bet.user.user_id].append(bet) self._SetBets(game.name, bets, tx=tx) return True def SettleBets(self, game, resolver, msg_fn, *args, **kwargs): """Settles all bets for game, clearing the ledger and paying out winnings. Args: game: The game to settle bets for. resolver: The bot trying to settle bets. Used to filter out bets placed by other bots which this bot shouldn't resolve. msg_fn: {callable(channel, msg)} function to send user messages. *args: Additional positional arguments to pass to settlement_fn. **kwargs: Additional keyword arguments to pass to settlement_fn. 
Returns: List of messages to send as notifications of settling bets. """ return self._store.RunInTransaction(self._SettleBets, game, resolver, msg_fn, *args, **kwargs) def _SettleBets(self, game, resolver, msg_fn, *args, **kwargs): """Internal version of SettleBets to be run with a transaction.""" with self._ledger_lock: tx = kwargs.get('tx') if not tx: logging.error('_SettleBets can only be called with a transaction.') return [] bets = self._GetBets(game.name, tx) if not bets: logging.warning('Tried to settle bets for %s, but no bets were found', game.name) return [] # Filter out bets with 'resolver' set and != the current bot unresolved_bets = collections.defaultdict(list) filtered_bets = collections.defaultdict(list) for user_id, user_bets in bets.items(): for bet in user_bets: if not bet.resolver or bet.resolver == resolver: filtered_bets[user_id].append(bet) else: unresolved_bets[user_id].append(bet) if not filtered_bets: logging.info('No bets found for resolver %s', resolver) return [] winner_info, unused_bets, notifications = game.SettleBets( filtered_bets, msg_fn, *args, **kwargs) # Merge bets that were filtered out of the pool with bets unused by the # game itself. We can't use a raw update here since we need to merge the # lists of bets for users with bets in both dicts. 
for user_id, user_bets in unresolved_bets.items(): if user_id in unused_bets: unused_bets[user_id] += user_bets else: unused_bets[user_id] = user_bets self._SetBets(game.name, unused_bets, tx=tx) for winner, winnings in winner_info: if isinstance(winnings, numbers.Number): if not self._bank.ProcessPayment(BOOKIE_ACCOUNT, winner, winnings, 'Gambling payout', msg_fn): logging.error('Couldn\'t pay %s %s for winning %s', winner, winnings, game.name) else: self._inventory.AddItem(winner, winnings) return notifications def _GetBets(self, row, tx=None): json_bets = self._store.GetJsonValue(row, self._BET_SUBKEY, tx) or {} bets = { u: [json_format.ParseDict(b, bet_pb2.Bet()) for b in user_bets ] for u, user_bets in json_bets.items() } return collections.defaultdict(list, bets) def _SetBets(self, row, bets, tx=None): json_bets = { u: [json_format.MessageToDict(b) for b in user_bets ] for u, user_bets in bets.items() } return self._store.SetJsonValue(row, self._BET_SUBKEY, json_bets, tx=tx) # TODO: Allow holds on accounts to ensure coins will exist for a # ProcessPayment in the near future. 
class Bank(object):
    """Class for managing user balances of hypecoins in the HypeBank."""

    _BALANCE_SUBKEY = 'bank:balance'
    _TRANSACTION_SUBKEY = 'bank:transaction'
    # Overdraft fee: at least _MIN_OVERDRAFT_FEE, or 5% of the current balance.
    _MIN_OVERDRAFT_FEE = 5
    _MAX_OVERDRAFT_FEE_PERCENT = 0.05

    # Bank class also might want a way to determine if a user has a balance or not
    def __init__(self, store, bot_name):
        self._store = store
        self._bot_name = bot_name
        # Guards the balance-check + debit sequence in _Withdraw.
        self._withdraw_lock = threading.RLock()

    def GetBalance(self, user):
        """Returns the user's balance as an int (0 for missing/bad values)."""
        balance = self._store.GetValue(user.user_id, self._BALANCE_SUBKEY)
        if not balance:
            return 0
        return util_lib.SafeCast(balance, int, 0)

    def GetUserBalances(self, plebs_only=False):
        """Returns dict of user_ids mapping to their balance for all users."""
        user_balances = self._store.GetSubkey(self._BALANCE_SUBKEY)
        # NOTE(review): unpacking `user_id, balance` assumes GetSubkey yields
        # (key, value) pairs, not a dict — verify against the store API.
        # pylint: disable=g-complex-comprehension
        return {
            user_id: util_lib.SafeCast(balance, int, 0)
            for user_id, balance in user_balances
            if (not plebs_only or user_id not in HYPECENTS) and
            not user_id.startswith('http')
        }
        # pylint: enable=g-complex-comprehension

    def GetTransactions(self, user):
        """Returns the user's 5 most recent LedgerEntry records."""
        json_entries = self._store.GetHistoricalValues(user.user_id,
                                                       self._TRANSACTION_SUBKEY, 5)
        return [
            json_format.ParseDict(entry, bank_pb2.LedgerEntry())
            for entry in json_entries
        ]

    def GetBankStats(self, plebs_only=False):
        """Returns the total number of accounts and the sum of all balances."""
        user_balances = self.GetUserBalances(plebs_only=plebs_only)
        balance_sum = sum(user_balances.values())
        return len(user_balances), balance_sum

    def MintNewHypeCoins(self):
        """Creates new HypeCoins if MINT_ACCOUNT is running low.

        Specifically, if the MINT_ACCOUNT has less than 25% of the total HypeCoin
        market size, this method will mint new coins scaling linearly with the
        number of users, and logarithmically with the total market size.
        """
        mint_balance = self.GetBalance(MINT_ACCOUNT)
        num_users, coins_in_circulation = self.GetBankStats()
        if mint_balance >= coins_in_circulation // 4:
            logging.info(
                'Mint balance (%s) >= 25%% of market (%s), not minting new coins',
                util_lib.FormatHypecoins(mint_balance),
                util_lib.FormatHypecoins(coins_in_circulation))
            return

        num_coins_to_mint = max(
            5000, int(math.log(coins_in_circulation, 2) * num_users * 1000))
        logging.info('Minting %s', util_lib.FormatHypecoins(num_coins_to_mint))
        entry = bank_pb2.LedgerEntry(
            counterparty={
                'user_id': '_ether',
                'display_name': 'Ether'
            },
            amount=num_coins_to_mint,
            details='Minting')
        entry.create_time.GetCurrentTime()
        if not self._Deposit(MINT_ACCOUNT, num_coins_to_mint, entry, None):
            logging.error('Minting %s failed',
                          util_lib.FormatHypecoins(num_coins_to_mint))

    def ParseAmount(self, user, amount_str, msg_fn):
        """Read user's minds.

        Convert a string into an amount of hypecoins.

        Args:
          user: {string} user name.
          amount_str: {string} amount as string.
          msg_fn: {callable(channel, msg)} function to send messages.

        Returns:
          {Optional[int]} Amount as int or None if it can't be parsed.
        """

        # Parser handlers.
        # Can return either an int value or a string. Strings will be replied to the
        # user and replaced with a None value.
        def _IntAmount(match, unused_balance):
            return int(match.groups()[0])

        def _HumanIntAmount(match, unused_balance):
            try:
                return int(util_lib.UnformatHypecoins(match.groups()[0]))
            except ValueError:
                return None

        def _HexAmount(match, unused_balance):
            return int(match.groups()[0], 16)

        def _RandomBalance(unused_match, balance):
            return random.randint(1, balance)

        def _MemeTeam(unused_match, unused_balance):
            # TODO: Determine a way to trigger commands at will.
            # self.Meme(channel, None, None)
            return 'ayyy'

        # List of [regex, parser handler]. First matching regex wins.
        parsers = (
            (r'%s$' % self._bot_name,
             lambda x, y: 'You can\'t put a price on this bot.'),
            (r'(dank)? ?memes?$', _MemeTeam),
            (r'(-?[0-9]+)$', _IntAmount),
            (r'(?:0x)([0-9,a-f]+)$', _HexAmount),
            (r'(a )?positive int$', _RandomBalance),
            (r'(-?[0-9.]+ ?[A-Za-z]+)$', _HumanIntAmount),
        )

        balance = self.GetBalance(user)
        amount_str = amount_str.lower().strip()
        # "All in" style strings bet the whole balance.
        if amount_str in messages.GAMBLE_STRINGS:
            return balance

        amount = None
        for parser in parsers:
            match = re.match(parser[0], amount_str)
            if match:
                amount = parser[1](match, balance)
                break
        if amount is None:
            amount = 'Unrecognized amount.'
        # A string result is an error/easter-egg reply, not an amount.
        if isinstance(amount, six.string_types):
            msg_fn(None, amount)
            amount = None
        return amount

    def FineUser(self, user, amount, details, msg_fn):
        """Debits `amount` from the user as a fine (may go negative)."""
        return self.ProcessPayment(
            user,
            BOOKIE_ACCOUNT,
            amount,
            'Fine: %s' % details,
            msg_fn,
            can_overdraft=True)

    def ProcessPayment(self,
                       customer,
                       merchants,
                       num_coins,
                       details,
                       msg_fn,
                       can_overdraft=False,
                       merchant_weights=None):
        """Process payment from customer to merchant.

        The merchant will only be paid if the customer has the funds.

        Args:
          customer: {User} name of account to withdraw money.
          merchants: {User or list<User>} name(s) of account(s) to deposit money.
          num_coins: {int} number of hypecoins to transfer.
          details: {string} details of transaction.
          msg_fn: {callable(channel, msg)} function to send messages.
          can_overdraft: {boolean} whether it is possible to overdraft the account.
            If True, the account balance can go negative and no fees will be
            charged. If False, the transaction will fail and an overdraft fee will
            be assessed if there are insufficient funds for the transaction.
          merchant_weights: {list<float>} Weight of num_coins that each merchant
            will receive. Defaults to all 1's.

        Returns:
          {boolean} whether payment was successful.
        """
        if num_coins < 0:
            logging.error('ProcessPayment called with negative value: %s, %s -> %s',
                          num_coins, customer, merchants)
            return False
        if isinstance(merchants, user_pb2.User):
            merchants = [merchants]
        if merchant_weights is None:
            merchant_weights = [1] * len(merchants)
        # Normalize weights so they sum to 1.
        total_weight = sum(merchant_weights)
        merchant_weights = [w / total_weight for w in merchant_weights]
        amount_paid = 0
        success = True
        for i, (merchant, weight) in enumerate(zip(merchants, merchant_weights)):
            # Ensure we don't overpay due to rounding.
            merchant_amount = min(
                int(round(num_coins * weight)), num_coins - amount_paid)
            # Give the last person the extra coin to compensate for them losing a coin
            # sometimes.
            if i == len(merchants) - 1:
                merchant_amount = num_coins - amount_paid
            if merchant_amount > 0:
                withdrawl_entry = bank_pb2.LedgerEntry(
                    details=details, counterparty=merchant)
                withdrawl_entry.create_time.GetCurrentTime()
                deposit_entry = bank_pb2.LedgerEntry(
                    details=details,
                    counterparty=customer,
                    create_time=withdrawl_entry.create_time)
                # NOTE(review): a successful withdraw followed by a failed deposit
                # leaves the coins debited but undelivered — confirm store semantics.
                if (self._Withdraw(customer, merchant_amount, withdrawl_entry,
                                   msg_fn, can_overdraft) and
                        self._Deposit(merchant, merchant_amount, deposit_entry,
                                      msg_fn)):
                    amount_paid += merchant_amount
                else:
                    success = False
        return success

    def _Deposit(self, user: user_pb2.User, num_coins: int,
                 entry: bank_pb2.LedgerEntry, msg_fn) -> bool:
        """Adds num_coins to user's balance.

        Args:
          user: User of account into which to deposit.
          num_coins: Number of hype coins to deposit.
          entry: Details of transaction.
          msg_fn: {callable(channel, msg)} function to send messages.

        Returns:
          Whether deposit was successful.
        """
        if num_coins < 0:
            logging.error('Deposit called with negative value: %s, %s', user,
                          num_coins)
            return False
        entry.amount = num_coins
        tx_name = 'CREDIT %s %s' % (num_coins, user.user_id)
        self._store.RunInTransaction(
            self._BankTransaction, user, num_coins, entry, tx_name=tx_name)
        if msg_fn:
            msg_fn(
                user, '%s deposited into your account. (%s)' %
                (util_lib.FormatHypecoins(num_coins), entry.details))
        # TODO: Maybe fix returns now that RunInTransaction can throw.
        return True

    def _Withdraw(self,
                  user: user_pb2.User,
                  num_coins: int,
                  entry: bank_pb2.LedgerEntry,
                  msg_fn,
                  can_overdraft: bool = False) -> bool:
        """Subtracts num_coins from user's balance.

        Args:
          user: User of account from which to withdraw.
          num_coins: Number of hype coins to withdraw.
          entry: Details of transaction.
          msg_fn: {callable(channel, msg)} function to send messages.
          can_overdraft: Whether it is possible to overdraft the account. If True,
            the account balance can go negative and no fees will be charged. If
            False, the transaction will fail and an overdraft fee will be assessed
            if there are insufficient funds for the transaction.

        Returns:
          Whether withdrawal was successful.
        """
        if num_coins < 0:
            logging.error('Withdraw called with negative value: %s, %s', user,
                          num_coins)
            return False
        # TODO: This should really be a transaction.
        # The lock only protects against concurrent withdrawals in this process;
        # the balance check and the debit are still separate store operations.
        with self._withdraw_lock:
            balance = self.GetBalance(user)
            if balance < num_coins and not can_overdraft:
                logging.info('Overdraft: %s, %d > %d', user, num_coins, balance)
                overdraft_fee = max(self._MIN_OVERDRAFT_FEE,
                                    int(balance * self._MAX_OVERDRAFT_FEE_PERCENT))
                self.ProcessPayment(
                    user,
                    FEE_ACCOUNT,
                    overdraft_fee,
                    'Overdraft fee',
                    msg_fn,
                    can_overdraft=True)
                return False
            entry.amount = -num_coins
            tx_name = 'DEBIT %s %s' % (num_coins, user.user_id)
            self._store.RunInTransaction(
                self._BankTransaction, user, -num_coins, entry, tx_name=tx_name)
        if msg_fn:
            msg_fn(
                user, '%s withdrawn from your account. (%s)' %
                (util_lib.FormatHypecoins(num_coins), entry.details))
        # TODO: Maybe fix returns now that RunInTransaction can throw.
        return True

    def _BankTransaction(self,
                         user: user_pb2.User,
                         delta: int,
                         entry: bank_pb2.LedgerEntry,
                         tx=None):
        """Executes a hypecoin balance update, storing details in a log."""
        try:
            self._store.UpdateValue(user.user_id, self._BALANCE_SUBKEY, delta, tx)
            # Keep a bounded (20-entry) per-user transaction history.
            self._store.PrependValue(
                user.user_id,
                self._TRANSACTION_SUBKEY,
                json_format.MessageToDict(entry),
                max_length=20,
                tx=tx)
        except Exception as e:
            logging.error('BankTransaction failed: %s', entry)
            raise e
StarcoderdataPython
1741799
'''OpenGL extension VERSION.GLX_1_3

This module customises the behaviour of the
OpenGL.raw.GLX.VERSION.GLX_1_3 to provide a more
Python-friendly API

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/VERSION/GLX_1_3.txt
'''
# NOTE: autogenerated PyOpenGL wrapper module — regenerate rather than
# hand-editing anything below the imports.
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.VERSION.GLX_1_3 import *
from OpenGL.raw.GLX.VERSION.GLX_1_3 import _EXTENSION_NAME

def glInitGlx13VERSION():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
StarcoderdataPython
131997
# <reponame>toddrme2178/pyccel
# NOTE: the `#$ header ...` lines below are pyccel compiler directives, not
# ordinary comments — do not edit or reflow them.
#$ header metavar print=True

from pyccel.stdlib.internal.fitpack import bispev

#$ header function bispev2(double[:] , int, double[:], int, double[:], int, int, double[:], int, double[:], int, double[:,:], int)
def bispev2(tx, nx, ty, ny, c, kx, ky, x, mx, y, my, z, ierr):
    # Thin wrapper over FITPACK's bispev that allocates the real and integer
    # work arrays it requires (sizes per the FITPACK documentation).
    from numpy import empty
    lwrk = mx*(kx+1)+my*(ky+1)
    wrk = empty(lwrk)
    kwrk = mx+my
    iwrk = empty(kwrk,'int')
    # Evaluates the bivariate spline (tx, ty, c) of degrees kx, ky on the
    # grid x × y; results are written into z, status into ierr.
    bispev(tx, nx, ty, ny, c, kx, ky, x, mx, y, my, z, wrk, lwrk, iwrk, kwrk, ierr)

#$ header macro (z,ierr), _bispev(tx, ty, c, kx, ky, x, y) := bispev2(tx, tx.count , ty, ty.count, c, kx, ky, x, x.count, y, y.count, z, ierr)
StarcoderdataPython
4812494
from cca.Rule import Rule


class RuleCollection(object):
    """An ordered collection of Rule objects sharing an optional default check.

    All mutating methods return ``self`` so calls can be chained.
    """

    def __init__(self, default_check=None):
        self.default_check = default_check
        self.rules = []

    def add_rules(self, rules):
        """Append pre-built rules to the collection."""
        self.rules.extend(rules)
        return self

    def add(self, verb_noun, msg_or_msg_func, check=None, mutate=None):
        """Build a Rule, merging ``default_check`` with ``check`` if set."""
        if self.default_check:
            merged = dict(self.default_check)
            if check:
                merged.update(check)
            rule = Rule(verb_noun, msg_or_msg_func, merged, mutate)
        else:
            rule = Rule(verb_noun, msg_or_msg_func, check, mutate)
        self.rules.append(rule)
        return self

    def __iter__(self):
        return iter(self.rules)
StarcoderdataPython
1618171
# <reponame>Stienvdh/statrick
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import absolute_import, division, print_function

__metaclass__ = type

from ansible_collections.vyos.vyos.tests.unit.compat.mock import patch
from ansible_collections.vyos.vyos.plugins.modules import (
    vyos_interfaces,
)
from ansible_collections.vyos.vyos.tests.unit.modules.utils import (
    set_module_args,
)
from .vyos_module import TestVyosModule, load_fixture


class TestVyosFirewallInterfacesModule(TestVyosModule):
    """Unit tests for the vyos_interfaces module (merged/replaced/overridden)."""

    module = vyos_interfaces

    def setUp(self):
        """Patch config/connection/facts entry points so no device is needed."""
        super(TestVyosFirewallInterfacesModule, self).setUp()

        self.mock_get_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
        )
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
        )
        self.load_config = self.mock_load_config.start()

        self.mock_get_resource_connection_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection"
        )
        self.get_resource_connection_config = (
            self.mock_get_resource_connection_config.start()
        )

        self.mock_get_resource_connection_facts = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection"
        )
        self.get_resource_connection_facts = (
            self.mock_get_resource_connection_facts.start()
        )

        # Device "show" output is served from a fixture (see load_fixtures).
        self.mock_execute_show_command = patch(
            "ansible_collections.vyos.vyos.plugins.module_utils.network.vyos."
            "facts.interfaces.interfaces.InterfacesFacts.get_device_data"
        )
        self.execute_show_command = self.mock_execute_show_command.start()

    def tearDown(self):
        """Stop every patcher started in setUp."""
        super(TestVyosFirewallInterfacesModule, self).tearDown()
        self.mock_get_resource_connection_config.stop()
        self.mock_get_resource_connection_facts.stop()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_execute_show_command.stop()

    def load_fixtures(self, commands=None):
        """Serve canned device config from the vyos_interfaces fixture file."""

        def load_from_file(*args, **kwargs):
            return load_fixture("vyos_interfaces_config.cfg")

        self.execute_show_command.side_effect = load_from_file

    def test_vyos_interfaces_merged(self):
        # Merging onto existing interfaces only emits the changed attributes.
        set_module_args(
            dict(
                config=[
                    dict(name="bond1", description="Bond - 1", enabled=True),
                    dict(name="vtun1", description="vtun - 1", enabled=True),
                ],
                state="merged",
            )
        )
        commands = [
            "set interfaces bonding bond1 description 'Bond - 1'",
            "set interfaces openvpn vtun1 description 'vtun - 1'",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_vyos_interfaces_merged_newinterface(self):
        # Merging a not-yet-configured interface emits all of its attributes.
        set_module_args(
            dict(
                config=[
                    dict(
                        name="eth4",
                        description="Ethernet 4",
                        enabled=True,
                        speed="auto",
                        duplex="auto",
                    ),
                    dict(name="eth1", description="Configured by Ansible"),
                ],
                state="merged",
            )
        )
        commands = [
            "set interfaces ethernet eth1 description 'Configured by Ansible'",
            "set interfaces ethernet eth4 description 'Ethernet 4'",
            "set interfaces ethernet eth4 duplex 'auto'",
            "set interfaces ethernet eth4 speed 'auto'",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_vyos_interfaces_replaced_newinterface(self):
        set_module_args(
            dict(
                config=[
                    dict(
                        name="eth4",
                        description="Ethernet 4",
                        enabled=True,
                        speed="auto",
                        duplex="auto",
                    ),
                    dict(name="eth1", description="Configured by Ansible"),
                ],
                state="replaced",
            )
        )
        commands = [
            "set interfaces ethernet eth1 description 'Configured by Ansible'",
            "set interfaces ethernet eth4 description 'Ethernet 4'",
            "set interfaces ethernet eth4 duplex 'auto'",
            "set interfaces ethernet eth4 speed 'auto'",
        ]
        self.execute_module(changed=True, commands=commands)

    def test_vyos_interfaces_overridden_newinterface(self):
        # Overridden additionally removes attributes absent from the request
        # (hence the trailing delete for eth3's description).
        set_module_args(
            dict(
                config=[
                    dict(
                        name="eth4",
                        description="Ethernet 4",
                        enabled=True,
                        speed="auto",
                        duplex="auto",
                    ),
                    dict(name="eth1", description="Configured by Ansible"),
                ],
                state="overridden",
            )
        )
        commands = [
            "set interfaces ethernet eth1 description 'Configured by Ansible'",
            "set interfaces ethernet eth4 description 'Ethernet 4'",
            "set interfaces ethernet eth4 duplex 'auto'",
            "set interfaces ethernet eth4 speed 'auto'",
            "delete interfaces ethernet eth3 description",
        ]
        self.execute_module(changed=True, commands=commands)
StarcoderdataPython
99545
import unittest

# from threatnote.main import active_reports


class active_reports(unittest.TestCase):
    """Smoke tests for report listing helpers."""

    def test_list_reports(self):
        """Test that it can sum a list of integers."""
        data = [1, 2, 3]
        result = 6
        # Fixed: the original asserted `result == 6` (a tautology) and never
        # used `data`; actually exercise the summation under test.
        self.assertEqual(sum(data), result)


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
4814606
# <filename>leetcode/L00234.py
class ListNode:
    """Singly linked list node (standard LeetCode definition)."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the linked list reads the same forwards and backwards.

        O(n) time, O(1) extra space. Fixed: the original left the second half
        reversed and detached; this version restores the list before returning.
        """
        if not head or not head.next:
            return True
        # Locate the start of the second half with fast/slow pointers.
        fast = slow = head
        while fast and fast.next:
            fast, slow = fast.next.next, slow.next
        # Reverse the second half in place.
        second = self._reverse(slow)
        # Compare the two halves front-to-front.
        is_pal = True
        left, right = head, second
        while right:
            if left.val != right.val:
                is_pal = False
                break
            left, right = left.next, right.next
        # Undo the reversal so the caller's list is left intact.
        self._reverse(second)
        return is_pal

    @staticmethod
    def _reverse(node):
        """Reverse the chain starting at *node* in place; return the new head."""
        prev = None
        while node:
            node.next, prev, node = prev, node, node.next
        return prev
StarcoderdataPython
3223676
# -*- coding: utf-8 -*-
# <reponame>zahraahhajhsn/automatic-student-counter
# <filename>AuroraAppCode/screenshots.py

# Form implementation generated from reading ui file 'screenshots.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets
import pickle
import os
import cv2
import cameras


class Ui_ScreenShots(object):
    """Screen that lists saved screenshots for one camera and previews them."""

    def __init__(self):
        # Camera id selected on the previous screen, persisted via sc.dat.
        # NOTE(review): file handle from open() is never closed — pickle.load
        # reads it fully, but a `with` block would be cleaner. TODO confirm.
        self.sc = pickle.load(open("sc.dat", "rb"))

    def setupUi(self, MainWindow):
        """Build the widget tree: screenshot list (left), preview pane (right)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        MainWindow.showMaximized()
        MainWindow.setStyleSheet("QMainWindow{\n"
"\n"
"\n"
"background-image: url(images/new.png);}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setEnabled(True)
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.listofimages = QtWidgets.QListWidget(self.centralwidget)
        self.listofimages.setObjectName("listofimages")
        self.verticalLayout.addWidget(self.listofimages)
        # Back button: returns to the cameras screen (see back_button).
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("images/275665-200.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon)
        self.pushButton.setIconSize(QtCore.QSize(100, 80))
        self.pushButton.setFlat(True)
        self.pushButton.setObjectName("pushButton")
        self.pushButton.clicked.connect(lambda:self.back_button(MainWindow))
        self.verticalLayout.addWidget(self.pushButton, 0, QtCore.Qt.AlignLeft|QtCore.Qt.AlignBottom)
        self.horizontalLayout.addLayout(self.verticalLayout)
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setObjectName("widget")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.horizontalLayout.addWidget(self.widget, 0, QtCore.Qt.AlignBottom)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalWidget_4 = QtWidgets.QWidget(self.centralwidget)
        self.verticalWidget_4.setObjectName("verticalWidget_4")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.verticalWidget_4)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.verticalLayout_2.addWidget(self.verticalWidget_4, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)
        # Preview pane; filled by display_image on double-click.
        self.photoviewer = QtWidgets.QLabel(self.centralwidget)
        self.photoviewer.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.photoviewer.setText("")
        self.photoviewer.setMaximumSize(630,490)
        self.photoviewer.setObjectName("photoviewer")
        self.listofimages.itemDoubleClicked.connect(lambda:self.display_image(self.listofimages.currentItem().text()))
        self.verticalLayout_2.addWidget(self.photoviewer)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.time = QtWidgets.QLabel(self.centralwidget)
        self.time.setObjectName("time")
        self.horizontalLayout_2.addWidget(self.time)
        self.horizontalLayout_2.setStretch(0, 1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.verticalLayout_2.setStretch(0, 1)
        self.verticalLayout_2.setStretch(1, 8)
        self.verticalLayout_2.setStretch(2, 3)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.verticalLayout_6 = QtWidgets.QVBoxLayout()
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.horizontalLayout.addLayout(self.verticalLayout_6)
        self.horizontalLayout.setStretch(0, 2)
        self.horizontalLayout.setStretch(1, 1)
        self.horizontalLayout.setStretch(2, 5)
        self.horizontalLayout.setStretch(3, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.load_images(self.sc)
        self.screen=MainWindow

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (pyuic5 convention)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"> ScreenShots :</p></body></html>"))
        self.time.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">Time taken:</span></p></body></html>"))

    def load_images(self, id):
        """Populate the list widget with screenshots saved for camera `id`.

        Filenames encode metadata as `<date>-Time-<h>-<m>-<s>-persons-<n>`.
        """
        Directory = os.path.dirname(os.path.abspath(__file__))  # return directory of current file
        path = os.path.join(Directory,"%d"%(id))
        list=os.listdir(path)
        for cls in list:
            # NOTE(review): `img` is read but never used — cv2.imread here only
            # wastes time; the pixmap below loads the file itself. TODO confirm.
            img=cv2.imread(os.path.join(path,cls))
            pixmap=QtGui.QPixmap(os.path.join(path,cls))
            icon=QtGui.QIcon(pixmap)
            item=QtWidgets.QListWidgetItem()
            item.setIcon(icon)
            m=str(os.path.splitext(cls)[0])
            if not m=="Thumbs":
                l=m.split("-Time-")
                date=l[0]
                l1=l[1].split("-persons-")
                persons=l1[1]
                l2=l1[0].split("-")
                s="Date:"+date+"\n Time:"+l2[0]+":"+l2[1]+":"+l2[2]+"\n persons:"+persons
                item.setText(m)
                self.listofimages.addItem(item)

    def display_image(self, string):
        """Show the double-clicked screenshot in the preview pane with metadata."""
        # find directory of images
        Directory = os.path.dirname(os.path.abspath(__file__))  # return directory of current file
        path = os.path.join(Directory,"%d"%self.sc)
        pixmap=QtGui.QPixmap("%s/%s.png"%(path,string))
        l = string.split("-Time-")
        date = l[0]
        l1 = l[1].split("-persons-")
        persons = l1[1]
        l2 = l1[0].split("-")
        s = "Date:" + date + "\n Time:" + l2[0] + ":" + l2[1] + ":" + l2[2] + "\n persons:" + persons
        self.photoviewer.setPixmap(pixmap)
        self.time.setText("<strong>Date:"+s+"</strong>")

    def back_button(self, MainWindow):
        """Reset the camera selection and return to the cameras screen."""
        pickle.dump("no",open("sc.dat","wb"))
        self.window = QtWidgets.QMainWindow()
        self.ui=cameras.Ui_CameraMainWindow()
        self.ui.setupUi(self.window)
        self.window.show()
        self.screen.close()


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_ScreenShots()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
StarcoderdataPython
162551
# <gh_stars>0
# Filename: BotGlobals.py
# Author: mfwass
# Date: January 8th, 2017
#
# The Legend of Pirates Online Software
# Copyright (c) The Legend of Pirates Online. All rights reserved.
#
# All use of this software is subject to the terms of the revised BSD
# license. You should have received a copy of this license along
# with this source code in a file named "LICENSE."

"""
The BotGlobals class will serve as a central location of all global values in the TLOPO Discord Bot project.
"""

APP_DESCRIPTION_UPSTREAM = "Discord bot by TLOPO. <3 https://github.com/TheLegendofPiratesOnline/discord-bot"
APP_DESCRIPTION_FORK = "Dynasty of Persia fork. https://github.com/jamebus/discord-bot"
APP_DESCRIPTION = "%s\n%s" % (APP_DESCRIPTION_UPSTREAM, APP_DESCRIPTION_FORK)

LOCAL_SETTINGS_FILENAME = 'local_settings.json'
SETTINGS_FILENAME = 'settings.json'

# API Docs: https://tlopo.com/docs/
API_URLS = {
    'news_feed':'https://api.tlopo.com/news/feed/',
    'news_notification':'https://api.tlopo.com/news/notification',
    'shards':'https://api.tlopo.com/shards',
    'system_status':'https://api.tlopo.com/system/status'
}

# Polling tasks: `time` is the poll interval in seconds for each endpoint.
BOT_TASKS = {
    'task_shards': {
        'time': 25.0,
        'api_url': API_URLS.get('shards')
    },
    'task_system_status': {
        'time': 25.0,
        'api_url': API_URLS.get('system_status')
    },
    'task_news_feed': {
        'time': 25.0,
        'api_url': API_URLS.get('news_feed')
    },
    'task_news_notification': {
        'time': 25.0,
        'api_url': API_URLS.get('news_notification')
    }
}

# Maps shard (game server) base channel ids to their display names.
BASE_CHANNEL_TO_NAME = {
    '401000000': 'Abassa',
    '402000000': 'Andaba',
    '403000000': 'Bequermo',
    '404000000': 'Cortos',
    '405000000': 'Exuma',
    '406000000': 'Fragilles',
    '407000000': 'Juntos',
    '408000000': 'Kokojillo',
    '409000000': 'Levanta',
    '410000000': 'Nocivo',
    '411000000': 'Sabada',
    '412000000': 'Valor'
}

# Per-shard status codes are bit flags (can be combined)...
STATUS_ALIVE_SRV = 1
STATUS_MESSAGE_SRV = 2
STATUS_UPDATE_SRV = 4
STATUS_ERROR_SRV = 8
STATUS_FATAL_SRV = 16
STATUS_UNKNOWN_SRV = 32

SRV_CODE_TO_STATUS = {
    STATUS_ALIVE_SRV: "STATUS_ALIVE",
    STATUS_MESSAGE_SRV: "STATUS_MESSAGE",
    STATUS_UPDATE_SRV: "STATUS_UPDATE",
    STATUS_ERROR_SRV: "STATUS_ERROR",
    STATUS_FATAL_SRV: "STATUS_FATAL",
    STATUS_UNKNOWN_SRV: "STATUS_UNKNOWN"
}

# ...while global status codes are a plain enumeration (mutually exclusive).
STATUS_ALIVE_GLOB = 1
STATUS_MESSAGE_GLOB = 2
STATUS_UPDATE_GLOB = 3
STATUS_ERROR_GLOB = 4
STATUS_FATAL_GLOB = 5
STATUS_UNKNOWN_GLOB = 6

GLOB_CODE_TO_STATUS = {
    STATUS_ALIVE_GLOB: "STATUS_ALIVE",
    STATUS_MESSAGE_GLOB: "STATUS_MESSAGE",
    STATUS_UPDATE_GLOB: "STATUS_UPDATE",
    STATUS_ERROR_GLOB: "STATUS_ERROR",
    STATUS_FATAL_GLOB: "STATUS_FATAL",
    STATUS_UNKNOWN_GLOB: "STATUS_UNKNOWN"
}
StarcoderdataPython
47243
from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound
import collections
import os
import yaml

from os.path import dirname, basename
from .env import environ
from ..log import logger


def reader(fn):
    """Render the Jinja2 template file *fn* with `environ`, then parse it as YAML.

    Returns the parsed data, or None when the template is missing or any other
    error occurs (errors are logged, never raised).
    """
    logger.debug('loading', f=fn)
    try:
        tmplenv = Environment(loader=FileSystemLoader(dirname(fn)))
        tmpl = tmplenv.get_template(basename(fn))
        part = tmpl.render(**environ)
        # Fixed: yaml.load() without a Loader is a TypeError on PyYAML >= 6 and
        # was the unsafe default on older versions (arbitrary object
        # construction). FullLoader keeps full-YAML parsing while refusing
        # arbitrary Python object tags; switch to SafeLoader if config files
        # may come from untrusted sources.
        data = yaml.load(part, Loader=yaml.FullLoader)
        return data
    except TemplateNotFound:
        logger.warn('Template not found', file=fn)
    except Exception:
        logger.exception('config')
StarcoderdataPython
1626247
# <gh_stars>0
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By


def setup(launching, profile):
    """Start a Chrome driver and log into Google.

    Args:
        launching: when True, also submit the password from `profile`.
        profile: dict with at least "password" (and checkout fields below).

    Returns:
        The logged-in WebDriver instance.
    """
    options = webdriver.ChromeOptions()
    driver = webdriver.Chrome(chrome_options=options)
    driver.get("https://accounts.google.com/signin/v2/identifier?hl=en&passive=true&continue=https%3A%2F%2F"
               "www.google.com%2F&flowName=GlifWebSignIn&flowEntry=ServiceLogin")
    driver.find_element_by_name("identifier").send_keys("<EMAIL>"+Keys.RETURN)
    time.sleep(1)
    if launching:
        driver.find_element_by_name("password").send_keys(profile["password"]+Keys.RETURN)
    return driver
# setup() method initiates web driver, sets headless or not, and logs into google chrome
# sleep_time is the amount of seconds user wants to type in their google password


def add_to_cart(driver, item):
    """Select item number `item` from the product grid and open the checkout page."""
    WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, "inner-article")))
    driver.find_elements_by_class_name("inner-article")[item].click()
    WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.NAME, "commit")))
    driver.find_element_by_name("commit").click()
    WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.XPATH, "//*[contains(text(), 'checkout')]")))
    time.sleep(1)
    driver.get("https://www.supremenewyork.com/checkout")
# add_to_cart() selects item from list of "inner-article" objects then loads checkout page


def checkout(driver, profile):
    """Fill the checkout form: billing details first, then card details."""
    checkout_fields = driver.find_elements_by_tag_name("input")
    billing_info(driver, profile)
    card_info(driver, checkout_fields, profile)


def billing_info(driver, profile):
    """Fill out the billing portion of the checkout page (name only, WIP)."""
    name = driver.find_element_by_id("order_billing_name")
    name.send_keys(profile["name"])
    # NOTE(review): this ~11-day sleep is clearly a debugging leftover that
    # blocks checkout; the remaining fields below were never re-enabled.
    # Left as-is to preserve behaviour — confirm intent before removing.
    time.sleep(1000000)
    # Disabled field fills (indices into checkout_fields):
    # 2=name, 3=email, 4=phone, 5=address, 7=zipcode, 8=city.
    # Phone doesn't enter reliably, which is why address (5) was sent first.


def card_info(driver, checkout_fields, profile):
    """Fill out the card number, expiry month/year on the checkout page."""
    card = checkout_fields[13]
    num = profile["card#"]
    # Fixed: replaces 16 copy-pasted single-character slice calls
    # (num[0:1] ... num[15:]) with a loop; keystrokes are identical for the
    # expected 16-digit card number.
    for digit in num:
        card.send_keys(digit)
    driver.find_elements_by_tag_name('ins')[1].click()
    driver.find_element_by_id("credit_card_month").click()
    # THESE ARE NOT REGULAR STRINGS FOR YEAR AND MONTH: profile["month"] and
    # profile["year"] are XPath expressions selecting the dropdown options.
    driver.find_element_by_xpath(profile["month"]).click()
    driver.find_element_by_id("credit_card_year").click()
    driver.find_element_by_xpath(profile["year"]).click()
    # CVV entry (checkout_fields[14]) is still disabled in the original.
StarcoderdataPython
1759227
# -*- coding: utf-8 -*-
"""
@author: WZM
@time: 2021/1/2 19:50
@function: classify the images in a folder and write the results to a txt file
"""
import os
import sys
import numpy as np
import torch
import argparse
from model_timer import Timer
# from net.ouy_net import Network
from evaluate_model import evaluate_model
import scipy.io as sio
import time
import global_models as gm
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, cohen_kappa_score
import seaborn as sns
import matplotlib.pyplot as plt


def load_net(fname, net):
    """Load weights stored in an HDF5 file `fname` into `net` in place."""
    import h5py
    h5f = h5py.File(fname, mode='r')
    for k, v in net.state_dict().items():
        param = torch.from_numpy(np.asarray(h5f[k]))
        v.copy_(param)


def get_args():
    """Parse the command-line arguments for the prediction run."""
    parser = argparse.ArgumentParser(description="get test args...")
    parser.add_argument("-test_path", help="test image folder", type=str)
    parser.add_argument("-test_txt", help="test txt file", type=str, default="test.txt")
    parser.add_argument("-model", help="model", type=str, default="ResNet34")
    parser.add_argument("-pretrained", help="whether use pretrained model", type=str, default='False')
    parser.add_argument("-use_spp", help="whether use spp", type=str, default='False')
    parser.add_argument("-best_name", help="best name", type=str)
    parser.add_argument("-dataloader", help="dataloader", type=str)
    parser.add_argument("-test_batchsize", help="test batchsize", type=int, default=8)
    parser.add_argument("-draw", help="whether draw pic", type=str, default='False')
    return parser.parse_args()


def model_test(nIndex, model_name, test_loader, pretrained, use_spp, test_path):
    """Run inference over `test_loader` and write per-image predicted labels.

    Results are written to `<test_path>/predict_result.txt` as
    "<filename> <label>" lines.

    Args:
        nIndex: model identifier passed to Network.
        model_name: path of the trained weight file to load.
        test_loader: DataLoader yielding (im, img, dem, file) batches.
        pretrained: whether to use the pretrained network variant.
        use_spp: whether the network uses spatial pyramid pooling.
        test_path: output folder for the prediction txt file.
    """
    # `with` ensures the result file is closed even if inference fails.
    with open(os.path.join(test_path, 'predict_result.txt'), 'w') as fw_predict:
        if pretrained:
            from net.ouy_net_pretrained import Network
        else:
            from net.ouy_net import Network
        model_path = model_name
        print('model_path', model_path)
        net = Network(nIndex, use_spp)
        trained_model = os.path.join(model_path)
        load_net(trained_model, net)
        device = torch.device('cuda:0')
        if torch.cuda.is_available():
            net = net.to(device)
        net.eval()

        # Fixed: dropped dead locals from the original (`count`, `total`,
        # `alable`) that were initialized but never used.
        aprelable = []
        all_loader_length = len(test_loader)
        for i, blob in enumerate(test_loader):
            print("正在处理第 %d 个batch, 共 %d 个" % (i, all_loader_length))
            im_data = blob[0]
            dem_data = blob[2]
            img_data = blob[1]
            file_data = blob[3]
            # NOTE(review): `index` is hard-coded to 61 regardless of nIndex —
            # confirm this is intentional for the deployed network variant.
            index = 61
            pre_label = net(im_data, dem_data, img_data, index)
            pre_label = pre_label.data.cpu().numpy()
            label = pre_label.argmax(axis=1).flatten()
            num = len(label)
            for j in range(0, num):
                fw_predict.write(os.path.split(file_data[j])[1])
                fw_predict.write(" ")
                fw_predict.write(str(label[j]))
                fw_predict.write("\n")
                aprelable.append(label[j])

        label_pred = np.array(aprelable)
        print('预测label:' + str(label_pred))


if __name__ == '__main__':
    args = get_args()
    test_path = args.test_path
    test_name = args.test_txt
    nIndex = args.model
    pretrained = args.pretrained == 'True'
    use_spp = args.use_spp == 'True'
    best_name = args.best_name
    test_batchsize = args.test_batchsize
    dataloader = args.dataloader
    # (The -draw flag is accepted for CLI compatibility but currently unused.)

    from data_loader.ouy_dataloader_64_predict import TensorDataset
    test_dataset = TensorDataset(test_path, test_name)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=test_batchsize,
                                              pin_memory=True, num_workers=8)
    model_test(nIndex, best_name, test_loader, pretrained, use_spp, test_path)
StarcoderdataPython
3215524
#!/usr/bin/env python
import re
import json
import ipaddress


class IfConfig(object):
    """Parse the text output of `ifconfig` into per-interface field dicts."""

    # Field names extracted from each interface block, in output order.
    _KEYS = ('name', 'link_encap', 'hardware_address', 'inet', 'broadcast',
             'mask', 'inet6', 'inet6_local', 'mtu', 'metric', 'rx_packets',
             'tx_packets', 'collisions', 'txqueuelen', 'rx_bytes', 'tx_bytes')

    # One regex per interface block; every field after the encapsulation
    # type is optional, and unmatched groups default to ''.
    _BLOCK_RE = re.compile(
        r'^(?P<name>\S+)\s+'
        r'Link encap:(?P<link_encap>\S+(\s\S+)?)'
        r'(\s+HWaddr\s+(?P<hardware_address>\S+))?'
        r'(\s+inet addr:(?P<inet>\S+))?'
        r'(\s+Bcast:(?P<broadcast>\S+))?'
        r'(\s+Mask:(?P<mask>\S+))?'
        r'(\s+inet6 addr: (?P<inet6>\S+)\s+Scope:(Global|Host))?'
        r'(\s+inet6 addr: (?P<inet6_local>\S+)\s+Scope:Link)?'
        r'((\s|\w)+MTU:(?P<mtu>\S+))?'
        r'(\s+Metric:(?P<metric>\S+))?'
        r'(\s+RX packets:(?P<rx_packets>\S+)\s+errors:\d+ dropped:\d+ overruns:\d+ frame:\d+)?'
        r'(\s+TX packets:(?P<tx_packets>\S+)\s+errors:\d+ dropped:\d+ overruns:\d+ carrier:\d+)?'
        r'(\s+collisions:(?P<collisions>\S+))?'
        r'(\s+txqueuelen:(?P<txqueuelen>\S+))?'
        r'(\s+RX bytes:(?P<rx_bytes>\S+)\s+\((\d|\s|\.|\w)+\))?'
        r'(\s+TX bytes:(?P<tx_bytes>\S+)\s+\((\d|\s|\.|\w)+\)?)?',
        re.MULTILINE | re.IGNORECASE)

    def __init__(self, output):
        """
        :param output: ifconfig text output
        """
        # Interface blocks are separated by blank lines.
        self.interfaces = [
            self._parse_block(chunk)
            for chunk in output.split('\n\n')
            if chunk.strip()
        ]

    def _parse_block(self, output):
        """Parses a single ifconfig interface block into a field dict."""
        mo = self._BLOCK_RE.search(output)
        if not mo:
            return {}
        groups = mo.groupdict('')
        return {key: groups[key] for key in self._KEYS if key in groups}

    def to_python(self):
        """Returns python list-of-dicts representation of ifconfig output."""
        return self.interfaces

    def to_json(self, **kwargs):
        """Returns json representation of ifconfig output."""
        return json.dumps(self.interfaces, **kwargs)
StarcoderdataPython
4837938
import os

# Image extension used by each supported dataset's output tree; the index
# file path is otherwise uniform across datasets, so the original 7-branch
# if/elif chain collapses into this lookup table.
_IMG_TYPES = {
    'YUD': 'jpg',
    'ScanNet': 'png',
    'SceneCityUrban3D': 'png',
    'SUNCG': 'png',
    'ScanNet_aug': 'jpg',
    'SceneCityUrban3D_aug': 'jpg',
    'SUNCG_aug': 'png',
}


def _read_error_images(error_file):
    """Return the '<dir>/<file>' image paths listed in *error_file*.

    Each line is expected to end with a '<dir>/<file>' path; only the last
    two '/'-separated components are kept. Returns [] when the file does
    not exist.
    """
    images = []
    if os.path.isfile(error_file):
        with open(error_file, 'r') as op:
            for line in op:
                line_list = line.strip().split('/')
                images.append(line_list[-2] + '/' + line_list[-1])
    return images


def run(dataset_name, idx, save_op):
    """Write to *save_op* every image from the dataset's index file that was
    neither produced in the output tree nor recorded in an error log.

    :param dataset_name: one of the keys of ``_IMG_TYPES``
    :param idx: index-file / error-log number
    :param save_op: writable file object receiving one image name per line
    :raises ValueError: for an unknown dataset name
    """
    if dataset_name not in _IMG_TYPES:
        raise ValueError('No such dataset!')
    img_type = _IMG_TYPES[dataset_name]
    index_file = '/n/fs/vl/xg5/Datasets/' + dataset_name + '/label/index_' + str(idx) + '.txt'

    data_path = '/n/fs/vl/xg5/workspace/baseline/horizon_detection/dataset/' \
                + dataset_name + '/output'
    error_file = '/n/fs/vl/xg5/workspace/baseline/horizon_detection/logs/' \
                 + dataset_name + '_' + str(idx) + '_error.txt'
    error_file2 = '/n/fs/vl/xg5/workspace/baseline/horizon_detection/error_logs/' \
                  + dataset_name + '.txt'

    # A set makes the per-line membership test below O(1) instead of O(n).
    file_list = set()
    file_list.update(_read_error_images(error_file2))
    file_list.update(_read_error_images(error_file))

    # Every produced output counts as '<dir>/<name>.<ext>'.
    for dirs in os.listdir(data_path):
        dir_path = os.path.join(data_path, dirs)
        for sub_dirs in os.listdir(dir_path):
            file_list.add(dirs + '/' + sub_dirs + '.' + img_type)

    # Anything in the index but not accounted for is a missing case.
    with open(index_file, 'r') as op:
        for line in op:
            image_name = line.split()[0]
            if image_name not in file_list:
                save_op.write(image_name + '\n')


if __name__ == '__main__':
    # Index-file counts per dataset: YUD: 1, ScanNet: 265, SceneCityUrban3D: 23, SUNCG: 569
    data_list = ['ScanNet_aug', 'SUNCG_aug', 'SceneCityUrban3D_aug']
    num = 30
    for data_name in data_list:
        save_file = 'error_case/' + data_name + '.txt'
        with open(save_file, 'w') as save_op:
            for idx in range(num):
                run(data_name, idx, save_op)
StarcoderdataPython
1777466
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds a 'language' column to ReportSettings: a two-letter code
    # (max_length=2) defaulting to 'en'.

    # Must be applied after the migration that added 'info_blocks'.
    dependencies = [
        ('tfg_webapp', '0002_reportsettings_info_blocks'),
    ]

    operations = [
        migrations.AddField(
            model_name='reportsettings',
            name='language',
            field=models.CharField(default='en', max_length=2),
        ),
    ]
StarcoderdataPython
1666411
<filename>intro/part07-11_screen_time/src/screen_time.py
# Write your solution here
# Interactive exercise: collect daily screen-time entries ("TV computer mobile"
# minutes) for a range of dates and write a summary plus the raw entries to a
# user-named file.
from datetime import datetime, time, timedelta

filename = input("Filename: ")
with open(filename, "w") as time_file:
    date_input = input("Starting date: ")
    no_of_day = int(input("How many days: "))
    # Date is entered as day.month.year
    date_list = date_input.split(".")
    # get the start date
    start_date = datetime(int(date_list[2]), int(date_list[1]), int(date_list[0]))
    # get the last date (inclusive range, hence days - 1)
    last_date =start_date + timedelta(days=no_of_day-1)
    print("Please type in screen time in minutes on each day (TV computer mobile):")
    input_list = []
    total = 0
    for i in range(0, no_of_day):
        # input the screen time
        # NOTE(review): the prompt interpolates a full datetime
        # ("YYYY-MM-DD 00:00:00"); the course spec may expect
        # strftime("%d.%m.%Y") here — confirm against the checker.
        scr_time = input(f"Screen time {start_date + timedelta(days=i)}: ")
        # calculate the total minutes of screen time for this day
        scr = scr_time.split(" ")
        scr_list = [int(k) for k in scr]
        total += sum(scr_list)
        # add string of date and screen time to a list, e.g. "24.06.2020: 60/120/0"
        screen_time_str = scr_time.replace(" ", "/")
        date = (start_date + timedelta(days=i)).strftime("%d.%m.%Y")
        input_list.append(f"{date}: {screen_time_str}")
    # add summary and per-day data to the file
    fDate_str = start_date.strftime("%d.%m.%Y")
    lDate_str = last_date.strftime("%d.%m.%Y")
    time_file.write(f"Time period: {fDate_str}-{lDate_str}\n")
    time_file.write(f"Total minutes: {total}\n")
    time_file.write(f"Average minutes: {total/no_of_day}\n")
    for i in range(len(input_list)):
        time_file.write(f"{input_list[i]}\n")
    # NOTE(review): message hardcodes "late_june.txt" even though the user
    # chose the filename — confirm whether the checker expects this exact text.
    print("Data stored in file late_june.txt")
StarcoderdataPython
3310422
import json
import keras
import numpy as np
import keras.backend as K
from data.vocab import TextEncoder
from transformer.embedding import Embedding
from keras.layers import Conv1D, Dropout, Add, Input
from transformer.layers import MultiHeadAttention, Gelu, LayerNormalization


class MultiHeadSelfAttention:
    """Self-attention sub-block: QKV projection -> multi-head attention -> output projection.

    Not a Keras layer itself; it composes Keras layers and is applied via __call__.
    """

    def __init__(self, n_state: int, n_head: int, attention_dropout: float,
                 use_attn_mask: bool, layer_id: int) -> None:
        # n_state must split evenly across the attention heads.
        assert n_state % n_head == 0
        # Single 1x1 conv producing concatenated Q, K, V (hence 3 * n_state).
        self.c_attn = Conv1D(3 * n_state, 1, name='layer_{}/c_attn'.format(layer_id))
        self.attn = MultiHeadAttention(n_head, n_state, attention_dropout, use_attn_mask,
                                       name='layer_{}/self_attention'.format(layer_id))
        self.c_attn_proj = Conv1D(n_state, 1, name='layer_{}/c_attn_proj'.format(layer_id))

    def __call__(self, x, mask):
        output = self.c_attn(x)
        # The attention layer takes [qkv, mask] when masking is enabled.
        output = self.attn(output) if mask is None else self.attn([output, mask])
        return self.c_attn_proj(output)


class PositionWiseFF:
    """Position-wise feed-forward sub-block: expand to d_hid, GELU, project back to n_state."""

    def __init__(self, n_state: int, d_hid: int, layer_id: int) -> None:
        self.c_fc = Conv1D(d_hid, 1, name='layer_{}/c_fc'.format(layer_id))
        self.activation = Gelu(name='layer_{}/gelu'.format(layer_id))
        self.c_ffn_proj = Conv1D(n_state, 1, name='layer_{}/c_ffn_proj'.format(layer_id))

    def __call__(self, x):
        output = self.activation(self.c_fc(x))
        return self.c_ffn_proj(output)


class EncoderLayer:
    """One transformer encoder layer: self-attention and FFN, each wrapped in
    dropout -> residual add -> layer norm (post-norm arrangement)."""

    def __init__(self, n_state: int, n_head: int, d_hid: int, residual_dropout: float,
                 attention_dropout: float, use_attn_mask: bool, layer_id: int, **kwargs) -> None:
        self.attention = MultiHeadSelfAttention(n_state, n_head, attention_dropout,
                                                use_attn_mask, layer_id)
        self.drop1 = Dropout(residual_dropout, name='layer_{}/ln_1_drop'.format(layer_id))
        self.add1 = Add(name='layer_{}/ln_1_add'.format(layer_id))
        self.ln1 = LayerNormalization(name='layer_{}/ln_1'.format(layer_id))
        self.ffn = PositionWiseFF(n_state, d_hid, layer_id)
        self.drop2 = Dropout(residual_dropout, name='layer_{}/ln_2_drop'.format(layer_id))
        self.add2 = Add(name='layer_{}/ln_2_add'.format(layer_id))
        self.ln2 = LayerNormalization(name='layer_{}/ln_2'.format(layer_id))

    def __call__(self, x, mask):
        a = self.attention(x, mask)
        n = self.ln1(self.add1([x, self.drop1(a)]))
        f = self.ffn(n)
        return self.ln2(self.add2([n, self.drop2(f)]))


def load_openai_transformer(path: str = './openai/model/', use_attn_mask: bool = True,
                            use_one_embedding_dropout: bool = False,
                            max_len: int = 512) -> keras.Model:
    """Build the transformer and load the released OpenAI GPT weights from *path*.

    Expects 'params_shapes.json' and 'params_0.npy' .. 'params_9.npy' in *path*
    (the original checkpoint layout). Returns the weight-initialized model.
    """
    with open(path + 'params_shapes.json') as f:
        shapes = json.load(f)
    # The checkpoint is stored as 10 flat shards; rebuild per-parameter arrays.
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
    # Truncate positional embeddings to the requested sequence length (cap 512).
    init_params[0] = init_params[0][:min(512, max_len)]
    # add special token embedding to token embedding
    init_params[1] = np.concatenate(
        (init_params[1],
         np.random.randn(TextEncoder.SPECIAL_COUNT, 768).astype(np.float32) * 0.02), axis=0)
    init_params = [np.zeros((TextEncoder.NUM_SEGMENTS, 768)).astype(np.float32)] + init_params  # segment embedding
    # 768/12/12 etc. are the published GPT hyper-parameters; vocab_size excludes specials
    # (create_transformer adds SPECIAL_COUNT itself).
    model = create_transformer(embedding_dim=768, embedding_dropout=0.1, vocab_size=40478,
                               max_len=min(512, max_len), use_attn_mask=use_attn_mask,
                               trainable_pos_embedding=True, num_heads=12, num_layers=12,
                               use_one_embedding_dropout=use_one_embedding_dropout,
                               d_hid=4 * 768, attention_dropout=0.1, residual_dropout=0.1)
    model.set_weights(init_params)
    return model


def create_transformer(embedding_dim: int = 768, embedding_dropout: float = 0.1,
                       vocab_size: int = 30000, max_len: int = 512,
                       trainable_pos_embedding: bool = True, num_heads: int = 12,
                       num_layers: int = 12, attention_dropout: float = 0.1,
                       use_one_embedding_dropout: bool = False, d_hid: int = 768 * 4,
                       residual_dropout: float = 0.1,
                       use_attn_mask: bool = True) -> keras.Model:
    """Build an untrained transformer encoder as a Keras functional model.

    Inputs are token ids, segment ids and position ids (each (batch, max_len));
    with use_attn_mask a fourth (batch, 1, max_len, max_len) mask input is added.
    Output is the final encoder hidden states.
    """
    # Reserve embedding rows for the special tokens.
    vocab_size += TextEncoder.SPECIAL_COUNT
    tokens = Input(batch_shape=(None, max_len), name='token_input', dtype='int32')
    segment_ids = Input(batch_shape=(None, max_len), name='segment_input', dtype='int32')
    pos_ids = Input(batch_shape=(None, max_len), name='position_input', dtype='int32')
    attn_mask = Input(batch_shape=(None, 1, max_len, max_len), name='attention_mask_input',
                      dtype=K.floatx()) if use_attn_mask else None
    inputs = [tokens, segment_ids, pos_ids]
    embedding_layer = Embedding(embedding_dim, embedding_dropout, vocab_size, max_len,
                                trainable_pos_embedding, use_one_embedding_dropout)
    x = embedding_layer(inputs)
    # Stack the encoder layers; each gets a unique layer_id for weight naming.
    for i in range(num_layers):
        x = EncoderLayer(embedding_dim, num_heads, d_hid, residual_dropout,
                         attention_dropout, use_attn_mask, i)(x, attn_mask)
    inputs = inputs + ([attn_mask] if use_attn_mask else [])
    return keras.Model(inputs=inputs, outputs=x, name='Transformer')
StarcoderdataPython
154371
<filename>gear/prfanalyze/base/run.py
#! /usr/bin/env python
# Flywheel gear entry point: read the gear config, locate the BOLD runs and
# stimulus files in the input BIDS tree, invoke /solve.sh per run, and convert
# the solver's estimates.json into NIfTI outputs under the derivatives tree.
from __future__ import print_function

import json, os, sys, csv, pimms
import nibabel as nib

# Fixed Flywheel gear mount points.
output_dir = '/flywheel/v0/output'
input_dir = '/flywheel/v0/input'
config_file = os.path.join(input_dir, 'config.json')
bids_dir = os.path.join(input_dir, 'BIDS')
# bids_link = '/running/out'
# Fix so that it works in Singularity
bids_link = os.path.join(output_dir,'out')
opts_file = os.path.join(bids_link, 'options.json')
verbose = os.environ.get('VERBOSE', '0').strip() == '1'
force = os.environ.get('FORCE', '0').strip() == '1'
bids_fields = os.environ.get('FIELDS', 'task-prf_acq-normal')
solver_name = os.environ.get('PRF_SOLVER', None)
# Split "task-prf_acq-normal" into key/value pairs and rebuild without 'acq'
# (events files are named without the acquisition entity).
bids_fieldmap = [ss.split('-') for ss in bids_fields.split('_')]
bids_fields_noacq = '_'.join(['-'.join(ff) for ff in bids_fieldmap if ff[0] != 'acq'])
if bids_fields != '': bids_fields = '_' + bids_fields
if bids_fields_noacq != '': bids_fields_noacq = '_' + bids_fields_noacq

# check for a separate config file
if len(sys.argv) > 1: config_file = sys.argv[1]

def die(*args):
    # Print a message and abort the gear with a nonzero exit status.
    print(*args)
    sys.exit(1)
def note(*args):
    # Print only when VERBOSE=1.
    if verbose: print(*args)
    return None

if not os.path.isdir(bids_dir): die('no BIDS directory found!')

try:
    with open(config_file, 'r') as fl:
        conf = json.load(fl)
except Exception:
    die("Could not read config.json!")

if not pimms.is_map(conf): die("config.json must contain a single dictionary")
if 'subjectName' not in conf or not pimms.is_str(conf['subjectName']):
    die('config.json does not contain a valid "subjectName" entry')
if 'sessionName' not in conf or not pimms.is_str(conf['sessionName']):
    die('config.json does not contain a valid "sessionName" entry')
# NOTE(review): synthQ ends up True only when the key is ABSENT; when
# 'isPRFSynthData' is present its value is ignored and synthQ is False.
# Confirm this is intended — it looks inverted relative to the key's name.
if 'isPRFSynthData' not in conf:
    note('Warning: "isPRFSynthData" not found in config JSON; assuming True.')
    conf['isPRFSynthData'] = True
    synthQ = True
else:
    synthQ = False
if 'solver' in conf: solver_name = conf['solver']
# now that we've read in the solver from the config we can process it.
if solver_name is None:
    print("WARNING: The PRF_SOLVER environment variable is not set; using 'base'")
    solver_name = 'base'
if not solver_name.startswith('prfanalyze-'): solver_name = 'prfanalyze-' + solver_name

# we just have to find the relevant files then echo them for the calling script; in the case of the
# config file, we write out a new one in the /running directory
sub = conf['subjectName']
ses = conf['sessionName']
opts = conf.get('options', {})

note("Preparing solver \"%s\":" % (solver_name,))
note("  Subject: %s" % sub)
note("  Session: %s" % ses)
note("  Options: %s" % (opts,))

# find the relevant files in the BIDS dir; first, the BOLD image is easy to find:
func_dir = os.path.join(bids_dir, 'sub-' + sub, 'ses-' + ses, 'func')
# we get the stimulus filename from the events file:
events_file = os.path.join(func_dir, 'sub-%s_ses-%s%s_events.tsv' % (sub, ses, bids_fields_noacq))
try:
    with open(events_file, 'r') as fl:
        rr = csv.reader(fl, delimiter='\t', quotechar='"')
        l0 = next(rr)
        if 'stim_file' not in l0: die('stim_file must be a column in the events file (%s)' % events_file)
        rows = [{k:v for (k,v) in zip(l0,r)} for r in rr]
except Exception:
    die("Could not load events file: %s" % events_file)
# All event rows must reference the same stimulus file.
stim_file = set([r['stim_file'] for r in rows])
if len(stim_file) != 1: die("Multiple stimulus files found in events file (%s)" % events_file)
stim_file = os.path.join(bids_dir, 'stimuli', list(stim_file)[0])
if not os.path.isfile(stim_file): die("Stimulus file (%s) not found" % stim_file)

# Finally, we need to find the output directory (in the OUTPUT directory's BIDS directory)
# To figure out how we name the directory, we use the PRF_SOLVER environment variable
outbids_dir = os.path.join(output_dir, 'BIDS', 'derivatives', solver_name, 'sub-'+sub, 'ses-'+ses)
# if this directory already exists, it's safe to assume that we need a temporary directory (unless
# the force option is invoked):
if not force and os.path.isdir(outbids_dir):
    outbids_dir = None
    for k in range(1000):
        p = os.path.join(output_dir, 'BIDS', 'derivatives', solver_name + '_temp%03d' % k)
        if not os.path.isdir(p):
            # we've found it!
            outbids_dir = os.path.join(p, 'sub-'+sub, 'ses-'+ses)
            print("WARNING: Using temporary output directory: %s" % p)
            break
    if outbids_dir is None: die("Could not find a valid temporary directory!")
try:
    if not os.path.isdir(outbids_dir): os.makedirs(outbids_dir)
except Exception:
    die("Error creating output BIDS directory: %s" % outbids_dir)
note("Output BIDS directory: %s" % outbids_dir)

# # we make a symlink from the output bids dir to /running
# try:
#     if os.path.islink(bids_link): os.remove(bids_link)
#     os.symlink(outbids_dir, bids_link)
# except Exception:
#     die("Could not create output link: %s" % bids_link)
# Noahs solution didnt work, there should be a symlink for run.sh
##
# we make a symlink from the output bids dir to /running
try:
    if os.path.islink(bids_link): os.remove(bids_link)
    os.symlink(outbids_dir, bids_link)
except Exception:
    die("Could not create output link: %s" % bids_link)

# dump the options file in the output directory
with open(opts_file, 'w') as fl:
    json.dump(opts, fl)

# We may have any number of runs, find them all:
bold_prefix = 'sub-%s_ses-%s%s_run-' % (sub, ses, bids_fields)
bold_suffix = '_bold.nii.gz'
(pn,sn) = (len(bold_prefix), len(bold_suffix))
processed = 0
for flnm in os.listdir(func_dir):
    if not (flnm.startswith(bold_prefix) and flnm.endswith(bold_suffix)): continue
    # The run id is whatever sits between the prefix and suffix.
    runid = flnm[pn:-sn]
    bold_image = os.path.join(func_dir, flnm)
    # if the data are from prfsynth, we also need the stimulus json file, which is in the
    # derivatives directory
    if synthQ:
        stimjs_file = os.path.join(
            bids_dir, 'derivatives', 'prfsynth', 'sub-'+sub, 'ses-'+ses,
            'sub-%s_ses-%s%s_run-%s_bold.json' % (sub, ses, bids_fields, runid))
        if not os.path.isfile(stimjs_file): die("Stimulus JSON file (%s) not found" % stimjs_file)
    else:
        print("[base/run.py] Using real data, not coming from prfsynthesize.")
        # otherwise, we assume that the required options are in the config.json file
        if 'stimulus' not in conf:
            die("In config.json, isPRFSynthData is False, but no stimulus settings were given.")
        stim = conf['stimulus']
        if not isinstance(stim, dict): die('In config.json, stimulus data must be a dictionary')
        stim['isPRFSynthData'] = False
        # make a temporary file
        # NOTE(review): the file descriptor `fl` from mkstemp is never closed
        # (the commented-out close(fl) below was dropped), so each real-data
        # run leaks one fd.
        import tempfile
        (fl, stimjs_file) = tempfile.mkstemp(suffix='.json', text=True)
        print("[base/run.py] This is the temp file with stim info: ")
        print(stimjs_file)
        print("[base/run.py] This is the content: ")
        print(stim)
        with open(stimjs_file, 'w') as json_data:
            json.dump(stim, json_data)
        # json.dump(stim, fl)
        # close(fl)
    # okay, we have the files; run the solver script!
    # Classic fork/exec: the child becomes /solve.sh, the parent waits for it.
    try:
        pid = os.fork()
        if pid == 0:
            os.execl("/solve.sh", "/solve.sh", opts_file, bold_image, stim_file, stimjs_file, outbids_dir)
        else:
            note("Beginning os.wait() for /solve.sh, run=%s (child pid is %s)" % (runid, pid))
            os.wait()
    except Exception:
        die("Failed to exec /solve.sh script!")
    nii_base = nib.load(bold_image)
    # If there are things to cleanup we do that; specifically, the estimates.json file:
    # estfl = os.path.join(bids_link, 'estimates.json')
    estfl = os.path.join(outbids_dir, 'estimates.json')
    if os.path.isfile(estfl):
        note("Processing estimates.json file...")
        # nib is already imported at module level; this re-import is redundant
        # but harmless.
        import nibabel as nib, numpy as np
        with open(estfl, 'r') as fl:
            dat = json.load(fl)
        # decode the data: list of per-voxel dicts -> one array per parameter
        dat = {k: np.asarray([u[k] for u in dat]) for k in dat[0].keys()}
        for (k,v) in dat.items():
            # Reshape each parameter to a 4D volume matching the BOLD header.
            if len(v.shape) == 2:
                im = nib.Nifti2Image(np.reshape(v, (v.shape[0], 1, 1, v.shape[-1])), nii_base.affine, nii_base.header)
            else:
                im = nib.Nifti2Image(np.reshape(v, (-1, 1, 1, 1)), nii_base.affine, nii_base.header)
            print("Writing the estimates.json to nifti2 in outbids_dir: " + outbids_dir)
            # im.to_filename(os.path.join(bids_link, 'run-%s_%s.nii.gz' % (runid,k.lower())))
            im.to_filename(os.path.join(outbids_dir, 'run-%s_%s.nii.gz' % (runid,k.lower())))
        # os.rename(estfl, os.path.join(bids_link, 'run-%s_estimates.json' % (runid,)))
        os.rename(estfl, os.path.join(outbids_dir, 'run-%s_estimates.json' % (runid,)))
    else:
        note("No estimates.json file found.")
    # also rename results,.mat if it's there
    # resfli = os.path.join(bids_link, 'results.mat')
    # resflo = os.path.join(bids_link, 'run-%s_results.mat' % (runid,))
    resfli = os.path.join(outbids_dir, 'results.mat')
    resflo = os.path.join(outbids_dir, 'run-%s_results.mat' % (runid,))
    if os.path.isfile(resfli): os.rename(resfli, resflo)
    processed += 1
if processed == 0: die("No BOLD images found!")

# exit happily
sys.exit(0)
StarcoderdataPython
1644077
#!/bin/python3

import os


# Complete the repeatedString function below.
def repeatedString(s, n):
    """Count the letter 'a' in the first *n* characters of *s* repeated infinitely.

    :param s: the repeated string (non-empty)
    :param n: number of characters of the infinite repetition to consider
    :return: count of 'a' among those *n* characters

    The original loop used ``i <= n`` and therefore also counted ``s[n]``
    when ``n < len(s)`` (off by one); it also iterated over all of *s* even
    for huge *n*. Counting whole repeats plus the partial tail fixes both.
    """
    full_repeats, remainder = divmod(n, len(s))
    return s.count('a') * full_repeats + s[:remainder].count('a')


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    s = input()

    n = int(input())

    result = repeatedString(s, n)

    fptr.write(str(result) + '\n')

    fptr.close()
StarcoderdataPython
116886
<reponame>chib0/asd-winter2019
# Command-line client for the API: each subcommand delegates to the consumer
# object built in the group callback and echoes the result.
import click

from . import get_consumer


# Note: docstrings are deliberately omitted on these commands — click would
# surface them as --help text, which would change the CLI's observable output.
@click.group()
@click.option("-h", "--host", default="localhost")
@click.option("-p", "--port", default="8000", type=int)  # string default; click coerces via type=int
@click.pass_context
def cli(ctx, host, port):
    ctx.ensure_object(dict)
    # we're setting ctx.obj to match the signature of the api_consumer kwarg options for host, port, etc.
    ctx.obj['consumer'] = get_consumer(host, port)


@cli.command()
@click.pass_context
def get_users(ctx):
    # List all users.
    click.echo(ctx.obj['consumer'].get_users())


@cli.command()
@click.argument('user_id')
@click.pass_context
def get_user(ctx, user_id):
    # Show a single user by id.
    click.echo(ctx.obj['consumer'].get_user(user_id))


@cli.command()
@click.argument('user_id')
@click.pass_context
def get_snapshots(ctx, user_id):
    # List a user's snapshots.
    click.echo(ctx.obj['consumer'].get_snapshots(user_id))


@cli.command()
@click.argument('user_id')
@click.argument('snapshot_id_or_timestamp')
@click.pass_context
def get_snapshot(ctx, user_id, snapshot_id_or_timestamp):
    # Show one snapshot, addressed either by id or by timestamp.
    click.echo(ctx.obj['consumer'].get_snapshot(user_id, snapshot_id_or_timestamp))


@cli.command()
@click.argument('user_id')
@click.argument('snapshot_id_or_timestamp')
@click.argument('result')
@click.pass_context
def get_result(ctx, user_id, snapshot_id_or_timestamp, result):
    # Show the named analysis result of a snapshot.
    click.echo(ctx.obj['consumer'].get_result(
        user_id, snapshot_id_or_timestamp, result))


@cli.command()
@click.argument('user_id')
@click.argument('snapshot_id_or_timestamp')
@click.argument('result')
@click.pass_context
def get_result_data(ctx, user_id, snapshot_id_or_timestamp, result):
    # Show the raw data attached to the named analysis result.
    click.echo(ctx.obj['consumer'].get_result_data(user_id, snapshot_id_or_timestamp, result))
StarcoderdataPython
3211537
<reponame>olgam4/design3 import cv2 from vision.domain.iCamera import ICamera from vision.domain.iCameraFactory import ICameraFactory from vision.infrastructure.fallbackCamera import FallbackCamera from vision.infrastructure.openCvCamera import OpenCvCamera class OpenCvCameraFactory(ICameraFactory): def __init__(self, max_camera_count: int = 10) -> None: self._max_camera_count = max_camera_count self._cameras = [] self._find_all_camera() def create_camera(self) -> ICamera: if len(self._cameras) == 0: return FallbackCamera() index = self._cameras[0] return OpenCvCamera(index) def _find_all_camera(self) -> None: index = 0 while index < self._max_camera_count: cap = cv2.VideoCapture(index) if cap.isOpened(): cap.release() self._cameras.append(index) index += 1
StarcoderdataPython
1600186
<filename>src/pasio/dto/intervals.py from collections import namedtuple from ..utils.gzip_utils import open_for_read class ScoredInterval( namedtuple('ScoredInterval', ['start', 'stop', 'mean_count', 'log_marginal_likelyhood']) ): @property def length(self): return self.stop - self.start class BedgraphInterval(namedtuple('BedgraphInterval', ['chrom', 'start', 'stop', 'count'])): @property def length(self): return self.stop - self.start @classmethod def from_string(cls, line): chrom, start, stop, count = line.split()[0:4] start = int(start) stop = int(stop) count = int(count) return cls(chrom, start, stop, count) @classmethod def each_in_file(cls, filename): with open_for_read(filename) as stream: for interval in cls.each_in_stream(stream): yield interval @classmethod def each_in_stream(cls, stream): for line in stream: line = line.strip() if line == '': continue yield cls.from_string(line)
StarcoderdataPython
38639
<reponame>xnchu/PyTplot<gh_stars>10-100
import pytplot
import numpy as np


def split_vec(tvar, new_name=None, columns='all', suffix=None):
    """
    Splits up 2D data into many 1D tplot variables.

    .. note::
        This analysis routine assumes the data is no more than 2 dimensions.  If there are more, they may become flattened!

    Parameters:
        tvar : str
            Name of tplot variable to split up
        new_name : str, optional
            Base name for the created tplot variables; the per-column suffix
            is appended to it.  Defaults to *tvar*.
        columns : 'all' or list of ints / [start, end] pairs, optional
            The specific column numbers to grab from the data.  The default is to split all columns.
        suffix : list of str, optional
            Per-column name suffixes.  Defaults to ``_x/_y/_z`` for 3-column
            data, else ``_0, _1, ...``.

    Returns:
        list of str: names of the tplot variables created (or ``[tvar]`` for
        1-D input, or ``None`` when *tvar* is not found).

    Examples:
        >>> pytplot.store_data('b', data={'x':[2,5,8,11,14,17,20], 'y':[[1,1,1,1,1,1],[2,2,5,4,1,1],[100,100,3,50,1,1],[4,4,8,58,1,1],[5,5,9,21,1,1],[6,6,2,2,1,1],[7,7,1,6,1,1]]})
        >>> pytplot.tplot_math.split_vec('b',['b1','b2','b3'],[0,[1,3],4])
        >>> print(pytplot.data_quants['b2'].values)
    """

    # Make sure the tvar is found
    if tvar not in pytplot.data_quants:
        print(f"Error: {tvar} not found in memory.")
        return

    # Give a default to the new name
    if new_name is None:
        new_name = tvar

    # Gather data from the tvar
    alldata = pytplot.get_data(tvar)
    time = alldata[0]
    data = alldata[1]
    dim = data.shape

    # If already size one, simply return
    if len(dim) == 1:
        return [tvar]

    vec_length = dim[1]

    # Determine what the suffix list will be
    # NOTE(review): when the caller's suffix list is shorter than the number
    # of columns this only prints an error and keeps going; the suffix[i]
    # lookup below will then raise IndexError.
    if suffix is not None:
        if vec_length > len(suffix):
            print(f"split_vec error: number of columns ({vec_length}) is greater than the number of suffix entered")
    else:
        if vec_length == 3:
            suffix = ["_x", "_y", "_z"]
        else:
            suffix = []
            for i in range(vec_length):
                suffix.append("_"+str(i))

    created_variables = []

    # grab column data
    if columns == 'all':
        columns = range(vec_length)

    for i in columns:

        # a [start, end] pair selects an inclusive column range; a bare int
        # selects a single column
        if isinstance(i,list):
            range_start = i[0]
            range_end = i[1]
        else:
            range_start = i
            range_end = i
        split_col = list(range(range_start,range_end+1))
        # NOTE(review): when i is a [start, end] list, suffix[i] is a list
        # index into suffix and raises TypeError — the docstring example
        # above would hit this; likely suffix[range_start] was intended.
        split_name = new_name + suffix[i]
        created_variables = created_variables + [split_name]

        data_for_tplot = {'x':time, 'y':data[:,split_col].squeeze()}
        if not pytplot.store_data(split_name,data=data_for_tplot):
            raise Exception(f"Failed to store {split_name} in pytplot.")

    return created_variables
StarcoderdataPython
1797982
# Integration tests for kibitzr's Checker against a local fixture web server;
# the `target` fixture provides (host, port), the *_conf fixtures provide
# ready-made check configurations.
import requests

from kibitzr.checker import Checker


def test_server_is_alive(target):
    """Sanity check, that test environment is properly setup"""
    response = requests.get("http://{0}:{1}/index.html".format(*target))
    assert response.status_code == 200


def test_simple_fetcher_with_pretty_json(target, json_conf):
    # Fetch a JSON page and verify the pretty-printed transform output.
    ok, content = Checker(json_conf).check()
    assert ok is True
    assert content == (
        '{\n'
        '    "first name": "Peter",\n'
        '    "last name": "Demin"\n'
        '}'
    )


def test_tag_transformer(target, html_text_conf):
    # Prepending a tag transform extracts the first <div>'s text.
    html_text_conf['transform'].insert(0, {
        'tag': 'div',
    })
    ok, content = Checker(html_text_conf).check()
    assert ok is True
    assert content == 'Hello world!'


def test_browser_css(target, html_text_conf):
    # CSS-selector transform.
    html_text_conf['transform'].insert(0, {
        'css': '.footer',
    })
    ok, content = Checker(html_text_conf).check()
    assert ok is True
    assert content == 'Footer content'


def test_browser_xpath(target, html_text_conf):
    # XPath transform selecting the same footer element.
    html_text_conf['transform'].insert(0, {
        'xpath': './/*[@class="footer"]',
    })
    ok, content = Checker(html_text_conf).check()
    assert ok is True
    assert content == 'Footer content'


def test_scenario(target, html_text_conf):
    # Selenium scenario: click through to another page, then check its text.
    html_text_conf.update({
        'scenario': 'driver.find_element_by_id("page-link").click()',
        'delay': 0.5,
    })
    ok, content = Checker(html_text_conf).check()
    assert ok is True
    assert content == 'Another page'


def test_valid_http_404(target, not_found_conf):
    # A 404 listed in valid_http must still count as a successful check.
    not_found_conf.update({
        'valid_http': [404],
    })
    ok, content = Checker(not_found_conf).check()
    assert ok is True
    assert '404' in content


def test_python_script_sample(python_script_conf):
    # Script-based check producing plain text output.
    ok, content = Checker(python_script_conf).check()
    assert ok is True
    assert content == "python"
StarcoderdataPython
1623908
# pytest plugin for sonic-mgmt: discovers the supported sanity-check fixtures,
# merges per-script / CLI check-item customizations, and wraps every test
# module in pre-/post-test sanity checks with optional recovery.
import logging
import copy
import json

import pytest

from inspect import getmembers, isfunction
from collections import defaultdict

from tests.common.plugins.sanity_check import constants
from tests.common.plugins.sanity_check import checks
from tests.common.plugins.sanity_check.checks import *
from tests.common.plugins.sanity_check.recover import recover
from tests.common.helpers.assertions import pytest_assert as pt_assert
# NOTE(review): check_monit is not referenced below; presumably imported so
# the fixture is registered — confirm before removing.
from tests.common.plugins.sanity_check.checks import check_monit

logger = logging.getLogger(__name__)


def is_check_item(member):
    '''
    Function to filter for valid check items

    Used in conjuction with inspect.getmembers to make sure that
    only valid check functions/fixtures executed

    Valid check items must meet the following criteria:
    - Is a function
    - Is defined directly in sanity_checks/checks.py, NOT imported from another file
    - Begins with the string 'check_'

    Args:
        member (object): The object to checked
    Returns:
        (bool) True if 'member' is a valid check function, False otherwise
    '''
    if isfunction(member):
        in_check_file = member.__module__ == 'tests.common.plugins.sanity_check.checks'
        starts_with_check = member.__name__.startswith('check_')
        return in_check_file and starts_with_check
    else:
        return False


# Check-item names with the 'check_' prefix stripped, e.g. 'bgp' for check_bgp.
SUPPORTED_CHECKS = [member[0].replace('check_', '') for member in getmembers(checks, is_check_item)]


def _item2fixture(item):
    # Map a check-item name back to its fixture name.
    return 'check_' + item


def _update_check_items(old_items, new_items, supported_items):
    """
    @summary: Update the items to be performed in sanity check
    @param old_items: Existing items to be checked. Should be a Set.
    @param new_items: Iterable. Items to be added or removed.
    @param supported_items: The sanity check items that are currently supported.
    """
    updated_items = copy.deepcopy(old_items)
    for new_item in new_items:
        if not new_item:
            continue

        if new_item[0] in ["_", "-"]:   # Remove default check item
            new_item = new_item[1:]
            if new_item in updated_items:
                logger.info("Skip checking '%s'" % new_item)
                updated_items.remove(new_item)
        else:                           # Add a check item
            if new_item[0] == "+":
                new_item = new_item[1:]
            if new_item in supported_items :
                if new_item not in updated_items:
                    logger.info("Add checking '{}'".format(new_item))
                    updated_items.add(new_item)
            else:
                logger.warning('Check item "{}" no in supported check items: {}'.format(new_item, supported_items))

    return updated_items


def print_logs(duthosts):
    # Run each diagnostic command from constants.PRINT_LOGS on every DUT and
    # log its output; failures are ignored (module_ignore_errors=True).
    for dut in duthosts:
        logger.info("Run commands to print logs, logs to be collected on {}:\n{}"\
            .format(dut.hostname, json.dumps(constants.PRINT_LOGS, indent=4)))
        for cmd in constants.PRINT_LOGS.values():
            res = dut.shell(cmd, module_ignore_errors=True, verbose=False)
            logger.info("cmd='%s', output:\n%s" % (cmd, json.dumps(res["stdout_lines"], indent=4)))


def do_checks(request, check_items):
    # Resolve and invoke the fixture of each check item, flattening the
    # per-check results (which may be a single dict or a list) into one list.
    check_results = []
    for item in check_items:
        check_fixture = request.getfixturevalue(_item2fixture(item))
        results = check_fixture()
        if results and isinstance(results, list):
            check_results.extend(results)
        elif results:
            check_results.append(results)
    return check_results


@pytest.fixture(scope="module", autouse=True)
def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
    # Module-scoped autouse fixture: everything before `yield` runs before the
    # test module, everything after runs as the post-test check.
    logger.info("Prepare pre-test sanity check")
    skip_sanity = False
    allow_recover = False
    recover_method = "adaptive"
    check_items = set(copy.deepcopy(SUPPORTED_CHECKS))  # Default check items
    post_check = False

    # A test script can customize behavior via @pytest.mark.sanity_check(...).
    customized_sanity_check = None
    for m in request.node.iter_markers():
        logger.info("Found marker: m.name=%s, m.args=%s, m.kwargs=%s" % (m.name, m.args, m.kwargs))
        if m.name == "sanity_check":
            customized_sanity_check = m
            break

    if customized_sanity_check:
        logger.info("Process marker {} in script. m.args={}, m.kwargs={}"
            .format(customized_sanity_check.name, customized_sanity_check.args, customized_sanity_check.kwargs))
        skip_sanity = customized_sanity_check.kwargs.get("skip_sanity", False)
        allow_recover = customized_sanity_check.kwargs.get("allow_recover", False)
        recover_method = customized_sanity_check.kwargs.get("recover_method", "adaptive")
        if allow_recover and recover_method not in constants.RECOVER_METHODS:
            # NOTE(review): pytest exposes no `warning` callable; if this
            # branch is reached it raises AttributeError — confirm and
            # replace with warnings.warn or logger.warning.
            pytest.warning("Unsupported recover method")
            logger.info("Fall back to use default recover method 'config_reload'")
            recover_method = "config_reload"
        check_items = _update_check_items(check_items,
                                          customized_sanity_check.kwargs.get("check_items", []),
                                          SUPPORTED_CHECKS)
        post_check = customized_sanity_check.kwargs.get("post_check", False)

    if request.config.option.skip_sanity:
        skip_sanity = True
    if skip_sanity:
        logger.info("Skip sanity check according to command line argument or configuration of test script.")
        yield
        return

    if request.config.option.allow_recover:
        allow_recover = True

    # CLI --check_items is a comma-separated list, merged the same way as
    # the marker's check_items.
    cli_items = request.config.getoption("--check_items")
    if cli_items:
        cli_items_list=str(cli_items).split(',')
        check_items = _update_check_items(check_items, cli_items_list, SUPPORTED_CHECKS)

    # ignore BGP check for particular topology type
    if tbinfo['topo']['type'] == 'ptf' and 'bgp' in check_items:
        check_items.remove('bgp')

    # NOTE(review): unconditional remove — raises KeyError if
    # 'mux_simulator' was already dropped from check_items; consider
    # set.discard instead.
    if 'dualtor' not in tbinfo['topo']['name']:
        check_items.remove('mux_simulator')

    logger.info("Sanity check settings: skip_sanity=%s, check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s" % \
        (skip_sanity, check_items, allow_recover, recover_method, post_check))

    if not check_items:
        logger.info("No sanity check item is specified, no pre-test sanity check")
        yield
        logger.info("No sanity check item is specified, no post-test sanity check")
        return

    # Dynamically attach selected check fixtures to node
    for item in check_items:
        request.fixturenames.append(_item2fixture(item))

    print_logs(duthosts)

    logger.info("Start pre-test sanity checks")

    check_results = do_checks(request, check_items)
    logger.debug("Pre-test sanity check results:\n%s" % json.dumps(check_results, indent=4))

    failed_results = [result for result in check_results if result['failed']]
    if failed_results:
        if not allow_recover:
            pt_assert(False, "!!!!!!!!!!!!!!!!Pre-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
                .format(json.dumps(failed_results, indent=4)))
        else:
            # Group failures by DUT, recover each affected DUT, then re-check.
            dut_failed_results = defaultdict(list)
            for failed_result in failed_results:
                if 'host' in failed_result:
                    dut_failed_results[failed_result['host']].append(failed_result)
            for dut_name, dut_results in dut_failed_results.items():
                recover(duthosts[dut_name], localhost, fanouthosts, dut_results, recover_method)
            logger.info("Run sanity check again after recovery")
            new_check_results = do_checks(request, check_items)
            logger.debug("Pre-test sanity check after recovery results:\n%s" % json.dumps(new_check_results, indent=4))
            new_failed_results = [result for result in new_check_results if result['failed']]
            if new_failed_results:
                pt_assert(False, "!!!!!!!!!!!!!!!! Pre-test sanity check after recovery failed: !!!!!!!!!!!!!!!!\n{}"\
                    .format(json.dumps(new_failed_results, indent=4)))

    logger.info("Done pre-test sanity check")

    yield

    if not post_check:
        logger.info("No post-test check is required. Done post-test sanity check")
        return

    logger.info("Start post-test sanity check")
    post_check_results = do_checks(request, check_items)
    logger.debug("Post-test sanity check results:\n%s" % json.dumps(post_check_results, indent=4))

    post_failed_results = [result for result in post_check_results if result['failed']]
    if post_failed_results:
        pt_assert(False, "!!!!!!!!!!!!!!!! Post-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
            .format(json.dumps(post_failed_results, indent=4)))

    logger.info("Done post-test sanity check")
    return
1626203
from lenses import lens

# Optic that deserialises a JSON string into Python data structures.
parse_json = lens.Json()
# Optic focusing the 'food' entry of a mapping.
new_food = lens.Get('food')

if __name__ == '__main__':
    # Compose the two optics: parse the JSON first, then drill into 'food'.
    composed = parse_json & new_food
    extract = composed.get()
    print(extract('{"food": {"dinner": []}}'))
StarcoderdataPython
3258339
#! /usr/bin/python2.7
'''
This package takes care of training of our model on tweets data.

Pipeline (all run at import time, top to bottom):
    1. Load the labeled tweet CSV into a DataFrame.
    2. Tokenize tweets, stripping mentions, hashtags and URLs.
    3. Build tf-idf-weighted average word2vec vectors per tweet.
    4. Train a small feed-forward PyTorch classifier and save its weights.

NOTE(review): this is Python 2 code (py2 `print` statements, `unicode`,
list-returning `filter`) — do not run under Python 3 without porting.
'''
import nltk
import numpy as np
import matplotlib as plt
import time
from copy import deepcopy
from collections import Counter
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import TweetTokenizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
# Shared tokenizer instance used by tokenize_tweet() below.
tokenizer = TweetTokenizer()
import gensim
from gensim.models.word2vec import Word2Vec
LabeledSentence = gensim.models.doc2vec.LabeledSentence

# Root directory of the training data on the author's machine.
data_base = "/home/padam/Documents/data/sentiment-data/"

def ingest_tweets():
    '''Load the raw tweet CSV and return a cleaned DataFrame.

    Drops unused columns, removes rows with a missing SentimentText and
    remaps the sentiment labels {4 -> 1 (positive), 0 -> 0 (negative)}.
    '''
    data = pd.read_csv(data_base+'train_tweet.csv')
    data.drop(['ItemID' , 'SentimentSource' ,'Date' , 'Blank'] , axis =1 , inplace = True)
    # data = data[datab.Sentiment.isnull == False]
    data['Sentiment'] = data['Sentiment'].map(int)
    data = data[data['SentimentText'].isnull() == False]
    data.reset_index(inplace=True)
    data.drop('index' , axis=1 , inplace=True)
    # NOTE(review): .map({4:1, 0:0}) turns any label other than 4 or 0 into
    # NaN — confirm the CSV only ever contains those two values.
    data['Sentiment'] = data['Sentiment'].map( {4:1, 0:0})
    return data

data = ingest_tweets()
# print data.head(5)

def tokenize_tweet(tweet):
    '''
    Tokenizes the tweets and removes unwanted features like '#' , '@' , urls
    INPUT : tweet - string
    OUTPUT : tokenized tweet (list of tokens), or the sentinel string 'NE'
             when decoding/tokenization fails (filtered out downstream).
    '''
    try:
        # Python 2: decode raw bytes to unicode before tokenizing.
        tweet = unicode(tweet.decode('utf-8').lower())
        tokens = tokenizer.tokenize(tweet)
        # Drop mentions, hashtags and URLs — they carry little sentiment.
        tokens = filter(lambda t : not t.startswith('@') , tokens)
        tokens = filter(lambda t : not t.startswith('#') , tokens)
        tokens = filter(lambda t : not t.startswith('http') , tokens)
        return tokens
    except:
        # NOTE(review): bare except silently maps any failure to 'NE'.
        return 'NE'

def post_process(data , num = 1200000):
    '''
    Apply tokenize function to each SentimentText row.
    * Deepcopy fixes the issue of SettingWithCopyWarning
    * in pandas

    Rows whose tokenization failed (sentinel 'NE') are dropped.
    '''
    data = deepcopy(data.head(num))
    data['tokens'] = data['SentimentText'].map(tokenize_tweet)
    data = data[data.tokens != 'NE']
    data.reset_index(inplace =True)
    data.drop('index' , axis = 1 , inplace=True)
    return data

x = post_process(data)
# print x.head(5)

# Word2Vec model
n_num = 1200000
# 80/20 train/test split over (token lists, labels).
X_train , X_test , Y_train , Y_test = train_test_split(np.array(x.head(n_num).tokens) , np.array(x.head(n_num).Sentiment), test_size = 0.2)

def label_tweet(tweet , label):
    '''Wrap each token list in a LabeledSentence tagged '<label>_<index>'.

    The .words attribute of each LabeledSentence is used downstream.
    '''
    labeled = []
    for i,v in tqdm(enumerate(tweet)):
        l = '%s_%s'%(label,i)
        labeled.append((LabeledSentence(v , [l])))
    return labeled

X_train = label_tweet(X_train , 'TRAIN')
X_test = label_tweet(X_test,'TEST')

'''
* Use this only to train a new model , word2vec_model is already trained on tweet data.

w2v = Word2Vec(size=200 , min_count=10)
w2v.build_vocab([x.words for x in tqdm(X_train)])
w2v.train([x.words for x in tqdm(X_train)] , total_examples = w2v.corpus_count , epochs=w2v.iter)

# print w2v.most_similar('good') # Works well !

w2v.wv.save_word2vec_format('./word2vec_model.bin' , binary=True)

# Loading of model
>>> model = gensim.models.KeyedVectors.load_word2vec_format('./word2vec_model.bin', binary=True , unicode_errors='ignore')

Make sure using unicode_errors='ignore' or 'replace' or else use coding -utf-8- shebang (not sure)
'''

# Pre-trained 200-dim word2vec embeddings (see block comment above).
w2v_load = gensim.models.KeyedVectors.load_word2vec_format('/home/padam/Documents/git/Saachi/sentiment/word2vec_model.bin', binary=True , unicode_errors='ignore')

print ("Building tf-idf matrix ... ... ")
# analyzer is the identity: the input is already tokenized, so skip
# sklearn's own tokenization.
vectorizer = TfidfVectorizer(analyzer = lambda x :x , min_df = 10)
matrix = vectorizer.fit_transform([x.words for x in X_train])
# token -> idf weight lookup used by word_vector().
tfidf = dict(zip(vectorizer.get_feature_names() , vectorizer.idf_))
print 'vocab size : ',len(tfidf)

def word_vector(tokens , size = 200):
    '''Return the tf-idf-weighted mean word2vec vector, shape (1, size).

    Tokens missing from either the embedding model or the tf-idf vocab
    are skipped; an all-unknown tweet yields the zero vector.
    '''
    vec = np.zeros(size).reshape(1 , size)
    count = 0
    for word in tokens:
        try:
            vec += w2v_load[word].reshape(1,size)*tfidf[word]
            count += 1
        except KeyError: # token absent from the w2v vocab or tf-idf dict
            continue
    if count != 0:
        vec /= count
    return vec

# Normalizing the data
from sklearn.preprocessing import scale
size = 200
train_vecs_w2v = np.concatenate([word_vector(z ,size) for z in tqdm(map(lambda x : x.words , X_train))])
train_vecs_w2v = scale(train_vecs_w2v)
test_vecs_w2v = np.concatenate([word_vector(z,size) for z in tqdm(map(lambda x:x.words , X_test))])
test_vecs_w2v = scale(test_vecs_w2v)

# Classifier using Feed Forward Neual Network
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

learning_rate = 0.001
num_epochs = 60
# NOTE(review): D_in/H/D_out are never used — layer sizes are hard-coded
# in `net` below.
D_in , H , D_out = 200 , 32 ,1

class net(nn.Module):
    '''200 -> 32 -> 1 feed-forward classifier with a sigmoid output.'''
    def __init__(self):
        super(net , self).__init__()
        self.l1 = nn.Linear(200, 32)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(32 , 1)

    def forward(self , x):
        x = self.relu(self.l1(x))
        x = self.l2(x)
        # NOTE(review): F.sigmoid is deprecated in newer torch; the combo
        # sigmoid + MSELoss below would normally be BCELoss — confirm intent.
        x = F.sigmoid(x)
        return x

# NOTE(review): dead assignment — overwritten by MSELoss further down.
criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.RMSprop(net.parameters() ,lr = learning_rate)

inputs = torch.autograd.Variable(torch.from_numpy(train_vecs_w2v).float())
targets = torch.autograd.Variable(torch.from_numpy(Y_train).float() , requires_grad=False)

# NOTE(review): no-op expression, result discarded.
type(test_vecs_w2v)

# Shadows the class name `net` with an instance — works because the class
# is not referenced again.
net = net()
criterion = nn.MSELoss()
optimizer = torch.optim.RMSprop(net.parameters() ,lr = learning_rate)

# Full-batch training loop; logs the loss every 5 epochs.
for epoch in range(num_epochs):
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs , targets)
    loss.backward()
    optimizer.step()
    if(epoch+1)%5 ==0:
        print('Epoch [%d/%d] , Loss : %.4f'%(epoch+1 , num_epochs , loss.data[0]))

torch.save(net.state_dict() , '/home/padam/Documents/git/Saachi/sentiment/model/base_model')
StarcoderdataPython
51864
<gh_stars>1-10
class BaseCloudController:
    """Empty marker base class for cloud controllers.

    Presumably concrete provider-specific controllers elsewhere in the
    package subclass this — TODO confirm against the rest of the project.
    """
    pass
StarcoderdataPython
3350300
<reponame>esra-sengul/hazelcast-python-client import typing from hazelcast.protocol.codec import ( transactional_multi_map_get_codec, transactional_multi_map_put_codec, transactional_multi_map_remove_codec, transactional_multi_map_remove_entry_codec, transactional_multi_map_size_codec, transactional_multi_map_value_count_codec, ) from hazelcast.proxy.base import TransactionalProxy from hazelcast.types import KeyType, ValueType from hazelcast.util import check_not_none, thread_id, ImmutableLazyDataList class TransactionalMultiMap(TransactionalProxy, typing.Generic[KeyType, ValueType]): """Transactional implementation of :class:`~hazelcast.proxy.multi_map.MultiMap`. """ def put(self, key: KeyType, value: ValueType) -> bool: """Transactional implementation of :func:`MultiMap.put(key, value) <hazelcast.proxy.multi_map.MultiMap.put>` Args: key: The key to be stored. value: The value to be stored. Returns: ``True`` if the size of the multimap is increased, ``False`` if the multimap already contains the key-value tuple. """ check_not_none(key, "key can't be none") check_not_none(value, "value can't be none") key_data = self._to_data(key) value_data = self._to_data(value) request = transactional_multi_map_put_codec.encode_request( self.name, self.transaction.id, thread_id(), key_data, value_data ) return self._invoke(request, transactional_multi_map_put_codec.decode_response) def get(self, key: KeyType) -> typing.Optional[typing.List[ValueType]]: """Transactional implementation of :func:`MultiMap.get(key) <hazelcast.proxy.multi_map.MultiMap.get>` Args: key: The key whose associated values are returned. Returns: The collection of the values associated with the key. 
""" check_not_none(key, "key can't be none") def handler(message): return ImmutableLazyDataList( transactional_multi_map_get_codec.decode_response(message), self._to_object ) key_data = self._to_data(key) request = transactional_multi_map_get_codec.encode_request( self.name, self.transaction.id, thread_id(), key_data ) return self._invoke(request, handler) def remove(self, key: KeyType, value: ValueType) -> bool: """Transactional implementation of :func:`MultiMap.remove(key, value) <hazelcast.proxy.multi_map.MultiMap.remove>` Args: key: The key of the entry to remove. value: The value of the entry to remove. Returns: ``True`` if the item is removed, ``False`` otherwise. """ check_not_none(key, "key can't be none") check_not_none(value, "value can't be none") key_data = self._to_data(key) value_data = self._to_data(value) request = transactional_multi_map_remove_entry_codec.encode_request( self.name, self.transaction.id, thread_id(), key_data, value_data ) return self._invoke(request, transactional_multi_map_remove_entry_codec.decode_response) def remove_all(self, key: KeyType) -> typing.List[ValueType]: """Transactional implementation of :func:`MultiMap.remove_all(key) <hazelcast.proxy.multi_map.MultiMap.remove_all>` Args: key: The key of the entries to remove. Returns: The collection of the values associated with the key. """ check_not_none(key, "key can't be none") def handler(message): return ImmutableLazyDataList( transactional_multi_map_remove_codec.decode_response(message), self._to_object ) key_data = self._to_data(key) request = transactional_multi_map_remove_codec.encode_request( self.name, self.transaction.id, thread_id(), key_data ) return self._invoke(request, handler) def value_count(self, key: KeyType) -> int: """Transactional implementation of :func:`MultiMap.value_count(key) <hazelcast.proxy.multi_map.MultiMap.value_count>` Args: key: The key whose number of values is to be returned. 
Returns: The number of values matching the given key in the multimap. """ check_not_none(key, "key can't be none") key_data = self._to_data(key) request = transactional_multi_map_value_count_codec.encode_request( self.name, self.transaction.id, thread_id(), key_data ) return self._invoke(request, transactional_multi_map_value_count_codec.decode_response) def size(self) -> int: """Transactional implementation of :func:`MultiMap.size() <hazelcast.proxy.multi_map.MultiMap.size>` Returns: The number of key-value tuples in the multimap. """ request = transactional_multi_map_size_codec.encode_request( self.name, self.transaction.id, thread_id() ) return self._invoke(request, transactional_multi_map_size_codec.decode_response)
StarcoderdataPython
3345138
#!/usr/bin/python3
__author__ = "yang.dd"

"""
example 072
"""

if __name__ == '__main__':
    # Prompt for three integers and echo them back as a list.
    num = [int(input("请输入一个数字:")) for _ in range(3)]
    print(num)
StarcoderdataPython
1675275
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

r"""
Batch Knowledge Gradient (KG) via one-shot optimization as introduced in
[Balandat2019botorch]_. For broader discussion of KG see also
[Frazier2008knowledge]_, [Wu2016parallelkg]_.

.. [Balandat2019botorch]
    <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
    BoTorch: Programmable Bayesian Optimization in PyTorch. ArXiv 2019.

.. [Frazier2008knowledge]
    <NAME>, <NAME>, and <NAME>. A Knowledge-Gradient policy for
    sequential information collection. SIAM Journal on Control and
    Optimization, 2008.

.. [Wu2016parallelkg]
    <NAME> and <NAME>. The parallel knowledge gradient method for batch
    bayesian optimization. NIPS 2016.
"""

from copy import deepcopy
from typing import Callable, Optional, Tuple, Union

import torch
from torch import Tensor

from .. import settings
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import match_batch_shape, t_batch_mode_transform
from .acquisition import AcquisitionFunction, OneShotAcquisitionFunction
from .analytic import PosteriorMean
from .cost_aware import CostAwareUtility
from .monte_carlo import MCAcquisitionFunction, qSimpleRegret
from .objective import AcquisitionObjective, MCAcquisitionObjective, ScalarizedObjective


class qKnowledgeGradient(MCAcquisitionFunction, OneShotAcquisitionFunction):
    r"""Batch Knowledge Gradient using one-shot optimization.

    This computes the batch Knowledge Gradient using fantasies for the outer
    expectation and either the model posterior mean or MC-sampling for the
    inner expectation.

    In addition to the design variables, the input `X` also includes variables
    for the optimal designs for each of the fantasy models. For a fixed number
    of fantasies, all parts of `X` can be optimized in a "one-shot" fashion.
    """

    def __init__(
        self,
        model: Model,
        num_fantasies: Optional[int] = 64,
        sampler: Optional[MCSampler] = None,
        objective: Optional[AcquisitionObjective] = None,
        inner_sampler: Optional[MCSampler] = None,
        X_pending: Optional[Tensor] = None,
        current_value: Optional[Tensor] = None,
    ) -> None:
        r"""q-Knowledge Gradient (one-shot optimization).

        Args:
            model: A fitted model. Must support fantasizing.
            num_fantasies: The number of fantasy points to use. More fantasy
                points result in a better approximation, at the expense of
                memory and wall time. Unused if `sampler` is specified.
            sampler: The sampler used to sample fantasy observations. Optional
                if `num_fantasies` is specified.
            objective: The objective under which the samples are evaluated. If
                `None` or a ScalarizedObjective, then the analytic posterior mean
                is used, otherwise the objective is MC-evaluated (using
                inner_sampler).
            inner_sampler: The sampler used for inner sampling. Ignored if the
                objective is `None` or a ScalarizedObjective.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated.
            current_value: The current value, i.e. the expected best objective
                given the observed points `D`. If omitted, forward will not
                return the actual KG value, but the expected best objective
                given the data set `D u X`.
        """
        if sampler is None:
            if num_fantasies is None:
                raise ValueError(
                    "Must specify `num_fantasies` if no `sampler` is provided."
                )
            # base samples should be fixed for joint optimization over X, X_fantasies
            sampler = SobolQMCNormalSampler(
                num_samples=num_fantasies, resample=False, collapse_batch_dims=True
            )
        elif num_fantasies is not None:
            if sampler.sample_shape != torch.Size([num_fantasies]):
                raise ValueError(
                    f"The sampler shape must match num_fantasies={num_fantasies}."
                )
        else:
            # infer the fantasy count from the user-supplied sampler
            num_fantasies = sampler.sample_shape[0]
        # NOTE: super(MCAcquisitionFunction, self) deliberately skips
        # MCAcquisitionFunction.__init__ in the MRO and initializes the next
        # base directly.
        super(MCAcquisitionFunction, self).__init__(model=model)
        # if not explicitly specified, we use the posterior mean for linear objs
        if isinstance(objective, MCAcquisitionObjective) and inner_sampler is None:
            inner_sampler = SobolQMCNormalSampler(
                num_samples=128, resample=False, collapse_batch_dims=True
            )
        if objective is None and model.num_outputs != 1:
            raise UnsupportedError(
                "Must specify an objective when using a multi-output model."
            )
        self.sampler = sampler
        self.objective = objective
        self.set_X_pending(X_pending)
        self.inner_sampler = inner_sampler
        self.num_fantasies = num_fantasies
        self.current_value = current_value

    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qKnowledgeGradient on the candidate set `X`.

        Args:
            X: A `b x (q + num_fantasies) x d` Tensor with `b` t-batches of
                `q + num_fantasies` design points each. We split this X tensor
                into two parts in the `q` dimension (`dim=-2`). The first `q`
                are the q-batch of design points and the last num_fantasies are
                the current solutions of the inner optimization problem.

                `X_fantasies = X[..., -num_fantasies:, :]`
                `X_fantasies.shape = b x num_fantasies x d`

                `X_actual = X[..., :-num_fantasies, :]`
                `X_actual.shape = b x q x d`

        Returns:
            A Tensor of shape `b`. For t-batch b, the q-KG value of the design
                `X_actual[b]` is averaged across the fantasy models, where
                `X_fantasies[b, i]` is chosen as the final selection for the
                `i`-th fantasy model.
                NOTE: If `current_value` is not provided, then this is not the
                true KG value of `X_actual[b]`, and `X_fantasies[b, : ]` must be
                maximized at fixed `X_actual[b]`.
        """
        X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=self.num_fantasies)

        # We only concatenate X_pending into the X part after splitting
        if self.X_pending is not None:
            X_actual = torch.cat(
                [X_actual, match_batch_shape(self.X_pending, X_actual)], dim=-2
            )

        # construct the fantasy model of shape `num_fantasies x b`
        fantasy_model = self.model.fantasize(
            X=X_actual, sampler=self.sampler, observation_noise=True
        )

        # get the value function
        value_function = _get_value_function(
            model=fantasy_model, objective=self.objective, sampler=self.inner_sampler
        )

        # make sure to propagate gradients to the fantasy model train inputs
        with settings.propagate_grads(True):
            values = value_function(X=X_fantasies)  # num_fantasies x b

        if self.current_value is not None:
            values = values - self.current_value

        # return average over the fantasy samples
        return values.mean(dim=0)

    def get_augmented_q_batch_size(self, q: int) -> int:
        r"""Get augmented q batch size for one-shot optimization.

        Args:
            q: The number of candidates to consider jointly.

        Returns:
            The augmented size for one-shot optimization (including variables
            parameterizing the fantasy solutions).
        """
        return q + self.num_fantasies

    def extract_candidates(self, X_full: Tensor) -> Tensor:
        r"""We only return X as the set of candidates post-optimization.

        Args:
            X_full: A `b x (q + num_fantasies) x d`-dim Tensor with `b`
                t-batches of `q + num_fantasies` design points each.

        Returns:
            A `b x q x d`-dim Tensor with `b` t-batches of `q` design points
            each.
        """
        return X_full[..., : -self.num_fantasies, :]


class qMultiFidelityKnowledgeGradient(qKnowledgeGradient):
    r"""Batch Knowledge Gradient for multi-fidelity optimization.

    A version of `qKnowledgeGradient` that supports multi-fidelity
    optimization via a `CostAwareUtility` and the `project` and `expand`
    operators. If none of these are set, this acquisition function reduces to
    `qKnowledgeGradient`.
    """

    def __init__(
        self,
        model: Model,
        num_fantasies: Optional[int] = 64,
        sampler: Optional[MCSampler] = None,
        objective: Optional[AcquisitionObjective] = None,
        inner_sampler: Optional[MCSampler] = None,
        X_pending: Optional[Tensor] = None,
        current_value: Optional[Tensor] = None,
        cost_aware_utility: Optional[CostAwareUtility] = None,
        project: Callable[[Tensor], Tensor] = lambda X: X,
        expand: Callable[[Tensor], Tensor] = lambda X: X,
    ) -> None:
        r"""Multi-Fidelity q-Knowledge Gradient (one-shot optimization).

        Args:
            model: A fitted model. Must support fantasizing.
            num_fantasies: The number of fantasy points to use. More fantasy
                points result in a better approximation, at the expense of
                memory and wall time. Unused if `sampler` is specified.
            sampler: The sampler used to sample fantasy observations. Optional
                if `num_fantasies` is specified.
            objective: The objective under which the samples are evaluated. If
                `None` or a ScalarizedObjective, then the analytic posterior mean
                is used, otherwise the objective is MC-evaluated (using
                inner_sampler).
            inner_sampler: The sampler used for inner sampling. Ignored if the
                objective is `None` or a ScalarizedObjective.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated.
            current_value: The current value, i.e. the expected best objective
                given the observed points `D`. If omitted, forward will not
                return the actual KG value, but the expected best objective
                given the data set `D u X`.
            cost_aware_utility: A CostAwareUtility computing the cost-transformed
                utility from a candidate set and samples of increases in utility.
            project: A callable mapping a `batch_shape x q x d` tensor of design
                points to a tensor of the same shape projected to the desired
                target set (e.g. the target fidelities in case of multi-fidelity
                optimization).
            expand: A callable mapping a `batch_shape x q x d` input tensor to
                a `batch_shape x (q + q_e)' x d`-dim output tensor, where the
                `q_e` additional points in each q-batch correspond to
                additional ("trace") observations.
        """
        # cost-aware scaling needs a baseline value to compute utility deltas
        if current_value is None and cost_aware_utility is not None:
            raise UnsupportedError(
                "Cost-aware KG requires current_value to be specified."
            )
        super().__init__(
            model=model,
            num_fantasies=num_fantasies,
            sampler=sampler,
            objective=objective,
            inner_sampler=inner_sampler,
            X_pending=X_pending,
            current_value=current_value,
        )
        self.cost_aware_utility = cost_aware_utility
        self.project = project
        self.expand = expand
        # lazily-created deep copy of self.sampler (see cost_sampler property)
        self._cost_sampler = None

    @property
    def cost_sampler(self):
        if self._cost_sampler is None:
            # Note: Using the deepcopy here is essential. Removing this poses a
            # problem if the base model and the cost model have a different number
            # of outputs or test points (this would be caused by expand), as this
            # would trigger re-sampling the base samples in the fantasy sampler.
            # By cloning the sampler here, the right thing will happen if the
            # the sizes are compatible, if they are not this will result in
            # samples being drawn using different base samples, but it will at
            # least avoid changing state of the fantasy sampler.
            self._cost_sampler = deepcopy(self.sampler)
        return self._cost_sampler

    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qMultiFidelityKnowledgeGradient on the candidate set `X`.

        Args:
            X: A `b x (q + num_fantasies) x d` Tensor with `b` t-batches of
                `q + num_fantasies` design points each. We split this X tensor
                into two parts in the `q` dimension (`dim=-2`). The first `q`
                are the q-batch of design points and the last num_fantasies are
                the current solutions of the inner optimization problem.

                `X_fantasies = X[..., -num_fantasies:, :]`
                `X_fantasies.shape = b x num_fantasies x d`

                `X_actual = X[..., :-num_fantasies, :]`
                `X_actual.shape = b x q x d`

                In addition, `X` may be augmented with fidelity parameters as
                part of the `d`-dimension. Projecting fidelities to the target
                fidelity is handled by `project`.

        Returns:
            A Tensor of shape `b`. For t-batch b, the q-KG value of the design
                `X_actual[b]` is averaged across the fantasy models, where
                `X_fantasies[b, i]` is chosen as the final selection for the
                `i`-th fantasy model.
                NOTE: If `current_value` is not provided, then this is not the
                true KG value of `X_actual[b]`, and `X_fantasies[b, : ]` must be
                maximized at fixed `X_actual[b]`.
        """
        X_actual, X_fantasies = _split_fantasy_points(X=X, n_f=self.num_fantasies)

        # We only concatenate X_pending into the X part after splitting
        if self.X_pending is not None:
            X_eval = torch.cat(
                [X_actual, match_batch_shape(self.X_pending, X_actual)], dim=-2
            )
        else:
            X_eval = X_actual

        # construct the fantasy model of shape `num_fantasies x b`
        # expand X (to potentially add trace observations)
        fantasy_model = self.model.fantasize(
            X=self.expand(X_eval), sampler=self.sampler, observation_noise=True
        )

        # get the value function
        value_function = _get_value_function(
            model=fantasy_model, objective=self.objective, sampler=self.inner_sampler
        )

        # make sure to propagate gradients to the fantasy model train inputs
        # project the fantasy points
        with settings.propagate_grads(True):
            values = value_function(X=self.project(X_fantasies))  # num_fantasies x b

        if self.current_value is not None:
            values = values - self.current_value

        if self.cost_aware_utility is not None:
            values = self.cost_aware_utility(
                X=X_actual, deltas=values, sampler=self.cost_sampler
            )

        # return average over the fantasy samples
        return values.mean(dim=0)


def _get_value_function(
    model: Model,
    objective: Optional[Union[MCAcquisitionObjective, ScalarizedObjective]] = None,
    sampler: Optional[MCSampler] = None,
) -> AcquisitionFunction:
    r"""Construct value function (i.e. inner acquisition function).

    MC objectives are evaluated via qSimpleRegret with the given sampler;
    otherwise the analytic posterior mean is used.
    """
    if isinstance(objective, MCAcquisitionObjective):
        return qSimpleRegret(model=model, sampler=sampler, objective=objective)
    else:
        return PosteriorMean(model=model, objective=objective)


def _split_fantasy_points(X: Tensor, n_f: int) -> Tuple[Tensor, Tensor]:
    r"""Split a one-shot optimization input into actual and fantasy points

    Args:
        X: A `batch_shape x (q + n_f) x d`-dim tensor of actual and fantasy
            points

    Returns:
        2-element tuple containing

        - A `batch_shape x q x d`-dim tensor `X_actual` of input candidates.
        - A `n_f x batch_shape x 1 x d`-dim tensor `X_fantasies` of fantasy
            points, where `X_fantasies[i, batch_idx]` is the i-th fantasy point
            associated with the batch indexed by `batch_idx`.
    """
    if n_f > X.size(-2):
        raise ValueError(
            f"n_f ({n_f}) must be less than the q-batch dimension of X ({X.size(-2)})"
        )
    split_sizes = [X.size(-2) - n_f, n_f]
    X_actual, X_fantasies = torch.split(X, split_sizes, dim=-2)
    # X_fantasies is b x num_fantasies x d, needs to be num_fantasies x b x 1 x d
    # for batch mode evaluation with batch shape num_fantasies x b.
    # b x num_fantasies x d --> num_fantasies x b x d
    X_fantasies = X_fantasies.permute(-2, *range(X_fantasies.dim() - 2), -1)
    # num_fantasies x b x 1 x d
    X_fantasies = X_fantasies.unsqueeze(dim=-2)
    return X_actual, X_fantasies
StarcoderdataPython