seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19242121480 | import numpy as np
import torch
import cv2
import time
import copy
from trackron.structures.tracklet import Tracklet, BaseTrack, TrackState
from trackron.data.utils import resize_image, sample_target_brpadding, normalize_boxes
from .utils.matching import ious, iou_distance, linear_assignment, fuse_score
from .build import TRACKER_REGISTRY
from .siamese_tracker import SiameseTracker
from .base_tracker import PublicPostProcess, PostProcess
from .utils.kalman_filter import KalmanFilter
from .utils.preprocessing import numpy_to_torch
class STrack(BaseTrack):
    """A single tracked target whose motion state is held by a Kalman filter.

    State vector layout (as produced by KalmanFilter): (x, y, a, h, vx, vy,
    va, vh) where (x, y) is the box center, `a` the aspect ratio w/h, `h`
    the box height, and the last four entries the corresponding velocities.
    """

    # One shared filter instance used for vectorised prediction over many tracks.
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score):
        """Create a not-yet-activated track from a detection.

        :param tlwh: box as (top-left x, top-left y, width, height)
        :param score: detection confidence score
        """
        # wait activate
        # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `float` (np.float64) is the supported spelling.
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0

    def predict(self):
        """Propagate this track's state one frame forward via its Kalman filter."""
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            # Zero the height velocity while unobserved so lost tracks do not
            # keep growing/shrinking.
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(
            mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Vectorised Kalman prediction over a list of tracks (updates in place)."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(
                multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_num):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(
            self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_num == 1:
            # Detections in the very first frame are trusted immediately.
            self.is_activated = True
        # self.is_activated = True
        self.frame_num = frame_num
        self.start_frame = frame_num

    def re_activate(self, new_track, frame_num, new_id=False):
        """Revive a lost track with a freshly matched detection.

        :param new_track: matched STrack providing the new observation
        :param frame_num: current frame index
        :param new_id: if True, assign a fresh track id
        """
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh))
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_num = frame_num
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score

    def update(self, new_track, frame_num):
        """
        Update a matched track with a new observation.

        :type new_track: STrack
        :type frame_num: int
        :return: None
        """
        self.frame_num = frame_num
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score

    @property
    # @jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]        # aspect * height -> width
        ret[:2] -= ret[2:] / 2  # center -> top-left
        return ret

    @property
    # @jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        """Return this track's current box in (cx, cy, aspect, height) format."""
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    # @jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        """Convert (x1, y1, x2, y2) to (x1, y1, w, h)."""
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        """Convert (x1, y1, w, h) to (x1, y1, x2, y2)."""
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame,
                                      self.end_frame)
@TRACKER_REGISTRY.register()
class ByteTracker(SiameseTracker):
    """BYTE-style multi-object tracker.

    Associates Kalman-predicted tracks with detections in two passes:
    first with high-confidence detections, then with the remaining
    low-confidence ones (the BYTE association scheme).
    """

    def setup(self):
        ### MOT parameters
        super().setup()
        self.num_classes = self.cfg.MODEL.NUM_CLASS
        self.max_time_lost = 30
        # Frames a lost track may stay unmatched before it is removed.
        self.max_age = 30
        # New tracks are only spawned above this stricter threshold.
        self.score_thresh = self.det_thresh + 0.1
        # (height, width) the input image is resized to before the network.
        self.resize = (800, 1440)
        self.post_process = PostProcess(image_sz=self.resize, box_fmt='xyxy', box_absolute=True, uni_scale=True)

    def reset_mot(self, image):
        """Reset all tracking state for a new sequence.

        :param image: first frame (numpy HxWxC) — only its shape is used.
        """
        self.tracked_stracks = []
        self.lost_stracks = []
        self.removed_stracks = []
        self.kalman_filter = KalmanFilter()
        self.tracks_dict = dict()
        self.tracks = list()
        self.unmatched_tracks = list()
        # Original image size, kept to rescale network outputs back.
        self.ori_image_szs = torch.tensor([image.shape[:2]],
                                          dtype=torch.float32,
                                          device=self.device)
        # self.resize = get_size_with_aspect_ratio(image.shape[:2])[::-1]
        # self.resize = (image.shape[1], image.shape[0])

    def init_mot(self, image, info: dict, category=None) -> dict:
        """Initialize tracking on the first frame and track it.

        :param image: first frame
        :param info: per-frame info dict (may contain 'init_detections')
        :param category: optional category filter forwarded to post-processing
        :return: (track results, raw detection results) from track_mot
        """
        # Time initialization
        tic = time.time()
        self.reset_mot(image)
        self.category = category
        return self.track_mot(image, info)

    def track_mot(self, image, info):
        """Run detection on one frame and update the tracker state.

        :return: (list of active track dicts, post-processed detections)
        """
        rz_image = resize_image(image, self.resize)
        im_tensor = numpy_to_torch(rz_image).to(self.device) / 255.0
        mask_tensor = torch.zeros((1, *im_tensor.shape[-2:]),
                                  dtype=torch.bool,
                                  device=self.device)
        # NOTE(review): `use_pub_detection` is presumably set by
        # SiameseTracker.setup() — confirm; this branch only stages the public
        # detections (det_boxes/det_logits are unused below).
        if self.use_pub_detection:
            ### not supported currently
            public_detections = torch.tensor(info['init_detections']).to(self.device)
            public_detections[:, :4] = normalize_boxes(public_detections[:, :4],
                                                       image.shape[:2],
                                                       in_format='xywh',
                                                       out_format='cxcywh')
            self.ref_info['public_detections'] = public_detections.unsqueeze(
                0)  ### batch dim
            det_boxes = self.ref_info['public_detections'][..., :4]
            det_logits = self.ref_info['public_detections'][..., 4]
        with torch.no_grad():
            outputs = self.net.track_mot(im_tensor)
        results = self.post_process(outputs, self.ori_image_szs, self.category)
        res_track = self.update_mot(results[0])
        return res_track, results[0]

    def update_mot(self, output_results, use_embedding=False):
        """One BYTE association step over the current frame's detections.

        :param output_results: dict with 'scores' and 'boxes' (x1y1x2y2) tensors
        :param use_embedding: unused here (kept for interface compatibility)
        :return: list of dicts for activated tracks
                 ({'tracking_id', 'bbox', 'score', 'active'})
        """
        scores = output_results["scores"].cpu().numpy()
        bboxes = output_results["boxes"].cpu().numpy()  # x1y1x2y2

        # Split detections into high-confidence (first association) and
        # low-confidence 0.1..det_thresh (second association).
        remain_inds = scores > self.det_thresh
        inds_low = scores > 0.1
        inds_high = scores < self.det_thresh
        inds_second = np.logical_and(inds_low, inds_high)
        dets_second = bboxes[inds_second]
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        scores_second = scores[inds_second]

        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        if len(dets) > 0:
            detections = [
                STrack(STrack.tlbr_to_tlwh(box), score)
                for box, score in zip(dets, scores_keep)
            ]
        else:
            detections = []
        ''' Add newly detected stracks to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        ''' Step 2: First association, with high score detection boxes'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        STrack.multi_predict(strack_pool)
        dists = iou_distance(strack_pool, detections)
        dists = fuse_score(dists, detections)
        matches, u_track, u_detection = linear_assignment(dists, thresh=0.9)
        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_num)
                activated_starcks.append(track)
            else:
                # Previously lost track re-found by a high-confidence detection.
                track.re_activate(det, self.frame_num, new_id=False)
                refind_stracks.append(track)
        ''' Step 3: Second association, with low score detection boxes'''
        # association the untrack to the low score detections
        if len(dets_second) > 0:
            '''Detections'''
            detections_second = [
                STrack(STrack.tlbr_to_tlwh(tlbr), s)
                for (tlbr, s) in zip(dets_second, scores_second)
            ]
        else:
            detections_second = []
        # Only still-Tracked leftovers take part in the second association.
        r_tracked_stracks = [
            strack_pool[i]
            for i in u_track
            if strack_pool[i].state == TrackState.Tracked
        ]
        dists = iou_distance(r_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = linear_assignment(dists, thresh=0.5)
        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_num)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_num, new_id=False)
                refind_stracks.append(track)

        # Tracks unmatched after both passes become lost.
        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = iou_distance(unconfirmed, detections)
        dists = fuse_score(dists, detections)
        matches, u_unconfirmed, u_detection = linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_num)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.score_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_num)
            activated_starcks.append(track)
        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_num - track.end_frame > self.max_age:
                track.mark_removed()
                removed_stracks.append(track)

        # print('Ramained match {} s'.format(t4-t3))
        self.tracked_stracks = [
            t for t in self.tracked_stracks if t.state == TrackState.Tracked
        ]
        self.tracked_stracks = joint_stracks(self.tracked_stracks,
                                             activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
            self.tracked_stracks, self.lost_stracks)
        # get scores of lost tracks
        # output_stracks = [
        #     track for track in self.tracked_stracks if track.is_activated
        # ]
        output_stracks = []
        for track in self.tracked_stracks:
            if track.is_activated:
                track_obj = {
                    'tracking_id': track.track_id,
                    'bbox': track.tlbr,
                    'score': track.score,
                    'active': 1.0,
                }
                output_stracks.append(track_obj)
        return output_stracks
def joint_stracks(tlista, tlistb):
    """Union of two track lists, preserving order and deduplicating by track_id.

    Tracks from `tlista` win ties; a track from `tlistb` is appended only if
    its id has not been seen yet.
    """
    seen = {}
    merged = []
    for trk in tlista:
        seen[trk.track_id] = 1
        merged.append(trk)
    for trk in tlistb:
        if trk.track_id not in seen:
            seen[trk.track_id] = 1
            merged.append(trk)
    return merged
def sub_stracks(tlista, tlistb):
    """Return the tracks of `tlista` whose track_id does not occur in `tlistb`.

    As in the original, duplicate ids within `tlista` collapse to the last
    occurrence. Order of the surviving ids follows `tlista`.
    """
    # Idiom fix: use dict membership/pop instead of `get(tid, 0)` truthiness,
    # which relied on track objects being truthy.
    remaining = {t.track_id: t for t in tlista}
    for t in tlistb:
        remaining.pop(t.track_id, None)
    return list(remaining.values())
def remove_duplicate_stracks(stracksa, stracksb):
    """Resolve near-duplicate tracks between two track lists.

    Pairs with IoU distance below 0.15 (i.e. IoU > 0.85) are treated as
    duplicates; the member with the shorter lifetime is dropped from its
    list. Returns the two filtered lists (a, b).
    """
    pdist = iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        # Lifetime in frames of each duplicate; the longer-lived one is kept.
        timep = stracksa[p].frame_num - stracksa[p].start_frame
        timeq = stracksb[q].frame_num - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if not i in dupa]
    resb = [t for i, t in enumerate(stracksb) if not i in dupb]
    return resa, resb
| Flowerfan/Trackron | trackron/trackers/bytetracker.py | bytetracker.py | py | 12,904 | python | en | code | 46 | github-code | 13 |
28109666320 | import torch
import math
# from idr import connection
from torchvision.datasets.vision import VisionDataset
from torchvision.transforms import ToTensor
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torchvision.datasets.folder import default_loader
from PIL import Image
from torchvision import transforms
def collate_none(batch):
    """Collate function that drops ``None`` samples before default collation."""
    valid_samples = [sample for sample in batch if sample is not None]
    return torch.utils.data.dataloader.default_collate(valid_samples)
def get_test_image(dataset):
    """Return the first non-None item of ``dataset`` with a batch dim prepended.

    Raises StopIteration (like the original ``next``) when every item is None.
    """
    valid = (sample for sample in dataset if sample is not None)
    return next(valid).unsqueeze(0)
def make_size(im, size=[2048, 2048]):
    """Pad and/or center-crop ``im`` so it becomes exactly ``size`` (w, h).

    FIX: the original used ``list(im.size) < list(size)``, a *lexicographic*
    comparison — an image with small width but large height compared
    "smaller" and was padded with a negative vertical offset instead of
    being cropped. Dimensions are now handled independently: pad any side
    that is too small, then crop any side that is too large.

    :param im: PIL image
    :param size: target (width, height); default kept for interface
        compatibility (it is never mutated)
    """
    width, height = im.size
    if width < size[0] or height < size[1]:
        # Grow undersized dimensions first so the crop never has to sample
        # outside the image.
        im = pad_to(im, [max(width, size[0]), max(height, size[1])])
    if list(im.size) != list(size):
        im = crop_to(im, size)
    return im
def pad_to(im, size=[2048, 2048]):
    """Center-paste ``im`` onto a black canvas of the given (width, height)."""
    offset_x = int(size[0] / 2 - im.size[0] / 2)
    offset_y = int(size[1] / 2 - im.size[1] / 2)
    canvas = Image.new(im.mode, size, 0)
    canvas.paste(im, (offset_x, offset_y))
    return canvas
def crop_to(im, size=[2048, 2048]):
    """Center-crop ``im`` to exactly ``size`` (width, height).

    FIX: the original computed the offsets as ``size/2 - im.size/2`` — the
    *negative* of the centered offset whenever the image is larger than the
    target — so the crop window was shifted off-center and PIL filled the
    out-of-bounds region with black. The centered offset is
    ``(im.size - size) / 2``.
    """
    left = int(im.size[0] / 2 - size[0] / 2)
    upper = int(im.size[1] / 2 - size[1] / 2)
    right = left + size[0]
    lower = upper + size[1]
    return im.crop((left, upper, right, lower))
def to_img(x):
    """Map tanh-range tensors in [-1, 1] to images in [0, 1], shaped (N, 1, 28, 28).

    Expects ``x`` with 784 elements per sample (e.g. flattened MNIST output).
    """
    rescaled = (x + 1) * 0.5
    rescaled = rescaled.clamp(0, 1)
    return rescaled.view(rescaled.size(0), 1, 28, 28)
def is_image_cropped(image):
    """Return True if any border pixel of ``image`` is nonzero.

    A nonzero border suggests the content touches (and may have been cut by)
    the image edge; an all-zero border returns False.

    FIX: the original passed the four border sums *positionally* to a single
    ``np.sum`` call, so the 2nd/3rd/4th arguments were interpreted as
    ``axis``/``dtype``/``out`` and the call raised TypeError at runtime.
    The intended total is the plain sum of the four border sums.
    """
    border_total = (
        np.sum(image[:, 0])
        + np.sum(image[:, -1])
        + np.sum(image[0, :])
        + np.sum(image[-1, :])
    )
    return bool(border_total != 0)
40672541985 | import glob
import json
import logging
import os
import random
import sys
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, ConcatDataset
from tqdm import tqdm
from transformers import (
WEIGHTS_NAME,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
# BertConfig,
# BertForSequenceClassification,
BertTokenizer,
# DistilBertConfig,
# DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
# RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
# XLNetForSequenceClassification,
XLNetTokenizer,
)
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from src.transformers.configuration_distilbert import DistilBertConfig
from src.transformers.modeling_distilbert import DistilBertForSequenceClassification
sys.path.append("../../")
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from src.general import acc, f1_macro, precision_macro, recall_macro, create_dir
from src.glue.run_glue import compute_metrics, train
from src.metrics import uncertainty_metrics
from src.temperature_scaling import tune_temperature
from src.transformers.configuration_bert import BertConfig
from src.transformers.modeling_bert import BertForSequenceClassification
from src.transformers.processors import processors, output_modes, convert_examples_to_features
from sys_config import CACHE_DIR, DATA_DIR, CKPT_DIR, RES_DIR
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger for this training/evaluation script.
logger = logging.getLogger(__name__)

# GLUE task name -> (first text field, optional second text field).
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}

# ALL_MODELS = sum(
#     (
#         tuple(conf.pretrained_config_archive_map.keys())
#         for conf in (
#             BertConfig,
#             XLNetConfig,
#             XLMConfig,
#             RobertaConfig,
#             DistilBertConfig,
#             AlbertConfig,
#             XLMRobertaConfig,
#             FlaubertConfig,
#         )
#     ),
#     (),
# )

# todo add more models
# Model type key -> (config class, sequence-classification class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    # "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    # "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    # "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    # "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    # "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    # "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def get_glue_dataset(args, data_dir, task, model_type, evaluate=False, test=False, contrast=False, ood=False):
    """
    Loads original glue dataset.
    :param data_dir: path ../data/[task]
    :param task: glue task ("cola", "mnli", "mnli-mm", "mrpc", "sst-2", "sts-b", "qqp", "qnli", "rte", "wnli",
     "ag_news", "dbpedia", "trec-6")
    :param model_type: the type of the model we use (e.g. 'bert')
    :param evaluate: if True return dev set
    :param test: if True return test set
    :param contrast: if True return contrast set
    :param ood: if True return out-of-distribution test set
    :return: [X, y] — raw texts (or text pairs) and labels
    """
    create_dir(data_dir)
    processor = processors[task]()
    output_mode = output_modes[task]
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir,
        use_fast=args.use_fast_tokenizer,
    )
    # Dataset
    # Load data features from cache or dataset file
    if test:
        filename = "cached_{}_{}_original".format("test_contrast", str(task)) if contrast else "cached_{}_{}_original".format("test", str(task))
        if ood: filename += '_ood'
        cached_dataset = os.path.join(
            data_dir,
            filename
        )
    else:
        if evaluate and contrast:
            filename = "cached_{}_{}_original".format("dev_contrast", str(task))
        else:
            filename = "cached_{}_{}_original".format("dev" if evaluate else "train", str(task))
        cached_dataset = os.path.join(
            data_dir,
            filename,
        )
    if os.path.exists(cached_dataset):
        logger.info("Loading dataset from cached file %s", cached_dataset)
        dataset = torch.load(cached_dataset)
    else:
        logger.info("Creating dataset from dataset file at %s", data_dir)
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if test:
            if ood:
                examples = (
                    processor.get_test_examples_ood(data_dir)
                )
            else:
                examples = (
                    processor.get_contrast_examples("test") if contrast else processor.get_test_examples(data_dir)
                )
        else:
            if evaluate and contrast:
                examples = (
                    processor.get_contrast_examples("dev")
                )
            else:
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
            pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
        )
        # Single-sentence tasks keep text_a only; pair tasks zip both texts.
        if task in ['sst-2', 'cola', 'ag_news', 'dbpedia', 'trec-6', 'imdb']:
            X = [x.text_a for x in examples]
        elif task in ['mrpc', 'mnli', 'qnli', 'rte', 'qqp', 'wnli']:
            X = list(zip([x.text_a for x in examples], [x.text_b for x in examples]))
        # elif task == 'mnli':
        #     X = list(zip([x.text_a for x in examples], [x.text_b for x in examples]))
        else:
            print(task)
            # NOTE(review): missing `raise` — this bare expression is a no-op,
            # so unknown tasks fall through with X undefined.
            NotImplementedError
        y = [x.label for x in examples]
        dataset = [X, y]
        logger.info("Saving dataset into cached file %s", cached_dataset)
        torch.save(dataset, cached_dataset)

        # Save Tensor Dataset (tokenized features, keyed by model/seq-len/task).
        if test:
            filename = "test_contrast" if contrast else "test"
            if ood: filename += '_ood'
            features_dataset = os.path.join(
                args.data_dir,
                "cached_{}_{}_{}_{}_original".format(
                    # "test",
                    filename,
                    list(filter(None, args.model_name_or_path.split("/"))).pop(),
                    str(args.max_seq_length),
                    str(task),
                ),
            )
        else:
            filename = "dev" if evaluate else "train"
            if contrast: filename += "_contrast"
            features_dataset = os.path.join(
                args.data_dir,
                "cached_{}_{}_{}_{}_original".format(
                    # "dev" if evaluate else "train",
                    filename,
                    list(filter(None, args.model_name_or_path.split("/"))).pop(),
                    str(args.max_seq_length),
                    str(task),
                ),
            )
        torch.save(features, features_dataset)
    return dataset
def get_glue_tensor_dataset(X_inds, args, task, tokenizer, train=False,
                            evaluate=False, test=False, augm=False, X_orig=None, X_augm=None, y_augm=None,
                            augm_features=None, dpool=False,
                            contrast=False, contrast_ori=False,
                            ood=False, data_dir=None):
    """
    Load tensor dataset (not original/raw).
    :param X_inds: list of indices to keep in the dataset (if None keep all)
    :param args: args
    :param task: task name
    :param tokenizer: tokenizer
    :param train: if True train dataset
    :param evaluate: if True dev dataset
    :param test: if True test dataset
    :param augm: if True augmented dataset
    :param X_augm: augmented text (inputs)
    :param y_augm: augmented labels (original labels when augmenting labeled data)
    :param augm_features: pre-computed augmented features appended to the split
    :param dpool: if True, build a TensorDataset directly from augmented examples
    :param contrast / contrast_ori / ood: select contrast / original-contrast /
        out-of-distribution variants of the split
    :param data_dir: override for args.data_dir
    :return: a TensorDataset (or a raw feature list when augm and no
        augm_features are given)
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    if data_dir is None: data_dir = args.data_dir
    processor = processors[task.lower()]()
    output_mode = output_modes[task.lower()]
    # Load data features from cache or dataset file
    if test:
        prefix = "test"
    elif evaluate:
        prefix = "dev"
    elif train:
        prefix = "train"
    elif augm:
        prefix = "augm"
    else:
        prefix = "???"
    if contrast: prefix += "_contrast"
    if contrast_ori: prefix += "_contrast_ori"
    if ood: prefix += "_ood"
    cached_features_file = os.path.join(
        data_dir,
        "cached_{}_{}_{}_{}_original".format(
            prefix,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file): #and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        if X_inds is not None:
            logger.info("Selecting subsample...")
            features = list(np.array(features)[X_inds])
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if test:
            if ood:
                examples = (processor.get_test_examples(data_dir, ood))
            else:
                examples = (processor.get_contrast_examples("test", contrast_ori) if (contrast or contrast_ori) else processor.get_test_examples(data_dir))
        elif evaluate:
            examples = (processor.get_contrast_examples("dev") if contrast else processor.get_dev_examples(args.data_dir))
        elif train:
            examples = (processor.get_train_examples(args.data_dir))
        elif augm:
            if dpool:
                # Unlabeled-pool augmentation: tokenize and return immediately.
                augm_examples = (processor.get_augm_examples(X_augm, y_augm))
                features = convert_examples_to_features(
                    augm_examples,
                    tokenizer,
                    label_list=label_list,
                    max_length=args.max_seq_length,
                    output_mode=output_mode,
                    pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                    pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                    pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                )
                # Convert to Tensors and build dataset
                all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
                all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
                all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
                if output_mode == "classification":
                    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
                elif output_mode == "regression":
                    all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
                dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
                return dataset
            else:
                if X_orig is None:  # DA for supervised learning
                    examples = (processor.get_augm_examples(X_augm, y_augm))
                else:  # DA for semi-supervised learning (consistency loss)
                    # Pair each original training example with its augmentation.
                    orig_examples = (processor.get_train_examples(args.data_dir))
                    orig_examples = [np.array(orig_examples)[i] for i in X_inds]
                    augm_examples = (processor.get_augm_examples(X_augm, y_augm))
                    assert len(orig_examples)==len(augm_examples), "orig len {}, augm len {}".format(len(orig_examples),
                                                                                                    len(augm_examples))
                    all_input_ids = []
                    all_attention_mask = []
                    all_token_type_ids = []
                    all_labels = []
                    for examples in [orig_examples, augm_examples]:
                        features = convert_examples_to_features(
                            examples,
                            tokenizer,
                            label_list=label_list,
                            max_length=args.max_seq_length,
                            output_mode=output_mode,
                            pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                            pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                        )
                        all_input_ids.append(torch.tensor([f.input_ids for f in features], dtype=torch.long))
                        all_attention_mask.append(torch.tensor([f.attention_mask for f in features], dtype=torch.long))
                        all_token_type_ids.append(torch.tensor([f.token_type_ids for f in features], dtype=torch.long))
                        if output_mode == "classification":
                            all_labels.append(torch.tensor([f.label for f in features], dtype=torch.long))
                        elif output_mode == "regression":
                            all_labels.append(torch.tensor([f.label for f in features], dtype=torch.float))
                    # (orig tensors, augm tensors, orig labels) for consistency training.
                    dataset = TensorDataset(all_input_ids[0], all_attention_mask[0], all_token_type_ids[0],
                                            all_input_ids[1], all_attention_mask[1], all_token_type_ids[1],
                                            all_labels[0])
                    return dataset
        ################################################################
        if X_inds is not None:
            examples = list(np.array(examples)[X_inds])
        ################################################################
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
            pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
        )
        # if args.local_rank in [-1, 0]:
        #     logger.info("Saving features into cached file %s", cached_features_file)
        #     torch.save(features, cached_features_file)
        if augm_features is not None:
            # append train + augmented features (for DA supervised learning)
            features = features + augm_features
    if augm and augm_features is None:
        # return augmented features (to later append with trainset for DA supervised learning)
        return features
    else:
        if args.local_rank == 0 and not evaluate:
            torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        if features[0].token_type_ids is not None:
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
        else:
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
        if X_inds is not None and augm_features is None:
            assert len(dataset) == len(X_inds), 'dataset {}, X_inds {}'.format(len(dataset), len(X_inds))
        return dataset
def my_evaluate(eval_dataset, args, model, prefix="", mc_samples=None,
return_bert_embs=False):
"""
Evaluate model using 'eval_dataset'.
:param eval_dataset: tensor dataset
:param args:
:param model:
:param prefix: -
:param al_test: if True then eval_dataset is Dpool
:param mc_samples: if not None, int with number of MC forward samples
:return:
"""
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
# eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Sequential sampler - crucial!!!
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
bert_output_list = None
if mc_samples is not None:
# MC dropout
test_losses = []
logits_list = []
for i in range(1, mc_samples + 1):
test_losses_mc = []
logits_mc = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.train()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[-1]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
logits_mc = logits
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
logits_mc = torch.cat((logits_mc, logits), 0)
test_losses_mc.append(eval_loss / nb_eval_steps)
# logits_mc.append(logits)
test_losses.append(test_losses_mc)
logits_list.append(logits_mc)
preds = None
eval_loss = np.mean(test_losses)
logits = logits_list
preds = torch.mean(torch.stack(logits), 0).detach().cpu().numpy()
else:
# Standard inference (no MC dropout)
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[-1]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
labels = inputs.pop("labels",None)
if return_bert_embs:
bert_output = model.bert(**inputs)[1]
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
# out_label_ids = inputs["labels"].detach().cpu().numpy()
out_label_ids = labels.detach().cpu().numpy()
if return_bert_embs:
# bert_output_list = bert_output.detach().cpu().numpy()
bert_output_list = bert_output
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)
if return_bert_embs:
# bert_output_list = np.append(bert_output_list, bert_output.detach().cpu().numpy(), axis=0)
bert_output_list = torch.cat((bert_output_list, bert_output),0)
eval_loss = eval_loss / nb_eval_steps
logits = torch.tensor(preds)
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
# accuracy = round(acc(out_label_ids, preds), 4)
# f1 = round(f1_macro(out_label_ids, preds), 4)
# precision = round(precision_macro(out_label_ids, preds), 4)
# recall = round(recall_macro(out_label_ids, preds), 4)
# calibration scores
calibration_scores = uncertainty_metrics(logits, out_label_ids,
num_classes=args.num_classes)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
calibration_scores = {}
accuracy, f1, precision, recall = 0., 0., 0., 0.,
calibration_scores = None
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
# results.update({'f1_macro': f1, 'recall': recall, 'precision': precision, 'loss': eval_loss})
results.update(calibration_scores)
results.update({'bert_output': bert_output_list})
results.update({'gold_labels': out_label_ids.tolist()})
results['loss'] = eval_loss
# return results, eval_loss, accuracy, f1, precision, recall, logits
return results, logits
def train_transformer(args, train_dataset, eval_dataset, model, tokenizer):
    """
    Train a transformer model and evaluate the resulting checkpoint.

    :param args: experiment arguments (hyperparameters, paths, device, ...)
    :param train_dataset: train (tensor) dataset
    :param eval_dataset: dev (tensor) dataset
    :param model: model to train
    :param tokenizer: tokenizer
    :return: tuple (model, train loss, validation loss, evaluation results dict)
    """
    # Warm up the learning rate for ~10% of the total optimisation steps,
    # optionally capped by args.warmup_thr when it is provided.
    total_steps = len(train_dataset) / args.per_gpu_train_batch_size * args.num_train_epochs
    args.warmup_steps = int(total_steps / 10)
    if getattr(args, "warmup_thr", None) is not None:
        args.warmup_steps = min(int(total_steps / 10), args.warmup_thr)
    print("warmup steps: {}".format(args.warmup_steps))
    print("total steps: {}".format(int(total_steps)))
    print("logging steps: {}".format(args.logging_steps))
    ##############################
    # Train model
    ##############################
    _, model_class, _ = MODEL_CLASSES[args.model_type]
    global_step, tr_loss, val_acc, val_loss = train(args, train_dataset, eval_dataset, model, tokenizer)
    logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    ##############################
    # Evaluate final checkpoint
    ##############################
    # BUG FIX: `result` is initialised up-front so the function no longer
    # raises NameError when args.do_eval is False or this is a non-main rank.
    result = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.current_output_dir]
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result, logits = my_evaluate(eval_dataset, args, model, prefix=prefix)
    # The returned eval loss is the validation loss produced during training.
    eval_loss = val_loss
    return model, tr_loss, eval_loss, result
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    ##########################################################################
    # Setup args
    ##########################################################################
    parser.add_argument("--local_rank",
                        type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--no_cuda", action="store_true",
                        help="Avoid using CUDA when available")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
             "See details at https://nvidia.github.io/apex/amp.html",
    )
    ##########################################################################
    # Model args
    ##########################################################################
    parser.add_argument("--model_type", default="bert", type=str, help="Pretrained model")
    parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str, help="Pretrained ckpt")
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    # NOTE(review): argparse `type=bool` does NOT parse "False" -- bool("False")
    # is True, since any non-empty string is truthy. Flags declared with
    # type=bool below effectively keep their defaults unless passed an empty
    # string; consider a str2bool converter or action="store_true".
    parser.add_argument(
        "--use_fast_tokenizer",
        default=True,
        type=bool,
        help="Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true",
        default=False,
        help="Set this flag if you are using an uncased model.",
    )
    # parser.add_argument("--tapt", default=None, type=str,
    #                     help="ckpt of tapt model")
    ##########################################################################
    # Training args
    ##########################################################################
    parser.add_argument("--do_train", default=True, type=bool, help="If true do train")
    parser.add_argument("--do_eval", default=True, type=bool, help="If true do evaluation")
    parser.add_argument("--overwrite_output_dir", default=True, type=bool, help="If true do evaluation")
    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=32, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--num_train_epochs", default=5.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    # NOTE(review): --warmup_steps and --logging_steps are recomputed later in
    # this script (see train setup), so the CLI values are effectively ignored.
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=0, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=1e-5, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("-seed", "--seed", required=False, type=int, help="seed")
    parser.add_argument("--indicator", required=False,
                        default=None,
                        type=str,
                        help="experiment indicator")
    parser.add_argument("-patience", "--patience", required=False, type=int, help="patience for early stopping (steps)")
    parser.add_argument("--use_adapter", required=False, type=bool,
                        default=False,
                        help="if True finetune model with added adapter layers")
    parser.add_argument("--use_bayes_adapter", required=False, type=bool,
                        default=False,
                        help="if True finetune model with added Bayes adapter layers")
    parser.add_argument("--unfreeze_adapters", required=False, type=bool,
                        default=False,
                        help="if True add adapters and fine-tune all model")
    ##########################################################################
    # Data args
    ##########################################################################
    parser.add_argument("--dataset_name", default=None, required=True, type=str,
                        help="Dataset [mrpc, ag_news, qnli, sst-2, trec-6]")
    parser.add_argument("--data_dir", default=None, required=False, type=str,
                        help="Datasets folder")
    # parser.add_argument("--task_name", default=None, type=str, help="Task [MRPC, AG_NEWS, QNLI, SST-2]")
    parser.add_argument("--max_seq_length", default=256, type=int, help="Max sequence length")
    ##########################################################################
    # Uncertainty estimation args
    ##########################################################################
    parser.add_argument("--unc_method",
                        default="vanilla",
                        type=str,
                        help="Choose uncertainty estimation method from "
                             "[vanilla, mc, ensemble, temp_scale, bayes_adapt, bayes_top]"
                        )
    parser.add_argument("--test_all_uncertainty", required=False, type=bool, default=True,
                        help=" if True evaluate [vanilla, mc_3, mc_5, mc_10, mc_20, temp_scaling] "
                             "uncertainty methods for the model")
    parser.add_argument("--bayes_output", required=False, type=bool, default=False,
                        help=" if True add Bayesian classification layer (UA)")
    ##########################################################################
    # Server args
    ##########################################################################
    parser.add_argument("-g", "--gpu", required=False,
                        default='0', help="gpu on which this experiment runs")
    parser.add_argument("-server", "--server", required=True,
                        default='ford', help="server on which this experiment runs")
    args = parser.parse_args()
# Setup
if args.server is 'ford':
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
print("\nThis experiment runs on gpu {}...\n".format(args.gpu))
args.n_gpu = 1
args.device = torch.device('cuda:{}'.format(args.gpu))
else:
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = 0 if args.no_cuda else 1
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
print('device: {}'.format(args.device))
#########################################
# Setup args
#########################################
if args.seed == None:
seed = random.randint(1, 9999)
args.seed = seed
args.task_name = args.dataset_name.upper()
args.cache_dir = CACHE_DIR
if args.data_dir is None:
args.data_dir = os.path.join(DATA_DIR, args.task_name)
if args.dataset_name == 'cola': args.data_dir = os.path.join(DATA_DIR, "CoLA")
args.overwrite_cache = True
args.evaluate_during_training = True
# Output dir
output_dir = os.path.join(CKPT_DIR, '{}_{}'.format(args.dataset_name, args.model_type))
args.output_dir = os.path.join(output_dir, 'all_{}'.format(args.seed))
if args.use_adapter: args.output_dir += '-adapter'
if args.use_bayes_adapter: args.output_dir += '-bayes-adapter'
if args.indicator is not None: args.output_dir += '-{}'.format(args.indicator)
if args.patience is not None: args.output_dir += '-early{}'.format(int(args.num_train_epochs))
if args.bayes_output: args.output_dir += '-bayes-output'
if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: args.output_dir += '-unfreeze'
args.current_output_dir = args.output_dir
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
    #########################################
    # Setup logging
    #########################################
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        args.device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    #########################################
    # Prepare GLUE task
    #########################################
    # Task-specific processor / output mode come from the project-level
    # `processors` and `output_modes` registries (defined elsewhere).
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
        use_adapter=args.use_adapter,
        use_bayes_adapter=args.use_bayes_adapter,
        # presumably 'identity_init' selects near-identity adapter init -- TODO confirm
        adapter_initializer_range=0.0002 if args.indicator=='identity_init' else 1,
        bayes_output=args.bayes_output,
        unfreeze_adapters=args.unfreeze_adapters
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    #########################################
    # Check if experiment already done
    #########################################
    # If the final results file for this exact configuration already exists,
    # skip the whole run (temperature scaling is written last, so its file is
    # used as the completion marker).
    create_dir(RES_DIR)
    path = os.path.join(RES_DIR, '{}_{}_100%'.format(args.task_name, args.model_type))
    create_dir(path)
    name = 'seed_{}_lr_{}_bs_{}_epochs_{}'.format(args.seed, args.learning_rate,
                                                  args.per_gpu_train_batch_size,
                                                  int(args.num_train_epochs))
    # if args.use_adapter: name += '_adapters'
    if args.indicator is not None: name += '_{}'.format(args.indicator)
    print(name)
    dirname = os.path.join(path, name)
    if os.path.isdir(dirname) and os.listdir(dirname):
        json_file = 'temp_scale_results.json' if not args.bayes_output else 'temp_scale_results_bayes_output.json'
        if os.path.isfile(os.path.join(dirname, json_file)):
            print('Experiment done!')
            exit()
    create_dir(dirname)
    #########################################
    # Load (raw) dataset
    #########################################
    X_train, y_train = get_glue_dataset(args, args.data_dir, args.task_name, args.model_type, evaluate=False)
    X_val, y_val = get_glue_dataset(args, args.data_dir, args.task_name, args.model_type, evaluate=True)
    X_test, y_test = get_glue_dataset(args, args.data_dir, args.task_name, args.model_type, test=True)
    # test_dataset_ood = get_glue_tensor_dataset(None, args, 'twitterppdb', tokenizer, test=True,
    #                                            data_dir=os.path.join(DATA_DIR, 'TwitterPPDB'))
    X_orig = X_train  # original train set
    y_orig = y_train  # original labels
    X_inds = list(np.arange(len(X_orig)))  # indices to original train set
    X_unlab_inds = []  # indices of ulabeled set to original train set
    args.binary = True if len(set(y_train)) == 2 else False
    args.num_classes = len(set(y_train))
    # The following code is in case we want to undersample the dataset to evaluate uncertainty
    # in low (data) resource scenarios
    args.undersampling = False
    if args.indicator is not None:
        # Undersample training dataset (stratified sampling).
        # An indicator of the form "sample_<N>" keeps N stratified examples.
        if "sample_" in args.indicator:
            args.undersampling = True
            num_to_sample = int(args.indicator.split("_")[1])
            X_train_orig_after_sampling_inds, X_train_orig_remaining_inds, _, _ = train_test_split(
                X_inds,
                y_orig,
                train_size=num_to_sample,
                random_state=args.seed,
                stratify=y_train)
            X_inds = X_train_orig_after_sampling_inds  # indices of train set to original train set
            # X_train = list(np.array(X_train)[X_inds])  # train set
            # y_train = list(np.array(y_train)[X_inds])  # labels
            # Treat the rest of training data as unlabeled data
            X_unlab_inds = X_train_orig_remaining_inds  # indices of ulabeled set to original train set
            # Sanity checks: the split partitions the original indices exactly.
            assert len(X_unlab_inds) + len(X_inds) == len(X_orig)
            assert bool(not (set(X_unlab_inds) & set(X_inds)))
            assert max(X_inds) < len(X_orig)
            if X_unlab_inds != []:
                assert max(X_unlab_inds) < len(X_orig)
    #########################################
    # Load (tensor) dataset
    #########################################
    train_dataset = get_glue_tensor_dataset(X_inds, args, args.task_name, tokenizer, train=True)
    assert len(train_dataset) == len(X_inds)
    eval_dataset = get_glue_tensor_dataset(None, args, args.task_name, tokenizer, evaluate=True)
    test_dataset = get_glue_tensor_dataset(None, args, args.task_name, tokenizer, test=True)
    if args.dataset_name == 'mnli':
        test_dataset_ood = get_glue_tensor_dataset(None, args, args.task_name, tokenizer, test=True, ood=True)
    #######################
    # Train setup
    #######################
    # select after how many steps will evaluate during training so that we will evaluate at least 5 times in one epoch
    minibatch = int(len(X_inds) / (args.per_gpu_train_batch_size * max(1, args.n_gpu)))
    args.logging_steps = min(int(minibatch / 5), 500)
    if args.logging_steps < 1:
        args.logging_steps = 1
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    #######################
    # Train
    #######################
    model, tr_loss, val_loss, val_results = train_transformer(args, train_dataset, eval_dataset, model, tokenizer)
    #######################
    # Test uncertainty
    #######################
    # In-distribution uncertainty evaluation: vanilla softmax, MC dropout with
    # several sample counts, and post-hoc temperature scaling. Each method's
    # dev/test scores are dumped to a JSON file whose name encodes the active
    # model variants (adapters / Bayes adapters / Bayes output / unfrozen).
    print('Evaluate uncertainty on dev & test sets....')
    if args.test_all_uncertainty:
        # Vanilla
        print('Evaluate vanilla....')
        vanilla_results_val, vanilla_val_logits = my_evaluate(eval_dataset, args, model, mc_samples=None)
        vanilla_results_test, vanilla_test_logits = my_evaluate(test_dataset, args, model, mc_samples=None)
        vanilla_results = {"val_results": vanilla_results_val, "test_results": vanilla_results_test}
        filename = 'vanilla_results'
        if args.use_adapter: filename += '_adapter'
        if args.use_bayes_adapter: filename += '_bayes_adapter'
        if args.bayes_output: filename += '_bayes_output'
        if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
        with open(os.path.join(dirname, '{}.json'.format(filename)), 'w') as f:
            json.dump(vanilla_results, f)
        # Monte Carlo dropout
        for m in [3,5,10,20]:
            print('Evaluate MC dropout (N={})....'.format(m))
            mc_results_val, _ = my_evaluate(eval_dataset, args, model, mc_samples=m)
            mc_results_test, _ = my_evaluate(test_dataset, args, model, mc_samples=m)
            mc_results = {"val_results": mc_results_val, "test_results": mc_results_test}
            filename = 'mc{}_results'.format(m)
            if args.use_adapter: filename += '_adapter'
            if args.use_bayes_adapter: filename += '_bayes_adapter'
            if args.bayes_output: filename += '_bayes_output'
            if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
            with open(os.path.join(dirname, '{}.json'.format(filename)), 'w') as f:
                json.dump(mc_results, f)
        # Temperature Scaling: temperature is tuned on the dev set, then
        # applied to both the dev and test logits produced by the vanilla run.
        print('Evaluate temperature scaling....')
        temp_model = tune_temperature(eval_dataset, args, model, return_model_temp=True)
        temp_scores_val = temp_model.temp_scale_metrics(args.task_name, vanilla_val_logits,
                                                        vanilla_results_val['gold_labels'])
        temp_scores_test = temp_model.temp_scale_metrics(args.task_name, vanilla_test_logits,
                                                         vanilla_results_test['gold_labels'])
        temp_scores_val['temperature'] = float(temp_model.temperature)
        temp_scores = {"val_results": temp_scores_val, "test_results": temp_scores_test}
        filename = 'temp_scale_results'
        if args.use_adapter: filename += '_adapter'
        if args.use_bayes_adapter: filename += '_bayes_adapter'
        if args.bayes_output: filename += '_bayes_output'
        if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
        with open(os.path.join(dirname, '{}.json'.format(filename)), 'w') as f:
            json.dump(temp_scores, f)
    #######################
    # Test uncertainty OOD
    #######################
    # Pick the out-of-distribution counterpart of the in-distribution task
    # (paired datasets: qqp<->twitterppdb, sst-2<->imdb, rte<->qnli, ...).
    if args.dataset_name == 'mnli':
        test_dataset_ood = get_glue_tensor_dataset(None, args, args.task_name, tokenizer, test=True, ood=True)
    elif args.dataset_name == 'qqp':
        # test_dataset_ood = get_glue_tensor_dataset(None, args, 'mrpc', tokenizer, test=True, data_dir=os.path.join(DATA_DIR, 'MRPC'))
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'twitterppdb', tokenizer, test=True, data_dir=os.path.join(DATA_DIR, 'TwitterPPDB'))
    elif args.dataset_name == 'mrpc':
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'qqp', tokenizer, test=True,
                                                   data_dir=os.path.join(DATA_DIR, 'QQP'))
    elif args.dataset_name == 'sst-2':
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'imdb', tokenizer, test=True,
                                                   data_dir=os.path.join(DATA_DIR, 'IMDB'))
    elif args.dataset_name == 'imdb':
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'sst-2', tokenizer, test=True,
                                                   data_dir=os.path.join(DATA_DIR, 'SST-2'))
    elif args.dataset_name == 'rte':
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'qnli', tokenizer, test=True,
                                                   data_dir=os.path.join(DATA_DIR, 'QNLI'))
    elif args.dataset_name == 'qnli':
        test_dataset_ood = get_glue_tensor_dataset(None, args, 'rte', tokenizer, test=True,
                                                   data_dir=os.path.join(DATA_DIR, 'RTE'))
    else:
        # return
        raise NotImplementedError
    print('Evaluate uncertainty on dev & test sets....')
    if args.test_all_uncertainty:
        # Vanilla
        print('Evaluate OOD....')
        vanilla_ood_results, vanilla_ood_logits = my_evaluate(test_dataset_ood, args, model, mc_samples=None)
        vanilla_results = {"test_ood_results": vanilla_ood_results}
        filename = 'vanilla_results'
        if args.use_adapter: filename += '_adapter'
        if args.use_bayes_adapter: filename += '_bayes_adapter'
        if args.bayes_output: filename += '_bayes_output'
        if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
        with open(os.path.join(dirname, '{}_ood.json'.format(filename)), 'w') as f:
            json.dump(vanilla_results, f)
        # Monte Carlo dropout
        for m in [3, 5, 10, 20]:
            print('Evaluate MC dropout (N={})....'.format(m))
            mc_ood_results, _ = my_evaluate(test_dataset_ood, args, model, mc_samples=m)
            mc_results = {"test_ood_results": mc_ood_results}
            filename = 'mc{}_results'.format(m)
            if args.use_adapter: filename += '_adapter'
            if args.use_bayes_adapter: filename += '_bayes_adapter'
            if args.bayes_output: filename += '_bayes_output'
            if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
            with open(os.path.join(dirname, '{}_ood.json'.format(filename)), 'w') as f:
                json.dump(mc_results, f)
        # Temperature Scaling: reuse the temperature tuned on the
        # in-distribution dev set when scoring the OOD logits.
        print('Evaluate temperature scaling....')
        temperature = temp_scores_val['temperature']
        temp_model = tune_temperature(test_dataset_ood, args, model, return_model_temp=True)
        temp_ood_scores = temp_model.temp_scale_metrics(args.task_name, vanilla_ood_logits,
                                                        vanilla_ood_results['gold_labels'],
                                                        temperature=temperature)
        temp_scores = {"test_ood_results": temp_ood_scores}
        filename = 'temp_scale_results'
        if args.use_adapter: filename += '_adapter'
        if args.use_bayes_adapter: filename += '_bayes_adapter'
        if args.bayes_output: filename += '_bayes_output'
        if (args.use_adapter or args.bayes_output) and args.unfreeze_adapters: filename += '_unfreeze'
        with open(os.path.join(dirname, '{}_ood.json'.format(filename)), 'w') as f:
            json.dump(temp_scores, f)
# --- begin Livy REST batch client module ---
import logging
import time
from enum import Enum
from typing import Any, Dict, List, Iterable, Iterator, Optional
from typing import Union, Tuple
import requests
from requests import HTTPError
from requests.auth import HTTPBasicAuth
Auth = Union[requests.auth.AuthBase, Tuple[str, str]]
Verify = Union[bool, str]
LOGGER = logging.getLogger(__name__)
class JsonClient:
    """A wrapper for a requests session for JSON formatted requests.

    This client handles appending endpoints on to a common hostname,
    deserialising the response as JSON and raising an exception when an error
    HTTP code is received.
    """

    def __init__(
        self, url: str, auth: Auth = None, verify: Verify = True
    ) -> None:
        """Create a session for *url*, optionally authenticated with *auth*.

        :param url: base URL of the server (endpoints are appended to it).
        :param auth: optional requests auth object or (user, password) tuple.
        :param verify: TLS verification flag or CA-bundle path.
        """
        self.url = url
        self.session = requests.Session()
        if auth is not None:
            self.session.auth = auth
        self.session.verify = verify

    def close(self) -> None:
        """Close the underlying requests session."""
        self.session.close()

    def get(self, endpoint: str = "") -> dict:
        """GET *endpoint* and return the parsed JSON body."""
        return self._request("GET", endpoint)

    def post(self, endpoint: str, data: Optional[dict] = None) -> dict:
        """POST *data* (JSON-encoded) to *endpoint* and return the parsed body."""
        return self._request("POST", endpoint, data)

    def delete(self, endpoint: str = "") -> dict:
        """DELETE *endpoint* and return the parsed JSON body."""
        return self._request("DELETE", endpoint)

    def my_raise_for_status(self, response) -> None:
        """Raise :class:`requests.HTTPError` for 4xx/5xx responses, embedding
        the decoded response body in the exception message (unlike the plain
        ``Response.raise_for_status``, which omits it)."""
        content = response.content
        status_code = response.status_code
        if isinstance(content, bytes):
            # We attempt to decode utf-8 first because some servers
            # choose to localize their reason strings. If the string
            # isn't utf-8, we fall back to iso-8859-1 for all other
            # encodings. (See requests PR #3538)
            try:
                reason = content.decode('utf-8')
            except UnicodeDecodeError:
                reason = content.decode('iso-8859-1')
        else:
            reason = content
        if 400 <= status_code < 500:
            category = 'Client Error'
        elif 500 <= status_code < 600:
            category = 'Server Error'
        else:
            return
        http_error_msg = '%s %s: %s for url: %s >>msg : %s >>reason : %s' % (
            status_code, category, reason, response.url, response.text, reason)
        raise HTTPError(http_error_msg, response=response)

    def _request(self, method: str, endpoint: str, data: Optional[dict] = None) -> dict:
        """Issue *method* against base-url + *endpoint*; raise on HTTP errors
        and return the response parsed as JSON."""
        url = self.url.rstrip("/") + endpoint
        response = self.session.request(method, url, json=data)
        self.my_raise_for_status(response)
        return response.json()
class LivyBatches(object):
    """Thin wrapper around the Livy ``/batches`` REST endpoints."""

    def __init__(
        self, url: str, auth: Auth = None, verify: Verify = True
    ) -> None:
        self._client = JsonClient(url, auth, verify)

    def close(self) -> None:
        """Close the underlying requests session."""
        self._client.close()

    def post_batches(self, data):
        """Create a new batch job from the submission *data* dict.

        :return: tuple (batch id, initial state string).
        """
        response = self._client.post("/batches", data=data)
        LOGGER.info('post_batches response : {} '.format(response))
        return response['id'], response['state']

    def get_batches(self, batchId):
        """Return the current state string of batch *batchId*."""
        response = self._client.get(f"/batches/{batchId}")
        return response['state']

    def get_batches_response(self, batchId):
        """Return the full JSON description of batch *batchId* (id, name,
        owner, state, appId, appInfo with driver/Spark-UI URLs, log tail)."""
        return self._client.get(f"/batches/{batchId}")

    def get_batches_state(self, batchId):
        """Return the state of batch *batchId* from the ``/state`` endpoint.

        BUG FIX: this method previously looked the state up but fell off the
        end without a ``return``, so callers always received ``None``.
        """
        response = self._client.get(f"/batches/{batchId}/state")
        return response['state']

    def get_batches_log(self, batchId):
        """Return the list of log lines for batch *batchId*."""
        response = self._client.get(f"/batches/{batchId}/log")
        return response['log']

    def delete_batches(self, batchId):
        """Delete (and thereby kill) batch *batchId*."""
        self._client.delete(f"/batches/{batchId}")
def polling_intervals(
    start: Iterable[float], rest: float, max_duration: float = None
) -> Iterator[float]:
    """Yield sleep intervals for a polling loop.

    First yields the values of *start* in order, then *rest* forever.
    If *max_duration* is given, stop before the cumulative total of the
    yielded intervals would exceed it.
    """
    elapsed = 0.0
    warmup = list(start)
    while True:
        step = warmup.pop(0) if warmup else rest
        elapsed += step
        if max_duration is not None and elapsed > max_duration:
            return
        yield step
class BatchesState(Enum):
    """Lifecycle states reported by the Livy REST API for a session/batch.
    Values match the state strings returned by the server verbatim."""
    WAITING = "waiting"
    RUNNING = "running"
    AVAILABLE = "available"
    ERROR = "error"
    CANCELLING = "cancelling"
    CANCELLED = "cancelled"
    SUCCESS = "success"
    DEAD = "dead"
class Batches(object):
    """Manage a single Livy batch job: holds the connection, the default
    Spark submission parameters, and the id of the batch once submitted."""
    def __init__(
        self,
        url: str,
        auth: Auth = None,
        verify: Verify = True,
        proxy_user: str = None,
        jars: List[str] = None,
        py_files: List[str] = None,
        files: List[str] = None,
        driver_memory: str = None,
        driver_cores: int = None,
        executor_memory: str = None,
        executor_cores: int = None,
        num_executors: int = None,
        archives: List[str] = None,
        queue: str = None,
        name: str = None,
        spark_conf: Dict[str, Any] = None,
        echo: bool = True,
        check: bool = True,
        file: str = None,
        className: str = None,
        args: List[str] = None,
    ) -> None:
        """Store connection info plus default submission parameters.

        All Spark-related parameters mirror the fields of the Livy
        ``POST /batches`` request body; any left as ``None`` is simply
        omitted from the submission. Values can later be overridden
        per call via :meth:`run`.
        """
        self.client = LivyBatches(url, auth, verify=verify)
        self.proxy_user = proxy_user
        self.jars = jars
        self.py_files = py_files
        self.files = files
        self.driver_memory = driver_memory
        self.driver_cores = driver_cores
        self.executor_memory = executor_memory
        self.executor_cores = executor_cores
        self.num_executors = num_executors
        self.archives = archives
        self.queue = queue
        self.name = name
        self.spark_conf = spark_conf
        # NOTE(review): echo/check are stored but not used by the visible
        # methods -- presumably kept for API parity with a session client.
        self.echo = echo
        self.check = check
        # Populated after submission; used by close() to kill the batch.
        self.batchId: Optional[int] = None
        self.file = file
        self.className = className
        self.args = args
    def __enter__(self) -> "Batches":
        """Support ``with Batches(...) as b:``; returns this instance."""
        return self
    def __exit__(self, exc_type, exc_value, traceback) -> None:
        """On context exit, kill any submitted batch and close the session."""
        self.close()
    def close(self) -> None:
        """Kill the managed Spark session."""
        # Only delete if a batch was actually submitted.
        if self.batchId is not None:
            self.client.delete_batches(self.batchId)
        self.client.close()
def run(self,
proxy_user: str = None,
jars: List[str] = None,
py_files: List[str] = None,
files: List[str] = None,
driver_memory: str = None,
driver_cores: int = None,
executor_memory: str = None,
executor_cores: int = None,
num_executors: int = None,
archives: List[str] = None,
queue: str = None,
name: str = None,
spark_conf: Dict[str, Any] = None,
file: str = None,
className: str = None,
args: List[str] = None,
):
"""Run some code in the managed Spark session.
:param code: The code to run.
"""
# 初始化时的参数设置
body = {
"file": self.file,
"className": self.className,
"name": self.name,
}
if self.args is not None:
body["args"] = self.args
if self.proxy_user is not None:
body["proxyUser"] = self.proxy_user
if self.jars is not None:
body["jars"] = self.jars
if self.py_files is not None:
body["pyFiles"] = self.py_files
if self.files is not None:
body["files"] = self.files
if self.driver_memory is not None:
body["driverMemory"] = self.driver_memory
if self.driver_cores is not None:
body["driverCores"] = self.driver_cores
if self.executor_memory is not None:
body["executorMemory"] = self.executor_memory
if self.executor_cores is not None:
body["executorCores"] = self.executor_cores
if self.num_executors is not None:
body["numExecutors"] = self.num_executors
if self.archives is not None:
body["archives"] = self.archives
if self.queue is not None:
body["queue"] = self.queue
if self.spark_conf is not None:
body["conf"] = self.spark_conf
# 运行时的参数设置
if name is not None:
body["name"] = name
if file is not None:
body["file"] = file
if className is not None:
body["className"] = className
if args is not None:
body["args"] = args
if proxy_user is not None:
body["proxyUser"] = proxy_user
if jars is not None:
body["jars"] = jars
if py_files is not None:
body["pyFiles"] = py_files
if files is not None:
body["files"] = files
if driver_memory is not None:
body["driverMemory"] = driver_memory
if driver_cores is not None:
body["driverCores"] = driver_cores
if executor_memory is not None:
body["executorMemory"] = executor_memory
if executor_cores is not None:
body["executorCores"] = executor_cores
if num_executors is not None:
body["numExecutors"] = num_executors
if archives is not None:
body["archives"] = archives
if queue is not None:
body["queue"] = queue
if spark_conf is not None:
body["conf"] = spark_conf
app_state, app_log = self._execute(body)
return app_state, app_log
def _execute(self, body):
batchId, state = self.client.post_batches(body)
LOGGER.info('batchId : {} '.format(batchId))
LOGGER.info('state : {} '.format(state))
self.batchId = batchId
intervals = polling_intervals([1, 2, 3, 5, 8], 10)
while state not in [BatchesState.SUCCESS.value, BatchesState.DEAD.value]:
time.sleep(next(intervals))
state = self.client.get_batches(self.batchId)
LOGGER.info('state : {} '.format(state))
if state in [BatchesState.SUCCESS.value]:
app_state = True
else:
app_state = False
app_log = self.client.get_batches_log(self.batchId)
return app_state, app_log
def post_batches(self,
proxy_user: str = None,
jars: List[str] = None,
py_files: List[str] = None,
files: List[str] = None,
driver_memory: str = None,
driver_cores: int = None,
executor_memory: str = None,
executor_cores: int = None,
num_executors: int = None,
archives: List[str] = None,
queue: str = None,
name: str = None,
spark_conf: Dict[str, Any] = None,
file: str = None,
className: str = None,
args: List[str] = None,
):
"""Run some code in the managed Spark session.
:param code: The code to run.
"""
# 初始化时的参数设置
body = {
"file": self.file,
"className": self.className,
"name": self.name,
}
if self.args is not None:
body["args"] = self.args
if self.proxy_user is not None:
body["proxyUser"] = self.proxy_user
if self.jars is not None:
body["jars"] = self.jars
if self.py_files is not None:
body["pyFiles"] = self.py_files
if self.files is not None:
body["files"] = self.files
if self.driver_memory is not None:
body["driverMemory"] = self.driver_memory
if self.driver_cores is not None:
body["driverCores"] = self.driver_cores
if self.executor_memory is not None:
body["executorMemory"] = self.executor_memory
if self.executor_cores is not None:
body["executorCores"] = self.executor_cores
if self.num_executors is not None:
body["numExecutors"] = self.num_executors
if self.archives is not None:
body["archives"] = self.archives
if self.queue is not None:
body["queue"] = self.queue
if self.spark_conf is not None:
body["conf"] = self.spark_conf
# 运行时的参数设置
if name is not None:
body["name"] = name
if file is not None:
body["file"] = file
if className is not None:
body["className"] = className
if args is not None:
body["args"] = args
if proxy_user is not None:
body["proxyUser"] = proxy_user
if jars is not None:
body["jars"] = jars
if py_files is not None:
body["pyFiles"] = py_files
if files is not None:
body["files"] = files
if driver_memory is not None:
body["driverMemory"] = driver_memory
if driver_cores is not None:
body["driverCores"] = driver_cores
if executor_memory is not None:
body["executorMemory"] = executor_memory
if executor_cores is not None:
body["executorCores"] = executor_cores
if num_executors is not None:
body["numExecutors"] = num_executors
if archives is not None:
body["archives"] = archives
if queue is not None:
body["queue"] = queue
if spark_conf is not None:
body["conf"] = spark_conf
batchId, state = self.client.post_batches(body)
LOGGER.info('batchId : {} '.format(batchId))
LOGGER.info('state : {} '.format(state))
return batchId, state
    def get_batches(self, batchId):
        """Delegate to the client: fetch the full response for batch `batchId`."""
        return self.client.get_batches_response(batchId)

    def get_batches_log(self, batchId):
        """Delegate to the client: fetch the log of batch `batchId`."""
        return self.client.get_batches_log(batchId)

    def delete_stop_task(self, batchId):
        """Delegate to the client: stop batch `batchId` by deleting it on the server."""
        return self.client.delete_batches(batchId)
| itnoobzzy/EasyAirflow | plugins/hooks/LivyBatches.py | LivyBatches.py | py | 15,663 | python | en | code | 0 | github-code | 13 |
# 0/1 Knapsack solved two ways: plain recursion and a DP table (tabulation)
'''
Example :
Input :
n = 3
W = 4
val[] = {1,2,3}
wt[] = {4,5,1}
Output : 3
'''
def knapSack_Top_Down(self, W, wt, val, n):
    """Solve the 0/1 knapsack problem with a DP table (tabulation).

    Parameters
    ----------
    W : int
        Knapsack capacity.
    wt : list[int]
        Item weights.
    val : list[int]
        Item values (parallel to `wt`).
    n : int
        Number of items; kept for interface compatibility (the table is
        sized from ``len(wt)``).

    Returns
    -------
    int
        Maximum achievable value within capacity `W`.
    """
    # t[i][j] = best value using the first i items with capacity j.
    # Row 0 / column 0 are already 0 from the initialiser, so the loops
    # start at 1 (the original dead `if i==0 or j==0` branch was removed).
    t = [[0 for _ in range(W + 1)] for _ in range(len(wt) + 1)]
    for i in range(1, len(t)):
        for j in range(1, len(t[0])):
            if j >= wt[i - 1]:
                # Either take item i-1 (value + best of remaining capacity)
                # or skip it.
                t[i][j] = max(val[i - 1] + t[i - 1][j - wt[i - 1]], t[i - 1][j])
            else:
                t[i][j] = t[i - 1][j]
    return t[len(wt)][W]
def knapSack_Recurssion(self, W, wt, val, n):
    """Solve the 0/1 knapsack problem by naive recursion.

    Bug fix: the original recursed via ``self.knapSack``, a method that does
    not exist, which raised AttributeError on any recursive call; it now
    recurses through the module-level name.

    Returns the maximum value achievable with the first `n` items and
    capacity `W`.
    """
    if n == 0 or W == 0:
        return 0
    if wt[n - 1] <= W:
        # Best of: take item n-1, or leave it.
        return max(val[n - 1] + knapSack_Recurssion(self, W - wt[n - 1], wt, val, n - 1),
                   knapSack_Recurssion(self, W, wt, val, n - 1))
    # Item n-1 does not fit at all.
    return knapSack_Recurssion(self, W, wt, val, n - 1)
28541742109 | import time
import board
import busio
import adafruit_tcs34725
# Initialize the I2C bus object on the board's SCL/SDA pins.
I2C = busio.I2C(board.SCL, board.SDA)
# Create a TCS34725 color-sensor driver object on that bus.
tcs34725 = adafruit_tcs34725.TCS34725(I2C)
# Continuously print the values read from the sensor
def tcs34725_detect():
    """Poll the TCS34725 once per second forever, printing RGB color,
    color temperature and illuminance to stdout."""
    while True:
        rgb = tcs34725.color_rgb_bytes
        color_temp = tcs34725.color_temperature
        illuminance = tcs34725.lux
        print('颜色: {0}, {1}, {2}'.format(*rgb))  # RGB bytes
        print('色温: {0}K'.format(color_temp))
        print('照度: {0}'.format(illuminance))
        time.sleep(1)
if __name__ == '__main__':
    # Run the polling loop until the user interrupts with Ctrl-C.
    try:
        tcs34725_detect()
    except KeyboardInterrupt:
        print("程序结束!")
| cocpy/raspberrypi4 | 第8章/5/tcs34725.py | tcs34725.py | py | 729 | python | en | code | 0 | github-code | 13 |
5759056191 | import numpy as np
import matplotlib.pyplot as plt
class K_Mean_Algorithm:
    """Plain k-means clustering for 2-D points, with per-epoch plotting.

    Attributes
    ----------
    k : int
        Number of clusters.
    epochs : int
        Training iteration budget (the loop runs ``epochs - 1`` times,
        matching the original implementation).
    C_x, C_y : numpy.ndarray of shape (k, 1)
        Cluster-centre coordinates, set by :meth:`train`.
    """

    def __init__(self, k, epochs):
        self.k = k
        self.epochs = epochs

    def train(self, S_x, S_y):
        """Fit cluster centres to the sample coordinates `S_x`, `S_y`.

        Centres are initialised uniformly at random inside the data's
        bounding box, then refined by alternating assignment/update steps.
        Each epoch is visualised with matplotlib.
        """
        # To define clusters, need to know the data range.
        max_X, min_X = np.max(S_x), np.min(S_x)
        max_Y, min_Y = np.max(S_y), np.min(S_y)
        # Random initial cluster centres.
        self.C_x = np.random.uniform(min_X, max_X, size=(self.k, 1))
        self.C_y = np.random.uniform(min_Y, max_Y, size=(self.k, 1))
        for m in range(1, self.epochs):
            plt.figure()
            # Bug fix: the centres are instance attributes; the original
            # referenced undefined globals C_x/C_y here (NameError).
            plt.scatter(self.C_x, self.C_y, c='black')
            # Squared distances from one sample to every centre.
            errors = np.zeros((self.k, 1))
            # VoronoiCell[c, s] holds sample s if it belongs to centre c.
            VoronoiCell = np.zeros((self.k, len(S_x), 2))
            abs_V = np.zeros((self.k, 1))
            # Assignment step: each sample joins its nearest centre's cell.
            for i in range(len(S_x)):
                for j in range(self.k):
                    errors[j] = np.square(self.C_x[j] - S_x[i]) + np.square(self.C_y[j] - S_y[i])
                ind = np.argmin(errors)
                VoronoiCell[ind, i, :] = [S_x[i], S_y[i]]
            # Update step: move each centre to the mean of its cell.
            # NOTE(review): membership is counted via non-zero x coordinates,
            # so samples with x == 0 are ignored — preserved from the original.
            for i in range(self.k):
                abs_V[i] = len(np.nonzero(VoronoiCell[i, :, 0])[0])
                if not abs_V[i] == 0:
                    self.C_x[i] = sum(VoronoiCell[i, :, 0]) / abs_V[i]
                    self.C_y[i] = sum(VoronoiCell[i, :, 1]) / abs_V[i]
            prop_cycle = plt.rcParams['axes.prop_cycle']
            colors = prop_cycle.by_key()['color']
            for i in range(self.k):
                plt.scatter(VoronoiCell[i, :, 0], VoronoiCell[i, :, 1], c=colors[i])
            plt.legend(['Clusters'])
            plt.show()

    def predict(self, point):
        """Return ``[x, y]`` of the centre nearest to ``point = [x, y]``."""
        X, Y = point[0], point[1]
        error = np.zeros((self.k, 1))
        for i in range(self.k):
            # Bug fix: the original indexed the centres with the undefined
            # name `j`; the loop variable is `i`.
            error[i] = np.square(self.C_x[i] - X) + np.square(self.C_y[i] - Y)
        ind = np.argmin(error)
        return [self.C_x[ind], self.C_y[ind]]
| SalihFurkan/KMeanAlgorithm | K_mean_Algorithm.py | K_mean_Algorithm.py | py | 1,700 | python | en | code | 0 | github-code | 13 |
70195468818 | import threading
class StoppableThread(threading.Thread):
    """Thread with a cooperative stop flag.

    The target is expected to check :meth:`stopped` periodically. An
    externally owned event may be shared via the ``stop_event`` keyword
    argument; otherwise a private :class:`threading.Event` is created.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before forwarding the rest to Thread.__init__.
        self._stop_event = kwargs.pop("stop_event", None)
        if self._stop_event is None:
            self._stop_event = threading.Event()
        super(StoppableThread, self).__init__(*args, **kwargs)

    def stop(self):
        """Request the thread to stop (sets the event; does not join)."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called (or the shared event set)."""
        # Fix: Event.isSet() is a deprecated camelCase alias; use is_set().
        return self._stop_event.is_set()
class CoordinatedThread(StoppableThread):
    """StoppableThread that also honours an external coordinator.

    `coord` may be either a zero-argument callable returning truthy when the
    thread should stop, or a TensorFlow-style Coordinator object exposing
    ``should_stop``.
    """

    def __init__(self, coord, *args, **kwargs):
        # Normalise the coordinator to a plain callable.
        self._coord = coord if callable(coord) else coord.should_stop
        super(CoordinatedThread, self).__init__(*args, **kwargs)

    def stopped(self):
        """True when either the coordinator or the local stop flag fires."""
        return self._coord() or super(CoordinatedThread, self).stopped()
class LoopThread(StoppableThread):
    """Thread that invokes `iteration_func` repeatedly until stopped."""

    def __init__(self, iteration_func):
        self._iteration_func = iteration_func
        super(LoopThread, self).__init__(target=self._run)

    def _run(self):
        # Keep iterating until the stop flag is raised.
        while True:
            if self.stopped():
                break
            self._iteration_func()
class CoordinatedLoopThread(CoordinatedThread):
    """CoordinatedThread that invokes `iteration_func` until stopped."""

    def __init__(self, iteration_func, coord, *args, **kwargs):
        self._iteration_func = iteration_func
        super(CoordinatedLoopThread, self).__init__(coord, *args, **kwargs)

    def _run(self):
        # Keep iterating until either stop source fires.
        while True:
            if self.stopped():
                break
            self._iteration_func()
# Shared state for the thread-profiling monkey patch below:
# pooled pstats.Stats, the lock guarding it, and the dump-file name.
# NOTE(review): `global` at module scope is a no-op; the statements are kept
# unchanged for fidelity.
global __profiling_stats__
global __profiling_stats_lock__
global __profiling_stats_filename__
__profiling_stats__ = None
__profiling_stats_lock__ = None
__profiling_stats_filename__ = None
def enable_thread_profiling(profiling_stats_filename):
    """Thanks to http://rjp.io/2013/05/16/thread-profiling-in-python/ for this function.
    Monkey-patch Thread.run to enable global profiling.
    Each thread creates a local profiler; statistics are pooled
    to the global stats object on run completion."""
    import cProfile
    import pstats
    global __profiling_stats_lock__
    global __profiling_stats_filename__
    __profiling_stats_lock__ = threading.Lock()
    __profiling_stats_filename__ = profiling_stats_filename
    # Keep a handle on the unpatched run so disable_thread_profiling()
    # can restore it later.
    threading.Thread.__original_run__ = threading.Thread.run

    def run_with_profiling(self):
        global __profiling_stats__
        global __profiling_stats_lock__
        global __profiling_stats_filename__
        # Per-thread profiler wrapped around the original run().
        self._prof = cProfile.Profile()
        self._prof.enable()
        threading.Thread.__original_run__(self)
        self._prof.disable()
        # Pool this thread's stats into the shared accumulator; the lock
        # serialises concurrent thread completions.
        with __profiling_stats_lock__:
            if __profiling_stats__ is None:
                __profiling_stats__ = pstats.Stats(self._prof)
            else:
                __profiling_stats__.add(self._prof)
            # Persist after every completion so a crash loses nothing.
            if __profiling_stats_filename__ is not None:
                __profiling_stats__.dump_stats(__profiling_stats_filename__)

    threading.Thread.run = run_with_profiling
def disable_thread_profiling():
    """Undo the Thread.run monkey patch and reset the pooled statistics.

    Raises RuntimeError if profiling was never enabled or no thread has
    finished running yet (the pooled stats object is still None).
    """
    global __profiling_stats__
    global __profiling_stats_lock__
    if __profiling_stats__ is None:
        raise RuntimeError("Thread profiling has not been enabled or no threads have finished running.")
    # Restore the original run() and drop the stashed reference.
    threading.Thread.run = threading.Thread.__original_run__
    del threading.Thread.__original_run__
    __profiling_stats__ = None
    __profiling_stats_lock__ = None
def get_thread_profiling_stats():
    """Return the pooled profiling statistics gathered so far.

    Raises RuntimeError when profiling was never enabled or no thread has
    completed yet.
    """
    stats = __profiling_stats__
    if stats is None:
        raise RuntimeError("Thread profiling has not been enabled or no threads have finished running.")
    return stats
| bennihepp/pybh | pybh/thread_utils.py | thread_utils.py | py | 3,573 | python | en | code | 0 | github-code | 13 |
6366358576 | import math
from station_logic.train_station import TrainStation
from train_logic.train import Train
from train_logic.train_state import TrainState
class Entrepot(TrainStation):
    """ Entrepot station where oil is unloaded """

    def __init__(self,
                 station_name: str,
                 oil_volume: int,
                 tracks_num: int,
                 emptying_speed: int,
                 filling_speed: int,
                 storage_volume: int,
                 unload_limit: int):
        """
        Parameters
        ----------
        station_name
            A name of the station. Must be unique
        oil_volume
            Initial oil amount in storage
        tracks_num
            Number of railway tracks
        emptying_speed
            Speed of station storage emptying
        filling_speed
            Speed of station storage filling
        storage_volume
            Maximum oil amount that station can store
        unload_limit
            Unloader train storage size
        """
        super().__init__(station_name, oil_volume, tracks_num)
        self._emptying_speed = emptying_speed
        self._filling_speed = filling_speed
        self._storage_volume = storage_volume
        self._unload_limit = unload_limit
        # The single special train that hauls oil away from the entrepot.
        self._unloader_train = None
        # Per-track oil moved during the last update step (for reporting).
        self._last_collected_oil_per_track = [None] * tracks_num

    def get_info(self) -> dict:
        """ Get entrepot condition info

        Returns
        -------
        dict
            <oil_amt> int: amount of oil in station storage
            <tracks> list: list of tracks where elements consist of -
                <train_name> str: name of train on the track. None if track is free
                <oil_collected> int: amount of train's collected oil during the last step
                <storage> int: amount of oil in train storage
        """
        tracks_info = []
        for i, track in enumerate(self._tracks):
            elem = {'train_name': None, 'oil_collected': self._last_collected_oil_per_track[i], 'storage': None}
            if track is not None:
                elem['train_name'] = track.name
                elem['storage'] = track.oil_volume
            tracks_info.append(elem)
        info = {'oil_amt': self._oil_volume,
                'tracks': tracks_info}
        return info

    def __pre_simulate(self, train: Train) -> bool:
        """ Preliminary simulation of train adding process to the track

        Parameters
        ----------
        train
            Train for add simulation process

        Returns
        -------
        bool
            True if train can be added successfully, False otherwise
        """
        # Calculating the total amount of oil (the amount of oil in trains + storage + arriving train)
        # and the number of free railway tracks.
        # Fix: the loop variable used to be named `train`, shadowing the
        # parameter; renamed to `parked` for clarity.
        sum_oil_volume = self._oil_volume + train.oil_volume
        free_tracks_num = 0
        for parked in self._tracks:
            if parked is not None:
                sum_oil_volume += parked.oil_volume
            else:
                free_tracks_num += 1

        can_add = True
        # Check if there is a free track on the station
        if free_tracks_num == 0:
            can_add = False
        # Checking that the total volume of oil does not exceed the storage capacity
        elif sum_oil_volume <= self._storage_volume:
            # We check that the incoming train will be able to unload without interfering with the unloading train
            if sum_oil_volume >= self._unload_limit:
                if self._unloader_train is None:
                    # A second free track must remain for the future unloader.
                    if free_tracks_num < 2:
                        can_add = False
        else:  # otherwise exceeds storage capacity
            can_add = False
        return can_add

    def add_train_to_track(self, train: Train) -> bool:
        """ Add a train to the track

        Parameters
        ----------
        train
            Train to add

        Returns
        -------
        bool
            True if train was added successfully, False otherwise
        """
        is_added = False
        # Trying to add a train to the track by doing a presimulation
        if self.__pre_simulate(train):
            is_added = True
            for i, track in enumerate(self._tracks):
                # Looking for the first free track
                if track is None:
                    # Put a train to this track
                    train.state = TrainState.In_cargo_process
                    self._tracks[i] = train
                    break
        return is_added

    def __unloader_train_adding_logic(self):
        """ Logic of adding an unloader train to the station """
        # Calculating the total amount of oil (the volume of oil in trains + storage)
        # and the number of free railway tracks
        sum_oil_volume = self._oil_volume
        free_tracks_num = 0
        for train in self._tracks:
            if train is not None:
                sum_oil_volume += train.oil_volume
            else:
                free_tracks_num += 1

        # Checks that there is no unloader train and there is free space for it
        if self._unloader_train is None and free_tracks_num > 0:
            is_added = False
            # Checks that the amount of oil in trains and station storages >=
            # the volume of the storage of the unloader train
            if sum_oil_volume >= self._unload_limit:
                # Calculating the minimum storage filling speed
                sum_speed = self._filling_speed - self._emptying_speed
                if sum_speed < 0:
                    # Storage drains each step; make sure it lasts long enough
                    # to fill the unloader before running dry.
                    has_steps = self._oil_volume // abs(sum_speed)
                    need_steps = math.ceil(self._unload_limit / self._emptying_speed)
                    # Checks that there are enough steps to fill the storage for the required number
                    if has_steps >= need_steps:
                        is_added = True
                else:
                    is_added = True
            if is_added:
                # Create an unloader train
                unloader_train = create_unload_train(self._station_name, self._unload_limit)
                for i, track in enumerate(self._tracks):
                    # Looking for the first free track
                    if track is None:
                        # Adding an unloader train
                        self._unloader_train = unloader_train
                        # Put unloader train to the track
                        self._tracks[i] = unloader_train
                        break

    def __fill_storage(self):
        """ Fill the station storage """
        # Collecting the oil
        collected_oil = 0
        self._last_collected_oil_per_track = [None] * len(self._tracks)
        for i, train in enumerate(self._tracks):
            if train is not None:
                # Checking if the train is an unloader train
                if train == self._unloader_train:
                    # Loading oil into the unloader train.
                    # NOTE(review): fill_storage presumably returns the
                    # un-accepted remainder — confirm against Train.
                    oil_amt = self._emptying_speed - train.fill_storage(self._emptying_speed)
                    collected_oil -= oil_amt
                    # Logging logic
                    self._last_collected_oil_per_track[i] = oil_amt
                else:
                    # Unloading oil from train
                    oil_amt = train.empty_storage(self._filling_speed)
                    collected_oil += oil_amt
                    # Logging logic
                    self._last_collected_oil_per_track[i] = oil_amt
        # Filling the storage with the collected value
        self._oil_volume += collected_oil

    def __send_trains(self):
        """ Departure trains from the tracks """
        for i, train in enumerate(self._tracks):
            if train is not None:
                # Checking if the train is an unloader train
                if train == self._unloader_train:
                    # Is the train storage full
                    if train.is_full():
                        # Removing the train from the track
                        self._tracks[i] = None
                        # Removing the unloader train
                        self._unloader_train = None
                else:
                    # Is the train storage empty
                    if train.is_empty():
                        # Update train state to "Ready"
                        train.state = TrainState.Ready
                        # Removing the train from the track
                        self._tracks[i] = None

    def update(self):
        # Add unloader train
        self.__unloader_train_adding_logic()
        # Loading/unloading trains and fill the station storage
        self.__fill_storage()
        # Trains departing
        self.__send_trains()
def create_unload_train(station_name: str, storage_volume: int):
    """ Creates an unloader train

    Parameters
    ----------
    station_name
        Name of entrepot station
    storage_volume
        Capacity of the unloader train's storage

    Returns
    -------
    Train
        New unloader train with 0 oil volume
    """
    # The unloader is stationary (velocity 0) and has no unload destination;
    # it only accumulates oil at the entrepot it was created for.
    train = Train(name='Разгрузочный',
                  load_station_name=station_name,
                  unload_station_name='',
                  velocity=0,
                  storage_volume=storage_volume,
                  state=TrainState.In_cargo_process)
    return train
| Bumstern/train_simulator | station_logic/entrepot.py | entrepot.py | py | 9,413 | python | en | code | 0 | github-code | 13 |
2913477351 | import pytesseract
import cv2
from PIL import Image
import os
import logging
# On Windows, pytesseract needs an explicit path to the Tesseract binary.
if os.name == 'nt':
    TESSERACT_PATH = "C:/Users/soludev5/AppData/Local/Programs/Tesseract-OCR/tesseract.exe" # <------ /!\ CHANGE THIS /!\
    pytesseract.pytesseract.tesseract_cmd = TESSERACT_PATH

# Number of candidate images laid out horizontally in one captcha strip.
NUMBER_OF_IMAGE_IN_CAPTCHA = 4
def extractText(textImg):
    """This function returns a string which represents the name of the image that the text in `textImg` is describing.

    Parameters
    ----------
    textImg : PIL.Image
        a picture that contains text, which can be OCR-ed by tesseract.

    Returns
    -------
    captchaImgTitle : str
        a string representing the name of the image that `textImg` is describing.
    """
    # Flatten the RGBA text image onto a black background so tesseract sees
    # clean, opaque glyphs.
    new_img = Image.new("RGB", textImg.size, (0, 0, 0))
    new_img.paste(textImg, mask=textImg.split()[3])  # 3 is the alpha channel

    # Perform text extraction
    data = pytesseract.image_to_string(new_img, lang='eng')

    # Fix: the original called data.strip() and discarded the result
    # (str.strip returns a new string); the dead statement was removed.
    # Drop the last 3 characters — presumably tesseract's trailing
    # whitespace/form-feed; TODO confirm against real OCR output.
    sentence = data[:-3]
    logging.info(sentence)

    # The phrase looks like "Drag the <title> onto ..."; keep only <title>.
    captchaImgTitle = sentence.split('onto')[0].split('Drag the')[-1].strip()
    return captchaImgTitle
def solveCaptcha(textImg, captchaImg, collectionPath):
    """This function return an integer in the range [0,3]. This integer represents the ordinal position of the image described textually in `textImg`, found in `collectionPath` within `captchaImg`.

    Parameters
    ----------
    textImg : PIL.Image
        a picture that contains text, which can be OCR-ed by tesseract.
    captchaImg : numpy.ndarray
        a picture that contains within itself 4 pictures. This function will search for the index of the image described in `textImg` within this image and return it.
    collectionPath : str
        directory containing the reference images, named "<title>.png".

    Returns
    -------
    resultReturn : int
        an integer representing the index of the image described in `textImg` within `captchaImg`.
    """
    ##############
    #  Read Text #
    ##############
    captchaImgTitle = extractText(textImg)
    logging.info(captchaImgTitle)

    ################################
    # Search for img in collection #
    ################################
    imgName = captchaImgTitle.lower().replace(' ', '_') + ".png"
    assert os.path.isfile(collectionPath + imgName), "Image not found"
    logging.info("Image found in collection :D")

    #########################
    # Detect img in Captcha #
    #########################
    # Template matching: minimum of TM_SQDIFF_NORMED is the best match.
    method = cv2.TM_SQDIFF_NORMED
    small_image = cv2.imread(collectionPath + imgName)
    large_image = captchaImg
    result = cv2.matchTemplate(small_image, large_image, method)
    mn, _, mnLoc, _ = cv2.minMaxLoc(result)
    # Coordinates of our best match, then the template's horizontal centre.
    MPx, MPy = mnLoc
    trows, tcols = small_image.shape[:2]
    centerPointx = MPx + int(tcols / 2)

    #################
    # Return number #
    #################
    # The strip is split into NUMBER_OF_IMAGE_IN_CAPTCHA equal-width buckets;
    # bucket index = floor(x / bucket_width). min() clamps the x == width
    # edge, where the original loop left resultReturn unbound.
    largeWidth = large_image.shape[1]
    widthQuarter = largeWidth / NUMBER_OF_IMAGE_IN_CAPTCHA
    resultReturn = min(int(centerPointx // widthQuarter), NUMBER_OF_IMAGE_IN_CAPTCHA - 1)
    # Bug fix: logging.info("img n°", x) passed x as a %-format argument for
    # a string without placeholders, so the record failed to format.
    logging.info("img n° %s", resultReturn + 1)
    return resultReturn
| Xaalek/IkabotCaptchaSolver | SolveCaptcha.py | SolveCaptcha.py | py | 3,622 | python | en | code | 3 | github-code | 13 |
42166473750 | import time, pytest
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
import enums
class Test:
    """End-to-end KMS UI test 1573: create a playlist, publish it to the
    home page via the admin console, then verify its name, entry names and
    thumbnails (QR codes) on the home page."""
    #================================================================================================================================
    # @Author: Michal Zomper
    # Test Name : Home page playlist
    # Test description:
    # The function create playlist in kms.
    # in admin page the playlist is added to the playlist list of home page.
    # then in kms admin page we check that the playlist name and entries are correct
    #================================================================================================================================
    testNum = "1573"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Fail"
    driver = None
    common = None
    # Test variables
    entryName1 = None
    entryName2 = None
    entryName3 = None
    entryDescription = "Description"
    entryTags = "Tags,"
    filePath1 = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_4.png'
    filePath2 = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_2.png'
    filePath3 = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_3.png'
    expectedQRCode1 = 4
    expectedQRCode2 = 2
    expectedQRCode3 = 3
    playlistName = None
    playlistID = None
    playlistType = "Custom Playlist"
    categoryList = [("Apps Automation Category")]
    channelList = ""
    categoryName = None
    whereToPublishFrom = "Entry Page"
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        return request.param
    def test_01(self,driverFix,env):
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ########################### TEST SETUP ###########################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)
            self.common = Common(self.driver)
            if localSettings.LOCAL_SETTINGS_IS_NEW_UI == True:
                self.entryName1 = clsTestService.addGuidToString("Playlist 1", self.testNum)
                self.entryName2 = clsTestService.addGuidToString("Playlist 2", self.testNum)
                self.entryName3 = clsTestService.addGuidToString("Playlist 3", self.testNum)
            elif localSettings.LOCAL_SETTINGS_IS_NEW_UI == False:
                self.entryName1 = clsTestService.addGuidToString("1", self.testNum)
                self.entryName2 = clsTestService.addGuidToString("2", self.testNum)
                self.entryName3 = clsTestService.addGuidToString("3", self.testNum)
            self.entriesList = [self.entryName3, self.entryName2, self.entryName1]
            self.playlistName = clsTestService.addGuidToString("Home Page Playlist", self.testNum)
            ##################### TEST STEPS - MAIN FLOW #####################
            writeToLog("INFO","Step 1: Going to upload entry number 1")
            if self.common.upload.uploadEntry(self.filePath1, self.entryName1, self.entryDescription, self.entryTags) == None:
                writeToLog("INFO","Step 1: FAILED failed to upload entry number 1")
                return

            writeToLog("INFO","Step 2: Going to upload entry number 2")
            if self.common.upload.uploadEntry(self.filePath2, self.entryName2, self.entryDescription, self.entryTags) == None:
                writeToLog("INFO","Step 2: FAILED failed to upload entry number 2")
                return

            writeToLog("INFO","Step 3: Going to upload entry number 3")
            if self.common.upload.uploadEntry(self.filePath3, self.entryName3, self.entryDescription, self.entryTags) == None:
                writeToLog("INFO","Step 3: FAILED failed to upload entry number 3")
                return

            writeToLog("INFO","Step 4: Going to create new playlist with entries")
            if self.common.myPlaylists.addEntriesToPlaylist(self.entriesList, self.playlistName, True) == False:
                writeToLog("INFO","Step 4: FAILED to create new playlist '" + self.playlistName + "'")
                return

            writeToLog("INFO","Step 5: Going to get playlist id")
            self.playlistID = self.common.myPlaylists.getPlaylistID(self.playlistName)
            if self.playlistID == False:
                writeToLog("INFO","Step 5: FAILED to get playlist '" + self.playlistName + "' id")
                return

            writeToLog("INFO","Step 6: Going to set playlist in admin")
            if self.common.admin.setPlaylistToHomePage(self.playlistName, self.playlistID , self.playlistType) == False:
                writeToLog("INFO","Step 6: FAILED add playlist in admin")
                return

            writeToLog("INFO","Step 7: Going to navigate to home page")
            if self.common.base.navigate(localSettings.LOCAL_SETTINGS_TEST_BASE_URL) == False:
                writeToLog("INFO","Step 7: FAILED navigate to home page")
                return

            sleep(5)
            writeToLog("INFO","Step 8: Going verify home page playlist name")
            tmp_playlist_name = (self.common.home.HOME_PLAYLIST[0], self.common.home.HOME_PLAYLIST[1].replace('PLAYLIST', self.playlistName))
            if self.common.base.is_visible(tmp_playlist_name) == False:
                writeToLog("INFO","Step 8: FAILED to find and verify playlist name in home page: " + self.playlistName)
                return

            # Thumbnail geometry ratios differ between the new and old UI.
            writeToLog("INFO","Step 9: Going to verify the left entry in the playlist")
            if localSettings.LOCAL_SETTINGS_IS_NEW_UI == True:
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName3, self.expectedQRCode3, 8.21, 1.73, 3.91, 1.22) == False:
                    writeToLog("INFO","Step 9: FAILED to verify left entry '" + self.entryName3 + "' in playlist '" + self.playlistName + "'")
                    return

                writeToLog("INFO","Step 10: Going to verify the middle entry in the playlist")
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName2, self.expectedQRCode2, 2.34, 1.73, 1.35, 1.22) == False:
                    writeToLog("INFO","Step 10: FAILED to verify middle entry '" + self.entryName2 + "' in playlist '" + self.playlistName + "'")
                    return

                writeToLog("INFO","Step 11: Going to verify the right entry in the playlist")
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName1, self.expectedQRCode1, 1.71, 1.73, 1.15, 1.22) == False:
                    writeToLog("INFO","Step 11: FAILED to verify right entry '" + self.entryName1 + "' in playlist '" + self.playlistName + "'")
                    return

            elif localSettings.LOCAL_SETTINGS_IS_NEW_UI == False:
                sleep(3)
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName3, self.expectedQRCode3, 0.84, 1.15, 0.56, 0.54) == False:
                    writeToLog("INFO","Step 9: FAILED to verify left entry '" + self.entryName3 + "' in playlist '" + self.playlistName + "'")
                    return

                sleep(2)
                writeToLog("INFO","Step 10: Going to verify the middle entry in the playlist")
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName2, self.expectedQRCode2, 0.21, 0.48, 0.189, 0.35) == False:
                    writeToLog("INFO","Step 10: FAILED to verify middle entry '" + self.entryName2 + "' in playlist '" + self.playlistName + "'")
                    return

                sleep(2)
                writeToLog("INFO","Step 11: Going to verify the right entry in the playlist")
                if self.common.home.verifyEntyNameAndThumbnailInHomePagePlaylist(self.entryName1, self.expectedQRCode1, 0.169, 0.48, 0.154, 0.35) == False:
                    writeToLog("INFO","Step 11: FAILED to verify right entry '" + self.entryName1 + "' in playlist '" + self.playlistName + "'")
                    return

            ##################################################################
            self.status = "Pass"
            writeToLog("INFO","TEST PASSED: 'Home Page Playlist' was done successfully")
        # if an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)

    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        # Best-effort cleanup: delete the uploaded entries, the playlist and
        # its home-page registration, then tear down the driver.
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method ****************")
            sleep(2)
            self.common.myMedia.deleteEntriesFromMyMedia([self.entryName1, self.entryName2, self.entryName3], showAllEntries=True)
            self.common.myPlaylists.deletePlaylist(self.playlistName)
            self.common.admin.deletePlaylistFromHomePage(self.playlistName)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
# Allow running this file directly; delegates to pytest's CLI.
# Fix: `testNum` is a class attribute and is not defined at module scope,
# so the original raised NameError here; qualify it via the Test class.
# (The trailing dataset-metadata residue on this line was removed.)
pytest.main('test_' + Test.testNum + '.py --tb=line')
def fatorial(num):
    """Return num! (the factorial) for a non-negative integer num.

    `num` may also be a numeric string; it is converted with int().
    Returns 1 for num == 0 (empty product).
    """
    calc = 1
    # Multiply 1 * 2 * ... * num (the original's redundant `i*1` removed).
    for i in range(1, int(num) + 1):
        calc *= i
    return calc
def super_fatorial(num):
    """Return the superfactorial of num: the product of k! for k = 0..num."""
    result = 1
    for k in range(int(num) + 1):
        result *= fatorial(k)
    return result
# Read a number from the user; compute its superfactorial only when the
# input consists of digits. (The dataset-metadata residue fused onto the
# last line, which broke the syntax, was removed.)
num = input(' digite um numero: ')
if num.isdigit():
    num = int(num)
    print(super_fatorial(num))
else:
    print('digite um numero válido!')
6549130628 | # -*- coding: utf-8 -*-
# Extension by Lionel Chalet
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.errors import SphinxError
import os, sys, copy, hashlib, random
__version__ = '0.1'
question_number = 0
alternative_number = 0
language = 'en'
# UI strings keyed by language code; the module-level `language` variable
# selects which entry is active when rendering the answer-check widget.
translations = {
    'fr': {
        'verify_title': u'Verifiez vos réponses',
        'verify': u'Vérifier'
    },
    'en': {
        'verify_title': 'Verify your answers',
        'verify': 'Verify'
    }
}
def setup(app):
    """Sphinx extension entry point.

    Registers the MCQ config values, doctree nodes (with their HTML and
    LaTeX visitor pairs), the directives, and the build-event handlers.
    """
    app.add_config_value('mcq_nb_prop', -1, '')
    app.add_config_value('mcq_nb_rows', 7, '')
    app.add_config_value('mcq_upload_url', '', '')
    app.add_config_value('mcq_inginious_url', '', '')

    # Each node maps to a (visit, depart) pair per output format.
    app.add_node(Question, html=(html_visit_question, html_depart), latex=(latex_visit_question, pass_visit))
    app.add_node(Query, html=(html_visit_query, html_depart), latex=(pass_visit, pass_visit))
    app.add_node(Positive, html=(html_visit_positive, html_depart_alternative), latex=(latex_visit_posneg, latex_depart_posneg))
    app.add_node(Negative, html=(html_visit_negative, html_depart_alternative), latex=(latex_visit_posneg, latex_depart_posneg))
    app.add_node(Textbox, html=(html_visit_textbox, html_depart), latex=(latex_visit_textbox, None))
    app.add_node(Comment, html=(html_visit_comment, html_depart), latex=(skip_visit, None))

    app.add_directive('question', QuestionDirective)
    app.add_directive('positive', PositiveDirective)
    app.add_directive('negative', NegativeDirective)
    app.add_directive('textbox', TextboxDirective)
    app.add_directive('comment', CommentDirective)

    # Post-processing hooks (defined elsewhere in this module).
    app.connect('builder-inited', add_dependencies)
    app.connect('doctree-resolved', verify_structure)
    app.connect('doctree-resolved', html_add_content)
    app.connect('doctree-resolved', latex_add_content)
    app.connect('doctree-resolved', latex_shuffle)
    app.connect('doctree-resolved', epub_add_javascript)
class CopyableNode(nodes.General, nodes.Element):
    """docutils node whose deepcopy is intentionally shallow."""

    def deepcopy(self):
        """
        Nodes attributes aren't available with the LaTeX builder after the 'doctree-read' event
        This is some kind of patch I suppose ...
        """
        # A shallow copy keeps the custom class-level attributes
        # (id, nb_pos, ...) reachable from the copy, at the cost of
        # sharing children with the original.
        return copy.copy(self)
class Question(CopyableNode):
    # Explicit HTML id, set from the directive's optional argument; when
    # present the question is rendered with the extra 'inginious' CSS class.
    id = None
    # Number of correct alternatives; > 1 renders checkboxes, 1 renders radios.
    nb_pos = 1
    # Number of alternatives to display; default comes from the
    # 'mcq_nb_prop' config value (-1 presumably means "show all" — confirm).
    nb_prop = -1


class Alternative(CopyableNode):
    """Base class for every answer-related child node of a Question."""
    pass


class Query(Alternative):
    """The question statement (everything that is not an alternative)."""
    pass


class Positive(Alternative):
    """A correct answer choice."""
    pass


class Negative(Alternative):
    """A wrong answer choice."""
    pass


class Textbox(Alternative):
    """A free-text answer area."""
    # Height of the rendered textarea, overridable via the :nb_rows: option.
    nb_rows = 7


class Comment(CopyableNode):
    """Hidden feedback shown by the client-side script after verification."""
    pass
def html_visit_question(self, node):
    """Open the HTML wrapper for a question and emit its hidden settings."""
    global question_number, alternative_number
    question_number += 1
    # Alternative ids restart at 0 for every question.
    # NOTE(review): this produces duplicate element ids across questions on
    # one page — kept as-is since the client-side script relies on them.
    alternative_number = 0
    classes = 'question'
    if not node.id:
        node.id = 'questionId' + str(question_number)
    else:
        # An explicit id marks an INGInious-graded question.
        classes += ' inginious'
    self.body.append(self.starttag(node, 'div', CLASS=classes, IDS=[str(node.id)]))
    # Hidden inputs expose the question settings to the client-side script.
    self.body.append("<input type='hidden' class='nb_pos' value='" + str(node.nb_pos) + "' />")
    self.body.append("<input type='hidden' class='nb_prop' value='" + str(node.nb_prop) + "' />")
def html_visit_query(self, node):
self.body.append(self.starttag(node, 'div', CLASS='query'))
def html_visit_positive(self, node):
global alternative_number
self.body.append(self.starttag(node, 'div', CLASS='positive', IDS=[str(alternative_number)]))
html_visit_alternative(self, node)
alternative_number += 1
def html_visit_negative(self, node):
global alternative_number
self.body.append(self.starttag(node, 'div', CLASS='negative', IDS=[str(alternative_number)]))
html_visit_alternative(self, node)
alternative_number += 1
def html_visit_alternative(self, node):
if node.parent.nb_pos > 1:
self.body.append("<input type='checkbox' class='choice' name='" + str(question_number) + "' />")
else:
self.body.append("<input type='radio' class='choice' name='" + str(question_number) + "' />")
self.body.append(self.starttag(node, 'div', CLASS='content'))
def html_visit_textbox(self, node):
self.body.append(self.starttag(node, 'div', CLASS='textbox'))
self.body.append('<textarea rows="' + str(node.nb_rows) + '" cols="65"></textarea>')
def html_visit_comment(self, node):
    # Comments are hidden by default; the JS reveals them after checking.
    self.body.append(self.starttag(node, 'div', CLASS='comment', STYLE='display:none'))
def html_depart(self, node):
    # Generic depart handler: close the <div> opened by the visit handler.
    self.body.append('</div>')
def html_depart_alternative(self, node):
    """Close both divs opened for an alternative (content + outer)."""
    html_depart(self, node)
    html_depart(self, node)
def skip_visit(self, node):
    # Visit handler that drops the node and its children from the output.
    raise nodes.SkipNode
def pass_visit(self, node):
    # Visit handler that emits nothing but still traverses the children.
    pass
def latex_visit_question(self, node):
    # Questions need no extra LaTeX markup; children are rendered directly.
    pass
def latex_visit_posneg(self, node):
    """Emit the LaTeX prelude for a positive/negative alternative:
    a checkbox symbol followed by an indented margin block."""
    latex_visit_posneg.count += 1
    self.body.append('\n\\needspace{3\\baselineskip}'
                     '\n%\\CheckBox[name=' + str(latex_visit_posneg.count) + ',bordercolor=0 0 0]{}'
                     '\n$\\square$'
                     '\n\\vspace{-0.7cm}'
                     '\n\\begin{addmargin}[0.8cm]{0cm}')
# Function attribute used as a counter for unique LaTeX form-field names.
latex_visit_posneg.count = 0
def latex_depart_posneg(self, node):
    # Close the margin block opened by latex_visit_posneg.
    self.body.append('\\end{addmargin}\n')
def latex_visit_textbox(self, node):
    """Emit a multiline LaTeX form text field sized to the textbox, then
    skip the node's children (they are not rendered in LaTeX)."""
    # BUG FIX: '\T' was an unrecognized escape sequence ('\TextFieldFill');
    # the string value is unchanged, but the backslash is now properly
    # escaped so newer Python versions don't warn (and eventually error).
    self.body.append('\n\\TextFieldFill[multiline=true,height=' + str(node.nb_rows) + '\\baselineskip,bordercolor=0 0 0]{}')
    raise nodes.SkipNode
class BaseDirective(Directive):
    """Common base for all MCQ directives: parses the nested reStructuredText
    content into a fresh instance of ``node_class``."""
    has_content = True
    # This has to be replaced in subclasses
    node_class = None
    def run(self):
        node = self.node_class()
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class QuestionDirective(BaseDirective):
    """Directive for a whole question.

    Everything in the body that is not an Alternative (positive/negative/
    textbox) is reparented into a single leading Query node that holds the
    question statement.
    """
    optional_arguments = 1
    option_spec = {
        'nb_pos': int,
        'nb_prop': int
    }
    node_class = Question
    def run(self):
        node = super(QuestionDirective, self).run()[0]
        # Optional first argument is used as the (INGInious) question id.
        if len(self.arguments) > 0:
            node.id = self.arguments[0]
        query = Query()
        for child in node.children[:]: # Must make a copy to remove while iterating
            if not isinstance(child, Alternative):
                node.remove(child)
                query += child
        node.insert(0, query)
        app = self.state.document.settings.env.app
        # Directive options override the config default; then sanity-check.
        node.nb_prop = app.config.mcq_nb_prop
        for option, value in self.options.items():
            setattr(node, option, value)
        validate_question_options(app, node)
        return [node]
class PositiveDirective(BaseDirective):
    # Directive for a correct answer proposition.
    node_class = Positive
class NegativeDirective(BaseDirective):
    # Directive for an incorrect answer proposition.
    node_class = Negative
class TextboxDirective(BaseDirective):
    """Directive for a free-text answer box; validates its nb_rows option."""
    option_spec = {
        'nb_rows': int
    }
    node_class = Textbox
    def run(self):
        # BaseDirective.run returns a one-element node list.
        node = super(TextboxDirective, self).run()
        app = self.state.document.settings.env.app
        if 'nb_rows' in self.options:
            node[0].nb_rows = validate_nb_rows(app, self.options['nb_rows'])
        else:
            node[0].nb_rows = validate_nb_rows(app, app.config.mcq_nb_rows)
        return node
class CommentDirective(BaseDirective):
    # Directive for feedback text attached to a question or alternative.
    node_class = Comment
def add_dependencies(app):
    """Register the LaTeX preamble and the JS/CSS assets the extension needs.

    Called at builder init; also records the document language (only 'fr'
    is distinguished from the default here).
    """
    global language
    if app.config.language == 'fr':
        language = 'fr'
    # LaTeX: packages for form fields plus a \TextFieldFill macro that
    # stretches a TextField to the remaining line width.
    preamble = ('\\usepackage{scrextend}'
                '\n\\usepackage{hyperref}'
                '\n\\usepackage{needspace}'
                '\n\\usepackage{amssymb}'
                '\n\n\\newlength\\TextFieldLength'
                '\n\\newcommand\\TextFieldFill[2][]{%'
                '\n\t\\setlength\\TextFieldLength{\\linewidth}%'
                '\n\t\\settowidth{\\dimen0}{#2 }%'
                '\n\t\\addtolength\\TextFieldLength{-\\dimen0}%'
                '\n\t\\addtolength\\TextFieldLength{-2.22221pt}%'
                '\n\t\\TextField[#1,width=\\TextFieldLength]{\\raisebox{2pt}{#2 }}%'
                '\n}')
    if 'preamble' in app.config.latex_elements:
        app.config.latex_elements['preamble'] += '\n' + preamble
    else:
        app.config.latex_elements['preamble'] = preamble
    app.add_javascript('jquery-shuffle.js')
    app.add_javascript('rst-form.js')
    app.add_stylesheet('ext.css')
def validate_question_options(app, node):
    """Clamp and repair a question's ``nb_pos`` / ``nb_prop`` options in place,
    emitting warnings for invalid values.

    :param app: Sphinx application (used for config access and warnings).
    :param node: Question node whose options are validated.
    """
    if node.nb_pos < 1:
        app.warn('The number of positive answers to display must be greater than 0.')
        node.nb_pos = 1
    if node.nb_prop < node.nb_pos:
        app.warn('The number of propositions to display in a question ('+str(node.nb_prop)+') must be greater or equal than the number of positive answers ('+str(node.nb_pos)+') to display.')
        nb_prop = app.config.mcq_nb_prop
        if nb_prop < node.nb_pos:
            # BUG FIX: sys.maxint no longer exists on Python 3; sys.maxsize
            # is available on both Python 2 and 3 and serves the same
            # "effectively unlimited" purpose here.
            node.nb_prop = sys.maxsize
        else:
            node.nb_prop = nb_prop
    if node.nb_prop == node.nb_pos:
        app.warn('The number of positive answers shouldn\'t be the same as the number of propositions. It\'s like giving the answer.')
def validate_nb_rows(app, nb_rows):
    """Return ``nb_rows`` if it is valid (>= 1); otherwise warn and return 1."""
    if nb_rows >= 1:
        return nb_rows
    app.warn('The number of rows in a textbox must be greater than 0.')
    return 1
class StructureError(SphinxError):
    # Raised when the MCQ directive nesting rules are violated.
    category = 'Wrong document structure'
def verify_structure(app, doctree, docname):
    """Run all structural checks over the parsed doctree ('doctree-resolved'
    hook); each helper raises StructureError on violation."""
    verify_alternatives(app, doctree)
    verify_comments(app, doctree)
    verify_textbox(app, doctree)
    verify_questions(app, doctree)
def verify_alternatives(app, doctree):
    """Check that every alternative is a direct child of a Question and,
    except for textboxes, has content."""
    for alt in doctree.traverse(Alternative):
        if type(alt.parent) != Question:
            raise StructureError('Every "positive", "negative" and "textbox" directives must be direct children to a "question" directive.')
        if type(alt) != Textbox and len(alt.children) < 1:
            raise StructureError('Every "question", "positive" and "negative" directives must have content.')
def verify_comments(app, doctree):
    """Check comment placement: a comment must sit directly inside an
    Alternative subclass, be non-empty, and have no comment sibling."""
    for node in doctree.traverse(Comment):
        parent_type = type(node.parent)
        # Parent must be a direct subclass of Alternative (Query/Positive/
        # Negative/Textbox).
        if len(parent_type.__bases__) < 1 or parent_type.__bases__[0] != Alternative:
            raise StructureError('Every "comment" directive must be a direct child of a "question", "positive", "negative" or "textbox" directive.')
        if len(node.children) < 1:
            raise StructureError('Every "comment" directive must have content.')
        # traverse(..., siblings=True) includes the node itself, hence > 1.
        if len(node.traverse(condition=Comment, descend=False, siblings=True)) > 1:
            raise StructureError('A "comment" directive cannot have a "comment" directive sibling.')
def verify_textbox(app, doctree):
    """Check that a textbox contains at most one child (its optional comment)."""
    for textbox in doctree.traverse(Textbox):
        if len(textbox.children) > 1:
            raise StructureError('A "textbox" directive can only contain one directive (of type "comment").')
def verify_questions(app, doctree):
    """Check each question's overall shape: content plus either enough
    positive answers or a single textbox; warn on suspicious setups."""
    for node in doctree.traverse(Question):
        if len(node.children) < 2:
            raise StructureError('A question must have some content and (a "textbox" or at least one "positive" directive).')
        if len(node.children[0].traverse(Question)) > 0:
            raise StructureError('A question cannot contain another question, you fool!')
        query_count, positive_count, negative_count, textbox_count = count_children(node)
        if len(node.children) == 2:
            # NOTE(review): 'or' binds looser than 'and', so this reads as
            # query_count != 1 or (positive_count != 1 and textbox_count != 1);
            # confirm that grouping is intended.
            if query_count != 1 or positive_count != 1 and textbox_count != 1:
                raise StructureError('A "question" directive must have at least some content and (a "positive" or "textbox" directive).')
        else:
            if query_count != 1:
                raise StructureError('Internal error. This should never happen. This is a huge bug in this program.')
            if positive_count < 1:
                raise StructureError('A "question" directive must contain at least one "positive" directive. (or only one "textbox" directive)')
            if positive_count < node.nb_pos:
                raise StructureError('A "question" directive must have at least the given number of "positive" directives children.')
            if negative_count < 1:
                app.warn('Not giving any negative proposition in a question is the same as giving the answer.')
def count_children(node):
    """Count a question's direct children by kind.

    :return: tuple (query_count, positive_count, negative_count, textbox_count).
    :raises StructureError: if a child is not a direct Alternative subclass.
    """
    n_query = n_pos = n_neg = n_text = 0
    for child in node.children:
        kind = type(child)
        if len(kind.__bases__) < 1 or kind.__bases__[0] != Alternative:
            raise StructureError('Internal error. This should never happen. This is a huge bug in this program.')
        if kind == Query:
            n_query += 1
        elif kind == Positive:
            n_pos += 1
        elif kind == Negative:
            n_neg += 1
        else:
            n_text += 1
    return n_query, n_pos, n_neg, n_text
def html_add_content(app, doctree, docname):
    """Append the 'verify' section and the JS configuration block to each
    HTML page; extracts an optional ``task_id`` field for INGInious.

    NOTE(review): unicode() and md5 of str(doctree) are Python-2-only
    constructs — confirm the target interpreter before porting.
    """
    # Pull the task_id out of a leading field list, then drop the list.
    field_list = doctree.next_node(nodes.field_list)
    task_id = ''
    if field_list:
        for field in field_list.traverse(nodes.field):
            field_name = field.next_node(nodes.field_name).astext()
            if field_name == 'task_id':
                task_id = field.next_node(nodes.field_body).astext()
        field_list.parent.remove(field_list)
    builder = app.builder
    if not hasattr(builder, 'format') or builder.format != 'html':
        return
    # Hash of the whole doctree, used by the JS to identify the page content.
    h = hashlib.md5(str(doctree)).hexdigest()
    title = ''
    node = doctree
    for t in doctree.traverse(nodes.title):
        title = t.children[0].astext()
        node = t.parent
        break
    section = nodes.section(ids=["checker"], name=["checker"])
    section += nodes.title(text=translations[language]['verify_title'])
    text = u'<div id="results" style="display: none;"></div>'
    if app.config.mcq_inginious_url and task_id:
        text += '<input type="submit" value="' + translations[language]['verify'] + '" id="submit" />'
    section += nodes.raw(format='html', text=text)
    node += section
    # Inline script exposing the page configuration to rst-form.js.
    js = nodes.raw(format='html')
    js += nodes.Text(u'\n<script type="text/javascript">var language = "' + unicode(language) + '";'
                     u' var upload_url = "' + unicode(app.config.mcq_upload_url) + '";'
                     u' var hash = "' + unicode(h) + '"; var title = "' + unicode(title) + '";'
                     u' var html_title = "' + unicode(app.config.html_title) + '";')
    if app.config.mcq_inginious_url and task_id:
        js += nodes.Text(u' var task_id = "' + unicode(task_id) + '"; var inginious_url = "' + unicode(app.config.mcq_inginious_url) + '";')
    js += nodes.Text(u'</script>');
    doctree += js
def latex_add_content(app, doctree, docname):
    """Wrap the whole document in a LaTeX Form environment and reserve
    vertical space before each question."""
    begin = nodes.raw(format='latex')
    begin += nodes.Text('\n\\begin{Form}')
    end = nodes.raw(format='latex')
    end += nodes.Text('\n\\end{Form}')
    doctree.insert(0, begin)
    doctree.append(end)
    for question in doctree.traverse(Question):
        question.parent.children.insert(0, nodes.raw(format='latex', text='\n\\needspace{6\\baselineskip}\n'))
def latex_shuffle(app, doctree, docname):
    """For the LaTeX builder only: pick and shuffle the alternatives shown
    for each question (nb_pos positives + enough negatives = nb_prop)."""
    builder = app.builder
    if not hasattr(builder, 'format') or builder.format != 'latex':
        return  # The rest of this function is done in JS with the HTML writer
    for q in doctree.traverse(Question):
        # Partition the question's children by kind.
        query_node = None
        pos_nodes = []
        neg_nodes = []
        textbox_node = None
        for node in q.children:
            node_type = type(node)
            if node_type == Negative:
                neg_nodes.append(node)
            elif node_type == Positive:
                pos_nodes.append(node)
            elif node_type == Query:
                query_node = node
            else:
                textbox_node = node
        # Sample nb_pos positives and (nb_prop - nb_pos) negatives, shuffle
        # the selection, then rebuild: query first, textbox (if any) last.
        children = []
        random.shuffle(pos_nodes)
        random.shuffle(neg_nodes)
        children += pos_nodes[:q.nb_pos]
        children += neg_nodes[:q.nb_prop - q.nb_pos]
        random.shuffle(children)
        children.insert(0, query_node)
        if textbox_node:
            children.append(textbox_node)
        q.children = children
def epub_add_javascript(app, doctree, docname):
    """Work around epub builders stripping JavaScript from the output."""
    builder = app.builder
    if not hasattr(builder, 'name') or not builder.name.startswith('epub'):
        return
    # Current epub3 builders does not include .js files in the .epub
    builder.media_types.update({'.js': 'text/javascript'})
    # The page.html template used does not include javascript if embedded
    builder.globalcontext['embedded'] = False
| obonaventure/cnp3 | book-2nd/mcq-ex/mcq/mcq.py | mcq.py | py | 15,086 | python | en | code | 500 | github-code | 13 |
16179911565 | import mdtraj as md
import numpy as np
from mdtraj.geometry.alignment import rmsd_qcp, compute_average_structure
from mdtraj.testing import eq
np.random.seed(52)
def test_trajectory_rmsd(get_fn):
    """md.rmsd must match the per-frame rmsd_qcp reference, parallel or not."""
    traj = md.load(get_fn('traj.h5'))
    expected = np.array([rmsd_qcp(traj.xyz[0], traj.xyz[i])
                         for i in range(traj.n_frames)])
    for parallel in [True, False]:
        actual = md.rmsd(traj, traj, 0, parallel=parallel)
        eq(actual, expected, decimal=3)
def test_precentered_1(get_fn):
    # test rmsd against the numpy version, using the same trajectory
    # as target and reference
    t1 = md.load(get_fn('traj.h5'), stride=10)
    t2 = md.load(get_fn('traj.h5'), stride=10)
    # don't center t1, and use it without precentered
    # explicitly center t2, and use *with* precentered
    for parallel in [True, False]:
        t2.center_coordinates()
        eq(t1.n_frames, t2.n_frames)
        for i in range(t1.n_frames):
            # Brute-force reference: RMSD of every frame against frame i.
            ref = np.zeros(t1.n_frames)
            for j in range(t1.n_frames):
                ref[j] = rmsd_qcp(t1.xyz[j], t1.xyz[i])
            val1 = md.rmsd(t1, t1, i, parallel=parallel, precentered=False)
            val2 = md.rmsd(t2, t2, i, parallel=parallel, precentered=True)
            # Both code paths must agree with the reference and each other.
            eq(ref, val1, decimal=3)
            eq(val1, val2)
def test_precentered_2(get_fn):
    # test rmsd against the numpy version, using the difference
    # trajectories as target and reference
    t1_a = md.load(get_fn('traj.h5'), stride=10)
    t2_a = md.load(get_fn('traj.h5'), stride=10)
    t1_b = md.load(get_fn('traj.h5'), stride=10)
    t2_b = md.load(get_fn('traj.h5'), stride=10)
    # don't center t1, and use it without precentered
    # explicitly center t2, and use *with* precentered
    t2_a.center_coordinates()
    t2_b.center_coordinates()
    for parallel in [True, False]:
        for i in range(t1_b.n_frames):
            # Brute-force reference: frames of the 'a' copy vs frame i of 'b'.
            ref = np.zeros(t1_a.n_frames)
            for j in range(t1_a.n_frames):
                ref[j] = rmsd_qcp(t1_a.xyz[j], t1_b.xyz[i])
            val1 = md.rmsd(t1_a, t1_b, i, parallel=parallel, precentered=False)
            val2 = md.rmsd(t2_a, t2_b, i, parallel=parallel, precentered=True)
            eq(ref, val1, decimal=3)
            eq(val1, val2, decimal=4)
def test_superpose_0(get_fn):
    """After superposing onto frame 0, the plain displacement RMSD of each
    frame equals what md.rmsd reported before superposing."""
    traj = md.load(get_fn('traj.h5'))
    reference_rmsd = md.rmsd(traj, traj, 0)
    traj.superpose(traj, 0)
    displ_rmsd = np.zeros(traj.n_frames)
    for i, frame in enumerate(traj.xyz):
        delta = frame - traj.xyz[0]
        displ_rmsd[i] = (delta ** 2.0).sum(1).mean() ** 0.5
    eq(reference_rmsd, displ_rmsd, decimal=5)
def test_superpose_1():
    # make one frame far from the origin
    reference = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
    reference_xyz = reference.xyz.copy()
    for indices in [None, np.arange(90)]:
        # make another trajectory in a similar rotational state
        query = md.Trajectory(xyz=reference.xyz + 0.01 * np.random.randn(*reference.xyz.shape), topology=None)
        query.superpose(reference, 0, atom_indices=indices)
        # Superposing must not modify the reference coordinates.
        assert eq(reference.xyz, reference_xyz)
        # After superposing, the query should stay near the reference's
        # center (~100 on each axis), not be moved to the origin.
        new_centers = np.mean(query.xyz[0], axis=1)
        assert 80 < new_centers[0] < 120
        assert 80 < new_centers[1] < 120
        assert 80 < new_centers[2] < 120
def test_superpose_2():
    """Superposing a trajectory must leave the reference trajectory untouched."""
    target = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
    reference = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
    reference_xyz_before = reference.xyz.copy()
    target.superpose(reference)
    target.superpose(reference, atom_indices=[1, 2, 3, 4, 5, 6, 7])
    # make sure that superposing doesn't alter the reference traj
    eq(reference.xyz, reference_xyz_before)
def test_superpose_refinds():
    # make one frame far from the origin
    normal = np.random.randn(1, 100, 3)
    normal_xyz = normal.copy()
    # 'flipped' swaps the two halves of the atom order.
    flipped = np.zeros_like(normal)
    flipped[:, :50, :] = normal[:, 50:, :]
    flipped[:, 50:, :] = normal[:, :50, :]
    flipped_xyz = flipped.copy()
    normal = md.Trajectory(xyz=normal, topology=None)
    flipped = md.Trajectory(xyz=flipped, topology=None)
    # Matching halves via ref_atom_indices is a perfect fit -> no movement.
    normal.superpose(flipped, atom_indices=np.arange(0, 50), ref_atom_indices=np.arange(50, 100))
    eq(normal.xyz, normal_xyz)
    flipped.superpose(normal, atom_indices=np.arange(50, 100), ref_atom_indices=np.arange(0, 50))
    eq(flipped.xyz, flipped_xyz)
    # Without index mapping the halves don't correspond -> coordinates change.
    normal.superpose(flipped)
    assert not np.allclose(normal.xyz, normal_xyz)
def test_rmsd_atom_indices(get_fn):
    """rmsd with atom_indices equals rmsd on trajectories restricted to them."""
    native = md.load(get_fn('native.pdb'))
    full_traj = md.load(get_fn('traj.h5'))
    subset = np.arange(10)
    dist_via_indices = md.rmsd(full_traj, native, atom_indices=subset)
    restricted = md.load(get_fn('traj.h5'))
    restricted.restrict_atoms(subset)
    native.restrict_atoms(subset)
    dist_via_restrict = md.rmsd(restricted, native)
    eq(dist_via_indices, dist_via_restrict)
def test_rmsd_ref_ainds(get_fn):
    """Mismatched reference atom indices must yield strictly larger RMSDs."""
    native = md.load(get_fn('native.pdb'))
    traj_good = md.load(get_fn('traj.h5'))
    matching = np.arange(10)
    dist_good = md.rmsd(traj_good, native, atom_indices=matching,
                        ref_atom_indices=matching)
    mismatched = np.arange(10, 20)
    traj_bad = md.load(get_fn('traj.h5'))
    dist_bad = md.rmsd(traj_bad, native, atom_indices=matching,
                       ref_atom_indices=mismatched)
    assert np.all(dist_bad > dist_good)
def test_average_structure(get_fn):
    """The average structure is closer (total RMSD) to all frames than frame 0."""
    traj = md.load(get_fn('frame0.dcd'), top=get_fn('frame0.pdb'))
    average = compute_average_structure(traj.xyz)
    # The mean RMSD to the average structure should be less than to any individual frame.
    total_to_frame0 = sum(rmsd_qcp(traj.xyz[0], frame) for frame in traj.xyz)
    total_to_average = sum(rmsd_qcp(average, frame) for frame in traj.xyz)
    assert total_to_average < total_to_frame0
def test_trajectory_rmsf(get_fn):
    t = md.load(get_fn('traj.h5'))
    for parallel in [True, False]:
        calculated = md.rmsf(t, t, 0, parallel=parallel)
        # Reference: align to frame 0, then fluctuation about the mean xyz.
        t.superpose(t, 0)
        avg_xyz = np.average(t.xyz, axis=0)
        reference = np.sqrt(3*np.mean((t.xyz - avg_xyz)**2, axis=(0, 2)))
        assert np.sum(np.abs(calculated)) > 0  # check trivial error
        eq(calculated, reference, decimal=3)
def test_trajectory_rmsf_aligned(get_fn):
    t = md.load(get_fn('traj.h5'))
    for parallel in [True, False]:
        # testing different set of atoms for alignment and RMSF calculation
        atom_indices = range(int(t.n_atoms/2))
        rmsf_indices = range(int(t.n_atoms/2), t.n_atoms)
        # Align on the first half; compute RMSF of the second half only.
        t.superpose(t, 99, atom_indices=atom_indices, parallel=False)
        calculated = md.rmsf(t, None, atom_indices=rmsf_indices, parallel=parallel)
        avg_xyz = np.average(t.xyz, axis=0)
        reference = np.sqrt(3*np.mean((t.xyz - avg_xyz)**2, axis=(0, 2)))[rmsf_indices]
        assert np.sum(np.abs(calculated)) > 0  # check trivial error
        eq(calculated, reference, decimal=3)
def test_rmsd_atom_indices_vs_ref_indices():
    n_frames = 1
    n_atoms_1 = 1
    n_atoms_2 = 2
    # Topology 1: a single one-atom chain.
    top_1 = md.Topology()
    top_1.add_chain()
    top_1.add_residue('RS2', top_1.chain(0))
    top_1.add_atom('A2', 'H', top_1.residue(0))
    # Topology 2: two one-atom chains; the 2nd chain matches topology 1.
    top_2 = md.Topology()
    top_2.add_chain()
    top_2.add_residue('RS1', top_2.chain(0))
    top_2.add_atom('A1', 'H', top_2.residue(0))
    top_2.add_chain()
    top_2.add_residue('RS2', top_2.chain(1))
    top_2.add_atom('A2', 'H', top_2.residue(1))
    # here the 2nd chain in the top_2 is rmsd-compatible to the one in the top_1 so we should be able to compute rsmd between them.
    trj_1 = md.Trajectory(np.random.RandomState(0).randn(n_frames, n_atoms_1, 3), top_1)
    trj_2 = md.Trajectory(np.random.RandomState(0).randn(n_frames, n_atoms_2, 3), top_2)
    md.rmsd(trj_1, trj_2, atom_indices=[0], ref_atom_indices=[1])
    md.rmsd(trj_2, trj_1, atom_indices=[1], ref_atom_indices=[0])
    # is this don't fail then it's good no matter the result
| mdtraj/mdtraj | tests/test_rmsd.py | test_rmsd.py | py | 7,876 | python | en | code | 505 | github-code | 13 |
23372452389 | import sys
sys.path.append('../../')
from slm.slm_classifier import SLMClassifier
from numpy import vstack, log
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.validation import check_X_y, column_or_1d
from common.metrics import METRICS_DICT
from slm.base_slm import BaseSLM
from collections import defaultdict
from inspect import signature
from operator import attrgetter
import timeit
from sklearn.utils.validation import check_random_state, check_array
from common.activation_functions import ACTIVATION_FUNCTIONS_DICT
from common.learning_step_functions import LEARNING_STEP_FUNCTIONS_DICT
from common.metrics import METRICS_DICT
from common.metrics import calculate_accuracy
from common.conv_neural_network_builder import ConvNeuralNetworkBuilder
from common.stopping_criteria import STOPPING_CRITERION_FUNCTIONS_DICT
from common.utilities import is_better
from dslm_new.algorithm.deep_semantic_learning_machine.initialiation import init_conv_standard
from dslm_new.algorithm.deep_semantic_learning_machine.mutation import simple_conv_mutation
class DSLMApp(SLMClassifier):
    """Deep Semantic Learning Machine classifier.

    Extends SLMClassifier with convolutional-layer support: a convolutional
    initialization (init_conv_standard), a convolutional mutation operator
    (simple_conv_mutation), and activation-function configuration for the
    convolutional layers.
    """
    def __init__(self, sample_size, neighborhood_size, max_iter, learning_step, learning_step_solver, activation_function, activation_function_for_hidden_layers, activation_function_for_conv_layers,prob_activation_conv_layers,
                 prob_activation_hidden_layers, mutation_operator, init_minimum_layers, init_max_layers, init_maximum_neurons_per_layer, maximum_new_neurons_per_layer,
                 maximum_neuron_connection_weight, maximum_bias_weight, random_state, verbose, stopping_criterion, edv_threshold, tie_threshold, sparse,
                 minimum_sparseness, maximum_sparseness, early_stopping, validation_fraction, tol, n_iter_no_change, metric, prob_skip_connection):
        """Store all hyperparameters, delegate the shared ones to
        SLMClassifier, and resolve the conv-specific operators."""
        # NOTE(review): these assignments duplicate what super().__init__
        # presumably sets as well — confirm whether both are needed.
        self.sample_size=sample_size
        self.neighborhood_size=neighborhood_size
        self.max_iter=max_iter
        self.learning_step=learning_step
        self.learning_step_solver=learning_step_solver
        self.activation_function=activation_function
        self.activation_function_for_hidden_layers=activation_function_for_hidden_layers
        self.activation_function_for_conv_layers=activation_function_for_conv_layers
        self.prob_activation_hidden_layers=prob_activation_hidden_layers
        self.prob_activation_conv_layers=prob_activation_conv_layers
        self.mutation_operator=mutation_operator
        self.init_minimum_layers=init_minimum_layers
        self.init_max_layers=init_max_layers
        self.init_maximum_neurons_per_layer=init_maximum_neurons_per_layer
        self.maximum_new_neurons_per_layer=maximum_new_neurons_per_layer
        self.maximum_neuron_connection_weight=maximum_neuron_connection_weight
        self.maximum_bias_weight=maximum_bias_weight
        self.random_state=random_state
        self.verbose=verbose
        self.stopping_criterion=stopping_criterion
        self.edv_threshold=edv_threshold
        self.tie_threshold=tie_threshold
        self.sparse=sparse
        self.minimum_sparseness=minimum_sparseness
        self.maximum_sparseness=maximum_sparseness
        self.early_stopping=early_stopping
        self.validation_fraction=validation_fraction
        self.tol=tol
        self.n_iter_no_change=n_iter_no_change
        self.metric=metric
        self.prob_skip_connection=prob_skip_connection
        super().__init__(sample_size,
                         neighborhood_size,
                         max_iter,
                         learning_step,
                         learning_step_solver,
                         activation_function,
                         activation_function_for_hidden_layers,
                         prob_activation_hidden_layers,
                         mutation_operator,
                         init_minimum_layers,
                         init_max_layers,
                         init_maximum_neurons_per_layer,
                         maximum_new_neurons_per_layer,
                         maximum_neuron_connection_weight,
                         maximum_bias_weight,
                         random_state,
                         verbose,
                         stopping_criterion,
                         edv_threshold,
                         tie_threshold,
                         sparse,
                         minimum_sparseness,
                         maximum_sparseness,
                         early_stopping,
                         validation_fraction,
                         tol,
                         n_iter_no_change,
                         metric,
                         prob_skip_connection)
        # Resolve the conv-specific mutation operator and activation ids.
        if self.mutation_operator == 'simple_conv_mutation':
            self._mutation_operator = simple_conv_mutation
        if self.activation_function_for_conv_layers:
            self.cnn_activation_functions_ids = self._get_activation_functions_ids(self.activation_function_for_conv_layers)
        else:
            # NOTE(review): attribute name 'prob_activation_cnn_layers'
            # differs from 'prob_activation_conv_layers' set above — confirm
            # which one downstream code reads.
            self.prob_activation_cnn_layers = None
            self.cnn_activation_functions_ids = None
    def fit(self, X, y, time_print=False):
        """Fit the model to a data matrix X and a target matrix y.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            The input data.
        y : array-like, shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels).
        time_print : bool, optional
            If True, print per-phase wall-clock timings.
        Returns
        -------
        self : returns a trained SLM model.
        """
        # Validate X and y arrays:
        X, y = self._validate_input_and_target(X, y)
        # Get number of neurons for output layer (overwrites super class' attributes number_output_neurons):
        self._number_output_neurons = self._get_number_output_neurons(y)
        # Set validation sets for early_stopping:
        if self.early_stopping:
            # Should not stratify in multilabel classification:
            if self._number_output_neurons == 1:
                X, X_val, y, y_val = train_test_split(X, y, random_state=self._random_state, test_size=self.validation_fraction, stratify=y)
            else:
                X, X_val, y, y_val = train_test_split(X, y, random_state=self._random_state, test_size=self.validation_fraction, stratify=None)
                y_val = self._label_binarizer.inverse_transform(y_val)
        else:
            X_val, y_val = None, None
        # Update input_data and target_vector attributes:
        self._input_data = X
        self._target_vector = y.astype(float)
        # Check validation datasets for early_stopping:
        if X_val is not None and y_val is not None:
            self._X_validation = X_val
            self._y_validation = y_val
        # Create list with input neurons and passes the input data as semantics:
        input_layer = ConvNeuralNetworkBuilder.create_input_neurons(self._input_data)
        # Generate N initial random NNs:
        for _ in range(self.sample_size):
            # Note: when neural networks are added to the sample, their semantics and predictions have been already calculated
            self._sample.append(init_conv_standard(self._input_data, self._target_vector, input_layer, self._random_state)) #todo add here much more parameters from main file ...
        # Get current best NN:
        self._current_best = self._evaluate_sample(self._target_vector)
        self._free_components()
        if self.verbose:
            self._print_epoch()
        # Initialize early_stopping process:
        if self.early_stopping:
            self._update_no_improvement_count()
        # Main loop (run N-1 epochs, with N = self.max_iter). The first epoch is considered to be the initialization previously done:
        for self._current_iteration in range(1, self.max_iter):
            iteration_start_time = timeit.default_timer()
            # Empty sample:
            self._sample.clear()
            # Apply GSM-NN mutation on the solutions sample:
            mutation_on_sample_start_time = timeit.default_timer()
            self._sample = self._apply_mutation_on_sample()
            mutation_on_sample_time = timeit.default_timer() - mutation_on_sample_start_time
            if time_print:
                print('\n\tmutation on sample = %.3f seconds' % (mutation_on_sample_time))
            start_time = timeit.default_timer()
            # Get new candidate to best NN:
            self._next_best = self._evaluate_sample(self._target_vector)
            time = timeit.default_timer() - start_time
            if time_print:
                print('\n\t_evaluate_sample = %.3f seconds' % (time))
            # Compare offspring with parent solution (which is stored in self.current_best).
            # This step is only required if 'EDV' is used as a stopping criterion:
            if self.stopping_criterion == 'edv':
                for offspring in self._sample:
                    if is_better(offspring.get_loss(), self._current_best.get_loss(),
                                 greater_is_better=self._greater_is_better):
                        offspring.update_parent(is_better_than_parent=True)
                    else:
                        offspring.update_parent(is_better_than_parent=False)
            # Check stopping criterion:
            if self.stopping_criterion:
                stop_training = self._apply_stopping_criterion()
                if stop_training:
                    print('Training process stopped earlier due to', self.stopping_criterion.upper(), 'criterion')
                    self._print_epoch()
                    # Training process stops (exit from main loop):
                    break
            # Check if there is a new best solution:
            self._get_best_solution()
            self._free_components_2()
            # Check early stopping:
            if self.early_stopping:
                stop_training = self._update_no_improvement_count()
                if stop_training:
                    print('Early stopping of training process.')
                    self._current_best = self._last_best_solution
                    self._print_epoch()
                    # Training process stops (exit from main loop):
                    break
            iteration_time = timeit.default_timer() - iteration_start_time
            if time_print:
                print('\n\titeration time = %.3f seconds\n' % (iteration_time))
            if self.verbose:  # and (self._current_iteration == 0 or self._current_iteration % 10 == 0 or self._current_iteration == self.max_iter - 1):
                # Print only every 10 generations or in the last iteration:
                self._print_epoch()
        self._current_iteration += 1
        # Store best solution as main estimator and clear sample:
        self.estimator_ = self._current_best
        self._sample.clear()
        self._current_best = None
        self._next_best = None
        if self.verbose:
            print(self.estimator_.get_topology())
        return self.estimator_
    def _apply_mutation_on_sample(self):
        """Fills self._sample with N offspring generated from the current best solution, using
        a mutation operator, and with N being equal to self.neighborhood_size.
        Returns
        -------
        sample : array of shape (num_neighbors,)
            Sample containing N mutated (and fully functional) neural networks.
        """
        # Delta target is only needed when the learning step is optimized.
        delta_target = self._get_delta_target() if self.learning_step == 'optimized' else None
        # Make N copies/clones of current best solution and apply mutation on each copy to obtain the offspring:
        for _ in range(self.neighborhood_size):
            parent_neural_network = ConvNeuralNetworkBuilder.clone_neural_network(self._current_best)
            # Generate one child using the mutation operator:
            child = self._mutation_operator(parent_neural_network, self._input_data, self._random_state,
                                            self.learning_step, self._sparseness, self.maximum_new_neurons_per_layer,
                                            self.maximum_neuron_connection_weight, self.maximum_bias_weight,
                                            self._target_vector, delta_target, self._learning_step_function,
                                            self._hidden_activation_functions_ids, self.cnn_activation_functions_ids,
                                            self.prob_activation_hidden_layers, self.prob_activation_conv_layers)
            self._sample.append(child)
        return self._sample
| OnHoliday/DSLM_NOVA | dslm_new/algorithm/deep_semantic_learning_machine/deep_semantic_learning_machine_app.py | deep_semantic_learning_machine_app.py | py | 12,701 | python | en | code | 0 | github-code | 13 |
86407521210 | """
data:2017-7-10
author:alancheg
本程序的主要作用是计算 kmeans 聚类后的数据中心点
输入:需要聚类的内容,聚类的中心点个数
输出:聚类的中心点坐标
"""
import csv
from sklearn.cluster import KMeans
import numpy as np
from time import time
CLUSTER_CENTER = 8
# 数据的格式
# img_name,feature_name,feature_num,cor_x,cor_y
def data_generate(path):
    """Load (x, y) coordinate pairs from a feature CSV file.

    The file must have a header row followed by rows of the form
    ``img_name,feature_name,feature_num,cor_x,cor_y``; only the last two
    columns are used.

    Improvements over the original: the whole file is no longer
    materialized into a list just to count rows, the unused row counter is
    gone, and the header is skipped idiomatically.

    :param path: path to the CSV file.
    :return: tuple ``(data, data_length)`` where ``data`` is an int array of
        shape (n, 2) holding the (x, y) coordinates and ``data_length`` is n.
    """
    data = []
    with open(path, "r") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            # row = [img_name, feature_name, feature_num, cor_x, cor_y]
            data.append([int(row[3]), int(row[4])])
    return np.asarray(data), len(data)
if __name__ == "__main__":
    # Hard-coded input: CSV of image feature coordinates (see data_generate).
    path = r"C:\Users\alan\Desktop\index_project\data\source.csv"
    data, data_length = data_generate(path)
    start_time = time()
    # Cluster the (x, y) coordinates into CLUSTER_CENTER groups and report
    # the wall-clock time, labels, and cluster centers.
    kmeans = KMeans(n_clusters=CLUSTER_CENTER, random_state=0).fit(data)
    end_time = time()
    print("cluster_time = " + str(end_time - start_time))
    print(kmeans.labels_)
    print(kmeans.cluster_centers_)
| alancheg/VideoIndex | data_cluster.py | data_cluster.py | py | 1,212 | python | en | code | 0 | github-code | 13 |
28541524219 | import serial # 引入serial包
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1) # 打开端口
def main_loop():
    """Main loop: repeatedly send a greeting to the Arduino and print the
    data it sends back."""
    while True:
        str_hello = "Hello Arduino,I am Raspberry."
        b_hello = bytes(str_hello, encoding='utf-8')  # encode the string as bytes
        ser.write(b_hello)  # send the data over the serial port
        response = ser.readall()  # read back the reply
        print('接收到的返回数据是:', response)
if __name__ == '__main__':
    try:
        main_loop()
    except KeyboardInterrupt:
        # Ctrl-C ends the loop.
        print("程序结束!")
    finally:
        # Always release the serial port, even on error.
        ser.close()
| cocpy/raspberrypi4 | 第12章/5/serial_test.py | serial_test.py | py | 624 | python | en | code | 0 | github-code | 13 |
26000569042 | from tkinter import *
import random
# the game data for the initial game state
def init():
    """Reset the shared game state to its starting values."""
    data.playerX = 250
    data.playerY = 550
    data.circles = []  # each circle is stored as [x, y, r, color]
    data.gameOver = False
    data.time = 0
    data.score = 0
# events updating the game data
def keyPressed(event):
    """Move the player 5px left or right, staying within the window."""
    key = event.keysym
    if key == "Right" and data.playerX < 550:
        data.playerX += 5
    elif key == "Left" and data.playerX > 0:
        data.playerX -= 5
# the game data updating the game state
def timerFired():
    """Advance one tick: update score, spawn/move circles, detect collisions."""
    if data.gameOver:
        return
    data.time += 1
    data.score += 5
    if data.time % 3 == 0:
        createNewCircle()
    moveCircle()
    for cx, cy, radius, _color in data.circles:
        if checkCollision(data.playerX, data.playerY, cx, cy, 10, radius):
            data.gameOver = True
def createNewCircle():
    """Spawn a random falling circle at the top of the window."""
    # Random draws happen in the same order as before (x, radius, color).
    cx = random.randint(0, 550)
    cy = 0
    radius = random.randint(20, 40)
    fill = random.choice(
        ["orange", "yellow", "green", "blue", "purple", "cyan", "magenta"])
    data.circles.append([cx, cy, radius, fill])
def moveCircle():
    """Drop every circle 10px further down the screen."""
    for circ in data.circles:
        circ[1] += 10
def checkCollision(x1, y1, x2, y2, r1, r2):
    """Return True when the circle at (x1, y1) with radius r1 touches or
    overlaps the circle at (x2, y2) with radius r2.

    Compares squared distances instead of taking a square root: cheaper and
    exact for integer coordinates (assumes non-negative radii, as used here).
    """
    return (x2 - x1) ** 2 + (y2 - y1) ** 2 <= (r1 + r2) ** 2
# the game state updating what is drawn
def redrawAll(canvas):
    """Draw the player, all circles, the score, and the game-over banner."""
    canvas.create_oval(data.playerX - 10, data.playerY - 10,
                       data.playerX + 10, data.playerY + 10,
                       fill="red")
    for cx, cy, radius, color in data.circles:
        canvas.create_oval(cx - radius, cy - radius, cx + radius, cy + radius, fill=color)
    canvas.create_text(300, 30, text="Score: %d" % data.score, font="Arial 30 bold")
    if data.gameOver:
        canvas.create_text(300, 250, text="Game Over", font="Arial 20")
# animation setup code below here #
class Struct(object): pass  # bare namespace object for shared mutable state
data = Struct()  # module-wide game state, mutated by the handlers above
def run(width=600, height=600):
    """Create the Tk window and wire the event/timer loop; blocks until the
    window is closed."""
    def redrawAllWrapper(canvas):
        # Clear, redraw everything, and flush to screen.
        canvas.delete(ALL)
        redrawAll(canvas)
        canvas.update()
    def keyPressedWrapper(event, canvas):
        keyPressed(event)
        redrawAllWrapper(canvas)
    def timerFiredWrapper(canvas):
        timerFired()
        redrawAllWrapper(canvas)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas)
    # Set up data and call init
    data.width = width
    data.height = height
    data.timerDelay = 200 # milliseconds
    init()
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas))
    timerFiredWrapper(canvas)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
# Launch the game (blocks until the window is closed).
run()
| Teknowledge/Curriculum | past_iterations/02_Homewood_Y/02_drawing/09_solution_advanced_circle_clash.py | 09_solution_advanced_circle_clash.py | py | 2,851 | python | en | code | 0 | github-code | 13 |
3235480924 | import numpy as np
import pandas as pd
from scipy.optimize import minimize
import matplotlib.pyplot as plt
def tgt_fn(x, matches):
    """Negative log-likelihood minimized to obtain the ELO ratings.

    :param x: current ratings (object 0 is pinned at rating 0 and excluded)
    :param matches: (m, 3) array of [id1, id2, outcome] rows
    """
    ratings = np.hstack([0, x])
    win_prob = 1 / (1 + np.exp(ratings[matches[:, 0]] - ratings[matches[:, 1]]))
    log_lik = matches[:, 2] * np.log(win_prob) + (1 - matches[:, 2]) * np.log(1 - win_prob)
    return -np.sum(log_lik)
def tgt_grad(x, matches):
    """Gradient of the negative log-likelihood used for the ELO ratings.

    :param x: current ratings (object 0 is pinned at rating 0 and excluded)
    :param matches: (m, 3) array of [id1, id2, outcome] rows
    """
    xr = np.hstack([0, x])
    id1 = matches[:, 0].astype(int)
    id2 = matches[:, 1].astype(int)
    prob = 1 / (1 + np.exp(xr[id1] - xr[id2]))
    dt = prob - matches[:, 2]
    n = len(x)
    # Vectorized replacement of the original O(n*m) Python loop: accumulate
    # dt per object id on each side of the match in one bincount pass.
    as_second = np.bincount(id2, weights=dt, minlength=n + 1)[1:n + 1]
    as_first = np.bincount(id1, weights=dt, minlength=n + 1)[1:n + 1]
    # Original: grad_x[i] = -sum(dt | id2 == i+1) + sum(dt | id1 == i+1),
    # and the function returns -grad_x.
    return as_second - as_first
def test_toy():
    """Sanity-check the ELO derivation on a synthetic dataset with known
    latent strengths: plots estimated vs true strengths and prints the
    empirical win rate per object."""
    # TOY DATASET
    n = 10    # number of objects
    m = 1000  # number of games
    z = np.random.uniform(-1, 1, n)  # latent strengths of objects
    games = np.zeros([m, 3], dtype=int)
    for i in range(m):
        id1 = np.random.randint(0, n)
        id2 = np.random.randint(0, n)
        while id1 == id2:
            # BUG FIX: re-roll over the full range; the original used
            # randint(0, 9), which could never re-draw object 9.
            id2 = np.random.randint(0, n)
        win = np.random.uniform(0, 1) < 1 / (1 + np.exp(z[id1] - z[id2]))
        games[i, :] = id1, id2, win
    x0 = np.zeros(n - 1)
    res = minimize(tgt_fn, x0, games, method='L-BFGS-B', jac=tgt_grad)
    # Estimated ratings should correlate with the latent strengths.
    plt.plot(z, np.hstack([0, res['x']]), '.')
    plt.show()
    for i in range(n):
        print(z[i], np.mean(
            np.hstack([1 - games[np.where(games[:, 0] == i), 2][0],
                       games[np.where(games[:, 1] == i), 2][0]])))
if __name__ == '__main__':
    # PLOT RATINGS: fit ELO scores for plot images from pairwise matches.
    scores = pd.read_csv('data/final_scores.csv')
    all_matches = pd.read_csv('data/all_matches.csv')
    # Map plot names onto the integer ids the optimizer works with.
    names_to_idx = dict(zip(scores['plot_name'].values, scores.index.values))
    all_matches['id1'] = all_matches['name1'].map(names_to_idx)
    all_matches['id2'] = all_matches['name2'].map(names_to_idx)
    matches = all_matches[['id1', 'id2', 'win']].values
    x0 = np.zeros(scores.shape[0] - 1)
    # Logistic regression for the ELO ratings (first plot pinned at 0).
    res = minimize(tgt_fn, x0, matches, method='L-BFGS-B', jac=tgt_grad)
    ratings = np.hstack([0, res['x']])
    # Mean rating per number of wins -> quick sanity check of the fit.
    for i in range(10):
        print(i, np.mean(ratings[scores[scores['score'] == i].index.values]))
    # Persist the ratings next to the raw scores.
    scores['elo'] = ratings
    scores.to_csv('data/scores_elo.csv')
| AndrejHafner/how-good-is-my-plot | src/plot_quality_prediction/elo_ratings.py | elo_ratings.py | py | 2,825 | python | en | code | 4 | github-code | 13 |
40343154083 | # -*- coding: utf-8 -*-
import telnetlib, sys, select
from django.http import HttpResponse
from .auth import login_check
def _pre_process_cmd(cmd):
    """Normalize a command (bytes) so that it ends with CRLF (b"\\r\\n").

    BUG FIX: the original compared ``cmd[-1] == b"\\r"`` — in Python 3,
    indexing bytes yields an int, so those comparisons were always False
    and any command ending in a bare "\\r" or "\\n" got a second line
    terminator appended.  It also raised IndexError on empty input.
    """
    if cmd.endswith(b"\r\n"):
        return cmd
    if cmd.endswith(b"\r"):
        return cmd + b"\n"
    if cmd.endswith(b"\n"):
        return cmd[:-1] + b"\r\n"
    return cmd + b"\r\n"
class TelnetConsole(object):
    """Bridge between a websocket client and a telnet console.

    ``run()`` pumps data in both directions until either side closes.
    Subclasses customise behaviour through the three ``on*`` template
    methods.
    """

    def __init__(self, wsInst, host, port):
        self.wsInst = wsInst
        self.host = host
        self.port = port
        self.consoleInst = None

    def close(self):
        """Release the telnet connection and the websocket, and reset state."""
        if self.consoleInst:
            self.consoleInst.close()
            self.consoleInst = None
        if self.wsInst:
            self.wsInst.close()
            self.wsInst = None
        self.host = ""
        self.port = 0

    def run(self):
        """Connect to the telnet console and relay traffic in both
        directions until one side closes or a handler returns False."""
        try:
            self.consoleInst = telnetlib.Telnet(self.host, self.port)
        except Exception:
            self.wsInst.send("服务器连接失败!\n")
            self.close()
            return
        self.onConnectedToConsole()
        try:
            tlfd = self.consoleInst.fileno()
            wsfd = self.wsInst.protocol.sock.fileno()
            rlist = [tlfd, wsfd]
            while True:
                rl, wl, xl = select.select(rlist, [], [], 0.1)
                if tlfd in rl:
                    data = self.consoleInst.read_very_eager()
                    if not data:
                        break  # socket closed
                    if not self.onReceivedConsoleData(data):
                        break
                if wsfd in rl:
                    data = self.wsInst.read()
                    if data is None:
                        break  # socket closed
                    if len(data) == 0:
                        continue
                    if not self.onReceivedClientData(data):
                        break
        except Exception:
            # BUG FIX: the original bare "except:" also swallowed
            # SystemExit/KeyboardInterrupt; report and fall through to close.
            sys.excepthook(*sys.exc_info())
        self.close()
        # NOTE(review): unreachable leftover test code after the final
        # return was removed.

    def onConnectedToConsole(self):
        """Template method: called once the telnet console is connected."""
        pass

    def onReceivedConsoleData(self, data):
        """Template method: forward console output to the websocket client.

        Return False to stop the relay loop.
        """
        self.wsInst.send(data)
        return True

    def onReceivedClientData(self, data):
        """Template method: forward client input to the telnet console.

        ":quit" closes the session; anything else is normalized to CRLF
        and written to the console.  Return False to stop the relay loop.
        """
        if data == ":quit":
            self.wsInst.close()
            return False
        self.consoleInst.write(_pre_process_cmd(data))
        return True
class ProfileConsole(TelnetConsole):
    """Telnet console specialised for performance profiling.

    On connect it authenticates with the stored password and immediately
    issues the profiling command; a lone ":" typed by the client ends the
    session.
    """

    def __init__(self, wsInst, host, port, command, sec, password):
        # BUG FIX (DRY): reuse the base-class initialisation instead of
        # duplicating the wsInst/host/port/consoleInst assignments.
        super().__init__(wsInst, host, port)
        self.cmd = command.encode('utf-8')
        self.sec = sec.encode('utf-8')
        self.password = password.encode('utf-8')

    def onConnectedToConsole(self):
        """Authenticate, then start the profiling command (e.g. pytickprofile)."""
        self.consoleInst.write(self.password + b"\r\n")
        self.consoleInst.write(b":" + self.cmd + b" " + self.sec + b"\r\n")

    def onReceivedConsoleData(self, data):
        """Forward profiler output to the websocket client."""
        self.wsInst.send(data)
        return True

    def onReceivedClientData(self, data):
        """A lone ":" from the client terminates the profiling session."""
        if data == ":":
            self.wsInst.close()
            return False
        self.consoleInst.write(_pre_process_cmd(data))
        return True
| kbengine/kbengine | kbe/tools/server/webconsole/WebConsole/telnet_console.py | telnet_console.py | py | 3,306 | python | en | code | 5,336 | github-code | 13 |
24183689331 | import copy
import logging
import os
import pathlib
import sys
from unittest import TestCase
from unittest.mock import patch
from ws_sdk.client import WSClient
class TestWSClient(TestCase):
    """Placeholder test case; configures DEBUG logging to stdout on import."""
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
class TestWS(TestCase):
    """Unit tests for WSClient with the Unified Agent config mocked out."""

    # 64-char dummy token matching the expected key format.
    valid_token = "abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12"

    @patch('ws_sdk.client.WSClient.is_latest_ua_semver')
    @patch('ws_sdk.ws_utilities.convert_ua_conf_f_to_vars')
    def setUp(self, mock_convert_ua_conf_f_to_vars, mock_is_latest_ua_semver):
        logging.basicConfig(level=logging.DEBUG)
        pathlib.Path("ws_constants.DEFAULT_UA_PATH").mkdir(parents=True, exist_ok=True)
        mock_convert_ua_conf_f_to_vars.return_value.apiKey = None
        mock_convert_ua_conf_f_to_vars.return_value.userKey = None
        mock_convert_ua_conf_f_to_vars.return_value.ws_url = None
        mock_is_latest_ua_semver.return_value = True
        self.client = WSClient(user_key=os.environ.get('WS_USER_KEY', self.valid_token),
                               token=os.environ.get('WS_ORG_TOKEN', self.valid_token))

    @patch('ws_sdk.client.WSClient._execute_ua')
    def test_get_local_ua_semver(self, mock_execute_ua):
        ua_ret_t = "21.6.3"
        mock_execute_ua.return_value = (0, ua_ret_t)
        res = self.client.get_local_ua_semver()
        self.assertEqual(res, ua_ret_t)

    def test_add_scan_comment(self):
        # BUG FIX: this method was defined twice with an identical body;
        # the second definition silently shadowed the first.
        key = "key1"
        value = "value1"
        compared_ua_conf = copy.copy(self.client.ua_conf)
        self.client.add_scan_comment(key=key, value=value)
        self.assertEqual(f"{compared_ua_conf.scanComment};key1:value1",
                         self.client.ua_conf.scanComment)
if __name__ == '__main__':
    # BUG FIX: "TestCase.unittest.main()" raised AttributeError (TestCase has
    # no "unittest" attribute); run the standard unittest entry point instead.
    import unittest
    unittest.main()
| whitesource-ps/ws-sdk | ws_sdk/tests/test_client.py | test_client.py | py | 2,080 | python | en | code | 17 | github-code | 13 |
14257807133 | import codecs
import csv
from os import listdir
from os.path import isfile, join
def get_available_files():
    """Return the names of the .csv and .ttl files found in the "files/"
    directory, as two separate lists (csv_files, ttl_files)."""
    entries = [name for name in listdir("files") if isfile(join("files", name))]
    csv_files = [name for name in entries if name.endswith(".csv")]
    ttl_files = [name for name in entries if name.endswith(".ttl")]
    return csv_files, ttl_files
def get_file_content(file_name):
    """Return the text of "files/<file_name>" with 1-based, zero-padded
    line numbers prepended to each line (for nicer display), or an error
    message when the file cannot be loaded."""
    try:
        with codecs.open("files/" + file_name, "r", encoding="utf8") as handle:
            # Split on line separators to get a list of lines.
            lines = handle.read().split("\n")
    except FileNotFoundError:
        return "Could not load file '{}'".format(file_name)
    # Rejoin with line separators, numbering each line.
    return '\n'.join("{:02d} {}".format(number + 1, text)
                     for number, text in enumerate(lines))
def triplify(title_line_number, data_first_line_number, data_last_line_number, separator,
             data_prefix, predicate_prefix, input_file, output_file):
    """Convert a CSV file from "files/" into a Turtle (.ttl) file.

    The line-number arguments may be empty/falsy, in which case they are
    auto-detected (title line: first non-empty row; data range: first
    non-empty row after the title through the end of the file).  A title
    line number of "0" means the file has no title line.

    Returns a (success: bool, message: str) tuple.
    """
    # Both files live in the "files/" directory.
    input_file = "files/" + input_file
    output_file = "files/" + output_file
    if not isfile(input_file):
        # Only happens if the user manually removed the selected file.
        return False, "Selected input file was not found"
    with codecs.open(input_file, "r", encoding="utf8") as csv_file:
        rows = list(csv.reader(csv_file, delimiter=separator))
    if not rows:
        return False, "Input file is empty"
    title_line = None
    if not title_line_number:
        # A title line exists but its position is unknown: take the first
        # non-empty row, starting from line 1.
        line = 1
        while not rows[line - 1]:
            line += 1
        title_line_number = line
        title_line = rows[title_line_number - 1]
    else:
        title_line_number = int(title_line_number)  # parse the value
        if title_line_number < 0:
            return False, "Invalid title line number: cannot be negative"
        if title_line_number > len(rows):
            return False, "Invalid title line number: cannot be bigger than file length"
        if title_line_number != 0:
            # 0 is the user's code for "there is no title line".
            # Shift by one: file lines are 1-based, lists are 0-based.
            title_line = rows[title_line_number - 1]
            if not title_line:
                return False, "No titles found at specified line: {}".format(title_line_number)
    if data_first_line_number:
        data_first_line_number = int(data_first_line_number)  # parse the value
        if title_line_number and data_first_line_number < title_line_number:
            return False, "Data first line should be after title line"
    else:
        # Not specified: first non-empty row after the title line
        # (or from the top of the file when there is no title line).
        line = title_line_number + 1
        while not rows[line - 1]:
            line += 1
        data_first_line_number = line
    if data_last_line_number:
        data_last_line_number = int(data_last_line_number)  # parse the value
        if data_last_line_number < data_first_line_number:
            return False, "Data last line should be after data first line"
    else:
        data_last_line_number = len(rows)  # default: last line of the file
    data = rows[data_first_line_number - 1:data_last_line_number]  # data range recovery
    return generate_output_file(title_line, data, output_file, data_prefix, predicate_prefix)
def generate_output_file(title_line, data, output_file, data_prefix, predicate_prefix):
    """Write the Turtle triples for `data` into `output_file`.

    Each row becomes one blank-node subject (rows have no natural name
    beyond their line number).  Non-numeric cell values are turned into
    URL-friendly "d:" resources; numeric values are kept as literals.
    Returns a (success: bool, message: str) tuple.
    """
    indent = "    "  # tabulation preceding each predicate/object pair
    triplets = []
    for row_index, row in enumerate(data):
        # When a title line exists, every data row must match its width.
        if title_line and len(row) != len(title_line):
            return False, "Invalid columns number for data line n°{}".format(row_index + 1)
        attributes = []
        for col_index, value in enumerate(row):
            if not check_float(value):
                # Non-numeric: treat as a resource and make it URL friendly.
                value = "d:" + value.replace(" ", "_")
            if title_line:
                # Use the column title to build the predicate name.
                predicate = "has{}".format(title_line[col_index].replace(" ", "_").capitalize())
            else:
                # No titles available: fall back to a generic predicate.
                predicate = "hasAttribute"
            attributes.append("p:{} {}".format(predicate, value))
        # ';' separates attributes (syntax); the newline is only for display.
        triplets.append("[] {}".format((";\n" + indent).join(attributes)))
    with codecs.open(output_file, "w", encoding="utf8") as ttl_file:
        # The two prefix declarations first, then a blank line, then triples.
        ttl_file.write("@prefix d: <{}> .\n".format(data_prefix))
        ttl_file.write("@prefix p: <{}> .\n\n".format(predicate_prefix))
        # '.' terminates each triplet (syntax); the newline is for display.
        ttl_file.writelines([str(line) + " .\n" for line in triplets])
    return True, "Successfully triplified file !"
def check_float(potential_float):
    """Return True when the given value (a str in practice) parses as a
    float — integers included — and False otherwise."""
    try:
        float(potential_float)
    except ValueError:
        return False
    return True
| mdaubie/Triplifier | utils.py | utils.py | py | 6,546 | python | en | code | 0 | github-code | 13 |
11672372818 | import requests
from imutils import paths
import argparse
import cv2
import os
# Command line interface: a file of image URLs and an output directory.
argparser = argparse.ArgumentParser()
argparser.add_argument("-u", "--urls", required=True,
                       help="path to file containing image URLs")
argparser.add_argument("-o", "--output", required=True,
                       help="path to output directory of images")
args = vars(argparser.parse_args())

urls = open(args["urls"]).read().strip().split("\n")
no_img = 0
total_img = len(urls)

print("Downloading images...")
# Download images, numbering the files sequentially (0000.jpg, 0001.jpg, ...).
for url in urls:
    try:
        request = requests.get(url, timeout=60)
        path = os.path.sep.join([args["output"], "{}.jpg".format(str(no_img).zfill(4))])
        # BUG FIX: use a context manager so the file handle is closed even
        # when the write fails part-way through.
        with open(path, "wb") as file:
            file.write(request.content)
        no_img += 1
        print("{}/{}".format(no_img, total_img))
    except Exception:
        # BUG FIX: the original bare "except:" also swallowed KeyboardInterrupt.
        print("Error occured on : " + url)

print("Checking images...")
# Remove any downloaded file that OpenCV cannot decode as an image.
for image_path in paths.list_images(args["output"]):
    delete = False
    try:
        image = cv2.imread(image_path)
        if image is None:
            delete = True
    except Exception:
        print("Cannot open image")
        delete = True
    if delete:
        os.remove(image_path)

print("FINISHED")
4511226972 | #!python
# -*- coding: utf8 -*-
# Names of the container and per-item elements in the annotation output
# (presumably XML-style tags — confirm against the serializer code).
rootElement = 'elements'
topElement = 'element'
# Text encoding used for annotation files.
encoding = 'utf8'
# Marker text used to flag an annotation for deletion — verify against usage.
deletetext = 'delete'
# Suffix appended to annotated output file names.
extra = '_anno'
# entities shown in the radiobox bar: (display label, internal tag name)
entities = [(u'产地', 'place'), (u'品种', 'type'), (u'等级', 'rank'), (u'其它', 'other')]
33804659941 | from django.db import models
# (stored value, display label) choices for the semester field (1st-10th).
SEMESTRES = [
('1', '1er'),
('2', '2do'),
('3', '3er'),
('4', '4to'),
('5', '5to'),
('6', '6to'),
('7', '7mo'),
('8', '8vo'),
('9', '9no'),
('10', '10mo'),
]
# (stored value, display label) choices for the weekday field (Mon-Sat).
DIAS = [
('1', 'Lunes'),
('2', 'Martes'),
('3', 'Miercoles'),
('4', 'Jueves'),
('5', 'Viernes'),
('6', 'Sabado'),
]
class Horario(models.Model):
    """One scheduled class slot: a subject taught by a teacher in a given
    semester, on a given weekday, at a given time and room."""
    clave = models.BigAutoField('Clave', primary_key=True)
    materia = models.ForeignKey("materias.Materia", verbose_name='Materia', on_delete=models.CASCADE)
    docente = models.CharField('Docente', max_length=200)
    semestre = models.CharField('Semestre', max_length=2, choices=SEMESTRES)
    dia = models.CharField('Dia', max_length=2, choices=DIAS)
    hora = models.CharField('Hora', max_length=200)
    salon = models.CharField('Salon', max_length=200)

    def __str__(self):
        # BUG FIX: __str__ must return a str; self.materia is a related
        # Materia instance, so the original raised TypeError when rendered.
        return str(self.materia)
| DanielMCastillo/Frameworks | inscripciones/horarios/models.py | models.py | py | 924 | python | en | code | 0 | github-code | 13 |
16178935095 | from __future__ import print_function, division
import os
import itertools
import numpy as np
from mdtraj.utils import ensure_type, cast_indices, in_units_of
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils.six import string_types, PY3
from mdtraj.utils.six.moves import xrange
__all__ = ['MDCRDTrajectoryFile', 'load_mdcrd']
##############################################################################
# Classes
##############################################################################
class _EOF(IOError):
    """Internal sentinel exception raised when the end of the mdcrd file
    is reached while reading a frame."""
@FormatRegistry.register_loader('.mdcrd')
@FormatRegistry.register_loader('.crd')
def load_mdcrd(filename, top=None, stride=None, atom_indices=None, frame=None):
    """Load an AMBER mdcrd file.

    Parameters
    ----------
    filename : path-like
        Path of AMBER mdcrd file.
    top : {str, Trajectory, Topology}
        The mdcrd format stores no topology information, so a pdb path,
        a trajectory, or a topology must be supplied to provide it.
    stride : int, default=None
        Only read every stride-th frame.
    atom_indices : array_like, optional
        If not None, read only this subset of the atoms' coordinates.
    frame : int, optional
        Load only this single frame from the trajectory on disk. When
        supplied, ``stride`` is ignored; when None, the whole trajectory
        is loaded.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.

    See Also
    --------
    mdtraj.MDCRDTrajectoryFile : Low level interface to MDCRD files
    """
    from mdtraj.core.trajectory import _parse_topology, Trajectory
    # "top" is optional in the signature only because this function is
    # usually dispatched from load(), where top arrives via **kwargs; it is
    # mandatory here, so fail early with an informative message.
    if top is None:
        raise ValueError('"top" argument is required for load_mdcrd')
    if not isinstance(filename, (string_types, os.PathLike)):
        raise TypeError('filename must be of type path-like for load_mdcrd. '
                        'you supplied %s' % type(filename))
    topology = _parse_topology(top)
    atom_indices = cast_indices(atom_indices)
    with MDCRDTrajectoryFile(filename, topology.n_atoms) as f:
        if frame is not None:
            f.seek(frame)
            n_frames = 1
        else:
            n_frames = None
        return f.read_as_traj(topology, n_frames=n_frames, stride=stride,
                              atom_indices=atom_indices)
@FormatRegistry.register_fileobject('.mdcrd')
@FormatRegistry.register_fileobject('.crd')
class MDCRDTrajectoryFile(object):
"""Interface for reading and writing to an AMBER mdcrd files.
This is a file-like object, that both reading or writing depending
on the `mode` flag. It implements the context manager protocol,
so you can also use it with the python 'with' statement.
The conventional units in the mdcrd file are angstroms. The format only
supports storing the cartesian coordinates and box lengths.
Parameters
----------
filename : path-like
The filename to open. A path to a file on disk.
n_atoms : int
The number of atoms in the system. This is _required_ when mode == 'r'
and irrelevant when mode == 'w'.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for
write.
has_box = 'detect'
Does the mdcrd file contain box length information? This is optional
when mode == 'r' (and irrelevant when mode == 'w'). The presence or
absence of box information can generally be inferred from the file,
but there might be corner cases in which this is not possible,
because of limitations in the mdcrd format.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
"""
# Units as stored on disk; read_as_traj converts to mdtraj's internal units.
distance_unit = 'angstroms'
def __init__(self, filename, n_atoms=None, mode='r', has_box='detect',
force_overwrite=True):
"""Open an AMBER mdcrd file for reading/writing.
"""
self._is_open = False
self._filename = filename
self._n_atoms = n_atoms
self._mode = mode
self._w_has_box = None
self._frame_index = 0
self._has_box = has_box
# track which line we're on. this is not essential, but its useful
# when reporting errors to the user to say what line it occured on.
self._line_counter = 0
if has_box not in [True, False, "detect"]:
raise ValueError('has_box must be one of [True, False, "detect"]')
if mode == 'r':
if n_atoms is None:
raise ValueError('To open a mdcrd file in mode="r", you must '
'supply the number of atoms, "n_atoms"')
if not os.path.exists(filename):
raise IOError("The file '%s' doesn't exist" % filename)
self._fh = open(filename, 'rb')
self._is_open = True
self._fh.readline() # read comment
self._line_counter += 1
elif mode == 'w':
if os.path.exists(filename) and not force_overwrite:
raise IOError('"%s" already exists' % filename)
self._fh = open(filename, 'wb')
self._is_open = True
else:
raise ValueError('mode must be one of "r" or "w". '
'you supplied "%s"' % mode)
def close(self):
"""Close the mdcrd file"""
if self._is_open:
self._fh.close()
self._is_open = False
def __del__(self):
self.close()
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
def read_as_traj(self, topology, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from a mdcrd file
Parameters
----------
topology : Topology
The system topology
n_frames : int, optional
If positive, then read only the next `n_frames` frames. Otherwise read all
of the frames in the file.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it required
an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object containing the loaded portion of the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
initial = int(self._frame_index)
xyz, cell_lengths = self.read(n_frames=n_frames, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
in_units_of(xyz, self.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(cell_lengths, self.distance_unit, Trajectory._distance_unit, inplace=True)
if cell_lengths is None:
cell_angles = None
else:
# Assume that its a rectilinear box
cell_angles = 90.0 * np.ones_like(cell_lengths)
if stride is None:
stride = 1
# Frame "time" is just the frame index within the whole file.
time = (stride*np.arange(len(xyz))) + initial
t = Trajectory(xyz=xyz, topology=topology, time=time)
t.unitcell_lengths = cell_lengths
t.unitcell_angles = cell_angles
return t
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a mdcrd file
Parameters
----------
n_frames : int, None
The number of frames you would like to read from the file.
If None, all of the remaining frames will be loaded.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates
from the file.
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3), dtype=np.float32
The cartesian coordinates, in angstroms
cell_lengths : {np.ndarray, None}
If the file contains unitcell lengths, they will be returned as an
array of shape=(n_frames, 3). Otherwise, unitcell_angles will be
None.
"""
if not self._mode == 'r':
raise ValueError('read() is only available when file is opened '
'in mode="r"')
if n_frames is None:
frame_counter = itertools.count()
else:
frame_counter = xrange(n_frames)
if stride is None:
stride = 1
coords, boxes = [], []
for i in frame_counter:
try:
coord, box = self._read()
if atom_indices is not None:
coord = coord[atom_indices, :]
except _EOF:
break
coords.append(coord)
boxes.append(box)
for j in range(stride - 1):
# throw away these frames
try:
self._read()
except _EOF:
break
coords = np.array(coords)
if all(b is None for b in boxes):
# if there was no box information in any frame, that's cool
return coords, None
if not all(b is not None for b in boxes):
# but if some of them had box information and others didn't
# that probably means there was a bug in the parsing.
raise IOError('Inconsistent box information. Try manually '
'setting has_box? Your mdcrd file might be '
'corrupt.')
return coords, np.array(boxes, dtype=np.float32)
def _read(self):
"Read a single frame"
i = 0
coords = np.empty(self._n_atoms*3, dtype=np.float32)
box = None
while i < self._n_atoms * 3:
line = self._fh.readline()
self._line_counter += 1
if line == b'':
raise _EOF()
try:
# mdcrd uses fixed-width fields: each value occupies 8 characters,
# with at most 10 values per line.
items = [float(line[j:j+8])
for j in range(0, len(line.rstrip()), 8)]
assert 0 < len(items) <= 10
except Exception:
raise IOError('mdcrd parse error on line %d of "%s". This file '
'does not appear to be a valid mdcrd file.' % \
(self._line_counter, self._filename))
length = len(items)
if i + length > len(coords):
raise IOError(
'mdcrd parse error: specified n_atoms (%d) is likely incorrect. '
'Incorrect buffer size encountered on line=%d' % (
self._n_atoms, self._line_counter))
coords[i:i+length] = items
i += length
if i == self._n_atoms * 3:
if self._has_box is False:
break
# peek ahead for box
# A line of exactly 3 floats directly after a complete frame is
# taken to be the box lengths; anything else means no box here.
here = self._fh.tell()
line = self._fh.readline()
peek = [float(elem) for elem in line.strip().split()]
if len(peek) == 3:
box = peek
else:
if self._has_box is True:
raise IOError('Box information not found in file.')
# Not a box line: rewind so the next frame starts from the peek point.
self._fh.seek(-len(line), 1)
self._fh.seek(here)
break
self._frame_index += 1
return coords.reshape(self._n_atoms, 3), box
def write(self, xyz, cell_lengths=None):
"""Write one or more frames of data to a mdcrd file
Parameters
----------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms to write. By convention, the
lengths should be in units of angstroms.
cell_lengths : np.ndarray, shape=(n_frames, 3), dtype=float32, optional
The length of the periodic box in each frame, in each direction,
`a`, `b`, `c`. By convention the lengths should be in units
of angstroms.
"""
if not self._mode == 'w':
raise ValueError('write() is only available when file is opened '
'in mode="w"')
xyz = ensure_type(xyz, np.float32, 3, 'xyz', can_be_none=False,
shape=(None, None, 3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_lengths = ensure_type(cell_lengths, np.float32, 2, 'cell_lengths',
can_be_none=True, shape=(len(xyz), 3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if self._w_has_box is None:
# this is the first write()
self._n_atoms = xyz.shape[1]
comment = 'TITLE : Created by MDTraj with %d atoms\n' % self._n_atoms
if PY3:
comment = comment.encode('ascii')
self._fh.write(comment)
if cell_lengths is None:
self._w_has_box = False
else:
self._w_has_box = True
elif self._w_has_box is True:
if cell_lengths is None:
raise ValueError('This mdcrd file must contain unitcell '
'information')
elif self._w_has_box is False:
if cell_lengths is not None:
raise ValueError('This mdcrd file must not contain unitcell '
'information')
else:
raise RuntimeError()
# Emit each frame's values in fixed 8.3f columns, wrapping after
# every 10th value to match the mdcrd fixed-width layout.
for i in range(xyz.shape[0]):
for j, coord in enumerate(xyz[i].reshape(-1)):
lfdone = False
out = "%8.3f" % coord
if len(out) > 8:
raise ValueError('Overflow error')
if PY3:
out = out.encode('ascii')
self._fh.write(out)
if (j+1) % 10 == 0:
self._fh.write(b"\n")
lfdone = True
if not lfdone:
self._fh.write(b"\n")
if cell_lengths is not None:
line = "%8.3f %8.3f %8.3f\n" % tuple(cell_lengths[i])
if PY3:
line = line.encode('ascii')
self._fh.write(line)
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
if self._mode == 'r':
advance, absolute = None, None
if whence == 0 and offset >= 0:
if offset >= self._frame_index:
advance = offset - self._frame_index
else:
absolute = offset
elif whence == 1 and offset >= 0:
advance = offset
elif whence == 1 and offset < 0:
absolute = offset + self._frame_index
elif whence == 2 and offset <= 0:
raise NotImplementedError('offsets from the end are not supported yet')
else:
raise IOError('Invalid argument')
if advance is not None:
for i in range(advance):
self._read() # advance and throw away these frames
elif absolute is not None:
# The text format is sequential-only: to move backwards, reopen
# the file and re-read frames from the beginning.
self._fh.close()
self._fh = open(self._filename, 'rb')
self._fh.readline() # read comment
self._frame_index = 0
self._line_counter = 1
for i in range(absolute):
self._read()
else:
raise RuntimeError()
else:
raise NotImplementedError('offsets in write mode are not supported yet')
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return int(self._frame_index)
def __len__(self):
"Number of frames in the file"
raise NotImplementedError()
| mdtraj/mdtraj | mdtraj/formats/mdcrd.py | mdcrd.py | py | 17,213 | python | en | code | 505 | github-code | 13 |
71071815379 | import os
import json
import cv2 as cv
import numpy as np
# dataset_path_list = ["E:/PythonCodes/bbox3d_annotation_tools/session0_center_data",
# "E:/PythonCodes/bbox3d_annotation_tools/session0_right_data",
# "E:/PythonCodes/bbox3d_annotation_tools/session6_right_data"]
# Ground-truth visualization: draw each car's annotated (posX, posY) position
# on the matching video frame for a 5-minute window.
raw_fps = 50.00
raw_frame_start = 5 * 60           # offset into the raw video, in seconds
frame_len = int(25.00 * 5 * 60)    # number of (25 fps) frames to display
img_root_dir = "E:/PythonCodes/bbox3d_annotation_tools/session0_right_data"
gt_pos_json_file = "../gt_pos_json/system_dubska_bmvc14_session0_right.json"
start_frame_index = int(raw_frame_start * raw_fps)

with open(gt_pos_json_file, "r") as f:
    gt_pos_data_dict = json.load(f)

# Each car entry lists the raw-frame ids it appears in and its position
# (posX, posY) in each of those frames.
list_cars = gt_pos_data_dict["cars"]
img_index = 0
all_list_frames = []
all_ids = []
all_pos_xs = []
all_pos_ys = []
indices = []
for single_car_dict in list_cars:
    # BUG FIX: np.int / np.float were deprecated and removed in NumPy >= 1.24;
    # use the builtin int / float types instead.
    list_frames = np.array(list(single_car_dict["frames"])).astype(int)
    car_id = np.array(int(single_car_dict["id"])).astype(int)
    list_pos_xs = np.array(list(single_car_dict["posX"])).astype(float)
    list_pos_ys = np.array(list(single_car_dict["posY"])).astype(float)
    all_list_frames.append(list_frames)
    all_ids.append(car_id)
    all_pos_xs.append(list_pos_xs)
    all_pos_ys.append(list_pos_ys)
np_list_frames = np.array(all_list_frames)
np_ids = np.array(all_ids)
np_pos_xs = np.array(all_pos_xs)
np_pos_ys = np.array(all_pos_ys)

for frm_idx in range(img_index, img_index + frame_len):
    indices.clear()
    img_path = os.path.join(img_root_dir,
                            img_root_dir.split("/")[-1][:-4] + "%06d" % (frm_idx) + ".jpg")
    img = cv.imread(img_path)
    # Collect every (car, sample) pair annotated for the current raw frame.
    for i in range(len(np_list_frames)):
        for j in range(len(np_list_frames[i])):
            if start_frame_index == np_list_frames[i][j]:
                indices.append((i, j))
    for car_idx, sample_idx in indices:
        pos_x = int(np_pos_xs[car_idx][sample_idx])
        pos_y = int(np_pos_ys[car_idx][sample_idx])
        cv.circle(img, (pos_x, pos_y), 3, (0, 0, 255), -1)
    cv.imshow("test", img)
    cv.waitKey(50)
    start_frame_index += 1
print("ok")
print(gt_pos_data_dict)
| stjuliet/CenterLoc3D | utils/visualize_gt_pos.py | visualize_gt_pos.py | py | 2,589 | python | en | code | 10 | github-code | 13 |
4913836591 | import aepp
from dataclasses import dataclass
from aepp import connector
from copy import deepcopy
from typing import Union
import time
import logging
import pandas as pd
import json
import re
from .configs import ConnectObject
json_extend = [
{
"op": "replace",
"path": "/meta:intendedToExtend",
"value": [
"https://ns.adobe.com/xdm/context/profile",
"https://ns.adobe.com/xdm/context/experienceevent",
],
}
]
@dataclass
class _Data:
    """In-memory cache of schemas and field groups, with one lookup dict
    per access key (name, $id and altId)."""

    def __init__(self):
        # Every cache starts out as an independent empty dict.
        for attr in ("schemas", "schemas_id", "schemas_altId",
                     "fieldGroups", "fieldGroups_id", "fieldGroups_altId"):
            setattr(self, attr, {})
class Schema:
    """
    This class is a wrapper around the schema registry API for Adobe Experience Platform.
    More documentation on these endpoints can be found here :
    https://www.adobe.io/apis/experienceplatform/home/api-reference.html#!acpdr/swagger-specs/schema-registry.yaml
    When Patching a schema, you can use the PATCH_OBJ reference to help you.
    """

    # Class-level cache of retrieved schema definitions, keyed by title.
    schemas = {} # caching
    ## logging capability
    # Logging stays disabled unless a valid loggingObject is passed to __init__.
    loggingEnabled = False
    logger = None
    # Shorthand aliases for the two standard XDM behavior classes.
    _schemaClasses = {
        "event": "https://ns.adobe.com/xdm/context/experienceevent",
        "profile": "https://ns.adobe.com/xdm/context/profile",
    }
    # Example JSON Patch payload (adds the "union" immutable tag to a schema).
    PATCH_OBJ = [{"op": "add", "path": "/meta:immutableTags-", "value": "union"}]
    # Descriptor types accepted by the registry's descriptor endpoints.
    DESCRIPTOR_TYPES =["xdm:descriptorIdentity","xdm:alternateDisplayInfo","xdm:descriptorOneToOne","xdm:descriptorReferenceIdentity","xdm:descriptorDeprecated"]
    def __init__(
        self,
        containerId: str = "tenant",
        config: Union[dict,ConnectObject] = aepp.config.config_object,
        header=aepp.config.header,
        loggingObject: dict = None,
        **kwargs,
    ):
        """
        Copy the token and header and initiate the object to retrieve schema elements.
        Arguments:
            containerId : OPTIONAL : "tenant"(default) or "global"
            loggingObject : OPTIONAL : logging object to log messages.
                Must contain exactly the keys: level, stream, format, filename, file.
            config : OPTIONAL : config object in the config module, or a ConnectObject.
            header : OPTIONAL : header object in the config module.
        possible kwargs:
            x-sandbox-name : name of the sandbox you want to use (default : "prod").
            sandbox : name of the sandbox, applied to both header and connector config.
        """
        # Logging is only enabled when the loggingObject carries exactly the
        # expected set of keys.
        if loggingObject is not None and sorted(
            ["level", "stream", "format", "filename", "file"]
        ) == sorted(list(loggingObject.keys())):
            self.loggingEnabled = True
            self.logger = logging.getLogger(f"{__name__}")
            self.logger.setLevel(loggingObject["level"])
            # "format" may be a format string or an already-built Formatter.
            if type(loggingObject["format"]) == str:
                formatter = logging.Formatter(loggingObject["format"])
            elif type(loggingObject["format"]) == logging.Formatter:
                formatter = loggingObject["format"]
            if loggingObject["file"]:
                fileHandler = logging.FileHandler(loggingObject["filename"])
                fileHandler.setFormatter(formatter)
                self.logger.addHandler(fileHandler)
            if loggingObject["stream"]:
                streamHandler = logging.StreamHandler()
                streamHandler.setFormatter(formatter)
                self.logger.addHandler(streamHandler)
        if type(config) == dict: ## Supporting either default setup or passing a ConnectObject
            config = config
        elif type(config) == ConnectObject:
            # A ConnectObject carries both the header and the config dict.
            header = config.getConfigHeader()
            config = config.getConfigObject()
        self.connector = connector.AdobeRequest(
            config=config,
            header=header,
            loggingEnabled=self.loggingEnabled,
            logger=self.logger,
        )
        # NOTE: self.header aliases the connector's header object.
        self.header = self.connector.header
        self.header["Accept"] = "application/vnd.adobe.xed+json"
        self.connector.header['Accept'] = "application/vnd.adobe.xed+json"
        if kwargs.get('sandbox',None) is not None: ## supporting sandbox setup on class instanciation
            self.sandbox = kwargs.get('sandbox')
            self.connector.config["sandbox"] = kwargs.get('sandbox')
            self.header.update({"x-sandbox-name":kwargs.get('sandbox')})
            self.connector.header.update({"x-sandbox-name":kwargs.get('sandbox')})
        else:
            self.sandbox = self.connector.config["sandbox"]
        # Any remaining kwargs are merged into the request header as-is.
        self.header.update(**kwargs)
        self.endpoint = (
            aepp.config.endpoints["global"] + aepp.config.endpoints["schemas"]
        )
        self.container = containerId
        # Per-instance caches (titles -> ids / definitions).
        self.data = _Data()
    def getResource(
        self,
        endpoint: str = None,
        params: dict = None,
        format: str = "json",
        save: bool = False,
        **kwargs,
    ) -> dict:
        """
        Template for requesting data with a GET method.
        Arguments:
            endpoint : REQUIRED : The URL to GET
            params: OPTIONAL : dictionary of the params to fetch
            format : OPTIONAL : Type of response returned. Possible values:
                json : default
                txt : text file
                raw : a response object from the requests module
            save : OPTIONAL : if True, save the response to a file (json/txt only)
        possible kwargs:
            encoding : file encoding used when saving (default "utf-8")
        """
        if endpoint is None:
            raise ValueError("Require an endpoint")
        if self.loggingEnabled:
            self.logger.debug(f"Starting getResource")
        res = self.connector.getData(endpoint, params=params, format=format)
        if save:
            # NOTE(review): files are saved under module="catalog" even though
            # this is the schema module — looks like a copy-paste; confirm.
            if format == "json":
                aepp.saveFile(
                    module="catalog",
                    file=res,
                    filename=f"resource_{int(time.time())}",
                    type_file="json",
                    encoding=kwargs.get("encoding", "utf-8"),
                )
            elif format == "txt":
                aepp.saveFile(
                    module="catalog",
                    file=res,
                    filename=f"resource_{int(time.time())}",
                    type_file="txt",
                    encoding=kwargs.get("encoding", "utf-8"),
                )
            else:
                # "raw" responses cannot be serialized generically.
                print(
                    "element is an object. Output is unclear. No save made.\nPlease save this element manually"
                )
        return res
def updateSandbox(self, sandbox: str = None) -> None:
"""
Update the sandbox used in your request.
Arguments:
sandbox : REQUIRED : name of the sandbox to be used
"""
if self.loggingEnabled:
self.logger.debug(f"Starting updateSandbox")
if not sandbox:
raise ValueError("`sandbox` must be specified in the arguments.")
self.header["x-sandbox-name"] = sandbox
self.sandbox = sandbox
def getStats(self) -> list:
"""
Returns a list of the last actions realized on the Schema for this instance of AEP.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getStats")
path = "/stats/"
res = self.connector.getData(self.endpoint + path, headers=self.header)
return res
def getTenantId(self) -> str:
"""
Return the tenantID for the AEP instance.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getTenantId")
res = self.getStats()
tenant = res["tenantId"]
return tenant
def getBehaviors(self)->list:
"""
Return a list of behaviors.
"""
path = "/global/behaviors"
res = self.connector.getData(self.endpoint + path)
data = res.get("results",[])
return data
def getBehavior(self,behaviorId:str=None)->dict:
"""
Retrieve a specific behavior for class creation.
Arguments:
behaviorId : REQUIRED : the behavior ID to be retrieved.
"""
if behaviorId is None:
raise Exception("Require a behavior ID")
path = f"/global/behaviors/{behaviorId}"
res = self.connector.getData(self.endpoint + path)
return res
    def getSchemas(
        self,
        classFilter: str = None,
        excludeAdhoc: bool = False,
        output: str = 'raw',
        **kwargs
    ) -> list:
        """
        Returns the list of schemas retrieved for that instances in a "results" list.
        Arguments:
            classFilter : OPTIONAL : filter to a specific class.
                Example :
                    https://ns.adobe.com/xdm/context/experienceevent
                    https://ns.adobe.com/xdm/context/profile
                    https://ns.adobe.com/xdm/data/adhoc
            excludeAdhoc : OPTIONAL : exclude the adhoc schemas
            output : OPTIONAL : either "raw" for a list or "df" for dataframe
        Possible kwargs:
            debug : if set to true, will print the result when error happens
            format : if set to "xed", returns the full JSON for each resource (default : "xed-id" - short summary)
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting getSchemas")
        path = f"/{self.container}/schemas/"
        start = kwargs.get("start", 0)
        params = {"start": start}
        if classFilter is not None:
            params["property"] = f"meta:intendedToExtend=={classFilter}"
        # NOTE(review): when both classFilter and excludeAdhoc are given, this
        # overwrites the classFilter property filter — confirm intended.
        if excludeAdhoc:
            params["property"] = "meta:extends!=https://ns.adobe.com/xdm/data/adhoc"
        verbose = kwargs.get("debug", False)
        privateHeader = deepcopy(self.header)
        # The Accept header controls summary ("xed-id") vs full ("xed") output.
        format = kwargs.get("format", "xed-id")
        privateHeader["Accept"] = f"application/vnd.adobe.{format}+json"
        res = self.connector.getData(
            self.endpoint + path, params=params, headers=privateHeader, verbose=verbose
        )
        if kwargs.get("debug", False):
            if "results" not in res.keys():
                print(res)
        data = res.get("results",[])
        # On an empty/failed response, return the raw payload for inspection.
        if len(data) == 0:
            return res
        # Follow server-side pagination until no "next" cursor is returned.
        page = res.get("_page",{})
        nextPage = page.get('next',None)
        while nextPage is not None:
            params['start'] = nextPage
            res = self.connector.getData(
                self.endpoint + path, params=params, headers=privateHeader, verbose=verbose
            )
            data += res.get('results',[])
            page = res.get("_page",{'next':None})
            nextPage = page.get('next',None)
        # Cache title -> $id / meta:altId lookups for later convenience.
        self.data.schemas_id = {schem["title"]: schem["$id"] for schem in data}
        self.data.schemas_altId = {
            schem["title"]: schem["meta:altId"] for schem in data
        }
        if output == 'df':
            df = pd.DataFrame(data)
            return df
        return data
    def getSchema(
        self,
        schemaId: str = None,
        version: int = 1,
        full: bool = True,
        desc: bool = False,
        deprecated:bool=False,
        schema_type: str = "xdm",
        flat: bool = False,
        save: bool = False,
        **kwargs,
    ) -> dict:
        """
        Get the Schema. Requires a schema id.
        Response provided depends on the header set, you can change the Accept header with kwargs.
        Arguments:
            schemaId : REQUIRED : $id or meta:altId
            version : OPTIONAL : Version of the Schema asked (default 1)
            full : OPTIONAL : True (default) will return the full schema.False just the relationships.
            desc : OPTIONAL : If set to True, return the identity used as the descriptor.
            deprecated : OPTIONAL : Display the deprecated field from that schema
            flat : OPTIONAL : If set to True, return a flat schema for pathing.
            schema_type : OPTIONAL : set the type of output you want (xdm or xed) Default : xdm.
            save : OPTIONAL : save the result in json file (default False)
        Possible kwargs:
            Accept : Accept header to change the type of response.
        # /Schemas/lookup_schema
        more details held here : https://www.adobe.io/apis/experienceplatform/home/api-reference.html
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting getSchema")
        privateHeader = deepcopy(self.header)
        if schemaId is None:
            raise Exception("Require a schemaId as a parameter")
        # Each flag maps to a suffix of the Accept header, which is how the
        # Schema Registry selects the representation to return.
        update_full,update_desc,update_flat,update_deprecated="","","",""
        if full:
            update_full = "-full"
        if desc:
            update_desc = "-desc"
        if flat:
            update_flat = "-flat"
        if deprecated:
            update_deprecated = "-deprecated"
        if schema_type != "xdm" and schema_type != "xed":
            raise ValueError("schema_type parameter can only be xdm or xed")
        if self.loggingEnabled:
            # NOTE(review): duplicate of the debug call above — harmless.
            self.logger.debug(f"Starting getSchema")
        privateHeader['Accept'] = f"application/vnd.adobe.{schema_type}{update_full}{update_desc}{update_flat}{update_deprecated}+json; version={version}"
        if kwargs.get("Accept", None) is not None:
            # A caller-supplied Accept header overrides the one built above.
            privateHeader["Accept"] = kwargs.get("Accept", self.header["Accept"])
        privateHeader["Accept-Encoding"] = "identity"
        if schemaId.startswith("https://"):
            # Full $id URLs must be URL-encoded before use in the path.
            from urllib import parse
            schemaId = parse.quote_plus(schemaId)
        path = f"/{self.container}/schemas/{schemaId}"
        res = self.connector.getData(self.endpoint + path, headers=privateHeader)
        if "title" not in res.keys() and "notext" not in privateHeader["Accept"]:
            print("Issue with the request. See response.")
            return res
        if save:
            aepp.saveFile(
                module="schema", file=res, filename=res["title"], type_file="json"
            )
        if "title" in res.keys():
            # Cache the definition by title for later lookups.
            self.data.schemas[res["title"]] = res
        else:
            print("no title in the response. Not saved in the data object.")
        return res
    def getSchemaPaths(
        self, schemaId: str, simplified: bool = True, save: bool = False
    ) -> list:
        """
        Returns a list of the path available in your schema. BETA.
        Arguments:
            schemaId : REQUIRED : The schema you want to retrieve the paths for
            simplified : OPTIONAL : Default True, only returns the list of paths for your schemas.
            save : OPTIONAL : Save your schema paths in a file. Always the NOT simplified version.
        """
        if schemaId is None:
            raise Exception("Require a schemaId as a parameter")
        if self.loggingEnabled:
            self.logger.debug(f"Starting getSchemaPaths")
        # The flat representation exposes every leaf field as a key under
        # "properties"; paths are derived from those keys.
        res = self.getSchema(schemaId, flat=True)
        keys = res["properties"].keys()
        # Normalize registry notation ("/" separators, "xdm:" prefixes, "@")
        # into dotted-path notation.
        paths = [
            key.replace("/", ".").replace("xdm:", "").replace("@", "_") for key in keys
        ]
        if save:
            aepp.saveFile(
                module="schema",
                file=res,
                filename=f"{res['title']}_paths",
                type_file="json",
            )
        if simplified:
            return paths
        return res
def getSchemaSample(
self, schemaId: str = None, save: bool = False, version: int = 1
) -> dict:
"""
Generate a sample data from a schema id.
Arguments:
schema_id : REQUIRED : The schema ID for the sample data to be created.
save : OPTIONAL : save the result in json file (default False)
version : OPTIONAL : version of the schema to request
"""
privateHeader = deepcopy(self.header)
import random
if self.loggingEnabled:
self.logger.debug(f"Starting getSchemaSample")
rand_number = random.randint(1, 10e10)
if schemaId is None:
raise Exception("Require an ID for the schema")
if schemaId.startswith("https://"):
from urllib import parse
schemaId = parse.quote_plus(schemaId)
path = f"/rpc/sampledata/{schemaId}"
accept_update = f"application/vnd.adobe.xed+json; version={version}"
privateHeader["Accept"] = accept_update
res = self.connector.getData(self.endpoint + path, headers=privateHeader)
if save:
schema = self.getSchema(schemaId=schemaId, full=False)
aepp.saveFile(
module="schema",
file=res,
filename=f"{schema['title']}_{rand_number}",
type_file="json",
)
return res
def patchSchema(self, schemaId: str = None, changes: list = None, **kwargs) -> dict:
"""
Enable to patch the Schema with operation.
Arguments:
schema_id : REQUIRED : $id or meta:altId
change : REQUIRED : List of changes that need to take place.
Example:
[
{
"op": "add",
"path": "/allOf",
"value": {'$ref': 'https://ns.adobe.com/emeaconsulting/mixins/fb5b3cd49707d27367b93e07d1ac1f2f7b2ae8d051e65f8d',
'type': 'object',
'meta:xdmType': 'object'}
}
]
information : http://jsonpatch.com/
"""
if schemaId is None:
raise Exception("Require an ID for the schema")
if type(changes) == dict:
changes = list(changes)
if schemaId.startswith("https://"):
from urllib import parse
schemaId = parse.quote_plus(schemaId)
if self.loggingEnabled:
self.logger.debug(f"Starting patchSchema")
path = f"/{self.container}/schemas/{schemaId}"
res = self.connector.patchData(
self.endpoint + path, data=changes)
return res
def putSchema(self, schemaId: str = None, schemaDef: dict = None, **kwargs) -> dict:
"""
A PUT request essentially re-writes the schema, therefore the request body must include all fields required to create (POST) a schema.
This is especially useful when updating a lot of information in the schema at once.
Arguments:
schemaId : REQUIRED : $id or meta:altId
schemaDef : REQUIRED : dictionary of the new schema.
It requires a allOf list that contains all the attributes that are required for creating a schema.
#/Schemas/replace_schema
More information on : https://www.adobe.io/apis/experienceplatform/home/api-reference.html
"""
if schemaId is None:
raise Exception("Require an ID for the schema")
if schemaId.startswith("https://"):
from urllib import parse
schemaId = parse.quote_plus(schemaId)
if self.loggingEnabled:
self.logger.debug(f"Starting putSchema")
path = f"/{self.container}/schemas/{schemaId}"
res = self.connector.putData(
self.endpoint + path, data=schemaDef, headers=self.header
)
return res
def deleteSchema(self, schemaId: str = None, **kwargs) -> str:
"""
Delete the request
Arguments:
schema_id : REQUIRED : $id or meta:altId
It requires a allOf list that contains all the attributes that are required for creating a schema.
#/Schemas/replace_schema
More information on : https://www.adobe.io/apis/experienceplatform/home/api-reference.html
"""
if schemaId is None:
raise Exception("Require an ID for the schema")
if schemaId.startswith("https://"):
from urllib import parse
schemaId = parse.quote_plus(schemaId)
if self.loggingEnabled:
self.logger.debug(f"Starting deleteSchema")
path = f"/{self.container}/schemas/{schemaId}"
res = self.connector.deleteData(self.endpoint + path)
return res
def createSchema(self, schema: dict = None) -> dict:
"""
Create a Schema based on the data that are passed in the Argument.
Arguments:
schema : REQUIRED : The schema definition that needs to be created.
"""
path = f"/{self.container}/schemas/"
if type(schema) != dict:
raise TypeError("Expecting a dictionary")
if "allOf" not in schema.keys():
raise Exception(
"The schema must include an ‘allOf’ attribute (a list) referencing the $id of the base class the schema will implement."
)
if self.loggingEnabled:
self.logger.debug(f"Starting createSchema")
res = self.connector.postData(
self.endpoint + path, data=schema
)
return res
def createExperienceEventSchema(
self,
name: str = None,
mixinIds: Union[list, dict] = None,
fieldGroupIds : Union[list, dict] = None,
description: str = "",
) -> dict:
"""
Create an ExperienceEvent schema based on the list mixin ID provided.
Arguments:
name : REQUIRED : Name of your schema
mixinIds : REQUIRED : dict of mixins $id and their type ["object" or "array"] to create the ExperienceEvent schema
Example {'mixinId1':'object','mixinId2':'array'}
if just a list is passed, it infers a 'object type'
fieldGroupIds : REQUIRED : List of fieldGroup $id to create the Indiviudal Profile schema
Example {'fgId1':'object','fgId2':'array'}
if just a list is passed, it infers a 'object type'
description : OPTIONAL : Schema description
"""
if name is None:
raise ValueError("Require a name")
if mixinIds is None and fieldGroupIds is None:
raise ValueError("Require a mixin ids or a field group id")
if mixinIds is None and fieldGroupIds is not None:
mixinIds = fieldGroupIds
obj = {
"title": name,
"description": description,
"allOf": [
{
"$ref": "https://ns.adobe.com/xdm/context/experienceevent",
"type": "object",
"meta:xdmType": "object",
}
],
}
if type(mixinIds) == list:
for mixin in mixinIds:
obj["allOf"].append(
{"$ref": mixin, "type": "object", "meta:xdmType": "object"}
)
if type(mixinIds) == dict:
for mixin in mixinIds:
if mixinIds[mixin] == "array":
subObj = {
"$ref": mixin,
"type": mixinIds[mixin],
"meta:xdmType": mixinIds[mixin],
"items": {"$ref": mixin},
}
obj["allOf"].append(subObj)
else:
subObj = {
"$ref": mixin,
"type": mixinIds[mixin],
"meta:xdmType": mixinIds[mixin],
}
obj["allOf"].append(subObj)
if self.loggingEnabled:
self.logger.debug(f"Starting createExperienceEventSchema")
res = self.createSchema(obj)
return res
def createProfileSchema(
self,
name: str = None,
mixinIds: Union[list, dict] = None,
fieldGroupIds : Union[list, dict] = None,
description: str = "",
**kwargs
) -> dict:
"""
Create an IndividualProfile schema based on the list mixin ID provided.
Arguments:
name : REQUIRED : Name of your schema
mixinIds : REQUIRED : List of mixins $id to create the Indiviudal Profile schema
Example {'mixinId1':'object','mixinId2':'array'}
if just a list is passed, it infers a 'object type'
fieldGroupIds : REQUIRED : List of fieldGroup $id to create the Indiviudal Profile schema
Example {'fgId1':'object','fgId2':'array'}
if just a list is passed, it infers a 'object type'
description : OPTIONAL : Schema description
"""
if name is None:
raise ValueError("Require a name")
if mixinIds is None and fieldGroupIds is None:
raise ValueError("Require a mixin ids or a field group id")
if mixinIds is None and fieldGroupIds is not None:
mixinIds = fieldGroupIds
obj = {
"title": name,
"description": description,
"allOf": [
{
"$ref": "https://ns.adobe.com/xdm/context/profile",
"type": "object",
"meta:xdmType": "object",
}
],
}
if type(mixinIds) == list:
for mixin in mixinIds:
obj["allOf"].append(
{"$ref": mixin, "type": "object", "meta:xdmType": "object"}
)
if type(mixinIds) == dict:
for mixin in mixinIds:
if mixinIds[mixin] == "array":
subObj = {
"$ref": mixin,
"type": mixinIds[mixin],
"meta:xdmType": mixinIds[mixin],
"items": {"$ref": mixin},
}
obj["allOf"].append(subObj)
else:
subObj = {
"$ref": mixin,
"type": mixinIds[mixin],
"meta:xdmType": mixinIds[mixin],
}
obj["allOf"].append(subObj)
if self.loggingEnabled:
self.logger.debug(f"Starting createProfileSchema")
res = self.createSchema(obj)
return res
def addFieldGroupToSchema(self,schemaId:str=None,fieldGroupIds:Union[list,dict]=None)->dict:
"""
Take the list of field group ID to extend the schema.
Return the definition of the new schema with added field groups.
Arguments
schemaId : REQUIRED : The ID of the schema (alt:metaId or $id)
fieldGroupIds : REQUIRED : The IDs of the fields group to add. It can be a list or dictionary.
Example {'fgId1':'object','fgId2':'array'}
if just a list is passed, it infers a 'object type'
"""
if schemaId is None:
raise ValueError("Require a schema ID")
if fieldGroupIds is None:
raise ValueError("Require a list of field group to add")
schemaDef = self.getSchema(schemaId,full=False)
allOf = schemaDef.get('allOf',[])
if type(allOf) != list:
raise TypeError("Expecting a list for 'allOf' key")
if type(fieldGroupIds) == list:
for mixin in fieldGroupIds:
allOf.append(
{"$ref": mixin, "type": "object", "meta:xdmType": "object"}
)
if type(fieldGroupIds) == dict:
for mixin in fieldGroupIds:
if fieldGroupIds[mixin] == "array":
subObj = {
"$ref": mixin,
"type": fieldGroupIds[mixin],
"meta:xdmType": fieldGroupIds[mixin],
"items": {"$ref": mixin},
}
allOf.append(subObj)
else:
subObj = {
"$ref": mixin,
"type": fieldGroupIds[mixin],
"meta:xdmType": fieldGroupIds[mixin],
}
allOf.append(subObj)
res = self.putSchema(schemaId,schemaDef)
return res
    def getClasses(self,
                prop:str=None,
                orderBy:str=None,
                limit:int=300,
                output:str='raw',
                excludeAdhoc: bool = False,
                **kwargs):
        """
        Return the classes of the AEP Instances.
        Arguments:
            prop : OPTIONAL : A comma-separated list of top-level object properties to be returned in the response.
                For example, property=meta:intendedToExtend==https://ns.adobe.com/xdm/context/profile
            orderBy : OPTIONAL : Sort the listed resources by specified fields. For example orderby=title
            limit : OPTIONAL : Number of resources to return per request, default 300 - the max.
            excludeAdhoc : OPTIONAL : Exlcude the Adhoc classes that have been created.
            output : OPTIONAL : type of output, default "raw", can be "df" for dataframe.
        kwargs:
            debug : if set to True, will print result for errors
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting getClasses")
        privateHeader = deepcopy(self.header)
        # The xdm-id Accept header returns summary entries only.
        privateHeader.update({"Accept": "application/vnd.adobe.xdm-id+json"})
        params = {"limit":limit}
        if excludeAdhoc:
            params["property"] = "meta:extends!=https://ns.adobe.com/xdm/data/adhoc"
        if prop is not None:
            if 'property' not in params.keys():
                params["property"] = prop
            else:
                # NOTE(review): concatenates without a separator when
                # excludeAdhoc already set "property" — confirm API semantics.
                params["property"] += prop
        if orderBy is not None:
            params['orderby'] = orderBy
        path = f"/{self.container}/classes/"
        verbose = kwargs.get("verbose", False)
        res = self.connector.getData(
            self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
        )
        if kwargs.get("debug", False):
            if "results" not in res.keys():
                print(res)
        data = res["results"]
        # Follow server-side pagination until "next" is exhausted.
        page = res["_page"]
        while page["next"] is not None:
            params["start"]= page["next"]
            res = self.connector.getData(
                self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
            )
            data += res["results"]
            page = res["_page"]
        if output=="df":
            df = pd.DataFrame(data)
            return df
        return data
def getClassesGlobal(self,
prop:str=None,
orderBy:str=None,
limit:int=300,
output:str='raw',
**kwargs):
"""
Return the classes of the AEP Instances.
Arguments:
prop : OPTIONAL : A comma-separated list of top-level object properties to be returned in the response.
For example, property=meta:intendedToExtend==https://ns.adobe.com/xdm/context/profile
oderBy : OPTIONAL : Sort the listed resources by specified fields. For example orderby=title
limit : OPTIONAL : Number of resources to return per request, default 300 - the max.
output : OPTIONAL : type of output, default "raw", can be "df" for dataframe.
kwargs:
debug : if set to True, will print result for errors
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getClasses")
privateHeader = deepcopy(self.header)
privateHeader.update({"Accept": "application/vnd.adobe.xdm-id+json"})
params = {"limit":limit}
if prop is not None:
if 'property' not in params.keys():
params["property"] = prop
else:
params["property"] += prop
if orderBy is not None:
params['orderby'] = orderBy
path = f"/global/classes/"
verbose = kwargs.get("verbose", False)
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
)
if kwargs.get("debug", False):
if "results" not in res.keys():
print(res)
data = res["results"]
page = res["_page"]
while page["next"] is not None:
params["start"]= page["next"]
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
)
data += res["results"]
page = res["_page"]
if output=="df":
df = pd.DataFrame(data)
return df
return data
    def getClass(
        self,
        classId: str = None,
        full: bool = True,
        desc: bool = False,
        deprecated: bool = False,
        xtype : str = "xdm",
        version: int = 1,
        save: bool = False,
    ):
        """
        Return a specific class.
        Arguments:
            classId : REQUIRED : the meta:altId or $id from the class
            full : OPTIONAL : True (default) will return the full schema.False just the relationships.
            desc : OPTIONAL : If set to True, return the descriptors.
            deprecated : OPTIONAL : Display the deprecated field from that schema (False by default)
            xtype : OPTIONAL : either "xdm" (default) or "xed".
            version : OPTIONAL : the version of the class to retrieve.
            save : OPTIONAL : To save the result of the request in a JSON file.
        """
        privateHeader = deepcopy(self.header)
        if classId is None:
            raise Exception("Require a class_id")
        if classId.startswith("https://"):
            # Full $id URLs must be URL-encoded before use in the path.
            from urllib import parse
            classId = parse.quote_plus(classId)
        if self.loggingEnabled:
            self.logger.debug(f"Starting getClass")
        privateHeader["Accept-Encoding"] = "identity"
        # Each flag maps to a suffix of the Accept header, which selects the
        # representation returned by the registry.
        updateFull,updateDesc, updateDeprecated = "","",""
        if full:
            updateFull = "-full"
        if desc:
            updateDesc = "-desc"
        if deprecated:
            updateDeprecated = "-deprecated"
        privateHeader.update(
            {"Accept": f"application/vnd.adobe.{xtype}{updateFull}{updateDesc}{updateDeprecated}+json; version=" + str(version)}
        )
        path = f"/{self.container}/classes/{classId}"
        res = self.connector.getData(self.endpoint + path, headers=privateHeader)
        if save:
            aepp.saveFile(
                module="schema", file=res, filename=res["title"], type_file="json"
            )
        return res
    def createClass(self, class_obj: dict = None,title:str=None, class_template:str=None, **kwargs):
        """
        Create a class based on the object pass. It should include the "allOff" element.
        Arguments:
            class_obj : REQUIRED : You can pass a complete object to create a class, include a title and a "allOf" element.
            title : REQUIRED : Title of the class if you want to pass individual elements
            class_template : REQUIRED : type of behavior for the class, either "https://ns.adobe.com/xdm/data/record" or "https://ns.adobe.com/xdm/data/time-series"
        Possible kwargs:
            description : To add a description to a class.
        """
        path = f"/{self.container}/classes/"
        if class_obj is not None:
            if type(class_obj) != dict:
                raise TypeError("Expecting a dictionary")
            if "allOf" not in class_obj.keys():
                raise Exception(
                    "The class object must include an ‘allOf’ attribute (a list) referencing the $id of the base class the schema will implement."
                )
        elif class_obj is None and title is not None and class_template is not None:
            # Build a minimal class definition from the individual elements.
            class_obj = {
                "type": "object",
                "title": title,
                "description": "Generated by aepp",
                "allOf": [
                    {
                        "$ref": class_template
                    }
                ]
            }
        # NOTE(review): if class_obj is None and title/class_template are not
        # both provided, class_obj stays None and is posted as-is — confirm.
        if kwargs.get("descriptor","") != "":
            class_obj['descriptor'] = kwargs.get("descriptor")
        if self.loggingEnabled:
            self.logger.debug(f"Starting createClass")
        res = self.connector.postData(
            self.endpoint + path, data=class_obj
        )
        return res
def putClass(self,classId:str=None,class_obj:dict=None)->dict:
"""
Replace the current definition with the new definition.
Arguments:
classId : REQUIRED : The class to be updated ($id or meta:altId)
class_obj : REQUIRED : The dictionary defining the new class definition
"""
if classId is None:
raise Exception("Require a classId")
if classId.startswith("https://"):
from urllib import parse
classId = parse.quote_plus(classId)
if class_obj is None:
raise Exception("Require a new definition for the class")
if self.loggingEnabled:
self.logger.debug(f"Starting putClass")
path = f"/{self.container}/classes/{classId}"
res = self.connector.putData(self.endpoint + path,data=class_obj)
return res
def patchClass(self,classId:str=None,operation:list=None)->dict:
"""
Patch a class with the operation specified such as:
update = [{
"op": "replace",
"path": "title",
"value": "newTitle"
}]
Possible operation value : "replace", "remove", "add"
Arguments:
classId : REQUIRED : The class to be updated ($id or meta:altId)
operation : REQUIRED : List of operation to realize on the class
"""
if classId is None:
raise Exception("Require a classId")
if classId.startswith("https://"):
from urllib import parse
classId = parse.quote_plus(classId)
if operation is None or type(operation) != list:
raise Exception("Require a list of operation for the class")
if self.loggingEnabled:
self.logger.debug(f"Starting patchClass")
path = f"/{self.container}/classes/{classId}"
res = self.connector.patchData(self.endpoint + path,data=operation)
return res
def deleteClass(self,classId: str = None)->str:
"""
Delete a class based on the its ID.
Arguments:
classId : REQUIRED : The class to be deleted ($id or meta:altId)
"""
if classId is None:
raise Exception("Require a classId")
if classId.startswith("https://"):
from urllib import parse
classId = parse.quote_plus(classId)
if self.loggingEnabled:
self.logger.debug(f"Starting patchClass")
path = f"/{self.container}/classes/{classId}"
res = self.connector.deleteData(self.endpoint + path)
return res
    def getFieldGroups(self, format: str = "xdm", **kwargs) -> list:
        """
        returns the fieldGroups of the account.
        Arguments:
            format : OPTIONAL : either "xdm" or "xed" format
        kwargs:
            debug : if set to True, will print result for errors
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting getFieldGroups")
        path = f"/{self.container}/fieldgroups/"
        start = kwargs.get("start", 0)
        params = {"start": start}
        verbose = kwargs.get("debug", False)
        privateHeader = deepcopy(self.header)
        # The Accept header selects the representation (xdm or xed) returned.
        privateHeader["Accept"] = f"application/vnd.adobe.{format}+json"
        res = self.connector.getData(
            self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
        )
        if kwargs.get("verbose", False):
            if "results" not in res.keys():
                print(res)
        data = res["results"]
        # Follow server-side pagination until no "next" cursor is returned.
        page = res.get("_page",{})
        nextPage = page.get('next',None)
        while nextPage is not None:
            params['start'] = nextPage
            res = self.connector.getData(
                self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
            )
            data += res.get("results")
            page = res.get("_page",{})
            nextPage = page.get('next',None)
        # Cache title -> $id / meta:altId lookups for later convenience.
        self.data.fieldGroups_id = {mix["title"]: mix["$id"] for mix in data}
        self.data.fieldGroups_altId = {mix["title"]: mix["meta:altId"] for mix in data}
        return data
def getFieldGroupsGlobal(self,format: str = "xdm",output:str='raw', **kwargs)->list:
"""
returns the global fieldGroups of the account.
Arguments:
format : OPTIONAL : either "xdm" or "xed" format
output : OPTIONAL : either "raw" (default) or "df" for dataframe
kwargs:
debug : if set to True, will print result for errors
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getFieldGroups")
path = f"/global/fieldgroups/"
start = kwargs.get("start", 0)
params = {"start": start}
verbose = kwargs.get("debug", False)
privateHeader = deepcopy(self.header)
privateHeader["Accept"] = f"application/vnd.adobe.{format}+json"
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
)
if kwargs.get("verbose", False):
if "results" not in res.keys():
print(res)
data = res["results"]
page = res.get("_page",{})
nextPage = page.get('next',None)
while nextPage is not None:
params['start'] = nextPage
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params, verbose=verbose
)
data += res.get("results")
page = res.get("_page",{})
nextPage = page.get('next',None)
self.data.fieldGroups_id = {mix["title"]: mix["$id"] for mix in data}
self.data.fieldGroups_altId = {mix["title"]: mix["meta:altId"] for mix in data}
if output == 'df':
df = pd.DataFrame(data)
return df
return data
# def getMixin(
# self,
# mixinId: str = None,
# version: int = 1,
# full: bool = True,
# save: bool = False,
# ):
# """
# Returns a specific mixin / field group.
# Arguments:
# mixinId : REQUIRED : meta:altId or $id
# version : OPTIONAL : version of the mixin
# full : OPTIONAL : True (default) will return the full schema.False just the relationships.
# """
# if mixinId.startswith("https://"):
# from urllib import parse
# mixinId = parse.quote_plus(mixinId)
# if self.loggingEnabled:
# self.logger.debug(f"Starting getMixin")
# privateHeader = deepcopy(self.header)
# privateHeader["Accept-Encoding"] = "identity"
# if full:
# accept_full = "-full"
# else:
# accept_full = ""
# update_accept = (
# f"application/vnd.adobe.xed{accept_full}+json; version={version}"
# )
# privateHeader.update({"Accept": update_accept})
# path = f"/{self.container}/mixins/{mixinId}"
# res = self.connector.getData(self.endpoint + path, headers=privateHeader)
# if save:
# aepp.saveFile(
# module="schema", file=res, filename=res["title"], type_file="json"
# )
# if "title" in res.keys():
# self.data.mixins[res["title"]] = res
# return res
def getFieldGroup(
self,
fieldGroupId: str = None,
version: int = 1,
full: bool = True,
desc: bool = False,
type: str = 'xed',
flat: bool = False,
deprecated: bool = False,
save: bool = False,
):
"""
Returns a specific mixin / field group.
Arguments:
fieldGroupId : REQUIRED : meta:altId or $id
version : OPTIONAL : version of the mixin
full : OPTIONAL : True (default) will return the full schema.False just the relationships
desc : OPTIONAL : Add descriptor of the field group
type : OPTIONAL : Either "xed" (default) or "xdm"
flat : OPTIONAL : if the fieldGroup is flat (false by default)
deprecated : OPTIONAL : Display the deprecated fields from that schema
save : Save the fieldGroup to a JSON file
"""
if fieldGroupId.startswith("https://"):
from urllib import parse
fieldGroupId = parse.quote_plus(fieldGroupId)
if self.loggingEnabled:
self.logger.debug(f"Starting getFieldGroup")
privateHeader = deepcopy(self.header)
privateHeader["Accept-Encoding"] = "identity"
accept_full, accept_desc,accept_flat,accept_deprec= "","","",""
if full:
accept_full = "-full"
if desc:
accept_desc = "-desc"
if flat:
accept_flat = "-flat"
if deprecated:
accept_deprec = "-deprecated"
update_accept = (
f"application/vnd.adobe.{type}{accept_full}{accept_desc}{accept_flat}{accept_deprec}+json; version={version}"
)
privateHeader.update({"Accept": update_accept})
path = f"/{self.container}/fieldgroups/{fieldGroupId}"
res = self.connector.getData(self.endpoint + path, headers=privateHeader)
if save:
aepp.saveFile(
module="schema", file=res, filename=res["title"], type_file="json"
)
if "title" in res.keys():
self.data.fieldGroups[res["title"]] = res
return res
def copyMixin(
self, mixin: dict = None, tenantId: str = None, title: str = None
) -> dict:
"""
Copy the dictionary returned by getMixin to the only required elements for copying it over.
Arguments:
mixin : REQUIRED : the object retrieved from the getMixin.
tenantId : OPTIONAL : if you want to change the tenantId (if None doesn't rename)
name : OPTIONAL : rename your mixin (if None, doesn't rename it)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting copyMixin")
if mixin is None:
raise ValueError("Require a mixin object")
mixin_obj = deepcopy(mixin)
oldTenant = mixin_obj["meta:tenantNamespace"]
if "definitions" in mixin_obj.keys():
obj = {
"type": mixin_obj["type"],
"title": title or mixin_obj["title"],
"description": mixin_obj["description"],
"meta:intendedToExtend": mixin_obj["meta:intendedToExtend"],
"definitions": mixin_obj.get("definitions"),
"allOf": mixin_obj.get(
"allOf",
[
{
"$ref": "#/definitions/property",
"type": "object",
"meta:xdmType": "object",
}
],
),
}
elif "properties" in mixin_obj.keys():
obj = {
"type": mixin_obj["type"],
"title": title or mixin_obj["title"],
"description": mixin_obj["description"],
"meta:intendedToExtend": mixin_obj["meta:intendedToExtend"],
"definitions": {
"property": {
"properties": mixin_obj["properties"],
"type": "object",
"['meta:xdmType']": "object",
}
},
"allOf": mixin_obj.get(
"allOf",
[
{
"$ref": "#/definitions/property",
"type": "object",
"meta:xdmType": "object",
}
],
),
}
if tenantId is not None:
if tenantId.startswith("_") == False:
tenantId = f"_{tenantId}"
obj["definitions"]["property"]["properties"][tenantId] = obj["definitions"][
"property"
]["properties"][oldTenant]
del obj["definitions"]["property"]["properties"][oldTenant]
return obj
    def copyFieldGroup(
        self, fieldGroup: dict = None, tenantId: str = None, title: str = None
    ) -> dict:
        """
        Trim the dictionary returned by getFieldGroup down to the elements required
        for re-creating the field group (e.g. in another sandbox or another Org).
        Arguments:
            fieldGroup : REQUIRED : the object retrieved from the getFieldGroup.
            tenantId : OPTIONAL : if you want to change the tenantId (if None doesn't rename)
            title : OPTIONAL : rename your field group (if None, doesn't rename it)
        Returns the trimmed dictionary, suitable for createFieldGroup.
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting copyFieldGroup")
        if fieldGroup is None:
            raise ValueError("Require a mixin object")
        mixin_obj = deepcopy(fieldGroup)
        oldTenant = mixin_obj["meta:tenantNamespace"]
        # Source already carries its "definitions": keep them (and allOf) as-is.
        if "definitions" in mixin_obj.keys():
            obj = {
                "type": mixin_obj["type"],
                "title": title or mixin_obj["title"],
                "description": mixin_obj["description"],
                "meta:intendedToExtend": mixin_obj["meta:intendedToExtend"],
                "definitions": mixin_obj.get("definitions"),
                # fall back to a default allOf pointing at the property definition
                "allOf": mixin_obj.get(
                    "allOf",
                    [
                        {
                            "$ref": "#/definitions/property",
                            "type": "object",
                            "meta:xdmType": "object",
                        }
                    ],
                ),
            }
        # Flattened source ("properties" only): rebuild a definitions wrapper around it.
        elif "properties" in mixin_obj.keys():
            obj = {
                "type": mixin_obj["type"],
                "title": title or mixin_obj["title"],
                "description": mixin_obj["description"],
                "meta:intendedToExtend": mixin_obj["meta:intendedToExtend"],
                "definitions": {
                    "property": {
                        "properties": mixin_obj["properties"],
                        "type": "object",
                        # NOTE(review): literal "['meta:xdmType']" key looks like it was
                        # meant to be "meta:xdmType" — confirm before changing.
                        "['meta:xdmType']": "object",
                    }
                },
                "allOf": mixin_obj.get(
                    "allOf",
                    [
                        {
                            "$ref": "#/definitions/property",
                            "type": "object",
                            "meta:xdmType": "object",
                        }
                    ],
                ),
            }
        # NOTE(review): if neither branch matched, "obj" is unbound and the code below
        # raises NameError — an explicit error would be clearer.
        if tenantId is not None:
            if tenantId.startswith("_") == False:
                tenantId = f"_{tenantId}"
            # move the tenant-namespaced fields to the new tenant key, wherever they live
            if 'property' in obj["definitions"].keys():
                obj["definitions"]["property"]["properties"][tenantId] = obj["definitions"]["property"]["properties"][oldTenant]
                del obj["definitions"]["property"]["properties"][oldTenant]
            elif 'customFields' in obj["definitions"].keys():
                obj["definitions"]["customFields"]["properties"][tenantId] = obj["definitions"]["customFields"]["properties"][oldTenant]
                del obj["definitions"]["customFields"]["properties"][oldTenant]
        return obj
def createMixin(self, mixin_obj: dict = None) -> dict:
"""
Create a mixin based on the dictionary passed.
Arguments :
mixin_obj : REQUIRED : the object required for creating the mixin.
Should contain title, type, definitions
"""
if mixin_obj is None:
raise Exception("Require a mixin object")
if (
"title" not in mixin_obj
or "type" not in mixin_obj
or "definitions" not in mixin_obj
):
raise AttributeError(
"Require to have at least title, type, definitions set in the object."
)
if self.loggingEnabled:
self.logger.debug(f"Starting createMixin")
path = f"/{self.container}/mixins/"
res = self.connector.postData(
self.endpoint + path, data=mixin_obj)
return res
def createFieldGroup(self, fieldGroup_obj: dict = None) -> dict:
"""
Create a mixin based on the dictionary passed.
Arguments :
fieldGroup_obj : REQUIRED : the object required for creating the field group.
Should contain title, type, definitions
"""
if fieldGroup_obj is None:
raise Exception("Require a mixin object")
if (
"title" not in fieldGroup_obj
or "type" not in fieldGroup_obj
or "definitions" not in fieldGroup_obj
):
raise AttributeError(
"Require to have at least title, type, definitions set in the object."
)
if self.loggingEnabled:
self.logger.debug(f"Starting createFieldGroup")
path = f"/{self.container}/fieldgroups/"
res = self.connector.postData(
self.endpoint + path, data=fieldGroup_obj)
return res
def deleteMixin(self, mixinId: str = None):
"""
Arguments:
mixinId : meta:altId or $id
"""
if mixinId is None:
raise Exception("Require an ID")
if mixinId.startswith("https://"):
from urllib import parse
mixinId = parse.quote_plus(mixinId)
if self.loggingEnabled:
self.logger.debug(f"Starting deleteMixin")
path = f"/{self.container}/mixins/{mixinId}"
res = self.connector.deleteData(self.endpoint + path)
return res
def deleteFieldGroup(self, fieldGroupId: str = None):
"""
Arguments:
fieldGroupId : meta:altId or $id
"""
if fieldGroupId is None:
raise Exception("Require an ID")
if fieldGroupId.startswith("https://"):
from urllib import parse
fieldGroupId = parse.quote_plus(fieldGroupId)
if self.loggingEnabled:
self.logger.debug(f"Starting deleteFieldGroup")
path = f"/{self.container}/fieldgroups/{fieldGroupId}"
res = self.connector.deleteData(self.endpoint + path)
return res
def patchMixin(self, mixinId: str = None, changes: list = None):
"""
Update the mixin with the operation described in the changes.
Arguments:
mixinId : REQUIRED : meta:altId or $id
changes : REQUIRED : dictionary on what to update on that mixin.
Example:
[
{
"op": "add",
"path": "/allOf",
"value": {'$ref': 'https://ns.adobe.com/emeaconsulting/mixins/fb5b3cd49707d27367b93e07d1ac1f2f7b2ae8d051e65f8d',
'type': 'object',
'meta:xdmType': 'object'}
}
]
information : http://jsonpatch.com/
"""
if mixinId is None or changes is None:
raise Exception("Require an ID and changes")
if mixinId.startswith("https://"):
from urllib import parse
mixinId = parse.quote_plus(mixinId)
if self.loggingEnabled:
self.logger.debug(f"Starting patchMixin")
path = f"/{self.container}/mixins/{mixinId}"
if type(changes) == dict:
changes = list(changes)
res = self.connector.patchData(
self.endpoint + path, data=changes)
return res
def patchFieldGroup(self, fieldGroupId: str = None, changes: list = None):
"""
Update the mixin with the operation described in the changes.
Arguments:
fieldGroupId : REQUIRED : meta:altId or $id
changes : REQUIRED : dictionary on what to update on that mixin.
Example:
[
{
"op": "add",
"path": "/allOf",
"value": {'$ref': 'https://ns.adobe.com/emeaconsulting/mixins/fb5b3cd49707d27367b93e07d1ac1f2f7b2ae8d051e65f8d',
'type': 'object',
'meta:xdmType': 'object'}
}
]
information : http://jsonpatch.com/
"""
if fieldGroupId is None or changes is None:
raise Exception("Require an ID and changes")
if fieldGroupId.startswith("https://"):
from urllib import parse
fieldGroupId = parse.quote_plus(fieldGroupId)
if self.loggingEnabled:
self.logger.debug(f"Starting patchFieldGroup")
path = f"/{self.container}/fieldgroups/{fieldGroupId}"
if type(changes) == dict:
changes = list(changes)
res = self.connector.patchData(
self.endpoint + path, data=changes)
return res
def putMixin(self, mixinId: str = None, mixinObj: dict = None, **kwargs) -> dict:
"""
A PUT request essentially re-writes the schema, therefore the request body must include all fields required to create (POST) a schema.
This is especially useful when updating a lot of information in the schema at once.
Arguments:
mixinId : REQUIRED : $id or meta:altId
mixinObj : REQUIRED : dictionary of the new schema.
It requires a allOf list that contains all the attributes that are required for creating a schema.
#/Schemas/replace_schema
More information on : https://www.adobe.io/apis/experienceplatform/home/api-reference.html
"""
if mixinId is None:
raise Exception("Require an ID for the schema")
if mixinId.startswith("https://"):
from urllib import parse
mixinId = parse.quote_plus(mixinId)
if self.loggingEnabled:
self.logger.debug(f"Starting putMixin")
path = f"/{self.container}/mixins/{mixinId}"
res = self.connector.putData(
self.endpoint + path, data=mixinObj)
return res
def putFieldGroup(
self, fieldGroupId: str = None, fieldGroupObj: dict = None, **kwargs
) -> dict:
"""
A PUT request essentially re-writes the schema, therefore the request body must include all fields required to create (POST) a schema.
This is especially useful when updating a lot of information in the schema at once.
Arguments:
fieldGroupId : REQUIRED : $id or meta:altId
fieldGroupObj : REQUIRED : dictionary of the new Field Group.
It requires a allOf list that contains all the attributes that are required for creating a schema.
#/Schemas/replace_schema
More information on : https://www.adobe.io/apis/experienceplatform/home/api-reference.html
"""
if fieldGroupId is None:
raise Exception("Require an ID for the schema")
if fieldGroupId.startswith("https://"):
from urllib import parse
fieldGroupId = parse.quote_plus(fieldGroupId)
if self.loggingEnabled:
self.logger.debug(f"Starting putMixin")
path = f"/{self.container}/fieldgroups/{fieldGroupId}"
res = self.connector.putData(
self.endpoint + path, data=fieldGroupObj)
return res
def getUnions(self, **kwargs):
"""
Get all of the unions that has been set for the tenant.
Returns a dictionary.
Possibility to add option using kwargs
"""
path = f"/{self.container}/unions"
params = {}
if len(kwargs) > 0:
for key in kwargs.key():
if key == "limit":
if int(kwargs["limit"]) > 500:
kwargs["limit"] = 500
params[key] = kwargs.get(key, "")
if self.loggingEnabled:
self.logger.debug(f"Starting getUnions")
res = self.connector.getData(
self.endpoint + path, params=params)
data = res["results"] # issue when requesting directly results.
return data
def getUnion(self, union_id: str = None, version: int = 1):
"""
Get a specific union type. Returns a dictionnary
Arguments :
union_id : REQUIRED : meta:altId or $id
version : OPTIONAL : version of the union schema required.
"""
if union_id is None:
raise Exception("Require an ID")
if self.loggingEnabled:
self.logger.debug(f"Starting getUnion")
if union_id.startswith("https://"):
from urllib import parse
union_id = parse.quote_plus(union_id)
path = f"/{self.container}/unions/{union_id}"
privateHeader = deepcopy(self.header)
privateHeader.update(
{"Accept": "application/vnd.adobe.xdm-full+json; version=" + str(version)}
)
res = self.connector.getData(self.endpoint + path, headers=privateHeader)
return res
def getXDMprofileSchema(self):
"""
Returns a list of all schemas that are part of the XDM Individual Profile.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getXDMprofileSchema")
path = "/tenant/schemas?property=meta:immutableTags==union&property=meta:class==https://ns.adobe.com/xdm/context/profile"
res = self.connector.getData(self.endpoint + path)
return res
def getDataTypes(self, **kwargs):
"""
Get the data types from a container.
Possible kwargs:
properties : str :limit the amount of properties return by comma separated list.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getDataTypes")
path = f"/{self.container}/datatypes/"
params = {}
if kwargs.get("properties", None) is not None:
params = {"properties": kwargs.get("properties", "title,$id")}
privateHeader = deepcopy(self.header)
privateHeader.update({"Accept": "application/vnd.adobe.xdm-id+json"})
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params
)
data = res["results"]
page = res.get("_page",{})
nextPage = page.get('next',None)
while nextPage is not None:
res = self.connector.getData(
self.endpoint + path, headers=privateHeader, params=params
)
data += res.get("results",[])
page = res.get("_page",{})
nextPage = page.get('next',None)
return data
def getDataType(
self, dataTypeId: str = None, version: str = "1", save: bool = False
):
"""
Retrieve a specific data type id
Argument:
dataTypeId : REQUIRED : The resource meta:altId or URL encoded $id URI.
"""
if dataTypeId is None:
raise Exception("Require a dataTypeId")
if dataTypeId.startswith("https://"):
from urllib import parse
dataTypeId = parse.quote_plus(dataTypeId)
if self.loggingEnabled:
self.logger.debug(f"Starting getDataType")
privateHeader = deepcopy(self.header)
privateHeader.update(
{"Accept": "application/vnd.adobe.xdm-full+json; version=" + version}
)
path = f"/{self.container}/datatypes/{dataTypeId}"
res = self.connector.getData(self.endpoint + path, headers=privateHeader)
if save:
aepp.saveFile(
module="schema", file=res, filename=res["title"], type_file="json"
)
return res
def createDataType(self, dataTypeObj: dict = None)->dict:
"""
Create Data Type based on the object passed.
"""
if dataTypeObj is None:
raise Exception("Require a dictionary to create the Data Type")
if self.loggingEnabled:
self.logger.debug(f"Starting createDataTypes")
path = f"/{self.container}/datatypes/"
res = self.connector.postData(
self.endpoint + path, data=dataTypeObj)
return res
def patchDataType(self,dataTypeId:str=None,operations:list=None)->dict:
"""
Patch an existing data type with the operation provided.
Arguments:
dataTypeId : REQUIRED : The Data Type ID to be used
operations : REQUIRED : The list of operation to be applied on that Data Type.
Example : '[
{
"op": "replace",
"path": "/loyaltyLevel/meta:enum",
"value": {
"ultra-platinum": "Ultra Platinum",
"platinum": "Platinum",
"gold": "Gold",
"silver": "Silver",
"bronze": "Bronze"
}
}
]'
"""
if dataTypeId is None:
raise Exception("Require a a data type ID")
if operations is None:
raise Exception("Require a list of operation to patch")
if self.loggingEnabled:
self.logger.debug(f"Starting patchDataType")
path = f"/{self.container}/datatypes/{dataTypeId}"
res = self.connector.patchData(
self.endpoint + path, data=operations)
return res
def putDataType(self,dataTypeId:str=None,dataTypeObj:dict=None)->dict:
"""
Replace an existing data type definition with the new definition provided.
Arguments:
dataTypeId : REQUIRED : The Data Type ID to be replaced
dataTypeObj : REQUIRED : The new Data Type definition.
"""
if dataTypeId is None:
raise Exception("Require a a data type ID")
if dataTypeObj is None:
raise Exception("Require a dictionary to replace the Data Type definition")
if self.loggingEnabled:
self.logger.debug(f"Starting putDataType")
path = f"/{self.container}/datatypes/{dataTypeId}"
res = self.connector.putData(
self.endpoint + path, data=dataTypeObj)
return res
def getDescriptors(
self,
type_desc: str = None,
id_desc: bool = False,
link_desc: bool = False,
save: bool = False,
**kwargs,
) -> list:
"""
Return a list of all descriptors contains in that tenant id.
By default return a v2 for pagination.
Arguments:
type_desc : OPTIONAL : if you want to filter for a specific type of descriptor. None default.
(possible value : "xdm:descriptorIdentity")
id_desc : OPTIONAL : if you want to return only the id.
link_desc : OPTIONAL : if you want to return only the paths.
save : OPTIONAL : Boolean that would save your descriptors in the schema folder. (default False)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getDescriptors")
path = f"/{self.container}/descriptors/"
params = {"start": kwargs.get("start", 0)}
if type_desc is not None:
params["property"] = f"@type=={type_desc}"
if id_desc:
update_id = "-id"
else:
update_id = ""
if link_desc:
update_link = "-link"
else:
update_link = ""
privateHeader = deepcopy(self.header)
privateHeader[
"Accept"
] = f"application/vnd.adobe.xdm-v2{update_link}{update_id}+json"
res = self.connector.getData(
self.endpoint + path, params=params, headers=privateHeader
)
data = res["results"]
page = res["_page"]
while page["next"] is not None:
data += self.getDescriptors(start=page["next"])
if save:
aepp.saveFile(
module="schema", file=data, filename="descriptors", type_file="json"
)
return data
def getDescriptor(self, descriptorId: str = None, save: bool = False) -> dict:
"""
Return a specific descriptor
Arguments:
descriptorId : REQUIRED : descriptor ID to return (@id).
save : OPTIONAL : Boolean that would save your descriptors in the schema folder. (default False)
"""
if descriptorId is None:
raise Exception("Require a descriptor id")
if self.loggingEnabled:
self.logger.debug(f"Starting getDescriptor")
path = f"/{self.container}/descriptors/{descriptorId}"
privateHeader = deepcopy(self.header)
privateHeader["Accept"] = f"application/vnd.adobe.xdm+json"
res = self.connector.getData(self.endpoint + path, headers=privateHeader)
if save:
aepp.saveFile(
module="schema",
file=res,
filename=f'{res["@id"]}_descriptors',
type_file="json",
)
return res
    def createDescriptor(
        self,
        descriptorObj:dict = None,
        desc_type: str = "xdm:descriptorIdentity",
        sourceSchema: str = None,
        sourceProperty: str = None,
        namespace: str = None,
        primary: bool = None,
        **kwargs,
    ) -> dict:
        """
        Create a descriptor attached to a specific schema.
        Arguments:
            descriptorObj : OPTIONAL : the full descriptor payload; when provided, all
                other arguments are ignored and the object is posted as-is.
            desc_type : REQUIRED : the type of descriptor to create.(default Identity)
            sourceSchema : REQUIRED : the schema attached to your identity ()
            sourceProperty : REQUIRED : the path to the field
            namespace : REQUIRED : the namespace used for the identity
            primary : OPTIONAL : Boolean (True or False) to define if it is a primary identity or not (default None).
        possible kwargs:
            version : version of the creation (default 1)
            xdm:property : type of property
            (any other key containing 'xdm:' is forwarded into the payload)
        """
        if self.loggingEnabled:
            self.logger.debug(f"Starting createDescriptor")
        # NOTE(review): the path hardcodes "/tenant" while other descriptor methods use
        # self.container — confirm this asymmetry is intentional.
        path = f"/tenant/descriptors"
        if descriptorObj:
            # full payload supplied by the caller: post it unchanged
            res = self.connector.postData(
                self.endpoint + path, data=descriptorObj)
        else:
            if sourceSchema is None or sourceProperty is None:
                raise Exception("Missing required arguments.")
            obj = {
                "@type": desc_type,
                "xdm:sourceSchema": sourceSchema,
                "xdm:sourceVersion": kwargs.get("version", 1),
                "xdm:sourceProperty": sourceProperty,
            }
            if namespace is not None:
                obj["xdm:namespace"] = namespace
            if primary is not None:
                obj["xdm:isPrimary"] = primary
            # forward any extra xdm-prefixed attributes straight into the payload
            for key in kwargs:
                if 'xdm:' in key:
                    obj[key] = kwargs.get(key)
            res = self.connector.postData(
                self.endpoint + path, data=obj)
        return res
def deleteDescriptor(self, descriptor_id: str = None) -> str:
"""
Delete a specific descriptor.
Arguments:
descriptor_id : REQUIRED : the descriptor id to delete
"""
if descriptor_id is None:
raise Exception("Require a descriptor id")
if self.loggingEnabled:
self.logger.debug(f"Starting deleteDescriptor")
path = f"/{self.container}/descriptors/{descriptor_id}"
privateHeader = deepcopy(self.header)
privateHeader["Accept"] = f"application/vnd.adobe.xdm+json"
res = self.connector.deleteData(self.endpoint + path, headers=privateHeader)
return res
def putDescriptor(
self,
descriptorId: str = None,
descriptorObj:dict = None,
desc_type: str = "xdm:descriptorIdentity",
sourceSchema: str = None,
sourceProperty: str = None,
namespace: str = None,
xdmProperty: str = "xdm:code",
primary: bool = False,
**kwargs
) -> dict:
"""
Replace the descriptor with the new definition. It updates the whole definition.
Arguments:
descriptorId : REQUIRED : the descriptor id to replace
descriptorObj : REQUIRED : The full descriptor object if you want to pass it directly.
desc_type : REQUIRED : the type of descriptor to create.(default Identity)
sourceSchema : REQUIRED : the schema attached to your identity ()
sourceProperty : REQUIRED : the path to the field
namespace : REQUIRED : the namespace used for the identity
xdmProperty : OPTIONAL : xdm code for the descriptor (default : xdm:code)
primary : OPTIONAL : Boolean to define if it is a primary identity or not (default False).
"""
if descriptorId is None:
raise Exception("Require a descriptor id")
if self.loggingEnabled:
self.logger.debug(f"Starting putDescriptor")
path = f"/{self.container}/descriptors/{descriptorId}"
if sourceSchema is None or sourceProperty is None or namespace is None:
raise Exception("Missing required arguments.")
if descriptorObj is not None and type(descriptorObj) == dict:
obj = descriptorObj
else:
obj = {
"@type": desc_type,
"xdm:sourceSchema": sourceSchema,
"xdm:sourceVersion": 1,
"xdm:sourceProperty": sourceProperty,
"xdm:namespace": namespace,
"xdm:property": xdmProperty,
"xdm:isPrimary": primary,
}
for key in kwargs:
if 'xdm:' in key:
obj[key] = kwargs.get(key)
res = self.connector.putData(
self.endpoint + path, data=obj)
return res
def getAuditLogs(self, resourceId: str = None) -> list:
"""
Returns the list of the changes made to a ressource (schema, class, mixin).
Arguments:
resourceId : REQUIRED : The "$id" or "meta:altId" of the resource.
"""
if not resourceId:
raise ValueError("resourceId should be included as a parameter")
if resourceId.startswith("https://"):
from urllib import parse
resourceId = parse.quote_plus(resourceId)
if self.loggingEnabled:
self.logger.debug(f"Starting createDescriptor")
path: str = f"/rpc/auditlog/{resourceId}"
res: list = self.connector.getData(self.endpoint + path)
return res
def exportResource(self,resourceId:str=None,Accept:str="application/vnd.adobe.xed+json; version=1")->dict:
"""
Return all the associated references required for importing the resource in a new sandbox or a new Org.
Argument:
resourceId : REQUIRED : The $id or meta:altId of the resource to export.
Accept : OPTIONAL : If you want to change the Accept header of the request.
"""
if resourceId is None:
raise ValueError("Require a resource ID")
if self.loggingEnabled:
self.logger.debug(f"Starting exportResource for resourceId : {resourceId}")
if resourceId.startswith("https://"):
from urllib import parse
resourceId = parse.quote_plus(resourceId)
privateHeader = deepcopy(self.header)
privateHeader['Accept'] = Accept
path = f"/rpc/export/{resourceId}"
res = self.connector.getData(self.endpoint +path,headers=privateHeader)
return res
    def importResource(self,dataResource:dict = None)->dict:
        """
        Import a resource based on the export method.
        Arguments:
            dataResource : REQUIRED : dictionary of the resource retrieved (as returned by exportResource).
        """
        if dataResource is None:
            raise ValueError("a dictionary presenting the resource to be imported should be included as a parameter")
        if self.loggingEnabled:
            self.logger.debug(f"Starting importResource")
        # NOTE(review): the path targets "/rpc/export/" even though this method imports —
        # looks like it should be an import endpoint; confirm against the API reference.
        path: str = f"/rpc/export/"
        res: list = self.connector.postData(self.endpoint + path, data=dataResource)
        return res
def extendFieldGroup(self,fieldGroupId:str=None,values:list=None,tenant:str='tenant')->dict:
"""
Patch a Field Group to extend its compatibility with ExperienceEvents, IndividualProfile and Record.
Arguments:
fieldGroupId : REQUIRED : meta:altId or $id of the field group.
values : OPTIONAL : If you want to pass the behavior you want to extend the field group to.
Examples: ["https://ns.adobe.com/xdm/context/profile",
"https://ns.adobe.com/xdm/context/experienceevent",
]
by default profile and experienceEvent will be added to the FieldGroup.
tenant : OPTIONAL : default "tenant", possible value 'global'
"""
if fieldGroupId is None:
raise Exception("Require a field Group ID")
if self.loggingEnabled:
self.logger.debug(f"Starting extendFieldGroup")
path = f"/{tenant}/fieldgroups/{fieldGroupId}"
if values is not None:
list_fgs = values
else:
list_fgs = ["https://ns.adobe.com/xdm/context/profile",
"https://ns.adobe.com/xdm/context/experienceevent"]
operation = [
{
"op": "replace",
"path": "/meta:intendedToExtend",
"value": list_fgs
}
]
res = self.connector.patchData(self.endpoint + path,data=operation)
return res
def enableSchemaForRealTime(self,schemaId:str=None)->dict:
"""
Enable a schema for real time based on its ID.
Arguments:
schemaId : REQUIRED : The schema ID required to be updated
"""
if schemaId is None:
raise Exception("Require a schema ID")
if self.loggingEnabled:
self.logger.debug(f"Starting enableSchemaForRealTime")
path = f"/{self.container}/schemas/{schemaId}/"
operation = [
{
"op": "add",
"path": "/meta:immutableTags",
"value": ["union"]
}
]
res = self.connector.patchData(self.endpoint + path,data=operation)
return res
def FieldGroupManager(self,fieldGroup:Union[dict,str,None],title:str=None,fg_class:list=["experienceevent","profile"]) -> 'FieldGroupManager':
"""
Generate a field group Manager instance using the information provided by the schema instance.
Arguments:
fieldGroup : OPTIONAL : the field group definition as dictionary OR the ID to access it OR nothing if you want to start from scratch
title : OPTIONAL : If you wish to change the tile of the field group.
"""
tenantId = self.getTenantId()
return FieldGroupManager(tenantId=tenantId,fieldGroup=fieldGroup,title=title,fg_class=fg_class,schemaAPI=self)
def SchemaManager(self,schema:Union[dict,str],fieldGroups:list=None) -> 'FieldGroupManager':
"""
Generate a Schema Manager instance using the information provided by the schema instance.
Arguments:
schema : OPTIONAL : the schema definition as dictionary OR the ID to access it OR Nothing if you want to start from scratch
fieldGroups : OPTIONAL : If you wish to add a list of fieldgroups.
fgManager : OPTIONAL : If you wish to handle the different field group passed into a Field Group Manager instance and have additional methods available.
"""
return SchemaManager(schema=schema,fieldGroups=fieldGroups,schemaAPI=self)
def compareDFschemas(self,df1,df2,**kwargs)->dict:
"""
Compare 2 schema dataframe returned by the SchemaManager `to_dataframe` method.
Arguments:
df1 : REQUIRED : the first schema dataframe to compare
df2 : REQUIRED : the second schema dataframe to compare
possible keywords:
title1 : title of the schema used in the dataframe 1 (default df1)
title2 : title of the schema used in the dataframe 2 (default df2)
The title1 and title2 will be used instead of df1 or df2 in the results keys presented below.
Results:
Results are stored in a dictionary with these keys:
- df1 (or title1) : copy of the dataframe 1 passed
- df2 (or title2) : copy of the dataframe 2 passed
- fielgroups: dictionary containing
- aligned : boolean to define if the schema dataframes contain the same field groups
- df1_missingFieldGroups : tuple of field groups missing on df1 compare to df2
- df2_missingFieldGroups : tuple of field groups missing on df2 compare to df1
- paths: dictionary containing
- aligned : boolean to define if the schema dataframes contain the same fields.
- df1_missing : tuple of the paths missing in df1 compare to df2
- df2_missing : tuple of the paths missing in df2 compare to df1
- type_issues: list of all the paths that are not of the same type in both schemas.
"""
if type(df1) != pd.DataFrame or type(df2) != pd.DataFrame:
raise TypeError('Require dataframes to be passed')
if 'path' not in df1.columns or 'type' not in df1.columns or 'fieldGroup' not in df1.columns:
raise AttributeError('Your data frame 1 is incomplete, it does not contain one of the following columns : path, type, fieldGroup')
if 'path' not in df2.columns or 'type' not in df2.columns or 'fieldGroup' not in df2.columns:
raise AttributeError('Your data frame 2 is incomplete, it does not contain one of the following columns : path, type, fieldGroup')
name1 = kwargs.get('title1','df1')
name2 = kwargs.get('title2','df2')
dict_result = {f'{name1}':df1.copy(),f'{name2}':df2.copy()}
fieldGroups1 = tuple(sorted(df1.fieldGroup.unique()))
fieldGroups2 = tuple(sorted(df2.fieldGroup.unique()))
if fieldGroups1 == fieldGroups2:
dict_result['fieldGroups'] = {'aligned':True}
else:
dict_result['fieldGroups'] = {'aligned':False}
dict_result['fieldGroups'][f'{name1}_missingFieldGroups'] = tuple(set(fieldGroups2).difference(set(fieldGroups1)))
dict_result['fieldGroups'][f'{name2}_missingFieldGroups'] = tuple(set(fieldGroups1).difference(set(fieldGroups2)))
path_fg1 = tuple(sorted(df1.path.unique()))
path_fg2 = tuple(sorted(df2.path.unique()))
if path_fg1 == path_fg2:
dict_result['paths'] = {'aligned':True}
else:
dict_result['paths'] = {'aligned':False}
dict_result['paths'][f'{name1}_missing'] = tuple(set(path_fg2).difference(set(path_fg1)))
dict_result['paths'][f'{name2}_missing'] = tuple(set(path_fg1).difference(set(path_fg2)))
common_paths = tuple(set(path_fg2).intersection(set(path_fg1)))
dict_result['type_issues'] = []
for path in common_paths:
if df1[df1['path'] == path]['type'].values[0] != df2[df2['path'] == path]['type'].values[0]:
dict_result['type_issues'].append(path)
return dict_result
class FieldGroupManager:
    """
    Class that reads and generates custom field groups.

    The ``fieldGroup`` attribute always holds the raw XDM definition; the
    helper methods translate it to and from flattened representations
    (dot-notation paths, dictionary, DataFrame) and build JSON-Patch
    operations for the Schema Registry API.
    """
    def __init__(self,
                fieldGroup:Union[dict,str]=None,
                title:str=None,
                fg_class:list=None,
                schemaAPI:'Schema'=None,
                config: Union[dict,ConnectObject] = aepp.config.config_object,
                )->None:
        """
        Instantiator for field group creation.
        Arguments:
            fieldGroup : OPTIONAL : the field group definition as dictionary OR the $id/altId to access it.
                If you pass the $id or altId, you should pass the schemaAPI instance or have uploaded a configuration file.
            title : OPTIONAL : If you want to name the field group.
            fg_class : OPTIONAL : the class(es) that will support this field group.
                by default events and profile, possible value : "record"
            schemaAPI : OPTIONAL : The instance of the Schema class. Provide a way to connect to the API.
            config : OPTIONAL : The config object in case you want to override the configuration.
        """
        if fg_class is None:
            # avoid the shared mutable-default-argument pitfall
            fg_class = ["experienceevent", "profile"]
        self.EDITABLE = False
        self.fieldGroup = {}
        if schemaAPI is not None and type(schemaAPI) == Schema:
            self.schemaAPI = schemaAPI
        else:
            self.schemaAPI = Schema(config=config)
        self.tenantId = f"_{self.schemaAPI.getTenantId()}"
        if fieldGroup is not None:
            if type(fieldGroup) == dict:
                if fieldGroup.get("meta:resourceType",None) == "mixins":
                    if fieldGroup.get('definitions',None) is not None:
                        # custom ("mixins") field groups can be edited; OOTB ones cannot
                        if 'mixins' in fieldGroup.get('$id'):
                            self.EDITABLE = True
                            self.fieldGroup = self.schemaAPI.getFieldGroup(fieldGroup['$id'],full=False)
                        else:
                            tmp_def = self.schemaAPI.getFieldGroup(fieldGroup['$id'],full=True) ## handling default mixins
                            # OOTB field groups expose "properties"; normalize to "definitions"
                            tmp_def['definitions'] = tmp_def['properties']
                            self.fieldGroup = tmp_def
                    else:
                        self.fieldGroup = self.schemaAPI.getFieldGroup(fieldGroup['$id'],full=False)
            elif type(fieldGroup) == str and (fieldGroup.startswith('https:') or fieldGroup.startswith(f'{self.tenantId}.')):
                if self.schemaAPI is None:
                    raise Exception("You try to retrieve the fieldGroup definition from the id, but no API has been passed in the schemaAPI parameter.")
                if 'mixins' in fieldGroup:
                    self.EDITABLE = True
                    self.fieldGroup = self.schemaAPI.getFieldGroup(fieldGroup,full=False)
                else: ## handling default mixins
                    tmp_def = self.schemaAPI.getFieldGroup(fieldGroup,full=True) ## handling default mixins
                    tmp_def['definitions'] = tmp_def['properties']
                    self.fieldGroup = tmp_def
            else:
                raise ValueError("the element pass is not a field group definition")
        else:
            # no definition given: start from an empty, editable field-group template
            self.EDITABLE = True
            self.fieldGroup = {
                "title" : "",
                "meta:resourceType":"mixins",
                "description" : "",
                "type": "object",
                "definitions":{
                    "customFields":{
                        "type" : "object",
                        "properties":{
                            self.tenantId:{
                                "properties":{},
                                "type" : "object"
                            },
                        }
                    },
                    "property":{
                        "type" : "object",
                        "properties":{
                            self.tenantId:{
                                "properties":{},
                                "type" : "object"
                            },
                        }
                    },
                },
                'allOf':[{
                    "$ref": "#/definitions/customFields",
                    "type": "object"
                },
                {
                    "$ref": "#/definitions/property",
                    "type": "object"
                }],
                "meta:intendedToExtend":[],
                "meta:containerId": "tenant",
                "meta:tenantNamespace": self.tenantId,
            }
        if self.fieldGroup.get("meta:intendedToExtend") == []:
            # only a freshly-created template has an empty list; fill it from fg_class
            for cls in fg_class:
                if 'experienceevent' in cls or "https://ns.adobe.com/xdm/context/experienceevent" ==cls:
                    self.fieldGroup["meta:intendedToExtend"].append("https://ns.adobe.com/xdm/context/experienceevent")
                elif "profile" in cls or "https://ns.adobe.com/xdm/context/profile" == cls:
                    self.fieldGroup["meta:intendedToExtend"].append("https://ns.adobe.com/xdm/context/profile")
                elif "record" in cls or "https://ns.adobe.com/xdm/data/record" == cls:
                    # FIX: was appending the profile URL for the record class
                    self.fieldGroup["meta:intendedToExtend"].append("https://ns.adobe.com/xdm/data/record")
        if len(self.fieldGroup.get('allOf',[]))>1:
            ### handling the custom field group based on existing ootb field groups
            for element in self.fieldGroup.get('allOf'):
                if element.get('$ref') != '#/definitions/customFields' and element.get('$ref') != '#/definitions/property':
                    additionalDefinition = self.schemaAPI.getFieldGroup(element.get('$ref'),full=True)
                    self.fieldGroup['definitions'] = self.__simpleDeepMerge__(self.fieldGroup['definitions'],additionalDefinition.get('properties'))
        self.__setAttributes__(self.fieldGroup)
        if title is not None:
            self.fieldGroup['title'] = title
            self.title = title

    def __setAttributes__(self,fieldGroup:dict)->None:
        """Cache title / $id / altId as instance attributes from the definition."""
        uniqueId = fieldGroup.get('id',str(int(time.time()*100))[-7:])
        self.title = self.fieldGroup.get('title',f'unknown:{uniqueId}')
        if self.fieldGroup.get('$id',False):
            self.id = self.fieldGroup.get('$id')
        if self.fieldGroup.get('meta:altId',False):
            self.altId = self.fieldGroup.get('meta:altId')

    def __str__(self)->str:
        return json.dumps(self.fieldGroup,indent=2)

    def __repr__(self)->str:
        # NOTE: annotation fixed; json.dumps returns a string, not a dict
        return json.dumps(self.fieldGroup,indent=2)

    def __simpleDeepMerge__(self,base:dict,append:dict)->dict:
        """
        Loop through the keys of 2 dictionary and append the new found key of append to the base.
        Arguments:
            base : The base you want to extend
            append : the new dictionary to append
        """
        if type(append) == list:
            append = append[0]
        for key in append:
            if type(base)==dict:
                if key in base.keys():
                    self.__simpleDeepMerge__(base[key],append[key])
                else:
                    base[key] = append[key]
            elif type(base)==list:
                # lists are treated as single-element containers here
                base = base[0]
                if type(base) == dict:
                    if key in base.keys():
                        self.__simpleDeepMerge__(base[key],append[key])
                    else:
                        base[key] = append[key]
        return base

    def __accessorAlgo__(self,mydict:dict,path:list=None)->dict:
        """
        recursive method to retrieve all the elements.
        Arguments:
            mydict : REQUIRED : The dictionary containing the elements to fetch (in "properties" key)
            path : the path with dot notation.
        """
        path = self.__cleanPath__(path)
        pathSplit = path.split('.')
        key = pathSplit[0]
        if 'customFields' in mydict.keys():
            level = self.__accessorAlgo__(mydict.get('customFields',{}).get('properties',{}),'.'.join(pathSplit))
            if 'error' not in level.keys():
                return level
        if 'property' in mydict.keys() :
            level = self.__accessorAlgo__(mydict.get('property',{}).get('properties',{}),'.'.join(pathSplit))
            return level
        level = mydict.get(key,None)
        if level is not None:
            if level["type"] == "object":
                levelProperties = mydict[key].get('properties',None)
                if levelProperties is not None:
                    level = self.__accessorAlgo__(levelProperties,'.'.join(pathSplit[1:]))
                return level
            elif level["type"] == "array":
                levelProperties = mydict[key]['items'].get('properties',None)
                if levelProperties is not None:
                    level = self.__accessorAlgo__(levelProperties,'.'.join(pathSplit[1:]))
                return level
            else:
                # scalar field reached but the caller asked for something deeper
                if len(pathSplit) > 1:
                    return {'error':f'cannot find the key "{pathSplit[1]}"'}
                return level
        else:
            if key == "":
                return mydict
            return {'error':f'cannot find the key "{key}"'}

    def __searchAlgo__(self,mydict:dict,string:str=None,partialMatch:bool=False,caseSensitive:bool=False,results:list=None,path:str=None,completePath:str=None)->list:
        """
        recursive method to retrieve all the elements.
        Arguments:
            mydict : REQUIRED : The dictionary containing the elements to fetch (start with fieldGroup definition)
            string : the string to look for with dot notation.
            partialMatch : if you want to use partial match
            caseSensitive : to see if we should lower case everything
            results : the list of results to return
            path : the path currently set
            completePath : the complete path from the start.
        """
        finalPath = None
        if results is None:
            results=[]
        for key in mydict:
            if caseSensitive == False:
                keyComp = key.lower()
                string = string.lower()
            else:
                keyComp = key
                string = string
            if partialMatch:
                if string in keyComp:
                    ### checking if element is an array without deeper object level
                    if mydict[key].get('type') == 'array' and mydict[key]['items'].get('properties',None) is None:
                        # FIX: this branch used to be unconditionally overwritten below,
                        # losing the "[]" suffix (and crashing when path was None)
                        if path is not None:
                            finalPath = path + f".{key}[]"
                        else:
                            finalPath = f"{key}[]"
                    else:
                        if path is not None:
                            finalPath = path + f".{key}"
                        else:
                            finalPath = f"{key}"
                    value = deepcopy(mydict[key])
                    value['path'] = finalPath
                    value['queryPath'] = self.__cleanPath__(finalPath)
                    if completePath is None:
                        value['completePath'] = f"/definitions/{key}"
                    else:
                        value['completePath'] = completePath + "/" + key
                    results.append({key:value})
            else:
                if caseSensitive == False:
                    if keyComp == string:
                        if path is not None:
                            finalPath = path + f".{key}"
                        else:
                            finalPath = key
                        value = deepcopy(mydict[key])
                        value['path'] = finalPath
                        value['queryPath'] = self.__cleanPath__(finalPath)
                        if completePath is None:
                            value['completePath'] = f"/definitions/{key}"
                        else:
                            value['completePath'] = completePath + "/" + key
                        results.append({key:value})
                else:
                    if keyComp == string:
                        if path is not None:
                            finalPath = path + f".{key}"
                        else:
                            finalPath = key
                        value = deepcopy(mydict[key])
                        value['path'] = finalPath
                        value['queryPath'] = self.__cleanPath__(finalPath)
                        if completePath is None:
                            value['completePath'] = f"/definitions/{key}"
                        else:
                            value['completePath'] = completePath + "/" + key
                        results.append({key:value})
            ## loop through keys
            if mydict[key].get("type") == "object" or 'properties' in mydict[key].keys():
                levelProperties = mydict[key].get('properties',{})
                if levelProperties != dict():
                    if completePath is None:
                        tmp_completePath = f"/definitions/{key}"
                    else:
                        tmp_completePath = f"{completePath}/{key}"
                    tmp_completePath += f"/properties"
                    if path is None:
                        # "property"/"customFields" are structural wrappers, not user paths
                        if key != "property" and key != "customFields" :
                            tmp_path = key
                        else:
                            tmp_path = None
                    else:
                        tmp_path = f"{path}.{key}"
                    results = self.__searchAlgo__(levelProperties,string,partialMatch,caseSensitive,results,tmp_path,tmp_completePath)
            elif mydict[key].get("type") == "array":
                levelProperties = mydict[key]['items'].get('properties',{})
                if levelProperties != dict():
                    if completePath is None:
                        tmp_completePath = f"/definitions/{key}"
                    else:
                        tmp_completePath = f"{completePath}/{key}"
                    tmp_completePath += f"/items/properties"
                    if levelProperties is not None:
                        if path is None:
                            if key != "property" and key != "customFields":
                                tmp_path = key
                            else:
                                tmp_path = None
                        else:
                            tmp_path = f"{path}.{key}[]{{}}"
                        results = self.__searchAlgo__(levelProperties,string,partialMatch,caseSensitive,results,tmp_path,tmp_completePath)
        return results

    def __searchAttrAlgo__(self,mydict:dict,key:str=None,value:str=None,regex:bool=False, originalField:str=None, results:list=None)->list:
        """
        recursive method to retrieve all the elements.
        Arguments:
            mydict : REQUIRED : The dictionary containing the elements to fetch (start with fieldGroup definition)
            key : key of the attribute
            value : the value of that key to look for.
            regex : if the regex match should be used.
            originalField : the key used to dig deeper.
            results : the list of results to return
        """
        if results is None:
            results=[]
        for k in mydict:
            if key == k:
                if regex:
                    checkValue = deepcopy(mydict[k])
                    if type(checkValue) == list or type(checkValue) == dict:
                        # regex needs a string to match against
                        checkValue = json.dumps(checkValue)
                    if re.match(value,checkValue):
                        if originalField is not None and originalField != 'property' and originalField != 'properties' and originalField != 'items':
                            results.append(originalField)
                else:
                    if mydict[k] == value:
                        if originalField is not None and originalField != 'property' and originalField != 'properties' and originalField != 'items':
                            results.append(originalField)
            ## recursive action for objects and array
            if type(mydict[k]) == dict:
                if k == "properties" or k == 'items':
                    self.__searchAttrAlgo__(mydict[k],key,value,regex,originalField,results)
                else:
                    self.__searchAttrAlgo__(mydict[k],key,value,regex,k,results)
        return results

    def __transformationDict__(self,mydict:dict=None,typed:bool=False,dictionary:dict=None)->dict:
        """
        Transform the current XDM schema to a dictionary.
        """
        if dictionary is None:
            dictionary = {}
        else:
            dictionary = dictionary
        for key in mydict:
            if type(mydict[key]) == dict:
                if mydict[key].get('type') == 'object' or 'properties' in mydict[key].keys():
                    properties = mydict[key].get('properties',None)
                    if properties is not None:
                        if key != "property" and key != "customFields":
                            if key not in dictionary.keys():
                                dictionary[key] = {}
                            self.__transformationDict__(mydict[key]['properties'],typed,dictionary=dictionary[key])
                        else:
                            # skip the structural wrapper level in the output
                            self.__transformationDict__(mydict[key]['properties'],typed,dictionary=dictionary)
                elif mydict[key].get('type') == 'array':
                    levelProperties = mydict[key]['items'].get('properties',None)
                    if levelProperties is not None:
                        dictionary[key] = [{}]
                        self.__transformationDict__(levelProperties,typed,dictionary[key][0])
                    else:
                        if typed:
                            dictionary[key] = [mydict[key]['items'].get('type','object')]
                        else:
                            dictionary[key] = []
                else:
                    if typed:
                        dictionary[key] = mydict[key].get('type','object')
                    else:
                        dictionary[key] = ""
        return dictionary

    def __transformationDF__(self,mydict:dict=None,dictionary:dict=None,path:str=None,queryPath:bool=False,description:bool=False,xdmType:bool=False)->dict:
        """
        Transform the current XDM schema to a dictionary.
        Arguments:
            mydict : the fieldgroup
            dictionary : the dictionary that gather the paths
            path : path that is currently being developed
            queryPath: boolean to tell if we want to add the query path
            description : boolean to tell if you want to retrieve the description
            xdmType : boolean to know if you want to retrieve the xdm Type
        """
        if dictionary is None:
            dictionary = {'path':[],'type':[]}
            if queryPath:
                dictionary['querypath'] = []
            if description:
                dictionary['description'] = []
        else:
            dictionary = dictionary
        for key in mydict:
            if type(mydict[key]) == dict:
                if mydict[key].get('type') == 'object' or 'properties' in mydict[key].keys():
                    if path is None:
                        if key != "property" and key != "customFields":
                            tmp_path = key
                        else:
                            tmp_path = None
                    else:
                        tmp_path = f"{path}.{key}"
                    if tmp_path is not None:
                        dictionary["path"].append(tmp_path)
                        dictionary["type"].append(f"{mydict[key].get('type')}")
                        if queryPath:
                            dictionary["querypath"].append(self.__cleanPath__(tmp_path))
                        if description:
                            dictionary["description"].append(f"{mydict[key].get('description','')}")
                    properties = mydict[key].get('properties',None)
                    if properties is not None:
                        self.__transformationDF__(properties,dictionary,tmp_path,queryPath,description)
                elif mydict[key].get('type') == 'array':
                    levelProperties = mydict[key]['items'].get('properties',None)
                    if levelProperties is not None:
                        # array of objects: record it and recurse into the item schema
                        if path is None:
                            tmp_path = key
                        else :
                            tmp_path = f"{path}.{key}[]{{}}"
                        dictionary["path"].append(tmp_path)
                        dictionary["type"].append(f"[{mydict[key]['items'].get('type')}]")
                        if queryPath and tmp_path is not None:
                            dictionary["querypath"].append(self.__cleanPath__(tmp_path))
                        if description and tmp_path is not None:
                            dictionary["description"].append(mydict[key]['items'].get('description',''))
                        self.__transformationDF__(levelProperties,dictionary,tmp_path,queryPath,description)
                    else:
                        finalpath = f"{path}.{key}"
                        dictionary["path"].append(finalpath)
                        dictionary["type"].append(f"[{mydict[key]['items'].get('type')}]")
                        if queryPath and finalpath is not None:
                            dictionary["querypath"].append(self.__cleanPath__(finalpath))
                        if description and finalpath is not None:
                            dictionary["description"].append(mydict[key]['items'].get('description',''))
                else:
                    if path is not None:
                        finalpath = f"{path}.{key}"
                    else:
                        finalpath = f"{key}"
                    dictionary["path"].append(finalpath)
                    dictionary["type"].append(mydict[key].get('type','object'))
                    if queryPath and finalpath is not None:
                        dictionary["querypath"].append(self.__cleanPath__(finalpath))
                    if description and finalpath is not None:
                        dictionary["description"].append(mydict[key].get('description',''))
        return dictionary

    def __setField__(self,completePathList:list=None,fieldGroup:dict=None,newField:str=None,obj:dict=None)->dict:
        """
        Create a field with the attribute provided
        Arguments:
            completePathList : list of path to use for creation of the field.
            fieldGroup : the self.fieldgroup attribute
            newField : name of the new field to create
            obj : the object associated with the new field
        """
        foundFlag = False ## Flag to set if the operation has been realized or not
        lastField = completePathList[-1]
        fieldGroup = deepcopy(fieldGroup)
        for key in fieldGroup:
            level = fieldGroup.get(key,None)
            if type(level) == dict and key in completePathList:
                if 'properties' in level.keys():
                    if key != lastField:
                        res,foundFlag = self.__setField__(completePathList,fieldGroup[key]['properties'],newField,obj)
                        fieldGroup[key]['properties'] = res
                    else:
                        fieldGroup[key]['properties'][newField] = obj
                        foundFlag = True
                        return fieldGroup,foundFlag
                elif 'items' in level.keys():
                    if 'properties' in  fieldGroup[key].get('items',{}).keys():
                        if key != lastField:
                            res, foundFlag = self.__setField__(completePathList,fieldGroup[key]['items']['properties'],newField,obj)
                            fieldGroup[key]['items']['properties'] = res
                        else:
                            fieldGroup[key]['items']['properties'][newField] = obj
                            foundFlag = True
                            return fieldGroup,foundFlag
        return fieldGroup,foundFlag

    def __removeKey__(self,completePathList:list=None,fieldGroup:dict=None)->dict:
        """
        Remove the key and all element based on the path provided.
        Arugments:
            completePathList : list of path to use for identifying the key to remove
            fieldGroup : the self.fieldgroup attribute
        """
        lastField = deepcopy(completePathList).pop()
        success = False
        for key in fieldGroup:
            level = fieldGroup.get(key,None)
            if type(level) == dict and key in completePathList:
                if 'properties' in level.keys():
                    if lastField in level['properties'].keys():
                        level['properties'].pop(lastField)
                        success = True
                        return success
                    else:
                        # FIX: local was misspelled "sucess"
                        success = self.__removeKey__(completePathList,fieldGroup[key]['properties'])
                        return success
                elif 'items' in level.keys():
                    if 'properties' in level.get('items',{}).keys():
                        if lastField in level.get('items',{}).get('properties'):
                            level['items']['properties'].pop(lastField)
                            success = True
                            return success
                        else:
                            success = self.__removeKey__(completePathList,fieldGroup[key]['items']['properties'])
                            return success
        return success

    def __transformFieldType__(self,dataType:str=None)->dict:
        """
        return the object with the type and possible meta attribute.
        """
        obj = {}
        if dataType == 'double':
            obj['type'] = "number"
        elif dataType == 'long':
            obj['type'] = "integer"
            obj['maximum'] = 9007199254740991
            obj['minimum'] = -9007199254740991
        elif dataType == "short":
            obj['type'] = "integer"
            obj['maximum'] = 32768
            obj['minimum'] = -32768
        elif dataType == "date":
            obj['type'] = "string"
            obj['format'] = "date"
        elif dataType == "DateTime":
            obj['type'] = "string"
            obj['format'] = "date-time"
        elif dataType == "byte":
            obj['type'] = "integer"
            obj['maximum'] = 128
            obj['minimum'] = -128
        else:
            obj['type'] = dataType
        return obj

    def __cleanPath__(self,string:str=None)->str:
        """
        An abstraction to clean the path string and remove the following characters : [,],{,}
        Arguments:
            string : REQUIRED : a string
        """
        return string.replace('[','').replace(']','').replace("{",'').replace('}','')

    def setTitle(self,name:str=None)->None:
        """
        Set a name for the schema.
        Arguments:
            name : REQUIRED : a string to be used for the title of the FieldGroup
        """
        self.fieldGroup['title'] = name
        return None

    def getField(self,path:str)->dict:
        """
        Returns the field definition you want want to obtain.
        Arguments:
            path : REQUIRED : path with dot notation to which field you want to access
        """
        definition = self.fieldGroup.get('definitions',self.fieldGroup.get('properties',{}))
        data = self.__accessorAlgo__(definition,path)
        return data

    def searchField(self,string:str,partialMatch:bool=True,caseSensitive:bool=False)->list:
        """
        Search for a field name based the string passed.
        By default, partial match is enabled and allow case sensitivity option.
        Arguments:
            string : REQUIRED : the string to look for for one of the field
            partialMatch : OPTIONAL : if you want to look for complete string or not. (default True)
            caseSensitive : OPTIONAL : if you want to compare with case sensitivity or not. (default False)
        """
        definition = self.fieldGroup.get('definitions',self.fieldGroup.get('properties',{}))
        data = self.__searchAlgo__(definition,string,partialMatch,caseSensitive)
        return data

    def searchAttribute(self,attr:dict=None,regex:bool=False,extendedResults:bool=False,joinType:str='outer', **kwargs)->list:
        """
        Search for an attribute on the field of the field groups.
        Returns either the list of fields that match this search or their full definitions.
        Arguments:
            attr : REQUIRED : a dictionary of key value pair(s).  Example : {"type" : "string"}
                NOTE : If you wish to have the array type on top of the array results, use the key "arrayType". Example : {"type" : "array","arrayType":"string"}
                        This will automatically set the joinType to "inner". Use type for normal search.
            regex : OPTIONAL : if you want your value of your key to be matched via regex.
                Note that regex will turn every comparison value to string for a "match" comparison.
            extendedResults : OPTIONAL : If you want to have the result to contain all details of these fields. (default False)
            joinType : OPTIONAL : If you pass multiple key value pairs, how do you want to get the match.
                outer : provide the fields if any of the key value pair is matched.
                inner : provide the fields if all the key value pair matched.
        """
        resultsDict = {f"{key}":[] for key in attr.keys()}
        if 'arrayType' in attr.keys(): ## forcing inner join
            joinType = 'inner'
        definition = self.fieldGroup.get('definitions',self.fieldGroup.get('properties',{}))
        for key in attr:
            if key == "arrayType":
                resultsDict[key] += self.__searchAttrAlgo__(definition,"type",attr[key],regex)
            else:
                resultsDict[key] += self.__searchAttrAlgo__(definition,key,attr[key],regex)
        result_combi = []
        if joinType == 'outer':
            for key in resultsDict:
                result_combi += resultsDict[key]
            result_combi = set(result_combi)
        elif joinType == 'inner':
            result_combi = set()
            for key in resultsDict:
                resultsDict[key] = set(resultsDict[key])
                if len(result_combi) == 0:
                    result_combi = resultsDict[key]
                else:
                    result_combi = result_combi.intersection(resultsDict[key])
        if extendedResults:
            result_extended = []
            for field in result_combi:
                result_extended += self.searchField(field,partialMatch=False,caseSensitive=True)
            return result_extended
        return list(result_combi)

    def addFieldOperation(self,path:str,dataType:str=None,title:str=None,objectComponents:dict=None,array:bool=False,enumValues:dict=None,enumType:bool=None,**kwargs)->None:
        """
        Return the operation to be used on the field group with the Patch method (patchFieldGroup), based on the element passed in argument.
        Arguments:
            path : REQUIRED : path with dot notation where you want to create that new field.
                In case of array of objects, use the "[]{}" notation
            dataType : REQUIRED : the field type you want to create
                A type can be any of the following: "string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object","array"
                NOTE : "array" type is to be used for array of objects. If the type is string array, use the boolean "array" parameter.
            title : OPTIONAL : if you want to have a custom title.
            objectComponents: OPTIONAL : A dictionary with the name of the fields contain in the "object" or "array of objects" specify, with their typed.
                Example : {'field1':'string','field2':'double'}
            array : OPTIONAL : Boolean. If the element to create is an array. False by default.
            enumValues : OPTIONAL : If your field is an enum, provid a dictionary of value and display name, such as : {'value':'display'}
            enumType: OPTIONAL: If your field is an enum, indicates whether it is an enum (True) or suggested values (False)
        possible kwargs:
            defaultPath : Define which path to take by default for adding new field on tenant. Default "property", possible alternative : "customFields"
        """
        typeTyped = ["string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object",'array']
        if dataType not in typeTyped:
            raise TypeError('Expecting one of the following type : "string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object"')
        if dataType == 'object' and objectComponents is None:
            raise AttributeError('Require a dictionary providing the object component')
        if title is None:
            title = self.__cleanPath__(path.split('.').pop())
        if title == 'items' or title == 'properties':
            raise Exception('"item" and "properties" are 2 reserved keywords')
        pathSplit = path.split('.')
        if pathSplit[0] == '':
            del pathSplit[0]
        completePath = ['definitions',kwargs.get('defaultPath','property')]
        for p in pathSplit:
            if '[]{}' in p:
                # "[]{}" marks an array of objects in the dot notation
                completePath.append(self.__cleanPath__(p))
                completePath.append('items')
                completePath.append('properties')
            else:
                completePath.append(self.__cleanPath__(p))
                completePath.append('properties')
        finalPath = '/' + '/'.join(completePath)
        operation = [{
            "op" : "add",
            "path" : finalPath,
            "value": {}
        }]
        if dataType != 'object' and dataType != "array":
            if array: # array argument set to true
                operation[0]['value']['type'] = 'array'
                operation[0]['value']['items'] = self.__transformFieldType__(dataType)
            else:
                operation[0]['value'] = self.__transformFieldType__(dataType)
        else:
            if dataType == "object":
                # FIX: "type" must be the string "object", not the dict
                # returned by __transformFieldType__
                operation[0]['value']['type'] = 'object'
                operation[0]['value']['properties'] = {key:self.__transformFieldType__(value) for key, value in zip(objectComponents.keys(),objectComponents.values())}
            operation[0]['value']['title'] = title
        if enumValues is not None and type(enumValues) == dict:
            if array == False:
                operation[0]['value']['meta:enum'] = enumValues
                if enumType:
                    operation[0]['value']['enum'] = list(enumValues.keys())
            else:
                operation[0]['value']['items']['meta:enum'] = enumValues
                if enumType:
                    operation[0]['value']['items']['enum'] = list(enumValues.keys())
        return operation

    def addField(self,path:str,dataType:str=None,title:str=None,objectComponents:dict=None,array:bool=False,enumValues:dict=None,enumType:bool=None,**kwargs)->dict:
        """
        Add the field to the existing fieldgroup definition.
        Returns False when the field could not be inserted.
        Arguments:
            path : REQUIRED : path with dot notation where you want to create that new field. New field name should be included.
            dataType : REQUIRED : the field type you want to create
                A type can be any of the following: "string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object","array"
                NOTE : "array" type is to be used for array of objects. If the type is string array, use the boolean "array" parameter.
            title : OPTIONAL : if you want to have a custom title.
            objectComponents: OPTIONAL : A dictionary with the name of the fields contain in the "object" or "array of objects" specify, with their typed.
                Example : {'field1:'string','field2':'double'}
            array : OPTIONAL : Boolean. If the element to create is an array. False by default.
            enumValues : OPTIONAL : If your field is an enum, provid a dictionary of value and display name, such as : {'value':'display'}
            enumType: OPTIONAL: If your field is an enum, indicates whether it is an enum (True) or suggested values (False)
        possible kwargs:
            defaultPath : Define which path to take by default for adding new field on tenant. Default "property", possible alternative : "customFields"
        """
        if path is None:
            raise ValueError("path must provided")
        typeTyped = ["string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object",'array']
        if dataType not in typeTyped:
            raise TypeError('Expecting one of the following type : "string","boolean","double","long","integer","short","byte","date","dateTime","boolean","object","bytes"')
        if dataType == 'object' and objectComponents is None:
            raise AttributeError('Require a dictionary providing the object component')
        if title is None:
            title = self.__cleanPath__(path.split('.').pop())
        if title == 'items' or title == 'properties':
            raise Exception('"item" and "properties" are 2 reserved keywords')
        pathSplit = self.__cleanPath__(path).split('.')
        if pathSplit[0] == '':
            del pathSplit[0]
        newField = pathSplit.pop()
        obj = {}
        if dataType == 'object':
            obj = { 'type':'object', 'title':title,
                'properties':{key:self.__transformFieldType__(objectComponents[key]) for key in objectComponents }
            }
        elif dataType == 'array':
            obj = { 'type':'array', 'title':title,
                "items":{
                    'type':'object',
                    'properties':{key:self.__transformFieldType__(objectComponents[key]) for key in objectComponents }
                }
            }
        else:
            obj = self.__transformFieldType__(dataType)
            obj['title']= title
            if array:
                obj['type'] = "array"
                obj['items'] = self.__transformFieldType__(dataType)
        if enumValues is not None and type(enumValues) == dict:
            if array == False:
                obj['meta:enum'] = enumValues
                if enumType:
                    obj['enum'] = list(enumValues.keys())
            else:
                obj['items']['meta:enum'] = enumValues
                if enumType:
                    obj['items']['enum'] = list(enumValues.keys())
        completePath:list[str] = [kwargs.get('defaultPath','property')] + pathSplit
        customFields,foundFlag = self.__setField__(completePath, self.fieldGroup['definitions'],newField,obj)
        if foundFlag == False:
            return False
        else:
            self.fieldGroup['definitions'] = customFields
            return self.fieldGroup

    def removeField(self,path:str)->dict:
        """
        Remove a field from the definition based on the path provided.
        NOTE: A path that has received data cannot be removed from a schema or field group.
        Argument:
            path : REQUIRED : The path to be removed from the definition.
        """
        if path is None:
            raise ValueError('Require a path to remove it')
        pathSplit = self.__cleanPath__(path).split('.')
        if pathSplit[0] == '':
            del pathSplit[0]
        success = False
        ## Try customFields
        completePath:list[str] = ['customFields'] + pathSplit
        success = self.__removeKey__(completePath,self.fieldGroup['definitions'])
        ## Try property
        if success == False:
            completePath:list[str] = ['property'] + pathSplit
            success = self.__removeKey__(completePath,self.fieldGroup['definitions'])
        return success

    def to_dict(self,typed:bool=True,save:bool=False)->dict:
        """
        Generate a dictionary representing the field group constitution
        Arguments:
            typed : OPTIONAL : If you want the type associated with the field group to be given.
            save : OPTIONAL : If you wish to save the dictionary in a JSON file
        """
        definition = self.fieldGroup.get('definitions',self.fieldGroup.get('properties',{}))
        data = self.__transformationDict__(definition,typed)
        if save:
            filename = self.fieldGroup.get('title',f'unknown_fieldGroup_{str(int(time.time()))}')
            # FIX: the computed filename was ignored (a literal placeholder was passed)
            aepp.saveFile(module='schema',file=data,filename=f"{filename}.json",type_file='json')
        return data

    def to_dataframe(self,save:bool=False,queryPath:bool=False,description:bool=False)->pd.DataFrame:
        """
        Generate a dataframe with the row representing each possible path.
        Arguments:
            save : OPTIONAL : If you wish to save it with the title used by the field group.
                save as csv with the title used. Not title, used "unknown_fieldGroup_" + timestamp.
            queryPath : OPTIONAL : If you want to have the query path to be used.
            description : OPTIONAL : If you want to have the description used
        """
        definition = self.fieldGroup.get('definitions',self.fieldGroup.get('properties',{}))
        data = self.__transformationDF__(definition,queryPath=queryPath,description=description)
        df = pd.DataFrame(data)
        if save:
            title = self.fieldGroup.get('title',f'unknown_fieldGroup_{str(int(time.time()))}')
            df.to_csv(f"{title}.csv",index=False)
        return df

    def to_xdm(self)->dict:
        """
        Return the fieldgroup definition as XDM
        """
        return self.fieldGroup

    def patchFieldGroup(self,operations:list=None)->dict:
        """
        Patch the field group with the given operation.
        Arguments:
            operation : REQUIRED : The list of operation to realise
        """
        if operations is None or type(operations) != list:
            raise ValueError('Require a list of operations')
        if self.schemaAPI is None:
            # FIX: exception was instantiated but never raised
            raise Exception('Require a schema API connection. Pass the instance of a Schema class or import a configuration file.')
        res = self.schemaAPI.patchFieldGroup(self.id,operations)
        if 'status' in res.keys():
            if res['status'] >= 400:
                print(res['title'])
                return res
            else:
                return res
        self.fieldGroup = res
        self.__setAttributes__(self.fieldGroup)
        return res

    def updateFieldGroup(self)->dict:
        """
        Use the PUT method to push the current field group representation to AEP via API request.
        """
        if self.schemaAPI is None:
            # FIX: exception was instantiated but never raised
            raise Exception('Require a schema API connection. Pass the instance of a Schema class or import a configuration file.')
        res = self.schemaAPI.putFieldGroup(self.id,self.to_xdm())
        if 'status' in res.keys():
            if res['status'] >= 400:
                print(res['title'])
                return res
            else:
                return res
        self.fieldGroup = res
        self.__setAttributes__(self.fieldGroup)
        return res

    def createFieldGroup(self)->dict:
        """
        Use the POST method to create the field group in the organization.
        """
        if self.schemaAPI is None:
            # FIX: exception was instantiated but never raised
            raise Exception('Require a schema API connection. Pass the instance of a Schema class or import a configuration file.')
        res = self.schemaAPI.createFieldGroup(self.to_xdm())
        if 'status' in res.keys():
            if res['status'] >= 400:
                print(res['title'])
                return res
            else:
                return res
        self.fieldGroup = res
        self.__setAttributes__(self.fieldGroup)
        return res
class SchemaManager:
    """
    A class to handle the schema management: wraps a schema definition, its
    field groups (as FieldGroupManager instances) and the Schema API calls.
    """
    DESCRIPTOR_TYPES =["xdm:descriptorIdentity","xdm:alternateDisplayInfo","xdm:descriptorOneToOne","xdm:descriptorReferenceIdentity","xdm:descriptorDeprecated"]
    def __init__(self,schema:Union[str,dict],
                fieldGroups:list=None,
                schemaAPI:'Schema'=None,
                schemaClass:str=None,
                config: Union[dict,ConnectObject] = aepp.config.config_object,
                )->None:
        """
        Instantiate the Schema Manager instance.
        Arguments:
            schema : REQUIRED : Either a schemaId ($id or altId) or the schema dictionary itself.
                If a schemaId is passed, you need to provide the schemaAPI connection as well.
            fieldGroups : OPTIONAL : Possible to specify a list of fieldGroup.
                Either a list of fieldGroupIds (schemaAPI should be provided as well) or list of dictionary definition
            schemaAPI : OPTIONAL : It is required if $id or altId are used. It is the instance of the Schema class.
            schemaClass : OPTIONAL : If you want to set the class to be a specific class.
                Default value is profile: "https://ns.adobe.com/xdm/context/profile", can be replaced with any class definition.
                Possible default value: "https://ns.adobe.com/xdm/context/experienceevent", "https://ns.adobe.com/xdm/context/segmentdefinition"
            config : OPTIONAL : The config object in case you want to override the configuration.
        """
        self.fieldGroupIds=[]
        self.fieldGroupsManagers = []
        self.title = None
        # fix: default the identity attributes so the comparisons below cannot
        # raise AttributeError when the schema definition omits them
        self.id = None
        self.altId = None
        self.classId = None
        if schemaAPI is not None:
            self.schemaAPI = schemaAPI
        else:
            self.schemaAPI = Schema(config=config)
        if type(schema) == dict:
            self.schema = schema
            self.__setAttributes__(self.schema)
            allOf = self.schema.get("allOf",[])
            if len(allOf) == 0:
                # NOTE(review): Warning(...) only instantiates an exception object,
                # nothing is emitted; kept to preserve behavior -- consider
                # warnings.warn if a visible warning is intended.
                Warning("You have passed a schema with -full attribute, you should pass one referencing the fieldGroups.\n Using the meta:extends reference if possible")
                self.fieldGroupIds = [ref for ref in self.schema['meta:extends'] if ('/mixins/' in ref or '/experience/' in ref or '/context/' in ref) and ref != self.classId]
                self.schema['allOf'] = [{"$ref":ref} for ref in self.schema['meta:extends'] if ('/mixins/' in ref or 'xdm/class' in ref or 'xdm/context/' in ref) and ref != self.classId]
            else:
                self.fieldGroupIds = [obj['$ref'] for obj in allOf if ('/mixins/' in obj['$ref'] or '/experience/' in obj['$ref'] or '/context/' in obj['$ref']) and obj['$ref'] != self.classId]
            if self.schemaAPI is None:
                Warning("No schema instance has been passed or config file imported.\n Aborting the creation of field Group Manager")
            else:
                for ref in self.fieldGroupIds:
                    if '/mixins/' in ref:
                        definition = self.schemaAPI.getFieldGroup(ref,full=False)
                    else:
                        ## if the fieldGroup is an OOTB one
                        definition = self.schemaAPI.getFieldGroup(ref,full=True)
                        definition['definitions'] = definition['properties']
                    self.fieldGroupsManagers.append(FieldGroupManager(fieldGroup=definition,schemaAPI=self.schemaAPI))
        elif type(schema) == str:
            if self.schemaAPI is None:
                Warning("No schema instance has been passed or config file imported.\n Aborting the retrieveal of the Schema Definition")
            else:
                self.schema = self.schemaAPI.getSchema(schema,full=False)
                self.__setAttributes__(self.schema)
                allOf = self.schema.get("allOf",[])
                self.fieldGroupIds = [obj.get('$ref','') for obj in allOf if ('/mixins/' in obj.get('$ref','') or '/experience/' in obj.get('$ref','') or '/context/' in obj.get('$ref','')) and obj.get('$ref','') != self.classId]
                for ref in self.fieldGroupIds:
                    if '/mixins/' in ref:
                        definition = self.schemaAPI.getFieldGroup(ref,full=False)
                    elif ref == '':
                        # fix: was `pass`, which fell through and appended a stale
                        # definition from the previous iteration
                        continue
                    else:
                        ## if the fieldGroup is an OOTB one
                        definition = self.schemaAPI.getFieldGroup(ref,full=True)
                        definition['definitions'] = definition['properties']
                    self.fieldGroupsManagers.append(FieldGroupManager(fieldGroup=definition,schemaAPI=self.schemaAPI))
        elif schema is None:
            # empty profile-based schema skeleton
            self.schema = {
                "title": None,
                "description": "power by aepp",
                "allOf": [
                    {
                        "$ref": "https://ns.adobe.com/xdm/context/profile"
                    }
                ]
            }
        if schemaClass is not None:
            self.schema['allOf'][0]['$ref'] = schemaClass
        if fieldGroups is not None and type(fieldGroups) == list:
            # fix: the original tested `fieldGroups[0] == str` / `== dict`,
            # comparing an element against the type object itself -- always
            # False, so the explicit field groups were silently ignored
            if isinstance(fieldGroups[0], str):
                for fgId in fieldGroups:
                    self.fieldGroupIds.append(fgId)
                    if self.schemaAPI is None:
                        Warning("fgManager is set to True but no schema instance has been passed.\n Aborting the creation of field Group Manager")
                    else:
                        # fix: was getFieldGroup(ref) -- `ref` is not defined in
                        # this branch; the current id is fgId
                        definition = self.schemaAPI.getFieldGroup(fgId)
                        self.fieldGroupsManagers.append(FieldGroupManager(definition,schemaAPI=self.schemaAPI))
            elif isinstance(fieldGroups[0], dict):
                for fg in fieldGroups:
                    self.fieldGroupIds.append(fg.get('$id'))
                    self.fieldGroupsManagers.append(FieldGroupManager(fg,schemaAPI=self.schemaAPI))
        self.fieldGroupTitles= tuple(fg.title for fg in self.fieldGroupsManagers)
        self.fieldGroups = {fg.id:fg.title for fg in self.fieldGroupsManagers}
    def __setAttributes__(self,schemaDef:dict)->None:
        """
        Set the title / id / altId / classId attributes from a schema definition.
        """
        if schemaDef.get('title'):
            self.title = schemaDef.get('title')
        if schemaDef.get('$id'):
            self.id = schemaDef.get('$id')
        if schemaDef.get('meta:altId'):
            self.altId = schemaDef.get('meta:altId')
        if schemaDef.get('meta:class'):
            self.classId = schemaDef.get('meta:class')
    def __str__(self)->str:
        return json.dumps(self.schema,indent=2)
    def __repr__(self)->str:
        return json.dumps(self.schema,indent=2)
    def __simpleDeepMerge__(self,base:dict,append:dict)->dict:
        """
        Loop through the keys of 2 dictionary and append the new found key of append to the base.
        Arguments:
            base : The base you want to extend
            append : the new dictionary to append
        """
        if type(append) == list:
            append = append[0]
        for key in append:
            if type(base)==dict:
                if key in base.keys():
                    self.__simpleDeepMerge__(base[key],append[key])
                else:
                    base[key] = append[key]
            elif type(base)==list:
                base = base[0]
                if type(base) == dict:
                    if key in base.keys():
                        self.__simpleDeepMerge__(base[key],append[key])
                    else:
                        base[key] = append[key]
        return base
    def searchField(self,string:str=None,partialMatch:bool=True,caseSensitive:bool=True)->list:
        """
        Search for a field in the different field group.
        You would need to have set fgManager attribute during instantiation or use the convertFieldGroups
        Arguments:
            string : REQUIRED : The string you are looking for
            partialMatch : OPTIONAL : If you want to use partial match (default True)
            caseSensitive : OPTIONAL : If you want to remove the case sensitivity.
        """
        myResults = []
        for fgmanager in self.fieldGroupsManagers:
            res = fgmanager.searchField(string,partialMatch,caseSensitive)
            for r in res:
                r['fieldGroup'] = fgmanager.title
            myResults += res
        return myResults
    def searchAttribute(self,attr:dict=None,regex:bool=False,extendedResults:bool=False,joinType:str='outer', **kwargs)->list:
        """
        Search for an attribute and its value based on the keyword
        Arguments:
            attr : REQUIRED : a dictionary of key value pair(s). Example : {"type" : "string"}
                NOTE : If you wish to have the array type, use the key "arrayType". Example : {"type" : "array","arrayType":"string"}
            regex : OPTIONAL : if you want your value of your key to be matched via regex.
                Note that regex will turn every comparison value to string for a "match" comparison.
            extendedResults : OPTIONAL : If you want to have the result to contain all details of these fields. (default False)
            joinType : OPTIONAL : If you pass multiple key value pairs, how do you want to get the match.
                outer : provide the fields if any of the key value pair is matched. (default)
                inner : provide the fields if all the key value pair matched.
        """
        myResults = []
        for fgmanager in self.fieldGroupsManagers:
            res = fgmanager.searchAttribute(attr=attr,regex=regex,extendedResults=extendedResults,joinType=joinType)
            if extendedResults:
                # the field-group origin is only attachable on dict results
                for r in res:
                    r['fieldGroup'] = fgmanager.title
            myResults += res
        return myResults
    def addFieldGroup(self,fieldGroup:Union[str,dict]=None)->Union[None,'FieldGroupManager']:
        """
        Add a field groups to field Group object and the schema.
        return the specific FieldGroup Manager instance.
        Arguments:
            fieldGroup : REQUIRED : The fieldGroup ID or the dictionary definition connecting to the API.
                if a fieldGroup ID is provided, you should have added a schemaAPI previously.
        """
        if type(fieldGroup) == dict:
            if fieldGroup.get('$id') not in [fg for fg in self.fieldGroupIds]:
                self.fieldGroupIds.append(fieldGroup['$id'])
                self.schema['allOf'].append({'$ref':fieldGroup['$id'],"type": "object"})
        elif type(fieldGroup) == str:
            if fieldGroup not in [fg for fg in self.fieldGroupIds]:
                self.fieldGroupIds.append(fieldGroup)
                self.schema['allOf'].append({'$ref':fieldGroup,"type": "object"})
            if self.schemaAPI is None:
                raise AttributeError('Missing the schema API attribute. Please use the addSchemaAPI method to add it.')
            else:
                fieldGroup = self.schemaAPI.getFieldGroup(fieldGroup)
        # consistency fix: pass the API connection down, as done everywhere else
        fbManager = FieldGroupManager(fieldGroup=fieldGroup,schemaAPI=self.schemaAPI)
        self.fieldGroupsManagers.append(fbManager)
        self.fieldGroupTitles = tuple(fgm.title for fgm in self.fieldGroupsManagers)
        self.fieldGroups = {fgm.id:fgm.title for fgm in self.fieldGroupsManagers}
        return fbManager
    def getFieldGroupManager(self,fieldgroup:str=None)->'FieldGroupManager':
        """
        Return a field group Manager of a specific name.
        Argument:
            fieldgroup : REQUIRED : The title or the $id of the field group to retrieve.
        """
        # fix: the original tested `self.getFieldGroupManager is not None`,
        # i.e. the bound method itself, which is always True; test whether any
        # managers were actually created instead
        if self.fieldGroupsManagers:
            if "ns.adobe.com" in fieldgroup: ## id
                return [fg for fg in self.fieldGroupsManagers if fg.id == fieldgroup][0]
            else:
                return [fg for fg in self.fieldGroupsManagers if fg.title == fieldgroup][0]
        else:
            raise Exception("The field group manager was not set to True during instanciation. No Field Group Manager to return")
    def setTitle(self,name:str=None)->None:
        """
        Set a name for the schema.
        Arguments:
            name : REQUIRED : a string to be used for the title of the schema
        """
        self.schema['title'] = name
        self.title = name
        return None
    def to_dataframe(self,save:bool=False,queryPath: bool = False,description:bool = False)->pd.DataFrame:
        """
        Extract the information from the Field Groups to a DataFrame.
        Arguments:
            save : OPTIONAL : If you wish to save it with the title used by the schema.
                save as csv with the title used. If no title, uses "unknown_schema_" + timestamp.
            queryPath : OPTIONAL : If you want to have the query path to be used.
            description : OPTIONAL : If you want the description column to be included.
        """
        df = pd.DataFrame({'path':[],'type':[],'fieldGroup':[]})
        for fgmanager in self.fieldGroupsManagers:
            tmp_df = fgmanager.to_dataframe(queryPath=queryPath,description=description)
            tmp_df['fieldGroup'] = fgmanager.title
            # fix: DataFrame.append was deprecated and removed in pandas 2.x
            df = pd.concat([df, tmp_df], ignore_index=True)
        if save:
            # NOTE(review): the fallback title already ends in ".csv", producing
            # "....csv.csv" -- behavior preserved, confirm intent
            title = self.schema.get('title',f'unknown_schema_{str(int(time.time()))}.csv')
            df.to_csv(f"{title}.csv",index=False)
        df = df[~df.duplicated('path')].reset_index(drop=True)
        return df
    def to_dict(self)->dict:
        """
        Return a dictionary of the whole schema. You need to have instanciated the Field Group Manager
        """
        list_dict = [fbm.to_dict() for fbm in self.fieldGroupsManagers]
        result = {}
        for mydict in list_dict:
            result = self.__simpleDeepMerge__(result,mydict)
        return result
    def createSchema(self)->dict:
        """
        Send a createSchema request to AEP to create the schema.
        NOTE(review): a previous docstring claimed the "$id" is removed before
        sending, but the code never did so -- behavior preserved, confirm intent.
        """
        if self.schemaAPI is None:
            raise Exception("Require a Schema instance to connect to the API")
        res = self.schemaAPI.createSchema(self.schema)
        self.schema = res
        self.__setAttributes__(self.schema)
        return res
    def updateSchema(self)->dict:
        """
        Use the PUT method to replace the existing schema with the new definition.
        """
        if self.schemaAPI is None:
            raise Exception("Require a Schema instance to connect to the API")
        res = self.schemaAPI.putSchema(self.id,self.schema)
        if 'status' in res.keys():
            # consistency fix: treat every error status as a failure (was == 400
            # only, unlike the >= 400 handling used elsewhere in this module)
            if res['status'] >= 400:
                print(res['title'])
                return res
            else:
                return res
        self.schema = res
        self.__setAttributes__(self.schema)
        return res
    def createDescriptorOperation(self,descType:str=None,
                                completePath:str=None,
                                identityNSCode:str=None,
                                identityPrimary:bool=False,
                                alternateTitle:str=None,
                                alternateDescription:str=None,
                                lookupSchema:str=None,
                                targetCompletePath:str=None,
                                )->dict:
        """
        Create a descriptor object to be used in the createDescriptor.
        You can see the type of descriptor available in the DESCRIPTOR_TYPES attribute and also on the official documentation:
        https://experienceleague.adobe.com/docs/experience-platform/xdm/api/descriptors.html?lang=en#appendix
        Arguments:
            descType : REQUIRED : The type to be used.
                it can only be one of the following value: "xdm:descriptorIdentity","xdm:alternateDisplayInfo","xdm:descriptorOneToOne","xdm:descriptorReferenceIdentity","xdm:descriptorDeprecated"
            completePath : REQUIRED : the complete path of the field you want to attach a descriptor.
                Example: '/definitions/customFields/properties/_tenant/properties/tenantObject/properties/field'
            identityNSCode : OPTIONAL : if the descriptor is identity related, the namespace CODE used.
            identityPrimary : OPTIONAL : If the primary descriptor added is the primary identity.
            alternateTitle : OPTIONAL : if the descriptor is alternateDisplay, the alternate title to be used.
            alternateDescription : OPTIONAL if you wish to add a new description.
            lookupSchema : OPTIONAL : The schema ID for the lookup if the descriptor is for lookup setup
            targetCompletePath : OPTIONAL : if you have the complete path for the field in the target lookup schema.
        """
        if descType not in self.DESCRIPTOR_TYPES:
            raise Exception(f"The value provided ({descType}) is not supported by this method")
        if completePath is None:
            raise ValueError("Require a field complete path")
        if descType == "xdm:descriptorIdentity":
            obj = {
                "@type": descType,
                "xdm:sourceSchema": self.id,
                "xdm:sourceVersion": 1,
                "xdm:sourceProperty": completePath,
                "xdm:namespace": identityNSCode,
                "xdm:property": "xdm:code",
                "xdm:isPrimary": identityPrimary
            }
        elif descType == "xdm:alternateDisplayInfo":
            if alternateTitle is None:
                raise ValueError("Require an alternate title")
            obj = {
                "@type": descType,
                "xdm:sourceSchema": self.id,
                "xdm:sourceVersion": 1,
                "xdm:sourceProperty": completePath,
                "xdm:title": {
                    "en_us": alternateTitle
                }
            }
            if alternateDescription is not None:
                obj["xdm:description"] = {
                    "en_us":alternateDescription
                }
        elif descType == "xdm:descriptorOneToOne":
            obj = {
                "@type": descType,
                "xdm:sourceSchema":self.id,
                "xdm:sourceVersion": 1,
                "xdm:sourceProperty":completePath,
                "xdm:destinationSchema":lookupSchema,
                "xdm:destinationVersion": 1,
            }
            if targetCompletePath is not None:
                obj["xdm:destinationProperty"] = targetCompletePath
        elif descType == "xdm:descriptorReferenceIdentity":
            obj = {
                "@type": descType,
                "xdm:sourceSchema": self.id,
                "xdm:sourceVersion": 1,
                "xdm:sourceProperty": completePath,
                "xdm:identityNamespace": identityNSCode
            }
        elif descType == "xdm:descriptorDeprecated":
            obj = {
                "@type": descType,
                "xdm:sourceSchema": self.id,
                "xdm:sourceVersion": 1,
                "xdm:sourceProperty": completePath
            }
        return obj
    def createDescriptor(self,descriptor:dict=None)->dict:
        """
        Create a descriptor attached to that schema based on the createDescriptorOperation output.
        Arguments:
            descriptor : REQUIRED : The operation to add a descriptor to the schema.
        """
        if descriptor is None:
            raise ValueError('Require an operation to be used')
        if self.schemaAPI is None:
            # consistency: same guard as createSchema / updateSchema
            raise Exception("Require a Schema instance to connect to the API")
        res = self.schemaAPI.createDescriptor(descriptor)
        return res
    def compareObservableSchema(self,observableSchemaManager:'ObservableSchemaManager'=None)->pd.DataFrame:
        """
        A method to compare the existing schema with the observable schema and find out the difference in them.
        It output a dataframe with all of the path, the field group, the type (if provided) and the part availability (in that dataset)
        Arguments:
            observableSchemaManager : REQUIRED : the ObservableSchemaManager class instance.
        """
        df_schema = self.to_dataframe()
        df_obs = observableSchemaManager.to_dataframe()
        df_merge = df_schema.merge(df_obs,left_on='path',right_on='path',how='left',indicator=True)
        df_merge = df_merge.rename(columns={"_merge": "availability",'type_x':'type'})
        df_merge = df_merge.drop("type_y",axis=1)
        df_merge['availability'] = df_merge['availability'].str.replace('left_only','schema_only')
        df_merge['availability'] = df_merge['availability'].str.replace('both','schema_dataset')
        return df_merge
6948073764 | from typing import *
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Minimum total cost to reach the top, starting from step 0 or step 1.

        Rolling two-variable DP: after each step, ``curr`` holds the cheapest
        cost of standing on the current step, ``prev`` on the one before it.
        """
        if len(cost) <= 2:
            return min(cost)
        prev, curr = cost[0], cost[1]
        for price in cost[2:]:
            prev, curr = curr, price + min(prev, curr)
        # the top is reachable from either of the last two steps
        return min(prev, curr)
if __name__ == '__main__':
    # Smoke test: print the minimum climbing cost for a sample cost array.
    sol = Solution()
    cost = [10, 15, 20, 10, 10, 20, 10]
    print(sol.minCostClimbingStairs(cost))
| Xiaoctw/LeetCode1_python | 动态规划/使用最小花费爬楼梯_746.py | 使用最小花费爬楼梯_746.py | py | 512 | python | en | code | 0 | github-code | 13 |
30989289608 | from flask import Flask, request, jsonify
import requests
app = Flask(__name__)
# URLs of the two Flask application instances
app1_url = "http://127.0.0.1:5000" # Replace with the URL of your first instance
app2_url = "http://127.0.0.1:5001" # Replace with the URL of your second instance
@app.route('/call_apps', methods=['GET'])
def call_apps():
    """Forward the same purchase request to both backend instances and bundle the replies."""
    try:
        # Payload sent as the query string of each POST request.
        payload = {
            "ItemId": "1",
            "Quantity": "3"
        }
        # The two backends are called one after the other.
        first = requests.post(f"{app1_url}/buy", params=payload)
        second = requests.post(f"{app2_url}/buy", params=payload)
        # Return both raw response bodies side by side.
        return jsonify({
            "app1_response": first.text,
            "app2_response": second.text
        })
    except Exception as e:
        return jsonify({"error": str(e)})
if __name__ == '__main__':
    app.run(debug=True, port=5002)  # You can pick another port if necessary
| Zanderz17/Soft_Sem11_Jueves | script.py | script.py | py | 1,087 | python | es | code | 0 | github-code | 13 |
73506861776 | #-*- coding:utf-8 -*-
'''
Created on 2013-9-21
@author: lenovo
'''
import time
from bson.objectid import ObjectId
from model import Model
from const_var import ROOM_STATE_FREE, ROOM_STATE_CHATTING, TABLE_ROOM, TABLE_USER
class Room(Model):
    """Data-access layer for chat rooms stored in the TABLE_ROOM collection."""
    table = TABLE_ROOM

    def create_room(self, user_id, tags, des):
        """Insert a new free room owned by ``user_id`` and return the insert result."""
        cur_time = time.time()
        room = {
            "_id": self.get_id(),
            # fix: the `des` argument was previously ignored and the
            # description was always stored as an empty string
            "des": des,
            "tags": tags,
            "state": ROOM_STATE_FREE,
            "create_time": cur_time,
            "create_user": self.dbref(TABLE_USER, user_id)
            # "guest_user" is only added once a guest joins (see join_room)
        }
        return self.insert(room)

    def get_room_by_id(self, room_id):
        """Fetch a single room document by its _id."""
        parameters = {"_id": room_id}
        return self.get(parameters)

    def get_rooms_by_tags(self, tags, offset=0, limit=10):
        """Return up to ``limit`` free rooms matching every tag in ``tags``."""
        rooms = []
        parameters = {
            "tags": {"$all": tags},
            "state": ROOM_STATE_FREE
        }
        result = self.query(parameters, offset, limit)
        if result and len(result):
            rooms = [r for r in result]
        return rooms

    def join_room(self, room_id, guest_id):
        """Mark the room as chatting and attach the guest user."""
        parameters = {"_id": room_id}
        # fix: the original dict literal repeated the '$set' key, so the first
        # entry (the state change) was silently dropped by Python; both fields
        # are now merged into a single '$set' operator
        update = {
            '$set': {
                'state': ROOM_STATE_CHATTING,
                'guest_user': self.dbref(TABLE_USER, guest_id)
            }
        }
        self.update(parameters, update)

    def is_room_joinable(self, room_id):
        """Return True when the room exists and is still in the free state."""
        parameters = {"_id": room_id}
        result = self.get(parameters)
        if result and len(result):
            if ROOM_STATE_FREE == result['state']:
                return True
        return False

    def leave_room(self, room_id, guest_id):
        """Reset the room to free and detach the guest user."""
        parameters = {"_id": room_id}
        update = {
            '$set': {'state': ROOM_STATE_FREE},
            '$unset': {'guest_user': 1}
        }
        self.update(parameters, update)

    def dissolution_room(self, room_id):
        """Delete the room document entirely."""
        parameters = {"_id": room_id}
        self.remove(parameters)
38761625072 | from flask import Blueprint, render_template, redirect, url_for, request, flash, jsonify, session, abort
from flask_login import login_user, logout_user, login_required, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from ..models.User import User
from ..models.Course import Course
from ..models.Reading import Reading
from ..models.StudentList import StudentList
from project import db
from flask_cors import CORS, cross_origin
import sqlite3, random, string
# Blueprint grouping all /admin CRUD routes; CORS is enabled with credentials
# so a separate front-end can call it with session cookies.
admin = Blueprint('admin', __name__)
CORS(admin, supports_credentials=True)
@admin.route('/admin', methods=['GET'])
@login_required
def admin_crud():
    """Return every admin-relevant table as one JSON payload of row tuples."""
    conn = sqlite3.connect('project/db.sqlite')
    cursor = conn.cursor()
    tables = []
    for table_name in ("User", "Course", "Reading", "Student_List"):
        cursor.execute(f"SELECT * from {table_name}")
        tables.append(cursor.fetchall())
    return jsonify(*tables)
###################################################################################################
# Admin-User #
###################################################################################################
@admin.route('/admin-update-user')
@login_required
def admin_update_user():
    """Render the admin form used to update a user."""
    return render_template('admin/admin-update-user.html', user_name=current_user.name)
@admin.route('/admin-update-user-getid', methods=['POST'])
@login_required
def admin_update_user_post_getid():
    """Re-render the update form pre-filled with the submitted user id."""
    userid = request.form.get('userid')
    return render_template('admin/admin-update-user.html', userid=userid, user_name=current_user.name)
@admin.route('/admin-update-user', methods=['POST'])
@login_required
def admin_update_user_post():
    """Update a user's name/email/password, propagating the new name to
    dependent rows (enrolments for students, courses for teachers) via a
    delete-then-recreate pattern that keeps the primary keys."""
    userid = request.form.get('userid')
    name = request.form['name']
    email = request.form['email']
    password = request.form['password']
    # NOTE(review): if userid is unknown, `user` is None and the attribute
    # access below raises before the `if user:` guard -- confirm inputs are
    # validated upstream
    user = User.query.filter_by(id=userid).first()
    #########################################################################################
    if user.user_type == "Student":
        studentlist = StudentList.query.filter_by(id_student=userid)
        for student in studentlist:
            # delete then re-insert the same row with the updated student name
            db.session.delete(student)
            db.session.commit()
            student = StudentList(id=student.id, id_student=student.id_student, name_student=name, id_course=student.id_course)
            db.session.add(student)
            db.session.commit()
    if user.user_type == "Teacher":
        course = Course.query.filter_by(id_teacher=userid)
        for teacher in course:
            # delete then re-insert the same course with the updated teacher name
            db.session.delete(teacher)
            db.session.commit()
            teacher = Course(id=teacher.id, name=teacher.name, entry_key=teacher.entry_key, id_teacher=teacher.id_teacher, name_teacher=name)
            db.session.add(teacher)
            db.session.commit()
    if request.method == 'POST':
        if user:
            # NOTE(review): the deleted instance's attributes are still read
            # below to rebuild the row -- relies on SQLAlchemy keeping the
            # object usable in memory after delete
            db.session.delete(user)
            db.session.commit()
            user = User(id=userid, user_type=user.user_type, name=name, email=email, password = generate_password_hash(password, method='sha256'))
            db.session.add(user)
            db.session.commit()
            return redirect('/admin')
        return "ID does not exist"
    return render_template('admin/admin-update-user.html', user_name=current_user.name)
@admin.route('/admin-delete-user', methods=['POST'])
@login_required
def admin_delete_user_post():
    """Delete a user and cascade-delete their dependent rows:
    enrolments for students; courses (with their enrolments) and readings
    for teachers. Responds 404 when the user id is unknown."""
    userid = request.form.get('userid')
    user = User.query.filter_by(id=userid).first()
    # fix: an unknown user previously crashed with AttributeError on
    # user.user_type below (HTTP 500); answer with 404 instead
    if user is None:
        abort(404)
    if user.user_type == "Student":
        # remove the student's enrolments
        studentlist = StudentList.query.filter_by(id_student=userid)
        for student in studentlist:
            db.session.delete(student)
            db.session.commit()
    if user.user_type == "Teacher":
        # remove the teacher's courses together with every enrolment in them
        course = Course.query.filter_by(id_teacher=userid)
        for teacher in course:
            studentlist = StudentList.query.filter_by(id_course=teacher.id)
            for enrolment in studentlist:
                db.session.delete(enrolment)
                db.session.commit()
            db.session.delete(teacher)
            db.session.commit()
        # and the teacher's readings
        reading = Reading.query.filter_by(id_teacher=userid)
        for teacher in reading:
            db.session.delete(teacher)
            db.session.commit()
    db.session.delete(user)
    db.session.commit()
    return redirect('/admin')
###################################################################################################
# Admin-Course #
###################################################################################################
@admin.route('/admin-update-course')
@login_required
def admin_update_course():
    """Render the admin form used to update a course."""
    return render_template('admin/admin-update-course.html', user_name=current_user.name)
@admin.route('/admin-update-course-getid', methods=['POST'])
@login_required
def admin_update_course_post_getid():
    """Re-render the update form pre-filled with the submitted course id."""
    courseid = request.form.get('courseid')
    return render_template('admin/admin-update-course.html', courseid=courseid, user_name=current_user.name)
@admin.route('/admin-update-course', methods=['POST'])
@login_required
def admin_update_course_post():
    """Replace a course row (same id) with a new name/teacher and a freshly
    generated random 16-character entry key."""
    courseid = request.form.get('courseid')
    name = request.form.get('name')
    # entry key drawn from letters, digits and punctuation
    characters = string.ascii_letters + string.digits + string.punctuation
    randomkey = ''.join(random.choice(characters) for i in range(16))
    teacherid = request.form.get('teacherid')
    course = Course.query.filter_by(id=courseid).first()
    # NOTE(review): if teacherid is unknown, `user` is None and `user.name`
    # below raises -- confirm inputs are validated upstream
    user = User.query.filter_by(id=teacherid).first()
    if request.method == 'POST':
        if course:
            # reject the (unlikely) case of a key collision with another course
            if Course.query.filter_by(entry_key=randomkey).first():
                return "Entry Key can't be the same as another course"
            # delete-then-recreate keeps the original primary key
            db.session.delete(course)
            db.session.commit()
            course = Course(id=courseid, name=name, entry_key=randomkey, id_teacher=teacherid, name_teacher = user.name)
            db.session.add(course)
            db.session.commit()
            return redirect('/admin')
        return "ID does not exist"
    # NOTE(review): unreachable (route is POST-only); url_for on a template
    # path looks like it should be render_template -- confirm intent
    return redirect(url_for('admin/admin-update-course.html'))
@admin.route('/admin-delete-course', methods=['POST'])
@login_required
def admin_delete_course_post():
    """Delete a course and every student-list row that references it.
    Responds 404 when the course id is unknown."""
    courseid = request.form.get('courseid')
    course = Course.query.filter_by(id=courseid).first()
    # fix: an unknown course previously crashed with AttributeError on
    # course.id below (HTTP 500); answer with 404 instead
    if course is None:
        abort(404)
    # cascade: remove enrolments pointing at this course first
    # (loop variable renamed from `list`, which shadowed the builtin)
    for enrolment in StudentList.query.filter_by(id_course=course.id):
        db.session.delete(enrolment)
        db.session.commit()
    db.session.delete(course)
    db.session.commit()
    return redirect('/admin')
###################################################################################################
# Admin-Reading #
###################################################################################################
@admin.route('/admin-update-reading')
@login_required
def admin_update_reading():
    """Render the admin form used to update a reading."""
    return render_template('admin/admin-update-reading.html', user_name=current_user.name)
@admin.route('/admin-update-reading-getid', methods=['POST'])
@login_required
def admin_update_reading_post_getid():
    """Re-render the update form pre-filled with the submitted reading id."""
    readingid = request.form.get('readingid')
    return render_template('admin/admin-update-reading.html', readingid=readingid, user_name=current_user.name)
@admin.route('/admin-update-reading', methods=['POST'])
@login_required
def admin_update_reading_post():
    """Replace a reading row (same id) with newly submitted name/text/teacher."""
    name = request.form.get('name')
    text = request.form.get('text')
    teacherid = request.form.get('teacherid')
    readingid = request.form.get('readingid')
    reading = Reading.query.filter_by(id=readingid).first()
    if request.method == 'POST':
        if reading:
            # delete-then-recreate keeps the original primary key
            db.session.delete(reading)
            db.session.commit()
            reading = Reading(id=readingid, name=name, text=text, id_teacher=teacherid)
            db.session.add(reading)
            db.session.commit()
            return redirect('/admin')
        return "ID does not exist"
    # NOTE(review): unreachable (route is POST-only); url_for on a template
    # path looks like it should be render_template -- confirm intent
    return redirect(url_for('admin/admin-update-reading.html'))
@admin.route('/admin-delete-reading', methods=['POST'])
@login_required
def admin_delete_reading_post():
    """Remove a reading identified by the form-submitted id; 404 when unknown."""
    reading_id = request.form.get('readingid')
    target = Reading.query.filter_by(id=reading_id).first()
    if request.method == 'POST':
        if target:
            db.session.delete(target)
            db.session.commit()
            return redirect('/admin')
        abort(404)
    return render_template('admin/admin.html')
70234784337 | import numpy as np
from matplotlib import pyplot as plt
#Exercicio1
# Sample the two-tone signal on [-1, 3) with a 1 ms step.
time_axis = np.arange(-1, 3, 0.001)
signal = 2 * np.cos(2 * np.pi * 10 * time_axis + np.pi / 4) + np.sin(2 * np.pi * 11 * time_axis - np.pi / 3)
plt.xlabel("t")
plt.ylabel("x(t)")
plt.title("ex1.I-> " + r"$x(t)=2cos(2\pi10t+\frac{\pi}{4})+sin(2\pi11t-\frac{\pi}{3})$")
plt.plot(time_axis, signal)
plt.show()
| miguelTavora/Digital-Signal | Trabalho 1/exercicio 1/ex1.I.py | ex1.I.py | py | 312 | python | en | code | 0 | github-code | 13 |
33251716990 | #!/usr/bin/env python
import urllib.request
import urllib.parse
import re
# Spoofed mobile-Safari User-Agent sent with every request
user_agent = "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25"
# Registration endpoint that validates invitation codes
check_url = 'http://wo.yao.cl/register.php'
# Candidate characters for each unknown position: 'a'-'z' and '0'-'9'
alphabeta = [chr(x+ord('a')) for x in range(26)]
digi = [chr(x+ord('0')) for x in range(10)]
# Code template; each '*' marks a position to brute-force
begin_code = ['92b7040**dbbc19c']
def BuildReq(url, data):
    """Build a request for ``url`` carrying ``data``, spoofing the mobile UA."""
    request_obj = urllib.request.Request(url, data)
    request_obj.add_header('User-Agent', user_agent)
    return request_obj
def BuildCode(begin_code, end):
    """Expand every code in ``begin_code`` by substituting each candidate
    character (letters a-z, then digits 0-9) at string index ``end``."""
    candidates = alphabeta + digi
    return [code[:end] + ch + code[end + 1:] for code in begin_code for ch in candidates]
def CheckCode(code):
    """POST the candidate invite code to the registration endpoint.
    Returns 1 when the server accepts the code, 0 otherwise."""
    data = {
        'reginvcode' : code,
        'action' : 'reginvcodeck'
    }
    data = urllib.parse.urlencode(data)
    data = data.encode('utf-8')
    req = BuildReq(check_url,data)
    resp = urllib.request.urlopen(req).read()
    # the endpoint answers with retmsg_invcode('1') when the code is invalid
    if "retmsg_invcode('1')" in resp.decode('utf-8'):
        return 0
    else:
        print('OK!')
        return 1
if __name__ == '__main__':
    # NOTE(review): results file is opened in append mode and never closed;
    # consider a with-block and writing a newline after each accepted code
    fp = open('code.txt','a')
    end = []
    # collect the index of every '*' wildcard in the code template
    repl = re.finditer(r'\*',begin_code[0])
    for tp in repl:
        end.append(tp.start())
    print(end)
    num = len(end)
    codes = begin_code
    # expand the template one wildcard position at a time (cartesian product)
    for i in range(num):
        codes = BuildCode(codes,end[i])
    i = 0
    for code in codes:
        result = CheckCode(code)
        if result == 1:
            fp.write(code)
            print(code,'ok')
        # print the very first candidate only, as a progress sanity check
        if i == 0:
            print('code:',code)
        i += 1
        print(i,'Codes checked!')
| donyfeng/cltest | cltest.py | cltest.py | py | 1,748 | python | en | code | 0 | github-code | 13 |
class sym_t(object):
    '''
    A symbol used in asm source, emitted as '.set <label>, <value>'.
    When ``label`` is a tuple or list, the symbol is indirect-indexed and
    ``value`` may be omitted; such symbols cannot be declared.
    '''
    def __init__(self, label, value = 0, comments = ''):
        # a sequence label must contain only strings; a scalar label is a string
        parts = label if type(label) in (tuple, list) else (label,)
        for part in parts:
            assert type(part) is str
        assert type(value) is int
        self.label = label
        self.value = value
        self.comments = comments

    def declare(self):
        """Render the '.set label, value' directive (scalar labels only)."""
        if type(self.label) in (tuple, list):
            assert False, "not support label is tuple and call declare"
        suffix = f' ; {self.comments}' if self.comments != '' else ''
        return f'.set {self.label}, {self.value}{suffix}'

    @staticmethod
    def expr(label, index = 0):
        """Render a reference to ``label``: element pick for sequence labels,
        'label+index' offset for scalars, 'label+a:label+b' for a 2-tuple range."""
        label_is_seq = type(label) in (tuple, list)
        if type(index) is int:
            if label_is_seq:
                assert index < len(label)
                return f'{label[index]}'
            if index == 0:
                return label
            return f'{label}+{index}'
        elif type(index) is tuple:
            if label_is_seq:
                assert False, "not suppport both label, index are tuple"
            assert len(index) == 2, 'expect tuple (start-index, end-index), inclusive'
            return f'{label}+{index[0]}:{label}+{index[1]}'
        else:
            assert False

    def __call__(self, index = 0):
        return self.expr(self.label, index)

    def __eq__(self, other):
        if type(other) is not sym_t:
            return False
        if type(self.label) in (tuple, list):
            if type(other.label) not in (tuple, list):
                return False
            if len(self.label) != len(other.label):
                return False
            # value is not part of equality for sequence labels
            return all(a == b for a, b in zip(self.label, other.label))
        return self.label == other.label and self.value == other.value

    def __ne__(self, other):
        return not self == other
class msym_t(object):
    """Wrap a sym_t so that it is referenced through its macro-argument name."""
    def __init__(self, sym):
        assert type(sym) is sym_t
        self.sym = sym
        # inside a GAS macro body, arguments are referenced as \name
        self.label_in_macro = f'\\{sym.label}'

    def __call__(self, index = 0):
        return self.sym.expr(self.label_in_macro, index)
| ROCmSoftwarePlatform/MISA | python/codegen/symbol.py | symbol.py | py | 2,498 | python | en | code | 29 | github-code | 13 |
35107130958 | # from django.views.decorators.cache import cache_page
from django.urls import path
# from myapp import views
from . import views
urlpatterns = [
    # path('', views.home, name="home"),  # old function-based home view
    path('', views.Home.as_view(), name="home"),  # class-based home view
    path('about/', views.about, name="about"),
    # path('about/', cache_page(60)(views.about), name="about"),  # cached variant (disabled)
    path('contact/', views.contact, name="contact"),
    path('courses/', views.courses, name="courses"),
    # Course detail: numeric ids match first; anything else is treated as a name.
    path('courses/<int:courseID>', views.courseDetailsByID, name="courseDetailsByID"),
    path('courses/<str:courseName>', views.courseDetailsByName, name="courseDetailsByName"),
    path('register/', views.register, name="register"),
    path('login/', views.login, name="login"),
    path('logout/', views.logout, name="logout"),
    path('search/', views.search, name="search"),
    path('add_course/', views.add_course, name="add_course"),
    path('course_api/', views.course_api, name="course_api"),
    path('dashboard/', views.dashboard, name="dashboard"),
]
73492686736 | import os
import pytest
import torch
import torch.distributed as dist
from torch.utils.data import Dataset
from dynapipe.model import TransformerModelSpec, get_uniform_cluster
from dynapipe.pipe.data_loader import DynaPipeDataLoader, TrainingSpec
from dynapipe.pipe.instructions import ExecutionPlan, ForwardPass
torch.manual_seed(42)  # deterministic RNG so DummyDataset contents are reproducible across ranks/runs
@pytest.fixture(scope="module", autouse=True)
def init_torch_distributed():
    """Bring up a single-node 'gloo' process group so dist.* calls work.

    NOTE(review): the module-level statements at the bottom of this file do
    the same initialization; init_process_group fails if called twice in one
    process — confirm the pytest path and the script path are exclusive.
    """
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    torch.distributed.init_process_group("gloo")
class DummyDataset(Dataset):
    """In-memory dataset of random token sequences.

    Each item is either an encoder/decoder pair {"text_enc", "text_dec"} or,
    with inputs_only=True, a single {"text"} sequence. All data is generated
    eagerly in __init__ under a fixed seed, so contents are reproducible.
    """

    def __init__(self, size, inputs_only=False):
        self.size = size
        torch.manual_seed(42)
        # Pre-generate every item.
        self.enc_seqlen = []
        self.dec_seqlen = []
        self.data = []
        for _ in range(size):
            # Both lengths are always drawn, keeping the RNG stream identical
            # regardless of whether a decoder sequence is materialized.
            enc_len, dec_len = torch.randint(24, 512, (2,))
            self.enc_seqlen.append(enc_len)
            if not inputs_only:
                self.dec_seqlen.append(dec_len)
                item = {
                    "text_enc": list(
                        torch.randint(0, 100, (enc_len,)).numpy()
                    ),
                    "text_dec": list(
                        torch.randint(0, 100, (dec_len,)).numpy()
                    ),
                }
            else:
                item = {
                    "text": list(torch.randint(0, 100, (enc_len,)).numpy()),
                }
            self.data.append(item)

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        return self.data[index]
def dummy_pack_fn(tensors):
    """Pack a list of sequences into one.

    Lists of lists are flattened; tensors are concatenated along dim 0.
    Returns (packed, extra) where `extra` is always 0.
    """
    if not tensors:
        return [], 0
    if isinstance(tensors[0], list):
        flat = []
        for seq in tensors:
            flat.extend(seq)
        return flat, 0
    return torch.cat(tensors, dim=0), 0
def dummy_constructor_fn(
    encoder_input,
    encoder_extra,
    decoder_input,
    decoder_extra,
    encoder_seqlen,
    decoder_seqlen,
):
    """Zero-pad packed sequences up to their target lengths.

    Returns {"text_enc", "text_dec"} for encoder/decoder inputs, or just
    {"text"} when there is no decoder input. The *_extra values are unused.
    """
    def _pad_to(seq, target_len):
        # Convert to a long tensor and append zeros up to target_len.
        t = torch.tensor(seq, dtype=torch.long)
        fill = torch.zeros(
            target_len - len(seq),
            dtype=t.dtype,
            device=t.device,
        )
        return torch.cat([t, fill], dim=0)

    if decoder_input is None:
        return {
            "text": _pad_to(encoder_input, encoder_seqlen),
        }
    return {
        "text_enc": _pad_to(encoder_input, encoder_seqlen),
        "text_dec": _pad_to(decoder_input, decoder_seqlen),
    }
def get_mb_shape_from_ep(ep: ExecutionPlan):
    """Collect the buffer shapes of every ForwardPass instruction, in plan order."""
    return [
        instr.buffer_shapes
        for instr in ep.instructions
        if isinstance(instr, ForwardPass)
    ]
def test_joint_data_loader(inputs_only=False):
    """End-to-end check of DynaPipeDataLoader on 2 pipeline ranks.

    Rank 0 validates microbatch shapes against its execution plan and against
    the shapes rank 1 extracts from its own plan (exchanged via send/recv);
    rank 1 only reports its plan shapes.
    """
    cluster_spec = get_uniform_cluster(2)
    if inputs_only:
        # Encoder-only spec: 8 encoder layers, 0 decoder layers.
        train_spec = TrainingSpec(
            "test_cm.pkl",
            cluster_spec,
            TransformerModelSpec(8, 0, 1024, 128, 65536, 128),
            1,
            2,
            0,
            [0, 0, 0, 0, 1, 1, 1, 1],
            800000,  # ignore memory limit for this test
            prefetch_buffer_size=2,
        )
    else:
        train_spec = TrainingSpec(
            "test_cm.pkl",
            cluster_spec,
            TransformerModelSpec(4, 4, 1024, 128, 65536, 128),
            1,
            2,
            0,
            [0, 0, 0, 0, 1, 1, 1, 1],
            800000,  # ignore memory limit for this test
            prefetch_buffer_size=2,
            model_type="t5",
        )
    rank = dist.get_rank()
    is_kv_host = rank == 0
    data_loader = DynaPipeDataLoader(
        train_spec,
        DummyDataset(256 * 10, inputs_only=inputs_only),
        pack_fn=dummy_pack_fn,
        constructor_fn=dummy_constructor_fn,
        is_kv_host=is_kv_host,
        node_rank=0,
        node_local_rank=rank,
        dp_rank=0,
        pp_rank=rank,
        batch_size=256,
        shuffle=False,
        num_workers=2,
        num_preprocess_workers=2,
        pin_memory=True,
        encoder_key="text_enc" if not inputs_only else "text",
        decoder_key="text_dec" if not inputs_only else None,
    )
    batch_idx = 0
    for batch, ep in data_loader:
        if rank == 0:
            assert batch is not None
            ep_shapes = get_mb_shape_from_ep(ep)
            assert len(ep_shapes) == len(batch)
            for microbatch, ep_shape in zip(batch, ep_shapes):
                if not inputs_only:
                    enc_seqlen, dec_seqlen = (
                        microbatch["text_enc"].shape[1],
                        microbatch["text_dec"].shape[1],
                    )
                    enc_mbs, dec_mbs = (
                        microbatch["text_enc"].shape[0],
                        microbatch["text_dec"].shape[0],
                    )
                else:
                    enc_seqlen = microbatch["text"].shape[1]
                    enc_mbs = microbatch["text"].shape[0]
                    dec_mbs = enc_mbs
                    dec_seqlen = 0
                assert enc_mbs == dec_mbs
                # encoder only have ep_shape size 1
                assert len(ep_shape) == 1
                # test shape rounding
                assert enc_seqlen % 8 == 0
                assert dec_seqlen % 8 == 0
                mbs_from_ep = ep_shape[0][0]
                enc_seqlen_from_ep = ep_shape[0][1]
                assert mbs_from_ep == enc_mbs
                assert enc_seqlen_from_ep == enc_seqlen
                # get enc and decoder len from rank 1
                mbs_rank1_ep_tensor = torch.empty(1, dtype=torch.int64)
                encoder_ep_seqlen_tensor = torch.empty(1, dtype=torch.int64)
                decoder_ep_seqlen_tensor = torch.empty(1, dtype=torch.int64)
                dist.recv(tensor=mbs_rank1_ep_tensor, src=1)
                dist.recv(tensor=encoder_ep_seqlen_tensor, src=1)
                dist.recv(tensor=decoder_ep_seqlen_tensor, src=1)
                mbs_rank1_ep = mbs_rank1_ep_tensor.item()
                encoder_ep_seqlen = encoder_ep_seqlen_tensor.item()
                decoder_ep_seqlen = decoder_ep_seqlen_tensor.item()
                # Both ranks must agree on microbatch size and padded lengths.
                assert mbs_rank1_ep == enc_mbs
                assert dec_seqlen == decoder_ep_seqlen
                assert enc_seqlen == encoder_ep_seqlen
            print(f"batch {batch_idx} passed")
            batch_idx += 1
        else:
            assert batch is not None
            assert ep is not None
            ep_shapes = get_mb_shape_from_ep(ep)
            for ep_shape in ep_shapes:
                if not inputs_only:
                    # Decoder present: two buffers sharing one microbatch size.
                    assert len(ep_shape) == 2
                    assert ep_shape[0][0] == ep_shape[1][0]
                    mbs_from_ep = ep_shape[0][0]
                    enc_seqlen_from_ep = ep_shape[0][1]
                    dec_seqlen_from_ep = ep_shape[1][1]
                else:
                    assert len(ep_shape) == 1
                    mbs_from_ep = ep_shape[0][0]
                    enc_seqlen_from_ep = ep_shape[0][1]
                    dec_seqlen_from_ep = 0
                mbs_tensor = torch.tensor(mbs_from_ep, dtype=torch.int64)
                enc_seqlen_tensor = torch.tensor(
                    enc_seqlen_from_ep, dtype=torch.int64
                )
                dec_seqlen_tensor = torch.tensor(
                    dec_seqlen_from_ep, dtype=torch.int64
                )
                dist.send(tensor=mbs_tensor, dst=0)
                dist.send(tensor=enc_seqlen_tensor, dst=0)
                dist.send(tensor=dec_seqlen_tensor, dst=0)
    dist.barrier()
def test_joint_data_loader_hanging():
    """Smoke test on 4 ranks with a large dataset and prefetch buffer,
    guarding against the loader deadlocking mid-epoch."""
    cluster_spec = get_uniform_cluster(4)
    train_spec = TrainingSpec(
        "test_cm.pkl",
        cluster_spec,
        TransformerModelSpec(4, 4, 1024, 128, 65536, 128),
        1,
        4,
        0,
        [0, 0, 1, 1, 2, 2, 3, 3],
        800000,  # ignore memory limit for this test
        prefetch_buffer_size=32,
        model_type="t5",
    )
    rank = dist.get_rank()
    data_loader = DynaPipeDataLoader(
        train_spec,
        DummyDataset(256 * 1000),
        pack_fn=dummy_pack_fn,
        constructor_fn=dummy_constructor_fn,
        is_kv_host=rank == 0,
        node_rank=0,
        node_local_rank=rank,
        dp_rank=0,
        pp_rank=rank,
        batch_size=256,
        shuffle=False,
        num_workers=2,
        num_preprocess_workers=32,
        pin_memory=True,
    )
    for idx, (batch, ep) in enumerate(data_loader):
        if rank == 0:
            print("Progress: Iteration {}".format(idx))
        dist.barrier()  # keep all ranks in lock-step each iteration
    dist.barrier()
def test_joint_data_loader_multiple_nodes():
    """Same smoke test as above, but with 4 ranks arranged as 2 nodes of
    2 local ranks each (exercises multi-node preprocessing)."""
    cluster_spec = get_uniform_cluster(4)
    train_spec = TrainingSpec(
        "test_cm.pkl",
        cluster_spec,
        TransformerModelSpec(4, 4, 1024, 128, 65536, 128),
        1,
        4,
        0,
        [0, 0, 1, 1, 2, 2, 3, 3],
        800000,  # ignore memory limit for this test
        prefetch_buffer_size=32,
        model_type="t5",
    )
    rank = dist.get_rank()
    data_loader = DynaPipeDataLoader(
        train_spec,
        DummyDataset(256 * 1000),
        pack_fn=dummy_pack_fn,
        constructor_fn=dummy_constructor_fn,
        is_kv_host=rank == 0,
        node_rank=rank // 2,
        node_local_rank=rank % 2,
        node_size=2,
        dp_rank=0,
        pp_rank=rank,
        batch_size=256,
        shuffle=False,
        num_workers=2,
        num_preprocess_workers=32,
        pin_memory=True,
    )
    for idx, (batch, ep) in enumerate(data_loader):
        if rank == 0:
            print("Progress: Iteration {}".format(idx))
        dist.barrier()
    dist.barrier()
def test_joint_data_loader_with_virtual_ranks():
    """Two loaders on the same pipeline rank (virtual pp ranks 0 and 1) must
    yield identical batches and identical execution plans."""
    cluster_spec = get_uniform_cluster(2)
    train_spec = TrainingSpec(
        "test_cm.pkl",
        cluster_spec,
        TransformerModelSpec(4, 4, 1024, 128, 65536, 128),
        1,
        2,
        0,
        [0, 0, 1, 1, 0, 0, 1, 1],
        800000,  # ignore memory limit for this test
        prefetch_buffer_size=2,
        model_type="t5",
    )
    rank = dist.get_rank()
    data_loader_0 = DynaPipeDataLoader(
        train_spec,
        DummyDataset(256 * 10),
        pack_fn=dummy_pack_fn,
        constructor_fn=dummy_constructor_fn,
        is_kv_host=True if rank == 0 else False,
        node_rank=0,
        node_local_rank=rank,
        dp_rank=0,
        pp_rank=rank,
        virtual_pp_rank=0,
        batch_size=256,
        shuffle=False,
        num_workers=2,
        num_preprocess_workers=2,
        pin_memory=True,
    )
    data_loader_1 = DynaPipeDataLoader(
        train_spec,
        DummyDataset(256 * 10),
        pack_fn=dummy_pack_fn,
        constructor_fn=dummy_constructor_fn,
        is_kv_host=False,
        node_rank=0,
        node_local_rank=rank,
        node_size=1,
        dp_rank=0,
        pp_rank=rank,
        virtual_pp_rank=1,
        batch_size=256,
        shuffle=False,
        num_workers=2,
        pin_memory=True,
    )
    for it, ((batch0, ep0), (batch1, ep1)) in enumerate(
        zip(data_loader_0, data_loader_1)
    ):
        assert len(batch0) == len(
            batch1
        ), "batch size mismatch ({}, {}) at iter {}".format(
            len(batch0), len(batch1), it
        )
        for mb0, mb1 in zip(batch0, batch1):
            # Fix: microbatches are built by dummy_constructor_fn, whose keys
            # are "text_enc"/"text_dec"; the previous "encoder_input"/
            # "decoder_input" keys could never exist and raised KeyError.
            assert torch.equal(mb0["text_enc"], mb1["text_enc"])
            assert torch.equal(mb0["text_dec"], mb1["text_dec"])
        assert ep0 == ep1
    dist.barrier()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("gloo")
# NOTE(review): this module-level init duplicates the autouse pytest fixture
# above; init_process_group raises if invoked twice in one process — confirm
# the script path and the pytest path are mutually exclusive.
# test hanging issue
# test_joint_data_loader_hanging()
# test multi-node preprocessing
# test_joint_data_loader_multiple_nodes()
# test without virtual ranks
test_joint_data_loader(inputs_only=True)
# test with virtual ranks
# test_joint_data_loader_with_virtual_ranks()
| awslabs/optimizing-multitask-training-through-dynamic-pipelines | tests/test_dataloader/test_dataloader.py | test_dataloader.py | py | 12,642 | python | en | code | 1 | github-code | 13 |
1652730655 | #我的思路就是先判断位数,然后分情况
#But that approach felt somewhat complicated.
#Looked up a solution: it uses DFS with a helper function.
class Solution(object):
    def restoreIpAddresses(self, s):
        """
        :type s: str
        :rtype: List[str]

        DFS over segment choices; each result is a list of 4 valid segments
        that is then joined with dots.
        """
        found = []
        self.helper(found, s, 4, [])
        return ['.'.join(parts) for parts in found]

    def helper(self, ans, s, k, temp):
        # k segments remain to be chosen; `temp` holds segments chosen so far.
        if len(s) > k * 3:
            # Too many chars left for k segments (also rejects leftovers at k == 0).
            return
        if k == 0:
            ans.append(temp[:])
            return
        # Try segment lengths 1..3, capped so enough characters remain.
        for i in range(min(3, len(s) - k + 1)):
            if i == 2 and int(s[:3]) > 255 or i > 0 and s[0] == '0':
                # Skip values above 255 and multi-digit segments with a leading zero.
                continue
            self.helper(ans, s[i + 1:], k - 1, temp + [s[:i + 1]])
| fire717/Algorithms | LeetCode/python/_093.RestoreIPAddresses.py | _093.RestoreIPAddresses.py | py | 714 | python | en | code | 6 | github-code | 13 |
74358804178 | #问题1
# Count character frequencies in test.txt (punctuation excluded) and print
# the (char, count) pairs sorted by descending count, plus a summary line.
# Fix: use a context manager so the file is closed even if reading fails.
with open("test.txt", "r", encoding="utf-8") as fi:
    txt = fi.read()
d = {}
exclude = ",。!?、()【】<>《》=:+-*—“”…"
for word in txt:
    if word in exclude:
        continue
    d[word] = d.get(word, 0) + 1
ls = list(d.items())
ls.sort(key=lambda x: x[1], reverse=True)
print(ls)
# NOTE(review): mixing ls[0][0] (most frequent char) with ls[2][1] (count of
# the THIRD most frequent) looks unintentional — confirm ls[0][1] wasn't meant.
print("{}:{}".format(ls[0][0], ls[2][1]))
| dusizhong/python-examples | 2.py | 2.py | py | 370 | python | en | code | 0 | github-code | 13 |
26215943984 | import numpy as np
import random
# can replace colors with RGB values later
COLORS = ['red', 'pink', 'lightblue',
'white', 'black', 'blue',
'green', 'yellow', 'none']
MAXIMUM_SQ = 6
GRID_ROWS = 2
NUM_TRIALS = 150
WINDOW_SIZE = (1440, 1080)
def calculate_location(size, i):
    """Return the center pixel coordinate of grid cell `i` along an axis
    whose cells are `size` pixels wide."""
    half = size // 2
    return int(size * i + half)
def rand_location(loc, color):
    """Jitter `loc` within its grid cell; 'none' squares keep the exact center."""
    if color == 'none':
        return loc
    grid_cols = MAXIMUM_SQ // GRID_ROWS
    cell_height = WINDOW_SIZE[1] / grid_cols
    cell_width = WINDOW_SIZE[0] / GRID_ROWS
    return find_loc(cell_height, cell_width, loc)
def find_loc(area_height, area_width, loc):
    """Return a point uniformly jittered around `loc`, staying within a
    quarter of the cell size on each axis.

    Fix: the y upper bound previously used x_bound, letting points escape
    their cell vertically whenever cells are not square.
    """
    y_bound = area_height / 4
    x_bound = area_width / 4
    x, y = loc
    loc_x = np.random.randint(x - x_bound, x + x_bound)
    loc_y = np.random.randint(y - y_bound, y + y_bound)
    return loc_x, loc_y
def window_locations():
    """
    Ensure that the MAXIMUM_SQ you have is divisible
    by GRID_ROWS. Returns an array of locations.
    """
    cols = MAXIMUM_SQ // GRID_ROWS
    cell_width = WINDOW_SIZE[0] / GRID_ROWS
    cell_height = WINDOW_SIZE[1] / cols
    # Cell centers, column-major to match the original iteration order.
    return [
        (calculate_location(cell_width, i), calculate_location(cell_height, j))
        for i in range(GRID_ROWS)
        for j in range(cols)
    ]
def random_block():
    """
    Creates a random block of NUM_TRIALS size;
    returns a dict of randomized trials in the block.
    """
    return {
        'trial' + str(i + 1): random_trial()
        for i in range(NUM_TRIALS)
    }
def random_trial():
    """
    Create a randomized trial that has squares for each of the MAXIMUM_SQ
    grid cells. Returns a dict mapping 'squareN' to its color and jittered
    location on the grid.

    Fix: random.shuffle previously mutated the module-level COLORS constant
    in place; we now shuffle a copy (same RNG consumption, no side effect).
    """
    # Random color index per square, then a shuffled palette to index into.
    rand_colors = np.random.randint(len(COLORS), size=MAXIMUM_SQ)
    palette = list(COLORS)
    random.shuffle(palette)
    colors = [palette[num] for num in rand_colors]

    # Shuffle the candidate cell centers.
    locations = window_locations()
    random.shuffle(locations)

    # color + jittered coordinate for each square in the trial.
    trial = {}
    for i in range(MAXIMUM_SQ):
        name = 'square' + str(i + 1)
        trial[name] = {
            'location': rand_location(locations[i], colors[i]),
            'color': colors[i],
        }
    return trial
def main():
    """
    Print out blocks.
    """
    block = random_block()
    for trial_name, trial in block.items():
        print(trial_name.upper())
        for square_name, square in trial.items():
            print(square_name, square)
        print()
if __name__ == "__main__":
    main()  # run the demo printer when executed as a script
70435957457 | class ListNode:
    def __init__(self, val):
        # Node payload and forward pointer (None marks the tail).
        self.val = val
        self.next = None
class MyLinkedList(object):
    """Singly linked list with indexed get/insert/delete (LeetCode 707)."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty
        self.size = 0     # number of nodes currently stored

    def get(self, index):
        """
        Return the value at `index`, or -1 when the index is out of range.
        :type index: int
        :rtype: int
        """
        if index < 0 or index >= self.size:
            return -1
        current = self.head
        for _ in range(index):
            current = current.next
        return current.val

    def addAtHead(self, val):
        """
        Insert `val` before the first element.
        :type val: int
        :rtype: None
        """
        self.addAtIndex(0, val)

    def addAtTail(self, val):
        """
        Append `val` after the last element.
        :type val: int
        :rtype: None
        """
        self.addAtIndex(self.size, val)

    def addAtIndex(self, index, val):
        """
        Insert `val` before the `index`-th node.

        A negative index inserts at the head (per the problem statement);
        index == size appends; index > size is ignored.
        :type index: int
        :type val: int
        :rtype: None
        """
        if index > self.size:
            return
        if index < 0:
            # Fix: a negative index previously fell through to the generic
            # path and silently inserted at position 1 instead of the head.
            index = 0
        new_node = ListNode(val)
        if index == 0:
            new_node.next = self.head
            self.head = new_node
        else:
            # Walk to the node just before the insertion point.
            current = self.head
            for _ in range(index - 1):
                current = current.next
            new_node.next = current.next
            current.next = new_node
        self.size += 1

    def deleteAtIndex(self, index):
        """
        Remove the node at `index` if it exists; out-of-range is a no-op.
        :type index: int
        :rtype: None
        """
        if index < 0 or index >= self.size:
            return
        if index == 0:
            self.head = self.head.next
        else:
            # Walk to the node just before the one being removed.
            current = self.head
            for _ in range(index - 1):
                current = current.next
            current.next = current.next.next
        self.size -= 1
# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index) | ItsMeeSeanLee337/LeetCode-Questions | 707. Design Linked List/Design_Linked_List.py | Design_Linked_List.py | py | 3,889 | python | en | code | 0 | github-code | 13 |
3874495997 | # coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
diabetes = pd.read_csv('diabetes_balanced.csv')
# Features: every column except the label; label kept separately (unused below).
X = np.array(diabetes.drop(['Outcome'], axis=1).astype(float))
y = np.array(diabetes['Outcome'])
# Cluster the feature space into 4 groups.
kmeans = KMeans(n_clusters=4, max_iter=10000, algorithm='auto')
kmeans.fit(X)
# Assign each row to its cluster ("Area"), one sample at a time.
# NOTE(review): kmeans.predict(X) would do this in one vectorized call.
pred = []
for i in range(len(X)):
    predict_me = np.array(X[i].astype(float))
    predict_me = predict_me.reshape(-1, len(predict_me))  # shape (1, n_features)
    prediction = kmeans.predict(predict_me)
    pred.append(float(prediction[0]))
diabetes['Area'] = pred
diabetes.to_csv('diabetes_balanced_area.csv', index=False)
71831414098 | from ultralytics import YOLO
import os
import cv2
model = YOLO(os.path.expanduser('~/overwrite_det/last_31_OWO.pt'))  # load trained weights
# CLAHE parameters — NOTE(review): currently unused; no CLAHE object is built below.
clip_limit = 78 # Set your desired clip limit (78 in this example)
tile_size = 20
if __name__ == '__main__':
    image = cv2.imread("Sample4_1.png", cv2.IMREAD_GRAYSCALE)
    #image=cv2.imread("Sample2.jpg")
    # Create an instance of the CLAHE (Contrast Limited Adaptive Histogram Equalization) class
    # The model expects 3-channel input, so replicate grayscale into BGR.
    bgr_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    model.predict(bgr_image, save=True, show=True)
    cv2.waitKey(0)  # keep the display window open until a key is pressed
23623838372 | """3d pose graph visualization utilities that use the Open3d library."""
from typing import List, Optional
import gtsfm.visualization.open3d_vis_utils as open3d_vis_utils
import numpy as np
import open3d
from gtsam import Pose3
import salve.utils.colormap as colormap_utils
def get_colormapped_spheres(wTi_list: List[Optional[Pose3]]) -> np.ndarray:
    """Render each camera as a sphere, with sphere colors incrementally transitioning from red to green.

    Args:
        wTi_list: global poses of N cameras (None entries are skipped).

    Returns:
        point_cloud: float array of shape (N,3) representing sphere center coordinates.
        rgb: uint8 array of shape (N,3) representing sphere RGB colors.
    """
    valid_poses = [wTi for wTi in wTi_list if wTi is not None]
    colormap = colormap_utils.get_redgreen_colormap(N=len(valid_poses))
    # One center + one color per valid pose, in input order.
    centers = [wTi.translation() for wTi in valid_poses]
    colors = [colormap[idx] for idx in range(len(valid_poses))]
    return np.array(centers), np.array(colors)
def draw_coordinate_frame(wTc: Pose3, axis_length: float = 1.0) -> List[open3d.geometry.LineSet]:
    """Draw 3 orthogonal axes representing a camera coordinate frame.

    Note: x,y,z axes correspond to red, green, blue colors.

    Args:
        wTc: Pose of any camera in the world frame.
        axis_length: length of each axis segment, in world units.

    Returns:
        line_sets: list of three Open3D LineSet objects, one per axis.
    """
    axis_colors = (
        np.array([1, 0, 0]),  # x: red
        np.array([0, 1, 0]),  # y: green
        np.array([0, 0, 1]),  # z: blue
    )
    line_sets = []
    for axis_index, axis_color in enumerate(axis_colors):
        # Segment endpoints in the camera frame: a point on this axis, then the origin.
        endpoints_cam = np.zeros((2, 3))
        endpoints_cam[0, axis_index] = axis_length
        endpoints_world = np.array([wTc.transformFrom(pt) for pt in endpoints_cam])
        line_set = open3d.geometry.LineSet(
            points=open3d.utility.Vector3dVector(endpoints_world),
            lines=open3d.utility.Vector2iVector([[0, 1]]),
        )
        line_set.colors = open3d.utility.Vector3dVector(axis_color.reshape(1, 3))
        line_sets.append(line_set)
    return line_sets
def plot_3d_poses(aTi_list_gt: List[Optional[Pose3]], bTi_list_est: List[Optional[Pose3]]) -> None:
    """Render ground-truth and estimated camera poses together in one Open3D window.

    Ground truth poses are rendered large (sphere of radius 0.5)
    Estimated poses are rendered small (spehere of radius 0.2)

    Args:
        aTi_list_gt: list of ground truth camera poses.
        bTi_list_est: list of estimated camera poses.
    """
    point_cloud_est, rgb_est = get_colormapped_spheres(bTi_list_est)
    point_cloud_gt, rgb_gt = get_colormapped_spheres(aTi_list_gt)
    geo1 = open3d_vis_utils.create_colored_spheres_open3d(point_cloud_est, rgb_est, sphere_radius=0.2)
    geo2 = open3d_vis_utils.create_colored_spheres_open3d(point_cloud_gt, rgb_gt, sphere_radius=0.5)
    def get_coordinate_frames(wTi_list: List[Optional[Pose3]]) -> List[open3d.geometry.LineSet]:
        # One RGB axis triad per valid (non-None) pose.
        frames = []
        for i, wTi in enumerate(wTi_list):
            if wTi is None:
                continue
            frames.extend(draw_coordinate_frame(wTi))
        return frames
    frames1 = get_coordinate_frames(aTi_list_gt)
    frames2 = get_coordinate_frames(bTi_list_est)
    # Blocks until the visualization window is closed.
    open3d.visualization.draw_geometries(geo1 + geo2 + frames1 + frames2)
| zillow/salve | salve/visualization/utils.py | utils.py | py | 3,499 | python | en | code | 4 | github-code | 13 |
23617476522 | from django.forms import ModelForm
from django import forms
from administrativo.models import Estudiante, NumeroTelefonico
class EstudianteForm(ModelForm):
    """Create/update form for Estudiante exposing only basic identity fields."""
    class Meta:
        model = Estudiante
        fields = ['nombre', 'apellido', 'cedula']
class NumeroTelefonicoForm(ModelForm):
    """Phone-number form bound to a given Estudiante.

    The owning student is injected as a hidden field pre-filled with the
    student's id instead of being user-editable.
    """
    def __init__(self, estudiante, *args, **kwargs):
        super(NumeroTelefonicoForm, self).__init__(*args, **kwargs)
        # Hidden field carrying the student's primary key.
        # NOTE(review): declared as CharField although it holds an integer id;
        # the view presumably resolves it back to an Estudiante — confirm.
        self.fields['estudiante'] = forms.CharField(initial = estudiante.id,
                                    widget=forms.HiddenInput())
    class Meta:
        model = NumeroTelefonico
        fields = ['telefono', 'tipo']
        exclude = ('estudiante',)
| taw-desarrollo-plataformas-web/ejemplos5.3.7_1 | ejemplo1/proyectoUno/administrativo/forms.py | forms.py | py | 679 | python | en | code | 0 | github-code | 13 |
14936771205 | import csv
import operator
import os
import re
from flask_script import Command, Option
from user_agents import parse
class UaParser(Command):
    """
    Utility for parsing a log of user-agent strings.
    Expected line format: Count "UserAgent"
    """
    def __init__(self):
        super().__init__()
        self.top = 100  # default: report the top 100 browsers
        self.path = '/spool1/'  # default report destination (the log folder)
    def get_options(self):
        return (
            Option('-l', '--logfile', dest='logfile', type=str, help='Path to log'),
            Option('-t', '--top', dest='top', type=int, help='How much browsers will be in top'),
            Option('-p', '--path', dest='path', type=str, help='Path to save results'),
        )
    def run(self, logfile: str, top: int, path: str):
        # -l is mandatory; -t and -p override the defaults set in __init__.
        if not logfile:
            print("Required -l parameter\n")
            exit(1)
        if top:
            self.top = top
        if path:
            self.path = path
        print('Script started!')
        self.get_parsed_ua(self.parse_log(logfile))
        print('Done!')
    @staticmethod
    def parse_log(logfile: str) -> dict:
        """
        Parse the user-agent statistics file.
        Matches lines against a regex, then aggregates into a dict of
        "browser family + version" -> total count.
        """
        ua_dict = {}
        print('Parsing now...')
        regex = re.compile(r'^\s*(\d+)\s+"(.+)"')
        with open(logfile) as fh:
            for line in fh:
                regex_matches = re.search(regex, line)
                if regex_matches:
                    ua_count = regex_matches.group(1)
                    ua = parse(regex_matches.group(2))
                    parsed_ua = f'{ua.browser.family} {ua.browser.version_string}'
                    # First time this browser+version key is seen: 0 + count;
                    # otherwise: previous total + new count.
                    ua_dict[parsed_ua] = ua_dict.get(parsed_ua, 0) + int(ua_count)
        print('Parsing done!')
        return ua_dict
    @staticmethod
    def get_all_browser_count(ua_dict: dict) -> int:
        """
        Compute the total number of user agents across all browsers.
        """
        all_count = 0
        for _, value in ua_dict.items():
            all_count += value
        return all_count
    @staticmethod
    def get_percent(all_count: int, value: int) -> float:
        """
        Return value as a percentage of the overall total.
        """
        return (value / all_count) * 100
    def get_parsed_ua(self, ua_dict: dict):
        # Write a TSV report: one row per browser (top-N), then aggregates.
        i = 0
        all_percents = 0
        others_dict = {}
        all_browsers_count = self.get_all_browser_count(ua_dict)  # overall UA total
        regex = re.compile(r'(.+) (\d+)\.(\d+)')
        print('Writing in file now...')
        with open(os.path.join(self.path, 'ua_output.tsv'), 'w', newline='') as f_output:
            tsv_writer = csv.writer(f_output, delimiter='\t')
            tsv_writer.writerow(['#', 'Percent ', 'Count', 'Browser', 'Major version', 'Minor version'])
            for key, value in sorted(ua_dict.items(), key=operator.itemgetter(1), reverse=True):
                i += 1
                percent = self.get_percent(all_browsers_count, value)
                all_percents += percent
                # Split the key into browser name, major and minor version.
                regex_matches = re.search(regex, key)
                if regex_matches:
                    browser = regex_matches.group(1)
                    browser_major = regex_matches.group(2)
                    browser_minor = regex_matches.group(3)
                else:
                    browser = key
                    browser_major = 0
                    browser_minor = 0
                # Cap the detailed listing at the configured top-N.
                if i > self.top:
                    # Aggregate percentage over the remaining browsers
                    others_dict['percent'] = others_dict.get('percent', 0) + percent
                    # Aggregate count over the remaining browsers
                    others_dict['count'] = others_dict.get('count', 0) + value
                else:
                    tsv_writer.writerow([i, f'{percent:2f}', value, browser, browser_major, browser_minor])
            # Write the browsers that did not make the top-N
            if others_dict:
                # One aggregate row for all remaining browsers
                tsv_writer.writerow(['#####', f'{others_dict["percent"]:2f}', others_dict['count'], 'Others', 0, 0])
            # Write the overall summary row
            tsv_writer.writerow(['#####', f'{all_percents:2f}', all_browsers_count, '#####', '#####', '#####'])
| DmitryShahbazov/UA-Parser | ua_parser.py | ua_parser.py | py | 5,329 | python | ru | code | 0 | github-code | 13 |
21828381732 | import json
from itertools import groupby
from team_league_elt.root import ROOT_DIR
from typing import List, Dict
def build_team_fifa_ranking_list():
    """Load raw per-player team stats and reduce them to a list with one
    {teamName, fifaRanking} entry per nationality."""
    with open(f'{ROOT_DIR}/world_cup_team_players_stats_raw.json') as json_file:
        team_stats_as_dicts = json.load(json_file)

    team_fifa_ranking: List[Dict] = []
    for nationality, players in groupby(team_stats_as_dicts, lambda t: t['nationality']):
        # Every player of a team carries the same ranking; read it from the first.
        players_list = list(players)
        team_fifa_ranking.append(
            {
                'teamName': nationality,
                'fifaRanking': players_list[0]['fifaRanking'],
            }
        )
    return team_fifa_ranking
if __name__ == '__main__':
    # Ad-hoc smoke run; the returned list is discarded.
    build_team_fifa_ranking_list()
| tosun-si/world-cup-qatar-team-stats-kotlin-midgard | scripts/create_team_fifa_ranking_list.py | create_team_fifa_ranking_list.py | py | 756 | python | en | code | 3 | github-code | 13 |
23905040301 | #!/usr/bin/env python3
"""Module used to batch-normalize unactivated neural-network outputs."""
import numpy as np
def batch_norm(Z, gamma, beta, epsilon):
    """Normalize an unactivated output of a NN over the batch axis.

    Args:
        Z: ndarray of shape (m, n) — m examples, n features.
        gamma: scale parameters, broadcastable to (1, n).
        beta: offset parameters, broadcastable to (1, n).
        epsilon: small constant avoiding division by zero.

    Returns:
        gamma * Z_normalized + beta.

    Fixes: removed the unused std-dev local and the Greek-letter aliases;
    use var(0) directly instead of std(0) ** 2.
    """
    mean = Z.mean(axis=0)
    var = Z.var(axis=0)
    z_normalized = (Z - mean) / np.sqrt(var + epsilon)
    return gamma * z_normalized + beta
| diego0096/holbertonschool-machine_learning | supervised_learning/0x03-optimization/13-batch_norm.py | 13-batch_norm.py | py | 366 | python | en | code | 0 | github-code | 13 |
73097435536 | class Evaluator:
"""String weight evaluator"""
@staticmethod
def check_args(coefs, words):
if len(coefs) != len(words):
return(False)
if not isinstance(coefs, list):
return(False)
if not isinstance(words, list):
return(False)
try:
coefs = list(map(float, coefs))
except Exception:
return(False)
if not all(isinstance(x, str) for x in words):
return(False)
return(True)
@staticmethod
def zip_evaluate(coefs, words):
if not Evaluator.check_args(coefs, words):
return(-1)
zipped = zip(coefs, words)
return(sum([x[0] * len(x[1]) for x in zipped]))
@staticmethod
def enumerate_evaluate(coefs, words):
if not Evaluator.check_args(coefs, words):
return(-1)
enumerated = [coef * len(words[i]) for i, coef in enumerate(coefs)]
return(sum(enumerated))
if __name__ == '__main__':
    # Quick manual check; alternative fixtures left commented out below.
    words = ["Le", "Lorem", "Ipsum", "est", "simple"]
    coefs = [1.0, 2.0, 1.0, 4.0, 0.5]
    # words = ["Le", "Lorem", "Ipsum", "n'", "est", "pas", "simple"]
    # coefs = [0.0, -1.0, 1.0, -12.0, 0.0, 42.42]
    # words = []
    # coefs = []
    print(Evaluator.zip_evaluate(coefs, words))
    print(Evaluator.enumerate_evaluate(coefs, words))
| Cizeur/Bootcamp_Python | day01/ex04/eval.py | eval.py | py | 1,349 | python | en | code | 0 | github-code | 13 |
41818191376 | from google.cloud import storage
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file("vision-6964-cec0e32a1768.json")
# SECURITY: hard-coded HMAC credentials committed to source control — these
# should be revoked and loaded from the environment or a secret manager.
# They also appear unused below (auth goes through `credentials`).
CLIENT_ID = "GOOG1ELM7PJRRII5V3WQZJDFPLMVLU7BWMX3CPYOIF4QWXQGHG37DSZDCVYSY"
CLIENT_SECRET = "Nk/1HI0zI00gx208y4Sm+ZiK/dP8sqpt7i+QoIWZ"
storage_client = storage.Client('vision-6964', credentials=credentials)
bucket = storage_client.get_bucket('convertmed-form-bucket')
file_name = 'admissionRecord2.xhtml.pdf'
blob = bucket.blob(file_name)
# Download the object into the local data/ directory (must already exist).
blob.download_to_filename(f'data/{file_name}')
73538982416 |
from pytube import YouTube
from .step import Step
from yt_concate.settings import VIDEOS_DIR
import logging
class DownloadVideos(Step):
    """Pipeline step that downloads the source video for every caption hit."""
    def process(self, data, inputs, utils):
        logger = logging.getLogger()
        # De-duplicate: several Found entries can point at the same video.
        yt_set = set ([found.yt for found in data])
        logger.info(f'videos to download:, {len(yt_set)}')
        for yt in yt_set:
            url = yt.url
            # Fast mode: skip videos that already exist on disk.
            if inputs['fast'] and utils.video_file_exists(yt):
                logger.debug(f'found existing video file for {url}, skipping')
                continue
            logger.debug(f'downloading {url}')
            # NOTE(review): the actual pytube download is commented out, so
            # this step currently only logs — confirm this is intentional.
            # YouTube(url).streams.first().download(output_path=VIDEOS_DIR, filename=yt.id)
        return data
43588848989 |
import logging
import numpy as np
import pandas as pd
from sklearn import preprocessing
from gzreduction.deprecated.uncertainty import uncertainty
def reduced_votes_to_predictions(df, schema, save_loc):
    """
    Calculate predicted answers and uncertainty from reduced vote counts.

    Args:
        df (pd.DataFrame) rows of subject, columns of question_answer counts. Includes total votes and vote fractions.
        schema (Schema): definition object for questions and answers
        save_loc (str): if not None, save predictions here

    Returns:
        (pd.DataFrame) df with prediction, prediction_conf, vote_fraction_min, vote_fraction_max columns added
    """
    predictions = get_predictions(df, schema)
    # NOTE(review): encode_answers is not defined or imported in this chunk —
    # confirm it exists at module scope elsewhere in the file.
    encoded_predictions = encode_answers(predictions, schema)
    if save_loc is not None:
        logging.info('Saved predictions to {}'.format(save_loc))
        encoded_predictions.to_csv(save_loc, index=False)
    return encoded_predictions
def get_predictions(df, schema):
    """
    For each question in schema, find:
    - prediction
    - prediction confidence
    - 80% confidence interval +/- vote fraction values for all answers

    Args:
        df (pd.DataFrame) rows of subject, columns of question_answer counts. Includes total votes and vote fractions
        schema (Schema): definition object for questions and answers

    Returns:
        (pd.DataFrame) with prediction, prediction confidence and confidence intervals added
    """
    # Work on a copy; fold in the per-question columns one question at a time.
    result = df.copy()
    for question in schema.questions:
        result = get_predictions_for_question(result, question)
    return result
def get_predictions_for_question(df, question):
    """
    For the provided question, add:
    - predicted answer (name of the most-voted answer)
    - prediction confidence (vote fraction of the predicted answer)
    - total votes for the question

    Args:
        df (pd.DataFrame): rows of subject, columns of question_answer counts
        question (Question): definition object for question

    Returns:
        (pd.DataFrame): df with prediction, confidence and total-vote columns added
    """
    # Drop any stale prediction columns left over from a previous run.
    df = df.drop([question.prediction, question.prediction_conf], axis=1, errors='ignore')
    relevant_count_cols = question.get_count_columns()  # count (not fraction) columns
    # Most-voted count column per row; idxmax takes the first column on a tie.
    most_common_answer_cols = df[relevant_count_cols].fillna(0).idxmax(axis=1).values
    # inverse: count column of known question to answer value
    most_common_answers = list(map(lambda x: question.get_answer_from_count_column(x).name, most_common_answer_cols))
    df[question.prediction] = most_common_answers
    total_votes_by_row = df[relevant_count_cols].sum(axis=1)
    df[question.total_votes] = total_votes_by_row
    votes_for_most_popular = df[relevant_count_cols].max(axis=1)
    df[question.prediction_conf] = votes_for_most_popular / total_votes_by_row
    # If total votes is 0 the fraction is NaN; replace with the confidence
    # appropriate for all answers being equally likely.
    df[question.prediction_conf] = df[question.prediction_conf].fillna(
        value=1. / len(question.answers))
    # calculate uncertainty - disabled as binomial approximation known to have some fairly
    # important failures e.g. on extreme vote fractions, irrelevant answers
    # for answer in question.answers:
    #     fraction_min_col = question.get_fraction_min_col(answer)
    #     fraction_max_col = question.get_fraction_max_col(answer)
    #     row_iter = [df.iloc[n].to_dict() for n in range(len(df))]
    #     votes_series = list(map(lambda x: votes_from_subject_row(x, question=question, answer=answer), row_iter))
    #     df[fraction_min_col] = list(map(lambda x: uncertainty.get_min_p_estimate(x, interval=0.8), votes_series))
    #     df[fraction_max_col] = list(map(lambda x: uncertainty.get_max_p_estimate(x, interval=0.8), votes_series))
    return df
def votes_from_subject_row(row, question, answer):
    """
    Reconstruct the individual volunteer votes for an answer from reduced counts.

    Note: this is not easier pre-reduction because '0' may mean a vote for
    another question or no response. Likely to be deprecated once a more
    sophisticated reduction is used.

    Args:
        row (pd.Series): vote counts for a subject by question_answer,
            including total_votes for the question
        question (Question): question to find votes for
        answer (Answer): answer to find votes for

    Returns:
        (np.array): a 1 for each vote for this answer and a 0 for each vote
            for any other answer to the same question
    """
    total = int(row[question.total_votes])
    for_answer = int(row[question.get_count_column(answer)])
    return np.array([1] * for_answer + [0] * (total - for_answer))
def get_encoders(schema):
    """
    Build a label encoder per question, fitted on that question's answer names.
    Useful for machine learning tools.

    Args:
        schema (Schema): definition object for questions and answers

    Returns:
        (dict): {question.name: fitted LabelEncoder (maps answer name <-> int)}
    """
    names = schema.get_question_names()
    assert len(names) == len(set(names))  # questions must be uniquely named
    return {
        question.name: preprocessing.LabelEncoder().fit(question.get_answer_names())
        for question in schema.questions
    }
def encode_answers(df, schema):
    """
    Add an integer-encoded prediction column for every question.
    Useful for machine learning tools.

    Args:
        df (pd.DataFrame): reduced votes including each question's predicted
            answer as a string value (e.g. 'smooth')
        schema (Schema): definition object for questions and answers

    Returns:
        (pd.DataFrame): df with an encoded prediction column per question (e.g. 1)
    """
    encoders = get_encoders(schema)
    for question in schema.questions:
        encoder = encoders[question.name]
        df[question.prediction_encoded] = encoder.transform(df[question.prediction])
    return df
| mwalmsley/gz-panoptes-reduction | gzreduction/votes_to_predictions/reduced_votes_to_predictions.py | reduced_votes_to_predictions.py | py | 6,204 | python | en | code | 1 | github-code | 13 |
20859588889 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# BAR GRAPH 1: median (average) age per region, sorted ascending.
plt.figure(figsize=(9,7))
data = pd.read_csv('data03sheet.csv')
plt.title('Average (Median) Age in Different Regions', fontdict={'fontweight':'bold', 'fontsize': 18})
# sort the regions into ascending order of median age before plotting
myReg = data['Region']
myAge = data['Median age']
df = pd.DataFrame({"Region":myReg, "Median age":myAge})
df_sorted = df.sort_values(by='Median age', ascending=True)
plt.bar(df_sorted['Region'], df_sorted['Median age'], zorder=3)  # zorder=3 draws bars above the grid
plt.grid(zorder=0)
plt.ylabel('Median Age')
plt.yscale('linear')
plt.xticks(rotation=25)
plt.savefig('demographics_img01.png')
plt.show()
# PIE CHART 2: share of world population by religion (hard-coded percentages).
plt.figure(figsize=(8,5))
plt.title('Population by Religion (%)', fontdict={'fontweight':'bold', 'fontsize': 18})
myPercent = [31.2, 24.1, 16, 15.1, 6.9, 5.7, 1]
myReligions = 'Christianity', 'Islam', 'Non-religious', 'Hinduism', 'Buddhism', 'Folk', 'Other'
plt.pie(myPercent, labels=myReligions, autopct='%1.1f%%')
plt.axis('equal')  # equal aspect ratio keeps the pie circular
plt.savefig('demographics_img02.png')
plt.show()
# BAR GRAPH 3: population by religion in absolute numbers.
plt.figure(figsize=(8,6.5))
data = pd.read_csv('data03Bsheet.csv')
plt.title('Population by Religion (billions)', fontdict={'fontweight':'bold', 'fontsize': 18})
plt.bar(data.Religion, data.Number/1000, zorder=3)  # /1000: presumably the CSV is in millions — confirm against data
plt.grid(zorder=0)
plt.ylabel('Population (billions)')
plt.xticks(rotation=25)
plt.xlim(-0.5, 6.5)
plt.savefig('demographics_img03.png')
plt.show()
# PIE CHART 4: population by race and ethnicity (hard-coded percentages).
plt.figure(figsize=(8,5))
plt.title('Population by Race/Ethnicity', fontdict={'fontweight':'bold', 'fontsize': 18})
myRaceRatio = [23.48, 21.23, 16, 13.93, 9.26, 8.62, 4.45, 3.03]
myRace = 'South Asian', 'East Asian', 'European', 'African', 'Southeast Asian', 'Middle Eastern','Latina', 'Other'
plt.pie(myRaceRatio, labels=myRace, autopct='%1.1f%%')
plt.axis('equal')
plt.savefig('demographics_img04.png')
plt.show()
# BAR GRAPH 5: literacy rate per region, y-axis clipped to 50-100%.
plt.figure(figsize=(8,6.5))
data = pd.read_csv('data04Csheet.csv')
plt.title('Literacy Rate by Region', fontdict={'fontweight':'bold', 'fontsize': 18})
plt.bar(data.Region, data['Literacy Rate'], zorder=3)
plt.ylabel('Literacy Rate')
plt.grid(zorder=0)
plt.yscale('linear')
plt.xticks(rotation=25)
plt.ylim(50, 100)
plt.xlim(left=0.5)  # hides the first bar; presumably intentional — confirm
plt.savefig('demographics_img05.png')
plt.show()
# LINE GRAPH 6: global access to electricity and clean water over time.
plt.figure(figsize=(8,5))
data = pd.read_csv('data04Bsheet.csv')
plt.title('Global Access (%) to Electricity and Clean Water', fontdict={'fontweight':'bold', 'fontsize': 18})
# 'Electricty' matches the CSV column spelling — do not "fix" without changing the CSV.
plt.plot(data.Year, data['Electricty Access'], label='Electricity')
plt.plot(data.Year, data['Clean Water Access'], label='Clean Water')
plt.xlabel('Year')
plt.ylabel('(%) Population With Access')
plt.grid(True)
plt.yscale('linear')
plt.xscale('linear')
plt.legend()
plt.xlim(1995, 2015)
plt.ylim(50, 100)
plt.savefig('demographics_img06.png')
plt.show() | ari-abr/World-In-Numbers | py_demographics.py | py_demographics.py | py | 2,898 | python | en | code | 0 | github-code | 13 |
22648280287 | import json
import logging
import os
from copy import deepcopy
from urllib.parse import urljoin, urlparse
from boto3utils import s3
from cirruslib import StateDB, stac, STATES
# Module-level logger for this Lambda.
logger = logging.getLogger(__name__)
# envvars
DATA_BUCKET = os.getenv('CIRRUS_DATA_BUCKET', None)  # S3 bucket holding the root catalog.json
# Cirrus state database
statedb = StateDB()  # shared state-table client, created once per Lambda container
def response(body, status_code=200, headers=None):
    """Build an API Gateway proxy response with CORS headers always attached.

    Args:
        body: JSON-serializable response payload.
        status_code (int): HTTP status code (default 200).
        headers (dict): extra response headers; copied, never mutated.

    Returns:
        dict: {'statusCode', 'headers', 'body'} in API Gateway proxy format.
    """
    # Avoid a mutable default argument; deepcopy so the caller's dict is untouched.
    _headers = {} if headers is None else deepcopy(headers)
    # cors
    _headers.update({
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Credentials': True
    })
    return {
        "statusCode": status_code,
        "headers": _headers,
        "body": json.dumps(body)
    }
def create_link(url, title, rel, media_type='application/json'):
    """Return a STAC-style link dict pointing at *url*."""
    link = {}
    link["title"] = title
    link["rel"] = rel
    link["type"] = media_type
    link["href"] = url
    return link
def get_root(root_url):
    """Build the API root document from the Cirrus root catalog on S3.

    Reads s3://<DATA_BUCKET>/catalog.json and turns every collection/workflow
    pair under its 'cirrus.workflows' key into a child link.

    Args:
        root_url (str): public base URL of this API (may be None).

    Returns:
        dict: root document with id, description and links.
    """
    cat_url = f"s3://{DATA_BUCKET}/catalog.json"
    logger.debug(f"Root catalog: {cat_url}")
    cat = s3().read_json(cat_url)
    links = []
    workflows = cat.get('cirrus', {}).get('workflows', {})
    for col in workflows:
        for wf in workflows[col]:
            name = f"{col} - {wf}"
            link = create_link(urljoin(root_url, f"{col}/workflow-{wf}"), name, 'child')
            links.append(link)
    # 'self' link first, STAC catalog link last.
    links.insert(0, create_link(root_url, "home", "self"))
    links.append(create_link(cat_url, "STAC", "stac"))
    root = {
        "id": f"{cat['id']}-state-api",
        "description": f"{cat['description']} State API",
        "links": links
    }
    return root
def summary(collections_workflow, since, limit):
    """Count items in each state for one '<collections>_<workflow>' key.

    Args:
        collections_workflow (str): combined key; the part after the last '_'
            is the workflow name.
        since: only count items updated since this time (forwarded to statedb).
        limit (int): max count per state (forwarded to statedb).

    Returns:
        dict: {'collections', 'workflow', 'counts'} summary.
    """
    # rsplit so collection names may themselves contain underscores.
    parts = collections_workflow.rsplit('_', maxsplit=1)
    logger.debug(f"Getting summary for {collections_workflow}")
    counts = {}
    for s in STATES:
        counts[s] = statedb.get_counts(collections_workflow, state=s, since=since, limit=limit)
    return {
        "collections": parts[0],
        "workflow": parts[1],
        "counts": counts
    }
def lambda_handler(event, context):
    """Handle an API Gateway proxy request for the Cirrus state API.

    Routes (after stripping the stage prefix):
    - ''                                   -> root document
    - '<collections>/workflow-<wf>'        -> per-state count summary
    - '<collections>/workflow-<wf>/items'  -> page of items
    - '<collections>/workflow-<wf>/<item>' -> single item
    Paths prefixed with 'item/' or 'collections/' select the legacy format.

    Args:
        event (dict): API Gateway proxy event.
        context: Lambda context object (unused).

    Returns:
        dict: API Gateway proxy response (see `response`).
    """
    logger.debug('Event: %s' % json.dumps(event))
    # get request URL
    domain = event.get('requestContext', {}).get('domainName', '')
    # Fix: bind `path` unconditionally; previously it was only assigned when a
    # domain was present, so the 400 branch below could raise NameError.
    path = event.get('requestContext', {}).get('path', '')
    if domain != '':
        root_url = f"https://{domain}{path}/"
    else:
        root_url = None
    # get path parameters, dropping the stage prefix if present
    stage = event.get('requestContext', {}).get('stage', '')
    parts = [p for p in event.get('path', '').split('/') if p != '']
    if len(parts) > 0 and parts[0] == stage:
        parts = parts[1:]
    catid = '/'.join(parts)
    # legacy path prefixes select the legacy response format
    legacy = False
    if catid.startswith('item'):
        legacy = True
        catid = catid.replace('item/', '', 1)
    if catid.startswith('collections'):
        legacy = True
        catid = catid.replace('collections/', '', 1)
    logger.info(f"Path parameters: {catid}")
    # get query parameters
    qparams = event['queryStringParameters'] if event.get('queryStringParameters') else {}
    logger.info(f"Query Parameters: {qparams}")
    state = qparams.get('state', None)
    since = qparams.get('since', None)
    nextkey = qparams.get('nextkey', None)
    limit = int(qparams.get('limit', 100000))
    sort_ascending = bool(qparams.get('sort_ascending', None))
    sort_index = qparams.get('sort_index', None)
    # root endpoint
    if catid == '':
        return response(get_root(root_url))
    if '/workflow-' not in catid:
        return response(f"{path} not found", status_code=400)
    key = statedb.catid_to_key(catid)
    if key['itemids'] == '':
        # get summary of collection
        return response(summary(key['collections_workflow'], since=since, limit=limit))
    elif key['itemids'] == 'items':
        # get a page of items
        logger.debug(f"Getting items for {key['collections_workflow']}, state={state}, since={since}")
        items = statedb.get_items_page(key['collections_workflow'], state=state, since=since,
                                       limit=limit, nextkey=nextkey, sort_ascending=sort_ascending,
                                       sort_index=sort_index)
        if legacy:
            items = [to_legacy(item) for item in items]
        return response(items)
    else:
        # get individual item
        item = statedb.dbitem_to_item(statedb.get_dbitem(catid))
        if legacy:
            item = to_legacy(item)
        return response(item)
def to_legacy(item):
    """Convert a state-DB item dict to the legacy API response schema.

    Optional keys (executions/outputs/last_error) are mapped only when present.
    """
    _item = {
        'id': item['catid'],
        'catid': item['catid'],
        'input_collections': item['collections'],
        # legacy 'current_state' fuses state and update timestamp
        'current_state': f"{item['state']}_{item['updated']}",
        'state': item['state'],
        'created_at': item['created'],
        'updated_at': item['updated'],
        'input_catalog': item['catalog']
    }
    # Only the most recent execution is exposed in the legacy format.
    if 'executions' in item:
        _item['execution'] = item['executions'][-1]
    if 'outputs' in item:
        _item['items'] = item['outputs']
    if 'last_error' in item:
        _item['error_message'] = item['last_error']
return _item | cirrus-geo/cirrus-earth-search | core/api/lambda_function.py | lambda_function.py | py | 5,104 | python | en | code | 21 | github-code | 13 |
30171855103 | #!/usr/bin/env python
# Note: This demo will only work if you have a Barobo breakout-board currently
# attached to the linkbot.
from barobo import Linkbot
if __name__ == "__main__":
    # Connect to the Linkbot and read all eight breakout-board ADC channels.
    linkbot = Linkbot()
    linkbot.connect()
    # Use lists instead of map(): under Python 3 printing a map object shows
    # '<map object ...>' rather than the readings, and the second map would
    # also lazily consume the first.
    adcs = [linkbot.getBreakoutADC(channel) for channel in range(0, 8)]
    # Scale the raw 10-bit readings (0-1023) to volts on a 5 V reference.
    print([reading / 1024.0 * 5.0 for reading in adcs])
| davidko/PyBarobo | demo/test/with_BaroboLink/getBreakoutADC.py | getBreakoutADC.py | py | 328 | python | en | code | 0 | github-code | 13 |
5132625046 | # -*- coding: utf-8 -*-
# python结巴分词使用停用词版本,根据输入的excel文件分词后输出到另一个excel中
import xlrd
import xlwt
import jieba
def jiebafenci(input_path,output_path):
    """Segment the first column of an Excel sheet with jieba, drop stop words,
    and write the comma-joined tokens to a new Excel file.

    Args:
        input_path: path of the source .xls workbook (text in column 0).
        output_path: path the segmented workbook is saved to.
    """
    jieba.load_userdict('userwords.txt') # userwords.txt: custom user dictionary for jieba
    # Load the stop-word list
    with open('stopwords.txt', encoding='UTF-8') as f:
        stoplist = f.readlines() # read every line into a list
        for i in range(stoplist.__len__()):
            stoplist[i] = stoplist[i].strip('\n') # strip trailing newlines
        f.close()
    # Open the source workbook
    data = xlrd.open_workbook(input_path)
    # Create a new output workbook
    target_file = xlwt.Workbook()
    target_table = target_file.add_sheet('target',cell_overwrite_ok=True)
    table = data.sheets()[0] # first sheet of the source workbook
    nrows = table.nrows # number of rows
    # Segment each row and remove stop words
    for index in range(nrows):
        segs = jieba.lcut(table.row_values(index)[0]) # jieba-segment column 0
        segs = [word for word in list(segs) if word not in stoplist] # drop words on the stop list
        target_table.write(index, 0, ','.join(segs)) # write the joined result
    target_file.save(output_path)
# Read back the segmented data for training
def readExcel(output_path,output_path2):
    """Read two segmented workbooks into token lists with class labels.

    Rows from the first workbook are labelled 1, rows from the second 0.

    Returns:
        (list, list): postingList of token lists, classVec of 1/0 labels.
    """
    postingList =[]
    classVec =[]
    # Read the first (positive-class) workbook
    book = xlrd.open_workbook(output_path)
    table = book.sheets()[0]
    nrows = table.nrows
    for index in range(nrows):
        segs = table.row_values(index)[0].split(',')
        postingList.append(segs)
        classVec.append(1)
    # Read the second (negative-class) workbook
    book = xlrd.open_workbook(output_path2)
    table = book.sheets()[0]
    nrows = table.nrows
    for index in range(nrows):
        segs = table.row_values(index)[0].split(',')
        postingList.append(segs)
        classVec.append(0)
    return postingList, classVec
# Read the test workbook (no labels)
def readTestExcel(input_path):
    """Read a segmented workbook into a list of token lists (no class labels).

    Returns:
        list: postingList of token lists, one per row.
    """
    postingList = []
    classVec = []  # unused here; kept to mirror readExcel's structure
    # Open the workbook and split each row's comma-joined tokens
    book = xlrd.open_workbook(input_path)
    table = book.sheets()[0]
    nrows = table.nrows
    for index in range(nrows):
        segs = table.row_values(index)[0].split(',')
        postingList.append(segs)
    return postingList
| yingtaoluo/51jobs-Text-Mining | sy4/jiebaWithStopwords.py | jiebaWithStopwords.py | py | 2,374 | python | en | code | 0 | github-code | 13 |
32318289681 | '''
This file renders out the layers of a blend file to different names. It is
configured by a JSON file that links a name to visible layers. For example:
[
{'name':'image_name', 'layers':[2,5]},
]
The json file is provided by the command line arguments. Invoke this script
using:
blender path/to/blendfile.blend --python ./path/to/render_layers.py -- ./path/to/json/file.json ./path/to/output/images
'''
import sys
import os
import json
import argparse
import bpy
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import blenderhelper
def main(args):
    """Parse render options from *args* and delegate to blenderhelper.render."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', help="Output File", required=True)
    parser.add_argument('--opengl', help="Use the OpenGL renderer", action='store_true')
    parser.add_argument('--resolution', help="Resolution Multiplier", default=1.0, type=float)
    opts = parser.parse_args(args)
    # Hand the actual rendering off to the shared helper module.
    blenderhelper.render(opts.filename, opts.resolution, opts.opengl)


if __name__ == "__main__":
    # blenderhelper extracts the arguments after Blender's '--' separator.
    blenderhelper.run_function_with_args(main)
| sdfgeoff/LearningWhatAGameEngineIs | src/Scripts/render.py | render.py | py | 1,092 | python | en | code | 0 | github-code | 13 |
32383172296 | import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib
# Embed TrueType (Type 42) fonts so text in PDF/PS output stays editable.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
"""Plotting"""
algorithms = ['Q']
# delays = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
delays = [2, 4, 6, 8, 10]
runs = 10
lambda_trace = 0.0
# First pass: DRQ results from the cumulative-results directory, averaged
# over the last `episodes` episodes of each run.
for algorithm in algorithms:
    rewards = {}
    rewards_err = {}
    episodes = 10000
    for delay in delays:
        reward_runs = np.zeros(runs)
        for run in range(runs):
            filename = 'Maze-Tabular\\Results-v3-cumulative\\maze_' + algorithm + '_lambda_' + str(lambda_trace) + '_' + str(delay) \
                       + '\\' + str(run) + '.npy'
            reward_current = np.load(filename)[-episodes-1:-1]
            reward_runs[run] = np.mean(reward_current)
        rewards[delay] = np.mean(reward_runs)
        rewards_err[delay] = np.std(reward_runs, axis=0)
    alg = 'DRQ'
    color = u'#1f77b4'
    # plt.plot(list(rewards.keys()), list(rewards.values()), marker='o', label=alg, color=color)
    plt.errorbar(list(rewards.keys()), list(rewards.values()), yerr=list(rewards_err.values()),
                 uplims=True, lolims=True, label=alg, color=color)
plt.title('W-Maze', fontsize=20)
plt.xticks(list(rewards.keys()))
algorithms = ['Q', 'dQ']
# Second pass: the Q and delay-Q baselines from the non-cumulative directory.
for algorithm in algorithms:
    rewards = {}
    rewards_err = {}
    for delay in delays:
        reward_runs = np.zeros(runs)
        for run in range(runs):
            filename = 'Maze-Tabular\\Results-v3\\maze_' + algorithm + '_lambda_' + str(0.0) + '_' + str(delay) \
                       + '\\' + str(run) + '.npy'
            reward_current = np.load(filename)[-episodes-1:-1]
            reward_runs[run] = np.mean(reward_current)
        rewards[delay] = np.mean(reward_runs)
        rewards_err[delay] = np.std(reward_runs, axis=0)
    if algorithm == 'dQ':
        alg = 'delay-Q'
        color = 'red'
    else:
        alg = 'Q'
        color = u'#2ca02c'
    # plt.plot(list(rewards.keys()), list(rewards.values()), marker='o', label=alg, color=color)
    plt.errorbar(list(rewards.keys()), list(rewards.values()), yerr=list(rewards_err.values()),
                 uplims=True, lolims=True, label=alg, color=color)
plt.legend()
plt.xlabel('Delays', fontsize=16)
plt.xticks(fontsize=16)
plt.ylabel('Rewards', fontsize=16)
plt.yticks(fontsize=16)
save_dir = os.getcwd() + '/Maze-Tabular/Plots/'
# Create the output directory on demand if the first save fails.
try:
    plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
    os.makedirs(os.getcwd() + '/Maze-Tabular/Plots/')
    plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
plt.show()  # NOTE(review): duplicate show(); the second call is a no-op here
| baranwa2/DelayResolvedRL | W-Maze/Tabular-Q/plot.py | plot.py | py | 2,796 | python | en | code | 3 | github-code | 13 |
12343804785 | # Program to find the majority element that is the occurence should be greater than N/2 , where N is the size of the array
# BELOW PROGRAMS IN VARIOUS SECTIONS ARE FOR N/2, N/3.... MAJORITY ELEMENTS.
# ----------------------------------------------------------------------------------------------------------------
# NOTE :
# There can be at most one majority element which is more than ⌊n/2⌋ times.
# There can be at most two majority elements which are more than ⌊n/3⌋ times.
# There can be at most three majority elements which are more than ⌊n/4⌋ times.
# -----------------------------------------------------------------------------------------------------------------
# MOST OPTIMIZED METHOD :
# THIS USES BOYER - MOORE VOTING ALGORITHM
# BEST METHOD , TAKES 0(N) TIME, 0(1) SPACE BUT AGAIN IT ASSUMES MAJORITY ELEMENT EXISTS
# logic is to simply keep an counter of elements when repeated to be seen, and keep a variable for setting majority element, as soon as we see the same
# element , we increase its count, and as soon as we see some another element, we decrease the count till 0, when we change the majority element now to # # be at the current element as a new candidate and start the counter again , finally at the end, we will be having our majority element stored.
# -------------------------------------------------------------------------------------------------------------------
# NOTE : One important thing is that BOYER - MOORE VOTING ALGORITHM doesn't tell if the majority element exists, it gives the answer only
# if the majority element exists.
# So, we need to do a second linear time pass to count and check just for the candidates and their count if greater than required N//2, N//3..
# This will allow us to get exactly how many majority elements are present or there are none of them existent.
# --------------------------------------------------------------------------------------------------------------------
# Intuition behind the Boyer-Moore voting algorithm:
# EX. [7, 7, 5, 7, 5, | 5, 7 | 5, 5, 7, 7, | 5, 5, 5, 5]
# The above bars suggests the indices wherever counter becomes 0 , now what does it signify ?
# It signifies that number of total count of minority elements will be balanced our or cancelled by total count of majority elements.
# As also, can be seen above in the first segment, second segment, third segment all the counts are same, therefore there can be no majority element
# but we must have count(majority) > count(minority) in the last (fourth) segment because of the > [N/2] condition, and only if that
# holds can there be a majority element; otherwise no majority element exists.
# -----------------------------------------------------------------------------------------------------------------------
def find_majority(arr):
    """Return the Boyer-Moore majority candidate for arr.

    If a majority element (> n/2 occurrences) exists it is returned; if none
    exists the returned candidate is arbitrary, and for an empty list the
    result is None. A second counting pass is needed to confirm majority.
    """
    candidate = None
    votes = 0
    for value in arr:
        # Adopt a new candidate whenever the running vote balance hits zero.
        if votes == 0:
            candidate = value
        votes += 1 if value == candidate else -1
    return candidate
# Majority element occuring |N/3| times :
# --------------------------------------------------------------------------------------------------------------------
class Solution:
    def majorityElement(self, nums: List[int]) -> List[int]:
        """Return all elements appearing more than ⌊n/3⌋ times (at most two).

        Extended Boyer-Moore voting with two candidates, followed by a
        counting pass to confirm which candidates really qualify.
        """
        if not nums:
            return []
        cand_a, cand_b = None, None
        votes_a, votes_b = 0, 0
        for value in nums:
            if value == cand_a:
                votes_a += 1
            elif value == cand_b:
                votes_b += 1
            elif votes_a == 0:
                cand_a, votes_a = value, 1
            elif votes_b == 0:
                cand_b, votes_b = value, 1
            else:
                # value matches neither candidate: both lose one vote.
                votes_a -= 1
                votes_b -= 1
        # Verification pass: a candidate wins only with more than n//3 votes.
        threshold = len(nums) // 3
        return [cand for cand in (cand_a, cand_b) if nums.count(cand) > threshold]
# ---------------------------------------------------------------------------------------------------------------------
# ALTERNATE METHODS FOR N/2 BUT ARE APPLICABLE FOR OTHERS ALSO WITH SLIGHT MODIFICATIONS..... (FOR SHOWING DIFFERENT METHODS PURPOSES)
# METHOD 3 STARTS
# sorting the array , then middle element will always be the majority element
# time : 0(nlgn), space : 0(1)
# def find_majority(arr):
# arr.sort()
# array_size = len(arr)
# return arr[array_size // 2]
# ------------------*-------------
# METHOD 2 STARTS
# TIME : 0(nlgn), SPACE : 0(1)
# def find_majority(arr):
# array_size = len(arr)
# counter = 0
# if len(arr) == 1:
# return arr[0]
# arr.sort()
# for i in range(1, array_size):
# if arr[i] != arr[i - 1]:
# counter = 1
# else:
# if i == 1:
# counter = 2
# else:
# counter += 1
# if counter > (array_size) / 2:
# return arr[i]
# return -1
# ------------*----------------------
# METHOD 1 STARTS => using hashmaps
# TIME : 0(N), SPACE : 0(N)
# def find_majority(arr):
# count = {}
# array_size = len(arr)
# if len(arr) == 0:
# return -1
# for i in range(array_size):
# if arr[i] in count:
# count[arr[i]] += 1
# else:
# count[arr[i]] = 1
# for i, j in count.items():
# if j > (array_size) / 2:
# return i
# return -1
if __name__ == '__main__':
    # Smoke tests for find_majority (asserts raise on failure). The commented
    # cases expect -1 for "no majority", which only the hashmap/sorting
    # variants above support; plain Boyer-Moore returns an arbitrary candidate.
    assert find_majority([3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 1, 3, 3]) == 3
    #assert find_majority([1, 2, 3]) == -1
    assert find_majority([3,2,3]) == 3
    #assert find_majority([]) == -1
    assert find_majority([2,2,1,1,1,2,2]) == 2
    assert find_majority([1, 1, 1, 1, 1]) == 1
    #assert find_majority([1,1,1,1,3,3,3,4,4,4]) == -1
    assert find_majority([1]) == 1
    assert find_majority([1, 1]) == 1
| souravs17031999/100dayscodingchallenge | arrays/majority_element_array.py | majority_element_array.py | py | 5,970 | python | en | code | 43 | github-code | 13 |
11653776738 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django import forms
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import AdminPasswordChangeForm
from models import *
class DoctorInline(admin.StackedInline):
    """Inline editor for the Doctor profile attached to a UserBase."""
    model = Doctor
    can_delete = False
    verbose_name_plural = 'Doctor information'
class PatientInline(admin.StackedInline):
    """Inline editor for the Patient profile attached to a UserBase."""
    model = Patient
    can_delete = False
    verbose_name_plural = 'Patient information'
class UserBaseAdmin(UserAdmin):
    """Admin for UserBase accounts, showing only the inline matching the user's role."""
    list_filter = ('role',)
    list_display = ('id','name','phone',
            'address','sex','role')
    fieldsets = (
        (None, {'fields': ('username','email', 'password')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('Personal info'),
            {'fields': ('first_name', 'last_name','phone',
                    'address','sex','role')}),
    )
    inlines = (DoctorInline, PatientInline)

    def name(self,obj):
        # Column callable for list_display.
        return obj.full_name

    def get_formsets_with_inlines(self, request, obj=None):
        """Yield only the inlines relevant to the object's role.

        Both profile inlines are hidden on the add view (obj is None); on the
        change view the inline for the opposite role is suppressed.
        """
        for inline in self.get_inline_instances(request, obj):
            # hide the profile inlines in the add view
            if isinstance(inline, DoctorInline) and obj is None:
                continue
            if isinstance(inline, PatientInline) and obj is None:
                continue
            if obj:
                # show only the inline that matches the user's role
                if obj.role == UserBase.DOCTOR and isinstance(inline, PatientInline):
                    continue
                if obj.role == UserBase.PATIENT and isinstance(inline, DoctorInline):
                    continue
            yield inline.get_formset(request, obj), inline
# class UserBaseField(admin.ModelAdmin):
# list_display = ('address','phone','sex',)
# def address(self,obj):
# return obj.user.address
# def phone(self,obj):
# return obj.user.phone
# def sex(self,obj):
# return obj.user.get_sex_display()
# class PatientAdmin(UserBaseField):
# list_display = ('dob',) + UserBaseField.list_display
# class DoctorAdmin(UserBaseField):
# list_display = ('name','category',) + UserBaseField.list_display
# def name(self,obj):
# return u'{} {}'.format(obj.user.first_name,obj.user.last_name)
class AppointmentForm(forms.ModelForm):
    """ModelForm hiding the auto-maintained last_change field.

    No Meta.model is set here; the model is supplied by the ModelAdmin that
    uses this form.
    """
    class Meta:
        exclude = ('last_change',)
class AppointmentAdmin(admin.ModelAdmin):
    """Read/change-only admin for appointments (adding is disabled)."""
    form = AppointmentForm
    list_display = ('id','patient','doctor','appointment','creation_date','last_change')
    list_filter = ('appointment',)

    def has_add_permission(self, request):
        # Appointments are created through the application, not the admin.
        return False
class TreatmentAdmin(admin.ModelAdmin):
    """Admin for treatments; reuses AppointmentForm to hide last_change."""
    form = AppointmentForm
    list_display = ('patient','doctor','creation_date','last_change')
class FeedbackAdmin(admin.ModelAdmin):
    """Read-only listing of patient feedback (adding is disabled)."""
    list_display = ('patient','feedback','creation_date',)
    list_filter = ('creation_date',)

    def has_add_permission(self, request):
        # Feedback is submitted by patients, not created by staff.
        return False
class ScheduleAdmin(admin.ModelAdmin):
    """Admin for schedule entries, filterable by creation date."""
    list_display = ('creation_date','note','last_change',)
    list_filter = ('creation_date',)
    # date_hierarchy = 'creation_date'
# admin.site.register(Patient,PatientAdmin)
# admin.site.register(Doctor,DoctorAdmin)
# Register models with their admin configurations.
admin.site.register(Schedule,ScheduleAdmin)
admin.site.register(Feedback,FeedbackAdmin)
admin.site.register(Appointment,AppointmentAdmin)
admin.site.register(Treatment,TreatmentAdmin)
admin.site.register(UserBase,UserBaseAdmin) | HoangJerry/bookingDoctor | api/admin.py | admin.py | py | 3,689 | python | en | code | 0 | github-code | 13 |
38970834340 | '''
Написать программу которая считает количество строк в прикрепленном файле.
Файл должен находиться в том же каталоге что и программа.
P.S. в подсчет не включать строки которые обозначают пропуски между частями стихотворения
'''
# Count the lines in zadanie2.txt (same directory as this script), excluding
# the blank lines that separate the parts of the poem.
# Use a context manager so the file is closed even if reading fails.
with open('zadanie2.txt', encoding='utf-8') as f:
    st = f.readlines()  # read every line of the file
# Number of blank separator lines.
count = sum(1 for line in st if line == '\n')
# Report total lines minus the blank separator lines.
print(f' Колличество строк в файле равно : {len(st) - count}')
| ArTdrums/zadania-s-failami | 10. задание 2.py | 10. задание 2.py | py | 877 | python | ru | code | 0 | github-code | 13 |
7041750330 | from o3seespy.command.element.base_element import ElementBase
class TwoNodeLink(ElementBase):
    """
    The TwoNodeLink Element Class

    This command is used to construct a twoNodeLink element object, which is defined by two nodes. The element can have
    zero or non-zero length. This element can have 1 to 6 degrees of freedom, where only the transverse and rotational
    degrees of freedom are coupled as long as the element has non-zero length. In addition, if the element length is
    larger than zero, the user can optionally specify how the P-Delta moments around the local x- and y-axis are
    distributed among a moment at node i, a moment at node j, and a shear couple. The sum of these three ratios
    is always equal to 1. In addition the shear center can be specified as a fraction of the element length
    from the iNode. The element does not contribute to the Rayleigh damping by default. If the element has
    non-zero length, the local x-axis is determined from the nodal geometry unless the optional x-axis
    vector is specified in which case the nodal geometry is ignored and the user-defined orientation
    is utilized. It is important to recognize that if this element has zero length, it does not
    consider the geometry as given by the nodal coordinates, but utilizes the user-defined
    orientation vectors to determine the directions of the springs.
    """
    op_type = 'twoNodeLink'

    def __init__(self, osi, ele_nodes, mats: list=None, dirs: list=None, p_delta_vals: list=None, shear_dist=None, do_rayleigh=False, orient: list=None, mass: float=None):
        """
        Initial method for TwoNodeLink

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        ele_nodes: list
            A list of two element nodes
        mats: list, optional
            A list of objects associated with previously-defined uniaxial_material objects
        dirs: list, optional
            A list material directions: * 2d-case: ``1`` , ``2`` - translations along local x,y axes; ``3`` - rotation
            about local z axis * 3d-case: ``1``, ``2``, ``3`` - translations along local x,y,z axes; ``4``, ``5``, ``6`` -
            rotations about local x,y,z axes
        p_delta_vals: list, optional
            P-delta moment contribution ratios, size of ratio vector is 2 for 2d-case and 4 for 3d-case (entries:
            ``[my_inode, my_jnode, mz_inode, mz_jnode]``) ``my_inode`` + ``my_jnode`` <= 1.0, ``mz_inode`` + ``mz_jnode`` <=
            1.0. remaining p-delta moments are resisted by shear couples.
        shear_dist: None, optional
        do_rayleigh: bool
            To include rayleigh damping from the element (optional, default = no rayleigh damping contribution)
        orient: list, optional
        mass: float, optional
            Element mass (optional, default = 0.0)

        Examples
        --------
        >>> import o3seespy as o3
        >>> # Example is currently not working
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> coords = [[0, 0], [0, 1]]
        >>> ele_nodes = [o3.node.Node(osi, *coords[x]) for x in range(2)]
        >>> mats = [o3.uniaxial_material.Elastic(osi, 1.0),
        >>>         o3.uniaxial_material.Elastic(osi, 1.0)]
        >>> p_delta_vals = [1.0, 1.0]
        >>> o3.element.TwoNodeLink(osi, ele_nodes=ele_nodes, mats=mats, dir=[1, 1], p_delta_vals=p_delta_vals)
        """
        self.osi = osi
        # Store node tags for the command string and keep the node objects.
        self.ele_node_tags = [x.tag for x in ele_nodes]
        self.ele_nodes = ele_nodes
        if mats is None:
            self.mats = None
        else:
            # Materials are passed to OpenSees by tag.
            self.mats = [x.tag for x in mats]
        self.dirs = dirs
        self.p_delta_vals = p_delta_vals
        self.shear_dist = shear_dist
        self.do_rayleigh = do_rayleigh
        self.orient = orient
        if mass is None:
            self.mass = None
        else:
            self.mass = float(mass)
        # Allocate the next element tag on the OpenSees instance.
        osi.n_ele += 1
        self._tag = osi.n_ele
        # Assemble the OpenSees command parameters, appending only the
        # optional flags that were actually provided.
        self._parameters = [self.op_type, self._tag, *self.ele_node_tags]
        if getattr(self, 'mats') is not None:
            self._parameters += ['-mat', *self.mats]
        if getattr(self, 'dirs') is not None:
            self._parameters += ['-dir', *self.dirs]
        if getattr(self, 'p_delta_vals') is not None:
            self._parameters += ['-pDelta', *self.p_delta_vals]
        if getattr(self, 'shear_dist') is not None:
            # NOTE(review): shear_dist is unpacked here, so it must be an
            # iterable; a bare float (as the parameter doc implies) would
            # raise TypeError. Confirm the expected type against the
            # OpenSees twoNodeLink documentation.
            self._parameters += ['-shearDist', *self.shear_dist]
        if getattr(self, 'do_rayleigh'):
            self._parameters += ['-doRayleigh']
        if getattr(self, 'orient') is not None:
            self._parameters += ['-orient', *self.orient]
        if getattr(self, 'mass') is not None:
            self._parameters += ['-mass', self.mass]
        self.to_process(osi)
| o3seespy/o3seespy | o3seespy/command/element/link.py | link.py | py | 4,844 | python | en | code | 16 | github-code | 13 |
26445750684 | import os
import multiprocessing
from multiprocessing import Process
import pandas as pd
import tqdm
import sys
# Root directory of the 16 kHz xeno-canto recordings
# (re-assigned with the same value inside __main__ below).
base = '/datasets/xeno_canto/wav_16khz_XC/'

def get_length(files):
    """Report species directories left with no non-'_16.wav' audio files.

    NOTE(review): despite the name, this does not compute a length; it lists
    the directory, filters out files whose name contains '_16.wav', and
    prints the directory path (and the empty list) when nothing remains.
    """
    audios = os.listdir(files)
    audios = [f for f in audios if '_16.wav' not in f]
    if len(audios) == 0:
        print(files)
        print(audios)
if __name__ == '__main__':
    # Spawn one process per species folder to check it in parallel.
    base = '/datasets/xeno_canto/wav_16khz_XC/'  # duplicates the module-level value
    paths = os.listdir(base)
    pbar = tqdm.tqdm(total=len(os.listdir(base))) # Init pbar
    processes = []
    for folder in paths:
        file = base + folder
        p = Process(target=get_length, args=[file])
        p.start()
        processes.append(p)
        # NOTE(review): the bar advances on process *launch*, not completion.
        pbar.update(n=1)
for p in processes:
p.join() | farrinfedra/bird_song_resynthesis | preprocessing/remove.py | remove.py | py | 767 | python | en | code | 0 | github-code | 13 |
8326381437 | lines = open('../data/ex7.txt', 'r').readlines()
# Maps '_'-joined directory key -> total size of the files in that subtree.
dir_size_map = dict()
# Maps directory key -> its parent directory key ('' for the root).
parent_map = dict()
def add_to_parents(size, cur_dir):
    # Propagate a file's size up through every ancestor of cur_dir.
    parent = parent_map[cur_dir]
    if parent == '':
        return
    dir_size_map[parent] += size
    try:
        add_to_parents(size, parent)
    except RecursionError as e:
        # Very deep directory chains can blow the recursion limit; report and stop.
        print('recursion error at: ' + cur_dir + ' ' + parent)
        return
def dir_size(cur_dir):
    """Consume the global `lines` (a '$ cd'/'$ ls' shell transcript) and fill
    dir_size_map / parent_map with per-directory file sizes.

    `cur_dir` is a mangled path built by joining directory names with '_'.
    The function destructively pops parsed lines off the global list and
    recurses for each directory change.
    """
    global lines
    global parent_map
    if len(lines) == 0:
        return
    # Every recursion step must start on a '$ cd' command.
    if 'cd' not in lines[0]:
        print(lines[0])
        raise ValueError('cd not found!')
    if '..' in lines[0]:
        # '$ cd ..': resume parsing in the parent directory.
        lines.pop(0)
        if cur_dir != '':
            dir_size(parent_map[cur_dir])
        if len(lines) == 0:
            return
    dum = cur_dir
    # Mangle the new directory path: parent + '_' + name from '$ cd <name>'.
    cur_dir = dum+'_'+lines[0].replace('$ cd ', '').replace('\n', '')
    if cur_dir not in parent_map.keys():
        parent_map[cur_dir] = dum
    if cur_dir not in dir_size_map.keys():
        dir_size_map[cur_dir] = 0
    lines.pop(0)
    # A 'cd' into a directory is expected to be followed by 'ls'.
    if 'ls' not in lines[0]:
        raise ValueError('cd not found!')
    lines.pop(0)
    i = 0
    # Consume the 'ls' output: 'dir <name>' entries and '<size> <name>' files.
    for line in lines:
        if 'dir' in line:
            i += 1
            continue
        elif line[0].isnumeric():
            i += 1
            size = int(line.split(' ')[0])
            dir_size_map[cur_dir] += size
            add_to_parents(size, cur_dir)
        else:
            break
    lines = lines[i:]
    if len(lines) > 0:
        dir_size(cur_dir)
    else:
        return
# Kick off parsing from the sentinel root ''.
dir_size('')
print(dir_size_map)
# print(parent_map)
# Part 1: total size of all directories whose size is at most 100000.
filtered_dict = {k: v for k, v in dir_size_map.items() if v <= 100000}
print(filtered_dict)
total = 0
for k, v in filtered_dict.items():
    total += v
print(total)
# Part 2: smallest directory whose removal frees enough of the 70000000
# capacity to leave at least 30000000 unused ('_/' is the mangled root).
unused = 70000000 - dir_size_map['_/']
oke_dirs = [v for v in dir_size_map.values() if v >= (30000000 - unused)]
print(oke_dirs)
print(min(oke_dirs))
| DonnyWhoLovedBowling/aoc2022 | src/ex7.py | ex7.py | py | 1,860 | python | en | code | 0 | github-code | 13 |
1680816946 | #!/usr/bin/env python3
#encoding=utf-8
#-------------------------------------------
# Usage: python3 global_scope_example.py
# Description: global in function definition
#-------------------------------------------
x = 88                # Global x
print('before func, x = %s' % x)
def func():
    """Rebind the module-level x via the `global` statement."""
    global x
    x = 99            # Without `global`, this would create a local instead
func()
print('after func, x = %s' % x) # print 99
| mindnhand/Learning-Python-5th | Chapter17.Scopes/global_scope_example.py | global_scope_example.py | py | 427 | python | en | code | 0 | github-code | 13 |
25397659347 | import numpy as np
import matplotlib.pyplot as plt
import os
def sigmoid(x):
    """Elementwise logistic function sigma(x) = 1 / (1 + e**(-x)).

    Computed as exp(-logaddexp(0, -x)), which is numerically stable: the
    naive 1/(1+exp(-x)) overflows `np.exp` (RuntimeWarning) for large
    negative inputs, while this form simply underflows to 0.
    """
    return np.exp(-np.logaddexp(0.0, -x))
def logistic_regression_gradient_descent(x, y, W_init, learning_Rate, tol=10e-5, max_count=10000000):
    """Fit logistic-regression weights with batch gradient descent.

    Parameters
    ----------
    x : ndarray, shape (n_features, n_samples)
        Design matrix (callers prepend a bias row of ones).
    y : ndarray, shape (n_samples, 1)
        Binary 0/1 labels.
    W_init : ndarray, shape (n_features, 1)
        Starting weight vector.
    learning_Rate : float
        Gradient step size.
    tol : float, optional
        Stop once the gradient norm drops to this value.
    max_count : int, optional
        Hard cap on the number of iterations.

    Returns
    -------
    ndarray, shape (n_features, 1)
        The fitted weights.

    Notes
    -----
    The original cleared the console (Windows-only ``os.system('cls')``) and
    printed every prediction on every one of up to 1e7 iterations, which
    dominated runtime; all per-iteration console output has been removed.
    """
    w = W_init
    for count in range(max_count):
        # Logistic activation inlined so the routine is self-contained.
        y_predict = 1.0 / (1.0 + np.exp(-np.dot(x.T, w)))
        gradient = np.dot(x, (y_predict - y))
        w = w - learning_Rate * gradient
        if np.linalg.norm(gradient) <= tol:
            break
    return w
# AND-gate training data: columns are the four input pairs (0,0) (0,1) (1,0) (1,1).
x=np.array([[0,0,1,1],[0,1,0,1]])
# Only the (1,1) column is labelled 1.
y=np.array([[0,0,0,1]]).T
# Prepend a bias row of ones so the weight vector carries an intercept.
bias=np.ones((1,x.shape[1]))
x=np.concatenate((bias,x))
# Random (non-seeded) initial weights — results vary between runs.
w_init=np.random.randn(x.shape[0],1)
result=logistic_regression_gradient_descent(x,y,w_init,1)
print("result: ",result)
print("w_init: ",w_init)
'''
#plot
plt.plot([0,0,1],[0,1,0],'ro')
plt.plot(1,1,'bo')
def linear(w,x):
return -(w[0][0]+w[1][0]*x)/w[2][0]
line=linear(result,np.array([0,1]))
plt.plot([0,1],line)
plt.axis([-0.5,1.5,-0.5,1.5])
plt.show()
''' | nhoxnho1212/logisticRegression | And.py | And.py | py | 1,147 | python | en | code | 0 | github-code | 13 |
17015275115 | """
Naive n cubed solution - works but is too slow
"""
import csv
from itertools import combinations
from pathlib import Path
import pytest
class Solution:
    """LeetCode 15 (3Sum) solver."""

    def threeSum(self, nums: list[int]) -> list[list[int]]:
        """Return every unique triplet of values from *nums* summing to zero.

        Uses the sort + two-pointer scan, O(n^2) time and O(1) extra space
        beyond the sort, replacing the original O(n^3) enumeration of all
        3-combinations that the file header itself flagged as too slow.
        Triplet order within the result is unspecified (as before).
        """
        nums = sorted(nums)
        n = len(nums)
        triplets: list[list[int]] = []
        for i in range(n - 2):
            if nums[i] > 0:
                break  # smallest remaining value positive -> no zero sum left
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate anchor values
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the two inner values just recorded.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return triplets
@pytest.mark.parametrize(
    "nums,expected_triplets",
    (
        ([0,1,1],[]),
        ([0,0,0],[[0,0,0]]),
        ([-1,0,1,2,-1,-4],[[-1,-1,2],[-1,0,1]]),
    )
)
def test(nums, expected_triplets):
    """Check threeSum output as a set: every expected triplet present, no extras."""
    actual_triplets = Solution().threeSum(nums)
    # Sort each triplet so membership checks ignore intra-triplet order.
    actual_triplets_sorted = [sorted(trip) for trip in actual_triplets]
    for exp_trip in expected_triplets:
        assert exp_trip in actual_triplets_sorted
    # Same count -> no unexpected extra triplets.
    assert len(expected_triplets) == len(actual_triplets)
| chrisjdavie/interview_practice | leetcode/3sum/first.py | first.py | py | 859 | python | en | code | 0 | github-code | 13 |
23555310626 | import os
import sys
def list_files(*args):
    """Print and return the names of files stored under ``<cwd>/.zeon_fs``.

    Extra positional arguments are accepted for CLI compatibility but are
    ignored (as before). Returns the list of entry names; when the
    ``.zeon_fs`` folder does not exist the function now reports it and
    returns an empty list instead of crashing with an unhandled
    FileNotFoundError.
    """
    store_path = os.path.join(os.getcwd(), '.zeon_fs')
    try:
        dir_files = os.listdir(store_path)
    except FileNotFoundError:
        print('No .zeon_fs directory found in', os.getcwd())
        return []
    print('Files: ', len(dir_files))
    for name in dir_files:
        print(name)
    return dir_files
if __name__ == "__main__":
    args = sys.argv
    # NOTE(review): this guard exits whenever ANY extra CLI argument is
    # supplied (len(args) >= 2 -> exit). The condition looks inverted —
    # confirm whether the intent was `if len(args) < 2: exit(0)`.
    if not len(args) < 2:
        exit(0)
    list_files(sys.argv)
| azatuuluaman/zeon_fs2 | commands/list_files.py | list_files.py | py | 375 | python | en | code | 0 | github-code | 13 |
5253575605 | from app.db.Models.flow_context import FlowContext
def get_domain_tags(domain_id):
    """Return the distinct upload tags found across every flow document
    belonging to *domain_id* (order unspecified)."""
    pipeline = [
        {"$match": {"domain_id": domain_id}},
        {"$project": {"upload_tags": 1}},
    ]
    unique_tags = set()
    for doc in FlowContext().db().aggregate(pipeline):
        unique_tags.update(doc.get("upload_tags", []))
    return list(unique_tags)
def get_tags_by_ids(ids):
    """Map each matching flow document _id to its upload_tags list
    ([] when the field is absent)."""
    cursor = FlowContext().db().find({"_id": {"$in": ids}}, {"upload_tags"})
    tags_by_id = {}
    for doc in cursor:
        tags_by_id[doc['_id']] = doc.get('upload_tags', [])
    return tags_by_id
def delete_tag(domain_id, tag):
    """Pull *tag* out of upload_tags on every flow document of the domain."""
    selector = {'domain_id': domain_id}
    change = {'$pull': {'upload_tags': tag}}
    FlowContext().db().update_many(selector, change)
def update_tag(domain_id, tag, new_value):
    """Rename *tag* to *new_value* on every flow document of the domain that
    currently lists it (positional ``$`` update touches the matched entry)."""
    selector = {'domain_id': domain_id, 'upload_tags': tag}
    change = {'$set': {'upload_tags.$': new_value}}
    FlowContext().db().update_many(selector, change)
70459644497 | # making a request to fixer.io forex rates website
# using user input for api parameters
# NOT working anymore: base is always EURO for free accounts
import requests
def main():
    """Prompt for two currency codes and print the fixer.io exchange rate.

    Raises a generic Exception on any non-200 HTTP status.
    """
    base = input("First Currency: ")
    other = input("Second Currency: ")
    # the takeaway is that url params can be passed as below
    res = requests.get("https://api.fixer.io/latest",
                       params={"base": base, "symbols": other})
    if res.status_code != 200:
        raise Exception("ERROR: API request unsuccessful.")
    data = res.json()
    # The API returns rates keyed by the requested symbol.
    rate = data["rates"][other]
    print(f"1 {base} is equal to {rate} {other}")
if __name__ == "__main__":
    main()
| amrfekryy/course-CS50W | lecture4 - ORM&API/21currency2.py | 21currency2.py | py | 672 | python | en | code | 0 | github-code | 13 |
2456556450 | import sys
from termcolor import colored, cprint
#def answer(dimensions, ur_position, guard_position, distance):
def debug(*objects):
    """Trace helper: print all positional arguments as a single tuple."""
    print(objects)
# Test case 1 (overridden below): room dims, your position, guard position, range.
dims = [3, 2]
ur_pos = [1, 1]
g_pos = [2, 1]
dist = 4
# dims = [300, 275]
# ur_pos = [150, 150]
# g_pos = [185, 100]
# dist = 500
# dims = [1000, 1000]
# ur_pos = [250, 25]
# g_pos = [257, 49]
# dist = 25
# These reassignments shadow the values above — only this case actually runs.
dims = [42, 59]
ur_pos = [34, 44]
g_pos = [6, 34]
dist = 5000
##############################################
# def simulate_slow():
#     count = 100000000
#     for i in range(0, count):
#         math.atan(count + i)
##############################################
##############################################
import math
from decimal import Decimal
# def debug(*objects): 1
guard_angles = dict()  # firing angles already known to hit a guard image
your_angles = set()    # angles at which a shot would hit your own image first
def answer(dims, ur_pos, guard_pos, dist):
    """Count distinct firing directions that hit the (mirrored) guard within
    *dist*, by tiling the room into mirrored copies and checking each one.

    Relies on check_room mutating the module-level guard_angles/your_angles
    sets, so results are only valid for a single fresh call.
    """
    # Enough virtual rooms in each axis to cover the firing radius.
    room_x_count = int(dist / dims[0]) + 1
    room_y_count = int(dist / dims[1]) + 1
    r_square = dist * dist
    total_count = []
    for room_y_idx in range(0, room_y_count + 1):
        for room_x_idx in range(0, room_x_count + 1):
            # All four sign quadrants of this room index (set dedups 0-axes).
            quadrants = set([ (room_x_idx, room_y_idx), (room_x_idx, -room_y_idx),
                              (-room_x_idx, room_y_idx), (-room_x_idx, -room_y_idx) ])
            for room_index in quadrants:
                total_count.append( check_room(room_index, dims, ur_pos, guard_pos, r_square) )
    # check_room returns a hit position or None; count the hits.
    total_count = [x for x in total_count if x is not None]
    # debug(total_count)
    result = len(total_count)
    return result
def check_room(room_index, dims, ur_pos, guard_pos, r_square):
    """Examine one mirrored room: return the mirrored guard position when a
    shot from ur_pos at a new angle would hit it, else None.

    Side effects: records used angles in the module-level guard_angles dict
    and your_angles set, so later rooms don't double-count a direction or
    fire along a direction that hits your own reflection first.
    """
    mirr_guard_pos = get_mirror_pos(room_index, dims, guard_pos)
    mirr_ur_pos = get_mirror_pos(room_index, dims, ur_pos)
    guard_in_circle = is_in_circle(ur_pos, mirr_guard_pos, r_square)
    your_in_circle = is_in_circle(ur_pos, mirr_ur_pos, r_square)
    mirr_guard_angle = get_angle(ur_pos, mirr_guard_pos)
    mirr_your_angle = get_angle(ur_pos, mirr_ur_pos)
    # debug('Room Idx:', room_index, '- Mirr Guard:', mirr_guard_pos,
    #       '- Mirr Your:', mirr_ur_pos, '- Guard In Circle:', guard_in_circle)
    if your_in_circle and tuple(ur_pos) != mirr_ur_pos:
        # debug('Room:', colored(room_index, 'yellow'), 'Your Angle:', colored(mirr_your_angle, 'magenta'))
        # Same angle for guard and self: it counts only if the guard image
        # is closer than your own image along that ray.
        if (guard_in_circle and mirr_guard_angle not in guard_angles and
            mirr_guard_angle == mirr_your_angle and
            mirr_guard_is_closer(ur_pos, mirr_guard_pos, mirr_ur_pos) ):
            guard_angles[mirr_guard_angle] = mirr_guard_angle
            your_angles.add(mirr_your_angle)
            return mirr_guard_pos
        your_angles.add(mirr_your_angle)
    if guard_in_circle:
        if mirr_guard_angle in your_angles:
            # debug(colored('Hit self: Room:', 'cyan'), colored(room_index, 'yellow'), 'Angle:', mirr_your_angle)
            return None
        if mirr_guard_angle not in guard_angles:
            guard_angles[mirr_guard_angle] = mirr_guard_angle
            return mirr_guard_pos
    return None
def get_mirror_pos(room_index, dims, orig_pos):
    """Reflect *orig_pos* into the virtual room at grid index *room_index*.

    Odd room indices (per axis, negative included) correspond to mirrored
    copies, so the coordinate is flipped across that room dimension before
    the room offset is applied.
    """
    x, y = orig_pos
    if room_index[0] % 2:  # odd column -> horizontally mirrored room
        x = dims[0] - x
    if room_index[1] % 2:  # odd row -> vertically mirrored room
        y = dims[1] - y
    return (x + room_index[0] * dims[0], y + room_index[1] * dims[1])
def is_in_circle(o_pos, x_pos, r_square):
    """True when x_pos lies inside or on the circle of squared radius
    *r_square* centred at o_pos (squaring makes abs() unnecessary)."""
    dx = x_pos[0] - o_pos[0]
    dy = x_pos[1] - o_pos[1]
    return dx * dx + dy * dy <= r_square
def get_angle(o_pos, point_pos):
    """Angle (radians, via atan2) of the ray from o_pos toward point_pos."""
    dy = Decimal(point_pos[1] - o_pos[1])
    dx = Decimal(point_pos[0] - o_pos[0])
    # debug('Guard Pos:', point_pos, '- Angle: ', angle)
    return math.atan2(dy, dx)
def mirr_guard_is_closer(o_pos, mirr_guard_pos, mirr_ur_pos):
    """True when the mirrored guard is no farther from o_pos than the
    mirrored shooter (compared via squared distances)."""
    guard_sq = (mirr_guard_pos[0] - o_pos[0]) ** 2 + (mirr_guard_pos[1] - o_pos[1]) ** 2
    shooter_sq = (mirr_ur_pos[0] - o_pos[0]) ** 2 + (mirr_ur_pos[1] - o_pos[1]) ** 2
    debug('mirr_guard_pos: ', mirr_guard_pos)
    return guard_sq <= shooter_sq
# Run the active test case and print the hit count in green.
result = answer( dims, ur_pos, g_pos, dist )
debug(colored(('RESULT: ', result), 'green'))
| damhonglinh/google-foobar | lvl-4a--bringing_a_gun_to_a_guard_fight/drawing-scripts/messy-solution_4a.py | messy-solution_4a.py | py | 4,024 | python | en | code | 0 | github-code | 13 |
70192938897 | import sqlite3
import pandas as pd
# Refresh three SQLite tables from CSV files: each section wipes the table,
# reads the CSV with pandas, and bulk-inserts the rows.
try:
    sqliteConnection = sqlite3.connect('app.db')
    cursor = sqliteConnection.cursor()
    print("Successfully Connected to SQLite")
    #agency_domain_white_list
    cursor.execute("""DELETE FROM agency_domain_white_list""")
    data = pd.read_csv(r'agency_domain_whitelist.csv')
    df = pd.DataFrame(data, columns=['id', 'domain'])
    sqlite_insert_query = """INSERT INTO agency_domain_white_list
                          (id, domain)
                           VALUES (?, ?);"""
    for row in df.itertuples():
        print(row)
        cursor.execute(sqlite_insert_query, (row.id, row.domain))
    print("Records inserted successfully into agency_domain_white_list table")
    #agency
    cursor.execute("""DELETE FROM agency""")
    data = pd.read_csv(r'agency.csv')
    df = pd.DataFrame(data, columns=['id', 'title', 'domain', 'address'])
    print(df)
    sqlite_insert_query = """INSERT INTO agency
                          (id,title,domain,address)
                           VALUES (?, ?, ?, ?);"""
    for row in df.itertuples():
        print(row)
        cursor.execute(sqlite_insert_query,
                       (row.id, row.title, row.domain, row.address))
    print("Records inserted successfully into agency table")
    #broker
    cursor.execute("""DELETE FROM broker""")
    data = pd.read_csv(r'broker.csv')
    df = pd.DataFrame(
        data, columns=['id', 'email', 'password_hash', 'firstname', 'lastname', 'address', 'agencyId'])
    print(df)
    sqlite_insert_query = """INSERT INTO broker
                          (id, email, password_hash, firstname, lastname, address, agencyId)
                           VALUES (?, ?, ?, ?, ?, ?, ?);"""
    for row in df.itertuples():
        print(row)
        # NOTE(review): the CSV's password_hash and agencyId columns are
        # ignored — every broker gets this fixed hash and agencyId=1.
        # Confirm that this override is intentional.
        cursor.execute(sqlite_insert_query,
                       (row.id, row.email, "$6$rounds=656000$Zglc9lwR6pvbqA/Y$NQmp8lUCWFgSswh9Ppc4y46UjecmxQDRcd3ulaYhSPkkpWIedq/nB9AD5WD3MNNKRGEhdnBaL5.QZheoEiwI.0", row.firstname, row.lastname, row.address, 1))
    print("Records inserted successfully into broker table")
    sqliteConnection.commit()
    cursor.close()
except sqlite3.Error as error:
    print("Failed to insert data into sqlite table", error)
finally:
    # NOTE(review): if sqlite3.connect itself raised, sqliteConnection is
    # unbound here and this finally block would raise NameError.
    if (sqliteConnection):
        sqliteConnection.close()
        print("The SQLite connection is closed")
| barhantas/coalitioninc-task | api/data-loader.py | data-loader.py | py | 2,361 | python | en | code | 2 | github-code | 13 |
16853240085 | # -*- coding:utf8 -*-
import time
import celery
from celery import task
from celery.schedules import crontab
app = celery.Celery('cele', broker='redis://localhost:6379')
@task
def sayHello():
    """Demo Celery task: print, sleep three seconds, print again."""
    print ("hello...")
    time.sleep(3)
    print ('world...')
# Examples of crontab schedules (only the ones bound to names are kept):
'''
设置执行的时间
'''
# run every minute
c1 = crontab()
# run daily at midnight
c2 = crontab(minute=0, hour=0)
# run every fifteen minutes
crontab(minute='*/15')
# run every minute on Sundays
crontab(minute='*', hour='*', day_of_week='sun')
# run every ten minutes at 3, 17 and 22 o'clock
# NOTE(review): the original comment said Wednesday/Friday, but the
# pattern below selects Thursday and Friday ('thu,fri') — confirm intent.
crontab(minute='*/10', hour='3,17,22', day_of_week='thu,fri')
app.conf.beat_schedule = {
    'send-every-10-seconds': {
        'task': 'task.sayHello', # the task to run on the schedule
        'schedule': 10.0, # interval in seconds
        'args': () # arguments passed to the task
    },
}
} | ShuoDu/celery_used | showTime/task.py | task.py | py | 912 | python | zh | code | 0 | github-code | 13 |
14817079557 | ############################################################################################
# This script is tested and compatible with the python version 3.10 #
# make sure all python dependencies are installed to be able to import the listed modules. #
# This script was written and executed on a windows machine. #
# it can be adapted to a linux machine, by modifying few things like the file paths etc. #
############################################################################################
import requests
import json
import re
import os
import subprocess
import csv
import statistics
################################################
# path to a folder containing .txt files #
# of all my instances hostnames on Centreon #
################################################
file_path = r'C:\Users\Documents\projects\get_cpu_ram\servers'
#############################################
# list the files in the folder #
#############################################
def process_files_in_directory(directory):
    """Print the path and contents of every regular .txt file directly
    inside *directory*; report a missing directory instead of raising."""
    try:
        entries = os.listdir(directory)
    except FileNotFoundError:
        print(f"The directory '{directory}' does not exist.")
        return
    for name in entries:
        full_path = os.path.join(directory, name)
        # Only plain files with a .txt extension are shown.
        if not (os.path.isfile(full_path) and name.endswith('.txt')):
            continue
        print(f"Processing file: {full_path}")
        with open(full_path, 'r') as handle:
            print(handle.read())
        print()  # blank separator between files
# Provide the path to the directory you want to process
#directory_path = r"C:\Users\Documents\servers"
# Call the function to process the .txt files in the directory
process_files_in_directory(file_path)
######################################################
# A function to get CPU usage using Centreon's API #
# from hostnames in a file #
######################################################
def get_cpu_usage(file_path):
    """Average CPU usage (rounded int, percent) across the hosts listed one
    per line in *file_path*, queried from the Centreon realtime API.

    NOTE(review): TLS verification is disabled and credentials are hardcoded;
    hosts whose output doesn't match the regexes are skipped yet still count
    in the divisor, and an empty host file raises ZeroDivisionError.
    """
    # URL and options
    url = "https://centreon.domain.net/centreon/api/index.php"
    option_cpu = "object=centreon_realtime_services&action=list&searchHost={}&searchOutput=CPU(s)&fields=host_name,output"
    # authentication data
    payload = {'username': 'your username', 'password': 'your password'}
    # Authentication request
    auth_response = requests.post(f"{url}?action=authenticate", data=payload, verify=False) #verify=false so as to bypass ssl verification of the request.
    auth_token = auth_response.json()['authToken']
    # Initialize header with the auth token
    headers = {'centreon-auth-token': auth_token}
    # Read server names from file
    with open(file_path, 'r') as f:
        servers = f.read().splitlines()
    # Get CPU usage for each server
    total_cpu_usage = 0
    #cpu_usages = {}
    for server in servers:
        cpu_response = requests.get(f"{url}?{option_cpu.format(server)}", headers=headers, verify=False) #verify=false so as to bypass ssl verification of the request.
        cpu_output = cpu_response.text
        cpu_pattern = re.compile(r'\d+ CPU\(s\)')
        cpu_usage_pattern = re.compile(r'(\d+\.\d+) %')
        cpu_match = cpu_pattern.search(cpu_output)
        if cpu_match:
            cpu = cpu_match.group(0)
            cpu_usage_match = cpu_usage_pattern.search(cpu_output)
            if cpu_usage_match:
                #cpu_usage = cpu_usage_match.group(0)
                cpu_usage = float(cpu_usage_match.group(0).split()[0])
                print(cpu_usage)
                total_cpu_usage += cpu_usage
                #cpu[server] = { 'cpu': cpu}
    # Average over ALL listed servers, matched or not.
    avg_cpu_usage = round(total_cpu_usage / len(servers))
    #Option A: you can activate this option and disactivate option B if you want to know the average CPU usage and number of available CPUs.#
    #return avg_cpu_usage, cpu
    #Option B: you can activate this option if you want to know only the average CPU usage#
    return avg_cpu_usage
######################################################
# A function to get RAM usage using Centreon's API #
# from hostnames in a file #
######################################################
def get_mem_usage(file_path):
    """Average RAM usage (rounded int, percent) across the hosts listed one
    per line in *file_path*, queried from the Centreon realtime API.

    NOTE(review): same caveats as get_cpu_usage — TLS verification disabled,
    hardcoded credentials, unmatched hosts still counted in the divisor,
    ZeroDivisionError on an empty host file. The duplicated auth logic could
    be factored out with get_cpu_usage.
    """
    # URL and options
    url = "https://centreon.domain.net/centreon/api/index.php"
    option_mem = "object=centreon_realtime_services&action=list&searchHost={}&searchOutput=Ram&fields=host_name,output"
    # authentication data
    payload = {'username': 'your username', 'password': 'your password'}
    # Authentication request
    auth_response = requests.post(f"{url}?action=authenticate", data=payload, verify=False) #verify=false so as to bypass ssl verification of the request.
    auth_token = auth_response.json()['authToken']
    # Initialize header with the auth token
    headers = {'centreon-auth-token': auth_token}
    # Read server names from file
    with open(file_path, 'r') as f:
        servers = f.read().splitlines()
    mem_usages = {}
    total_mem_usage = 0
    for server in servers:
        mem_response = requests.get(f"{url}?{option_mem.format(server)}", headers=headers, verify=False) #verify=false so as to bypass ssl verification of the request.
        mem_output = mem_response.text
        mem_pattern = re.compile(r'(\d+\.\d+) GB')
        mem_usage_pattern = re.compile(r'(\d+\.\d+) GB \((\d+\.\d+)%')
        mem_match = mem_pattern.search(mem_output)
        if mem_match:
            mem = mem_match.group(0)
            mem_usage_match = mem_usage_pattern.search(mem_output)
            if mem_usage_match:
                #mem_usage = mem_usage_match.group(2)
                mem_usage = float(mem_usage_match.group(2))
                total_mem_usage += mem_usage
                #mem[server] = {'mem': mem}
    # Average over ALL listed servers, matched or not.
    avg_mem_usage = round(total_mem_usage / len(servers))
    #Option A: you can activate this option and disactivate option B if you want to know the average RAM usage and number of available RAM.#
    #return avg_mem_usage, mem
    #Option B: you can activate this option if you want to know only the average RAM usage#
    return avg_mem_usage
##############################################################################
# A variable to store the interated CPU and RAM #
# results of all the instances. #
# #
# Note: You will need to specify the full path #
# of each file containing hostnames if using #
# python 3.7 #
# #
# Example: r'C:\Users\Documents\projects\get_cpu_ram\servers\selfcare.txt' #
# or create a variable like this: #
# selfcare = r'C:\Users\Documents\projects\get_cpu_ram\servers\selfcare.txt' #
# and use the varaible as: get_cpu_usage(selfcare) #
##############################################################################
# Collect the average CPU/RAM usage of every instance group; each entry
# triggers a full Centreon API round for the named host list file.
data = {
    'CPU_selfcare': get_cpu_usage('selfcare.txt'),
    'RAM_selfcare': get_mem_usage('selfcare.txt'),
    'CPU_nav': get_cpu_usage('nav.txt'),
    'RAM_nav': get_mem_usage('nav.txt'),
    'CPU_indexeur': get_cpu_usage('indexeur.txt'),
    'RAM_indexeur': get_mem_usage('indexeur.txt'),
    'CPU_pricing': get_cpu_usage('pricing.txt'),
    'RAM_pricing': get_mem_usage('pricing.txt'),
    'CPU_scoring': get_cpu_usage('scoring.txt'),
    'RAM_scoring': get_mem_usage('scoring.txt'),
    'CPU_dispo': get_cpu_usage('dispo.txt'),
    'RAM_dispo': get_mem_usage('dispo.txt'),
    'CPU_order': get_cpu_usage('order.txt'),
    'RAM_order': get_mem_usage('order.txt'),
    'CPU_innovente': get_cpu_usage('innovente.txt'),
    'RAM_innovente': get_mem_usage('innovente.txt'),
    'CPU_notifg': get_cpu_usage('notifg.txt'),
    'RAM_notifg': get_mem_usage('notifg.txt'),
    'CPU_mpi': get_cpu_usage('mpi.txt'),
    'RAM_mpi': get_mem_usage('mpi.txt'),
    'CPU_comparateur': get_cpu_usage('comparateur.txt'),
    'RAM_comparateur': get_mem_usage('comparateur.txt'),
}
######################################################
#        Check the variable $data to see the results #
#        of the functions.                           #
#        You can delete this or leave it             #
######################################################
print(data)
######################################################
#        Output the results in the variable $data    #
#        to a JSON file                              #
######################################################
json_file = r'C:\Users\Documents\projects\get_cpu_ram\csv_json\data_json.json'
with open(json_file, 'w') as outfile:
    json.dump(data, outfile, indent=4)
######################################################
#        Output the results in the variable $data    #
#        to a CSV file                               #
######################################################
csv_file = r'C:\Users\Documents\projects\get_cpu_ram\csv_json\Avg_Cpu_Ram.csv'
# Check if the file exists and has a non-zero size
if os.path.isfile(csv_file) and os.path.getsize(csv_file) > 0:
    # Open the CSV file in append mode and write the header if the file is empty
    with open(csv_file, 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([data['CPU_selfcare'], data['RAM_selfcare'], data['CPU_nav'], data['RAM_nav'], data['CPU_indexeur'], data['RAM_indexeur'], data['CPU_pricing'], data['RAM_pricing'],data['CPU_scoring'], data['RAM_scoring'], data['CPU_order'], data['RAM_order'], data['CPU_dispo'], data['RAM_dispo'], data['CPU_innovente'], data['RAM_innovente'], data['CPU_notifg'], data['RAM_notifg'], data['CPU_mpi'], data['RAM_mpi'], data['CPU_comparateur'],data['RAM_comparateur']])
else:
    # Open the CSV file in write mode and write the header
    with open(csv_file, 'w', newline='') as f:
        writer = csv.writer(f, lineterminator='')
        writer.writerow(['CPU_selfcare', 'RAM_selfcare', 'CPU_nav', 'RAM_nav', 'CPU_indexeur', 'RAM_indexeur', 'CPU_pricing', 'RAM_pricing', 'CPU_scoring', 'RAM_scoring', 'CPU_order', 'RAM_order', 'CPU_dispo', 'RAM_dispo', 'CPU_innovente', 'RAM_innovente', 'CPU_notifg', 'RAM_notifg', 'CPU_mpi', 'RAM_mpi', 'CPU_comparateur', 'RAM_comparateur'])
        f.flush()
        f.write('\n')
        writer.writerow([data['CPU_selfcare'], data['RAM_selfcare'], data['CPU_nav'], data['RAM_nav'], data['CPU_indexeur'], data['RAM_indexeur'], data['CPU_pricing'], data['RAM_pricing'],data['CPU_scoring'], data['RAM_scoring'], data['CPU_order'], data['RAM_order'], data['CPU_dispo'], data['RAM_dispo'], data['CPU_innovente'], data['RAM_innovente'], data['CPU_notifg'], data['RAM_notifg'], data['CPU_mpi'], data['RAM_mpi'], data['CPU_comparateur'],data['RAM_comparateur']])
| MrTam-Node/get_avg_cpu_usage | get_avg_cpu_ram.py | get_avg_cpu_ram.py | py | 11,453 | python | en | code | 1 | github-code | 13 |
29201569515 | import numpy as np
import datetime
def distance(v1, v2):
    """Hamming distance: number of positions where v1 and v2 differ
    (extra elements of the longer sequence are ignored, as zip truncates)."""
    mismatches = 0
    for a, b in zip(v1, v2):
        if a != b:
            mismatches += 1
    return mismatches
def has_dup(G):
    """Return True when the iterable G contains a repeated (hashable) element."""
    seen = set()
    for v in G:
        if v in seen:
            return True
        seen.add(v)
    return False
def DFS(G, s):
    """Iterative depth-first search over an adjacency mapping.

    Parameters
    ----------
    G : dict
        Adjacency mapping with 1-based vertex keys 1..len(G); G[v] is a
        list of neighbour vertex numbers.
    s : int
        1-based start vertex.

    Returns
    -------
    list of bool
        explored[v-1] is True iff vertex v is reachable from s.

    Notes
    -----
    Cleanup of the original: drops the unused local ``f`` and the dead
    commented-out ``reachable_order`` bookkeeping, and narrows the try to
    the single ``next()`` call that can raise StopIteration.
    """
    explored = [False] * len(G)
    explored[s - 1] = True
    stack = [iter(G[s])]
    while stack:
        try:
            child = next(stack[-1])
        except StopIteration:
            stack.pop()  # this vertex's neighbours are exhausted
            continue
        if not explored[child - 1]:
            explored[child - 1] = True
            stack.append(iter(G[child]))
    return explored
print('start loading data:', datetime.datetime.now())
file = "clustering_big.txt"
with open('F:\\Google Drive\\coursera\\Algorithms - Tim Roughgarden\\3. Greedy Algorithms, Minimum Spanning Trees, and Dynamic Programming\\' + file) as f:
    lines=f.read().split('\n')
i = 0
# First line holds the counts; every other line is one vertex as a bit string.
# Duplicate bit patterns are collapsed into a single vertex.
for l in range(len(lines)):
    if lines[l]:
        tmp = lines[l].split()
        if i==0:
            #n_v = int(tmp[0]) there are dup in 200k nodes
            n_bits = int(tmp[1])
            i += 1
            G = {}
        else:
            tmp = tuple([True if int(t)==1 else False for t in tmp])
            if tmp not in G:
                G[tmp] = i # key = vertex value in bits, value: vertex number
                i+=1
n_v = i-1
print('finished loading data:', datetime.datetime.now())
G1 = {}
# G1 is an adjacency list of a graph
# Connect every vertex to all vertices at Hamming distance 1 (i == j flips
# one bit) or 2 (i != j flips two bits) by generating each candidate
# pattern and checking membership in G.
for key in G:
    for i in range(len(key)):
        for j in range(i, len(key)):
            tmp = list(key)
            if i==j:
                tmp[i] = not tmp[i]
            else:
                tmp[i] = not tmp[i]
                tmp[j] = not tmp[j]
            if tuple(tmp) in G:
                if G[key] not in G1:
                    G1[G[key]] = [G[tuple(tmp)]]
                else:
                    G1[G[key]].append(G[tuple(tmp)])
# Isolated vertices still need an (empty) adjacency entry for DFS.
for i in range(1, n_v+1):
    if i not in G1:
        G1[i] = []
print('finished calculating G1:', datetime.datetime.now())
cluster = np.zeros(shape=n_v)
# index+1 = vertex number; value = cluster number. initial value=0
cluster_number=0
# Repeatedly pick an unclustered vertex and label everything DFS reaches
# from it; the loop ends when no zeros remain.
while 0 in cluster:
    cluster_number += 1
    s = np.where(cluster==0)[0][0]
    explored = DFS(G1, s+1)
    # Sanity check: a reachable set must not straddle two existing clusters.
    if len(set(cluster[explored]))>1:
        print('error')
        break
    cluster[explored]=cluster_number
print('finished clustering:', datetime.datetime.now())
#print(cluster)
#print(set(cluster))
print(len(set(cluster)))
print(cluster_number)
# 6118
| sunanqi/learning | Greedy Algorithms- Minimum Spanning Trees- and Dynamic Programming by Tim Roughgarden/clustering_big.py | clustering_big.py | py | 2,833 | python | en | code | 0 | github-code | 13 |
# Semantic version components, joined into __version__ below.
__major__ = '2'
__minor__ = '0'
__patch__ = '0'
__version__ = '.'.join([__major__, __minor__, __patch__])
__author__ = 'Misha Turnbull'
__author_email__ = 'mishaturnbull@gmail.com'
# Platforms the project was exercised on, mapped to the tested components.
__tested_on__ = {"windows 10": ['gui'],
                 "osx el capitan": ['gui'],
                 "kali linux 2016.3": ['gui'],
                 }
# Dates in YYYYMMDD form.
__date_started__ = "20160506"
__date_last_edited__ = "20181017"
| mishaturnbull/EmailGUI | VERSION.py | VERSION.py | py | 442 | python | en | code | 0 | github-code | 13 |
def json_response(response, code):
    """Wrap *response* (any object exposing ``to_json()``) in a Flask
    response with the given HTTP status code and a JSON content type."""
    from flask import make_response
    payload = response.to_json()
    flask_resp = make_response(payload, code)
    flask_resp.headers['Content-Type'] = "application/json"
    return flask_resp
def format_content_range(start, end, size):
    """Render a Content-Range payload: '<start>-<end>/<size>', where the
    span collapses to '*' when either bound is None and the size to '*'
    when it is None."""
    if start is None or end is None:
        span = '*'
    else:
        span = '{0}-{1}'.format(start, end)
    total = '*' if size is None else str(size)
    return span + '/' + total
| whisust/jellynote-backend | api/routes/utils.py | utils.py | py | 427 | python | en | code | 1 | github-code | 13 |
27080975159 | """
parsers.py
parsers models, input ip address list collapse, or
scanner results to parse.
"""
import fnmatch
import ipaddress
import os
import argparse
from libnmap.parser import NmapParser, NmapParserException
from dscan import log
def parse_args():
    """
    Used by main to parse the user arguments.

    :return: argparse instance.
    :rtype: `argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(prog='Distributed scanner')
    parser.add_argument('--name', type=str, required=True)

    # A sub-command is mandatory: srv, agent, or config.
    subparsers = parser.add_subparsers(dest='cmd')
    subparsers.required = True

    srv = subparsers.add_parser('srv')
    srv.add_argument('--config', required=True)
    srv.add_argument('-b', default='0.0.0.0')
    srv.add_argument('-p', type=int, default=2040)
    srv.add_argument('targets', type=argparse.FileType('rt'))

    agent = subparsers.add_parser('agent')
    agent.add_argument('--config', required=True)
    agent.add_argument('-s', required=True)
    agent.add_argument('-p', type=int, default=2040)

    config = subparsers.add_parser('config')
    # Certificate subject fields — all required strings.
    for flag in ('-email', '-cn', '-c', '-l', '-st', '-o', '-ou'):
        config.add_argument(flag, type=str, required=True)
    config.add_argument('-days', type=int, required=True)
    return parser
class ReportsParser:
    """
    Extracts host results from XML Nmap report files.
    """

    def __init__(self, reports_path, pattern):
        """
        :param reports_path: path where the reports are stored
        :param pattern: `fnmatch` pattern selecting which files to parse.
        """
        self.path = reports_path
        self.pattern = pattern

    def hosts_up(self):
        """
        :return: ipv4 addresses of every host reported as up.
        :rtype: `list`
        """
        return [host.ipv4 for host in self.__walk() if host.is_up()]

    def __walk(self):
        """
        Parse every report file matching the pattern and yield its hosts;
        unparseable files are logged and skipped.
        """
        for entry in os.scandir(self.path):
            if not fnmatch.fnmatch(entry.name, self.pattern):
                continue
            try:
                parsed = NmapParser.parse_fromfile(entry.path)
            except NmapParserException as ex:
                log.error(f"Error parsing {entry} - {ex}")
            else:
                yield from parsed.hosts
class TargetOptimization:
    """
    Takes lists of hosts or networks and optimizes them for scanning:
    big CIDRs like /8 or /16 are split into /24 subnets, and consecutive
    single addresses are collapsed into nmap-style ranges (192.168.10.1-4).
    """
    def __init__(self, fpath, cidr="/24"):
        # cidr is stored but not referenced by save(); /24 is hardcoded there.
        self.cidr = cidr
        self.fpath = fpath
    def save(self, targets):
        """
        Takes a list of targets to optimize and saves it in the workspace path.

        :param targets: `list` of targets (`str` and top optimize.
        :type: targets: `list` of `str`
        """
        assert targets, "Empty target list"
        ips = []
        with open(self.fpath, 'wt') as qfile:
            for target in targets:
                try:
                    if "/" in target:
                        # Network target: split anything wider than /24.
                        net = ipaddress.ip_network(target.strip())
                        if net.prefixlen < 24:
                            subs = map(lambda n: f"{n.with_prefixlen}\n",
                                       net.subnets(new_prefix=24))
                            qfile.writelines(subs)
                        else:
                            qfile.write(f"{net.with_prefixlen}\n")
                    else:
                        # Single host: collect for range collapsing below.
                        ips.append(ipaddress.ip_address(target.strip()))
                except (TypeError, ValueError):
                    log.error(f"Error optimizing target: {target}")
            # sorting the ip addresses.
            ips.sort(key=ipaddress.get_mixed_type_key)
            # find consecutive ip address ranges.
            # NOTE(review): ipaddress._find_address_range is a private stdlib
            # helper — it may change between Python versions.
            if ips:
                for first, last in ipaddress._find_address_range(ips):
                    ip_range = list(ipaddress.summarize_address_range(first,
                                                                      last))
                    # if the number of ranges is more than one network in cidr
                    # format then the glob format x.x.x.x-y is more efficient,
                    # since nmap supports this format.
                    if len(ip_range) > 1:
                        qfile.write(f"{first}-{last.exploded.split('.')[3]}\n")
                    else:
                        qfile.write(f"{ip_range.pop().with_prefixlen}\n")
35658457215 | #!/usr/bin/python3
import sys
import time
from scapy.all import sendp, ARP, Ether
# Require a victim IP and the IP to impersonate.
if len(sys.argv) < 3:
    print(sys.argv[0] + ": <target> <spoof_ip>")
    sys.exit(1)
iface = "wlp2s0"  # hardcoded network interface
target_ip = sys.argv[1]
fake_ip = sys.argv[2]
ethernet = Ether()
# Unsolicited ARP reply ("is-at") claiming fake_ip for our MAC.
arp = ARP(pdst=target_ip,
          psrc=fake_ip,
          op="is-at")
packet = ethernet / arp
# Re-send once a second forever to keep the victim's ARP cache poisoned.
while True:
    sendp(packet, iface=iface)
    time.sleep(1)
| balle/python-network-hacks | arp-spoof.py | arp-spoof.py | py | 399 | python | en | code | 135 | github-code | 13 |
32276470753 | # exercise 57: Cell Phone Bill
minutes = int(input('enter number of minutes: '))
messages = int(input('enter number of text messages: '))
# Anything past the first 50 minutes/messages is billed as overage.
if minutes > 50:
    extra_mins = minutes - 50
else:
    extra_mins = 0
if messages > 50:
    extra_text = messages - 50
else:
    extra_text = 0
base_charge = 15
included_mins = 50   # NOTE(review): unused — the checks above hardcode 50
included_text = 50   # NOTE(review): unused — the checks above hardcode 50
cost_of_extra_mins = 0.25
cost_of_extra_text = 0.15
additional_charge = 0.44   # 911 fee
tax_rate = 0.05
# Tax applies to the full charge including the 911 fee and overages.
total_charge = (base_charge + additional_charge) + (extra_mins * cost_of_extra_mins) + (extra_text * cost_of_extra_text)
tax_amount = tax_rate * total_charge
final_bill = total_charge + tax_amount
print()
print("base charge: $%.2f" % base_charge)
print("911 fee: $%.2f" % additional_charge)
print("taxes: $%.2f" % tax_amount)
# Overage lines are shown only when they are non-zero.
if extra_mins:
    print("%d extra minutes cost: $%.2f" % (extra_mins, extra_mins * cost_of_extra_mins))
if extra_text:
    print("%d extra messages cost: $%.2f" % (extra_text, extra_text * cost_of_extra_text))
print("FINAL BILL: $%.2f" % final_bill)
| sara-kassani/1000_Python_example | books/Python Workbook/decision_making/ex57.py | ex57.py | py | 1,015 | python | en | code | 1 | github-code | 13 |
6331738275 | import os
import numpy as np
import argparse
import gym
import tqdm
from keras.models import load_model
from pid_lenya import Agent
from run_pid_optimized import PIDPolicy
model = load_model('model.hd5')
def main():
    """Run the trained Keras controller in the GymFC attitude env.

    Loads the iris quadcopter config, then for each seed steps the
    environment with the network's prediction of the angular-velocity
    error and prints per-step, per-seed, and total rewards.
    """
    env = 'AttFC_GyroErr-MotorVel_M4_Ep-v0'
    seeds = [5,]
    current_dir = os.path.dirname(__file__)
    config_path = os.path.join(current_dir,
                               "../configs/iris.config")
    print("Loading config from ", config_path)
    os.environ["GYMFC_CONFIG"] = config_path
    print(" Making env=", env)
    sum_reward = 0
    env = gym.make(env)
    agent = PIDPolicy()  # PID baseline, stepped alongside the NN for comparison
    for seed in seeds:
        agent.reset()
        env.seed(seed)
        ob = env.reset()
        pbar = tqdm.tqdm(total=60000)
        iter_reward = 0
        rewards_mean = []
        while True:
            desired = env.omega_target
            actual = env.omega_actual
            # Keep stepping the PID agent so its internal state stays current,
            # even though the NN prediction is what drives the env.
            ac = agent.action(ob, env.sim_time, desired, actual)
            prediction = model.predict(np.array([desired - actual, ]))[0]
            print('delta:', desired - actual)
            ob, reward, done, info = env.step(prediction)
            print('reward:', reward)
            iter_reward += reward
            rewards_mean.append(abs(reward))
            if len(rewards_mean) == 10000:
                _mean = sum(rewards_mean) / 10000
                print(' --- Mean reward for 10k iters: {}'.format(_mean))
                rewards_mean.clear()
            pbar.update(1)
            if done:
                break
        print('ITERATION {} RESULT: {}'.format(seeds.index(seed), iter_reward))
        pbar.close()
        # Bug fix: the grand total was printed but never accumulated.
        sum_reward += iter_reward
    print(sum_reward)
if __name__ == '__main__':
    main()
| prokhn/onti-2019-bigdata | gymfc/examples/controllers/run_test.py | run_test.py | py | 1,894 | python | en | code | 0 | github-code | 13 |
11593232912 | #
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# LAB 9-8 트윗 메시지를 깔끔하게 정제하자, 243쪽
#
import re
# Clean up a tweet: strip retweet markers, hashtags, and mentions.
tweet = input('트윗을 입력하시오: ')
# Fix: regex patterns are now raw strings — '#\S+' / '@\S+' contain the
# invalid string escape '\S', which Python flags as a SyntaxWarning.
tweet = re.sub(r'RT', '', tweet)    # delete the "RT" retweet marker
tweet = re.sub(r'#\S+', '', tweet)  # delete hashtags (# and what follows)
tweet = re.sub(r'@\S+', '', tweet)  # delete mentions (@ and what follows)
print(tweet) | dongupak/DataSciPy | src/파이썬코드(py)/Ch09/lab_9_8.py | lab_9_8.py | py | 454 | python | ko | code | 12 | github-code | 13 |
73912529619 | from urllib.parse import urlencode
#------------------------------------------------------------------------------
from kivy.network.urlrequest import UrlRequest
#------------------------------------------------------------------------------
_Debug = False
#------------------------------------------------------------------------------
def cryptocurrency_listings(api_key, start=1, limit=10, convert='USD', cb=None):
    """Fetch the latest CoinMarketCap listings via Kivy's UrlRequest.

    :param api_key: CoinMarketCap Pro API key (sent in a request header).
    :param start: 1-based rank offset of the first listing to return.
    :param limit: number of listings to return.
    :param convert: currency symbol prices are converted into.
    :param cb: optional callback; when given, the request is asynchronous and
        the same ``cb`` is invoked on success, redirect, failure, and error —
        the callback must inspect the response to tell outcomes apart.
    :return: the in-flight ``UrlRequest`` when ``cb`` is given; otherwise
        blocks on ``req.wait()`` and returns the parsed JSON result.
    """
    url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
    parameters = {
        'start': str(start),
        'limit': str(limit),
        'convert': convert,
    }
    headers = {
        'Accepts': 'application/json',
        'X-CMC_PRO_API_KEY': api_key,
    }
    url += '?' + urlencode(parameters)
    req = UrlRequest(
        url=url,
        on_success=cb,
        on_redirect=cb,
        on_failure=cb,
        on_error=cb,
        req_headers=headers,
    )
    if cb:
        if _Debug:
            print('cryptocurrency_listings', req, cb)
        return req
    # No callback supplied: block until the request completes.
    req.wait()
    if _Debug:
        print('cryptocurrency_listings', req.result)
    return req.result
| datahaven-net/recotra | lib/coinmarketcap_client.py | coinmarketcap_client.py | py | 1,126 | python | en | code | 4 | github-code | 13 |
69927201619 | import typing as tp
from pathlib import Path
import dataclasses
import collections
import itertools
import pickle
import logging
import cv2
import imutils
import numpy as np
import shapely.geometry
import matplotlib.pyplot as plt
from .line import Line
from .colors import Colors
from .image_utils import get_image_moment, color_to_grayscale, grayscale_to_color
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class Image:
    """Mutable wrapper around an OpenCV image with checkpoint/restore support.

    ``image`` is the working copy that the in-place methods mutate;
    ``image_orig`` (and named checkpoints) hold pristine copies used by
    :meth:`reset_image`.
    """
    image_orig: np.ndarray
    image: np.ndarray = None
    # No annotations here, so these are class attributes shared until
    # set_axis()/set_scale() assign per-instance values.
    axis = None
    scale = None
    def __post_init__(self):
        # Named checkpoints live here; reset_image() starts from image_orig.
        self.checkpoint_dict: tp.Dict[str, np.ndarray] = dict()
        self.reset_image()
    def checkpoint(self, tag: str = None):
        """Set image_orig (or the named checkpoint) to the current image."""
        if tag is None:
            self.image_orig = self.copy_image()
        else:
            self.checkpoint_dict[tag] = self.copy_image()
    def copy_image(self):
        """Return a copy of `self.image`."""
        return self.image.copy()
    def reset_image(self, tag: str = None) -> None:
        """Restore image from checkpoint.
        If `tag` does not exist, restore `image_orig`.
        """
        self.image = self.checkpoint_dict.get(tag, self.image_orig).copy()
    def bgr_to_gray(self) -> None:
        """Convert image to greyscale."""
        self.image = color_to_grayscale(self.image)
    def gray_to_bgr(self) -> None:
        """Convert a greyscale image back to a 3-channel colour image."""
        self.image = grayscale_to_color(self.image)
    def threshold(self, thresh_val: float = -1) -> None:
        """Apply a fixed level threshold to each pixel.
        dst(x, y) = maxval if src(x, y) > thresh_val else 0
        thresh_val is set from the image histogram using Otsu's binarisation, assuming the image
        histogram is bimodal.
        It is recommended to blur the image before binarisation.
        """
        if thresh_val == -1:
            cv2.threshold(self.image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU, dst=self.image)
        else:
            cv2.threshold(self.image, thresh_val, 255, cv2.THRESH_BINARY, dst=self.image)
    def gaussian_blur(self, kernel_size: int) -> None:
        """Convolve the image with a zero mean gaussian kernel."""
        cv2.GaussianBlur(self.image, (kernel_size, kernel_size), 0, dst=self.image)
    def blur(self, kernel_size: int) -> None:
        """Blur the image using a normalised box filter."""
        cv2.blur(self.image, (kernel_size, kernel_size), dst=self.image)
    def morph(self, transform: int, kernel_size: tp.Tuple[int, int], iterations: int):
        """Apply a geometric transform to the image.
        NB! The foregprund should be white!
        Valid transforms include:
         - cv2.MORPH_ERODE
         - cv2.MORPH_OPEN
         - cv2.MORPH_CLOSE
         - cv2.MORPH_DILATE
        """
        kernel = np.ones(kernel_size)
        cv2.morphologyEx(
            self.image,
            transform,
            kernel=kernel,
            iterations=iterations,
            dst=self.image
        )
    def invert(self, max_value=255) -> None:
        """Invert a binary greyscale image."""
        # Relies on uint8 wraparound: for the default max_value=255 this
        # maps each pixel v to 255 - v in place.
        self.image *= np.uint8(-1)
        self.image += np.uint8(max_value)
    def equalize_hist(self) -> None:
        """Equalise the greyscale histogram to improve contrast."""
        cv2.equalizeHist(self.image, dst=self.image)
    def set_axis(self, axis: tp.Tuple[np.ndarray, np.ndarray]) -> None:
        # Stores the (x_axis, y_axis) Line pair used by draw().
        self.axis = axis
    def set_scale(self, scale: float) -> None:
        # Pixels-per-unit factor used when drawing the axis bounds.
        self.scale = scale
    def draw(
        self,
        features: tp.Sequence[np.ndarray],
        draw_axis: bool = True,
        show: bool = True,
        lw: int = 1
    ) -> None:
        """Draw the image with overlaid contours."""
        color_iterator = itertools.cycle(Colors)
        image_draw = self.copy_image()
        if len(image_draw.shape) < 3:
            image_draw = grayscale_to_color(image_draw)
        # NOTE(review): the axes are drawn only when draw_axis is False —
        # the condition looks inverted relative to the flag's name; confirm.
        if self.axis and not draw_axis:
            color = next(color_iterator)
            x_axis, y_axis = self.axis
            pt0, pt1 = x_axis.get_line_segment(image_draw)
            cv2.line(image_draw, pt0, pt1, color.bgr, lw)
            pt0, pt1 = y_axis.get_line_segment(image_draw)
            cv2.line(image_draw, pt0, pt1, color.bgr, lw)
            # NOTE(review): resample_x_max / resample_y_max are never defined
            # in this class — they must be set externally or this raises
            # AttributeError. Verify against the calling code.
            pt0, pt1 = (y_axis -self.scale * self.resample_x_max).get_line_segment(image_draw)
            cv2.line(image_draw, pt0, pt1, color.bgr, lw)
            pt0, pt1 = (x_axis -self.scale * self.resample_y_max).get_line_segment(image_draw)
            cv2.line(image_draw, pt0, pt1, color.bgr, lw)
            # Mark the image's first-order moment (centre of mass).
            x0, y0 = get_image_moment(self.image, order=1)
            # NOTE(review): color.bgr is used as an attribute in cv2.line
            # above but called as color.bgr() here — one form is likely
            # wrong; check the Colors enum.
            cv2.circle(image_draw, (int(x0), int(y0)), 5, color.bgr())
            cv2.circle(image_draw, (int(x0), int(y0)), 25, color.bgr())
        color = next(color_iterator)
        # Negative contour index: all contours in `features` are drawn.
        image_draw = cv2.drawContours(image_draw, features, -2, color.bgr(), lw)
        if show:
            cv2.imshow("Image", image_draw)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        return image_draw
def read_image(filepath: Path) -> Image:
    """Load the file at *filepath* with OpenCV and wrap it in an Image."""
    if not filepath.exists():
        raise FileNotFoundError(filepath)
    return Image(cv2.imread(str(filepath)))
def save_image(filepath: Path, image: Image):
    """Write ``image.image`` to *filepath*; raise on encoder failure."""
    if not cv2.imwrite(str(filepath.resolve()), image.image):
        raise IOError("Failed to save image")
def dump_image(filepath: Path, image: Image):
    """Serialise *image* to *filepath* with pickle."""
    with filepath.open("wb") as sink:
        pickle.dump(image, sink)
def load_image(filepath: Path) -> Image:
    """Deserialise an Image previously written by :func:`dump_image`."""
    if not filepath.exists():
        raise FileNotFoundError(filepath)
    with filepath.open("rb") as source:
        return pickle.load(source)
if __name__ == "__main__":
    # Ad-hoc usage example / smoke test.
    filepath = Path("data/scan1.png")
    reader = read_image(filepath)
    # NOTE(review): Image defines no ``do_stuff`` method in this file, so
    # this line would raise AttributeError if executed — looks stale; verify.
    values = reader.do_stuff()
| expertanalytics/digeeg | src/dgimage/image.py | image.py | py | 5,793 | python | en | code | 0 | github-code | 13 |
34810975301 | import glob
import json
import os
import re
import sys
HEADING1 = "\n# "
HEADING2 = "\n## "
LINK_RE = re.compile("(\\[)([^\\[]*)(\\])(\\()([^\\)]*)(\\))")
def main(target_dir):
# Iterate over files
local_dir = os.path.dirname(__file__)
for fname in glob.glob(os.path.join(local_dir, "*.md")):
if "readme.md" == fname:
continue
# Output as a dictionary
language_dict = dict()
fp = os.path.join(local_dir, fname)
# Load file
with open(fp) as f:
contents = "\n" + f.read(-1)
pages = contents.split(HEADING1)
assert "" == pages[0], "first page should be empty: %s, %s" % (fname, pages[0])
for page in pages[1:]:
page_sections = page.split(HEADING2)
page_title = page_sections[0].strip()
page_dict = dict()
for page_section in page_sections[1:]:
section_title_ending = page_section.find("\n")
section_title = page_section[:section_title_ending]
section_content = page_section[section_title_ending:].strip()
assert section_title not in page_dict, "Duplicated heading: %s" % section_title
# Verify no tags in the content
assert "<" not in section_content
assert ">" not in section_content
# Convert links to a tags with regular expressions
last_span = 0
section_content_array = []
for m in LINK_RE.finditer(section_content):
span = m.span()
section_content_array.append(section_content[last_span:span[0]])
section_content_array.append('<a href="%s">%s</a>' % (m.group(5), m.group(2)))
last_span = span[1]
section_content_array.append(section_content[last_span:])
new_content = "".join(section_content_array).strip()
# Convert new lines to paragraphs
if "\n\n" in new_content:
new_content = "<p>" + new_content.replace("\n\n", "</p><p>") + "</p>"
new_content = new_content.replace("\n", " ")
page_dict[section_title] = new_content
language_dict[page_title] = page_dict
# Save to disk
target_fp = fname.replace(local_dir, target_dir).replace("//", "/").replace(".md", ".json")
with open(target_fp, "w+") as f:
json.dump(language_dict, f, indent=2, separators=(",", ":"))
print("Wrote to %s" % target_fp)
if "__main__" == __name__:
target_dir = sys.argv[1]
main(target_dir)
| miguelmorin/business | content/sync.py | sync.py | py | 2,703 | python | en | code | 0 | github-code | 13 |
70524679057 | import connector as DB
dbCur = DB.Connection.cursor()
def dbExec(sql, params=None):
    """Execute *sql* (optionally with bound *params*) and commit.

    Generalised to accept a parameter tuple so values can be bound by the
    driver instead of interpolated into the SQL string, which prevents
    SQL injection. Calling ``dbExec(sql)`` works exactly as before.
    """
    if params is None:
        dbCur.execute(sql)
    else:
        dbCur.execute(sql, params)
    DB.Connection.commit()
# STUDENT
def newAddress(AddressID, UnitNo, Street, Brgy, City, ZIP):
    """Insert a new ADDRESS row.

    Security fix: values are bound by the driver instead of being
    f-string-interpolated into the SQL, so hostile input cannot inject SQL.
    NOTE(review): placeholder style assumed to be ``%s`` (MySQL-style
    connector); switch to ``?`` if the underlying driver is sqlite3.
    """
    sql = ("INSERT INTO ADDRESS(AddressID, UnitNo, StreetName, Brgy, City, ZipCode) "
           "VALUES(%s, %s, %s, %s, %s, %s)")
    dbCur.execute(sql, (AddressID, UnitNo, Street, Brgy, City, ZIP))
    DB.Connection.commit()
def updateAddress(AddressID, UnitNo, Street, Brgy, City, ZIP):
    """Update every column of the ADDRESS row identified by *AddressID*.

    Security fix: parameterized query instead of f-string interpolation.
    NOTE(review): placeholder style assumed to be ``%s`` (MySQL-style
    connector); switch to ``?`` if the underlying driver is sqlite3.
    """
    sql = ("UPDATE ADDRESS SET UnitNo=%s, StreetName=%s, Brgy=%s, City=%s, "
           "ZipCode=%s WHERE AddressID=%s")
    dbCur.execute(sql, (UnitNo, Street, Brgy, City, ZIP, AddressID))
    DB.Connection.commit()
def deleteAddress(AddressID):
    """Delete the ADDRESS row identified by *AddressID*.

    Security fix: parameterized query instead of f-string interpolation.
    NOTE(review): ``%s`` paramstyle assumed; use ``?`` for sqlite3.
    """
    dbCur.execute("DELETE FROM ADDRESS WHERE AddressID=%s", (AddressID,))
    DB.Connection.commit()
def getAddress(AddressID):
    """Fetch and print every ADDRESS row matching *AddressID*.

    Security fix: parameterized query instead of f-string interpolation.
    NOTE(review): ``%s`` paramstyle assumed; use ``?`` for sqlite3.
    """
    dbCur.execute("SELECT * FROM ADDRESS WHERE AddressID=%s", (AddressID,))
    users = dbCur.fetchall()
    for user in users:
        # Columns in table order: id, unit, street, brgy, city, zip.
        _AddressID = user[0]
        _UnitNo = user[1]
        _StreetNo = user[2]
        _Brgy = user[3]
        _City = user[4]
        _ZipCode = user[5]
        print(_AddressID, _UnitNo, _StreetNo, _Brgy, _City, _ZipCode)
# Ad-hoc manual exercises for the CRUD helpers above.
# newAddress('A-2018-00137', '1', '1', '1', '1', '1')
# updateAddress('A-2018-00137', '2', '2', '2', '2', '2')
# deleteAddress('A-2018-00137')
getAddress('A-2018-00137')  # demo lookup; runs on import as well as on exec
| DzhonPetrus/SanctionManagementSystem | DB/Address.py | Address.py | py | 1,287 | python | en | code | 0 | github-code | 13 |
17053524224 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InvoiceItemQueryOpenModel import InvoiceItemQueryOpenModel
from alipay.aop.api.domain.InvoiceTradeFundItem import InvoiceTradeFundItem
from alipay.aop.api.domain.InvoiceTradeGoodsItem import InvoiceTradeGoodsItem
class InvoiceTradeInfo(object):
    """Generated Alipay SDK model for an invoiceable trade record.

    Plain property-per-field container. The three list-valued fields
    (``invoice_content``, ``trade_fund_list``, ``trade_goods_list``) have
    setters that convert plain dict elements into their model classes.
    NOTE(review): those setters only assign when given a ``list`` — any
    non-list value is silently ignored; confirm that is intended.
    """
    def __init__(self):
        self._alipay_trade_no = None
        self._create_trade_date = None
        self._einv_trade_id = None
        self._goods_name = None
        self._invoice_content = None
        self._m_name = None
        self._m_short_name = None
        self._merchant_id = None
        self._open_id = None
        self._out_biz_no = None
        self._payment_trade_date = None
        self._real_amount = None
        self._sub_m_name = None
        self._sub_m_short_name = None
        self._trade_amount = None
        self._trade_fund_list = None
        self._trade_goods_list = None
        self._trade_no = None
        self._user_id = None
    @property
    def alipay_trade_no(self):
        return self._alipay_trade_no
    @alipay_trade_no.setter
    def alipay_trade_no(self, value):
        self._alipay_trade_no = value
    @property
    def create_trade_date(self):
        return self._create_trade_date
    @create_trade_date.setter
    def create_trade_date(self, value):
        self._create_trade_date = value
    @property
    def einv_trade_id(self):
        return self._einv_trade_id
    @einv_trade_id.setter
    def einv_trade_id(self, value):
        self._einv_trade_id = value
    @property
    def goods_name(self):
        return self._goods_name
    @goods_name.setter
    def goods_name(self, value):
        self._goods_name = value
    @property
    def invoice_content(self):
        return self._invoice_content
    # List-valued: dict elements are coerced to InvoiceItemQueryOpenModel.
    @invoice_content.setter
    def invoice_content(self, value):
        if isinstance(value, list):
            self._invoice_content = list()
            for i in value:
                if isinstance(i, InvoiceItemQueryOpenModel):
                    self._invoice_content.append(i)
                else:
                    self._invoice_content.append(InvoiceItemQueryOpenModel.from_alipay_dict(i))
    @property
    def m_name(self):
        return self._m_name
    @m_name.setter
    def m_name(self, value):
        self._m_name = value
    @property
    def m_short_name(self):
        return self._m_short_name
    @m_short_name.setter
    def m_short_name(self, value):
        self._m_short_name = value
    @property
    def merchant_id(self):
        return self._merchant_id
    @merchant_id.setter
    def merchant_id(self, value):
        self._merchant_id = value
    @property
    def open_id(self):
        return self._open_id
    @open_id.setter
    def open_id(self, value):
        self._open_id = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def payment_trade_date(self):
        return self._payment_trade_date
    @payment_trade_date.setter
    def payment_trade_date(self, value):
        self._payment_trade_date = value
    @property
    def real_amount(self):
        return self._real_amount
    @real_amount.setter
    def real_amount(self, value):
        self._real_amount = value
    @property
    def sub_m_name(self):
        return self._sub_m_name
    @sub_m_name.setter
    def sub_m_name(self, value):
        self._sub_m_name = value
    @property
    def sub_m_short_name(self):
        return self._sub_m_short_name
    @sub_m_short_name.setter
    def sub_m_short_name(self, value):
        self._sub_m_short_name = value
    @property
    def trade_amount(self):
        return self._trade_amount
    @trade_amount.setter
    def trade_amount(self, value):
        self._trade_amount = value
    @property
    def trade_fund_list(self):
        return self._trade_fund_list
    # List-valued: dict elements are coerced to InvoiceTradeFundItem.
    @trade_fund_list.setter
    def trade_fund_list(self, value):
        if isinstance(value, list):
            self._trade_fund_list = list()
            for i in value:
                if isinstance(i, InvoiceTradeFundItem):
                    self._trade_fund_list.append(i)
                else:
                    self._trade_fund_list.append(InvoiceTradeFundItem.from_alipay_dict(i))
    @property
    def trade_goods_list(self):
        return self._trade_goods_list
    # List-valued: dict elements are coerced to InvoiceTradeGoodsItem.
    @trade_goods_list.setter
    def trade_goods_list(self, value):
        if isinstance(value, list):
            self._trade_goods_list = list()
            for i in value:
                if isinstance(i, InvoiceTradeGoodsItem):
                    self._trade_goods_list.append(i)
                else:
                    self._trade_goods_list.append(InvoiceTradeGoodsItem.from_alipay_dict(i))
    @property
    def trade_no(self):
        return self._trade_no
    @trade_no.setter
    def trade_no(self, value):
        self._trade_no = value
    @property
    def user_id(self):
        return self._user_id
    @user_id.setter
    def user_id(self, value):
        self._user_id = value
    def to_alipay_dict(self):
        """Serialise every set field into a plain dict for the gateway.

        NOTE(review): for list fields, elements are converted via their own
        ``to_alipay_dict`` *in place*, mutating this instance's lists as a
        side effect of serialisation.
        """
        params = dict()
        if self.alipay_trade_no:
            if hasattr(self.alipay_trade_no, 'to_alipay_dict'):
                params['alipay_trade_no'] = self.alipay_trade_no.to_alipay_dict()
            else:
                params['alipay_trade_no'] = self.alipay_trade_no
        if self.create_trade_date:
            if hasattr(self.create_trade_date, 'to_alipay_dict'):
                params['create_trade_date'] = self.create_trade_date.to_alipay_dict()
            else:
                params['create_trade_date'] = self.create_trade_date
        if self.einv_trade_id:
            if hasattr(self.einv_trade_id, 'to_alipay_dict'):
                params['einv_trade_id'] = self.einv_trade_id.to_alipay_dict()
            else:
                params['einv_trade_id'] = self.einv_trade_id
        if self.goods_name:
            if hasattr(self.goods_name, 'to_alipay_dict'):
                params['goods_name'] = self.goods_name.to_alipay_dict()
            else:
                params['goods_name'] = self.goods_name
        if self.invoice_content:
            if isinstance(self.invoice_content, list):
                for i in range(0, len(self.invoice_content)):
                    element = self.invoice_content[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.invoice_content[i] = element.to_alipay_dict()
            if hasattr(self.invoice_content, 'to_alipay_dict'):
                params['invoice_content'] = self.invoice_content.to_alipay_dict()
            else:
                params['invoice_content'] = self.invoice_content
        if self.m_name:
            if hasattr(self.m_name, 'to_alipay_dict'):
                params['m_name'] = self.m_name.to_alipay_dict()
            else:
                params['m_name'] = self.m_name
        if self.m_short_name:
            if hasattr(self.m_short_name, 'to_alipay_dict'):
                params['m_short_name'] = self.m_short_name.to_alipay_dict()
            else:
                params['m_short_name'] = self.m_short_name
        if self.merchant_id:
            if hasattr(self.merchant_id, 'to_alipay_dict'):
                params['merchant_id'] = self.merchant_id.to_alipay_dict()
            else:
                params['merchant_id'] = self.merchant_id
        if self.open_id:
            if hasattr(self.open_id, 'to_alipay_dict'):
                params['open_id'] = self.open_id.to_alipay_dict()
            else:
                params['open_id'] = self.open_id
        if self.out_biz_no:
            if hasattr(self.out_biz_no, 'to_alipay_dict'):
                params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
            else:
                params['out_biz_no'] = self.out_biz_no
        if self.payment_trade_date:
            if hasattr(self.payment_trade_date, 'to_alipay_dict'):
                params['payment_trade_date'] = self.payment_trade_date.to_alipay_dict()
            else:
                params['payment_trade_date'] = self.payment_trade_date
        if self.real_amount:
            if hasattr(self.real_amount, 'to_alipay_dict'):
                params['real_amount'] = self.real_amount.to_alipay_dict()
            else:
                params['real_amount'] = self.real_amount
        if self.sub_m_name:
            if hasattr(self.sub_m_name, 'to_alipay_dict'):
                params['sub_m_name'] = self.sub_m_name.to_alipay_dict()
            else:
                params['sub_m_name'] = self.sub_m_name
        if self.sub_m_short_name:
            if hasattr(self.sub_m_short_name, 'to_alipay_dict'):
                params['sub_m_short_name'] = self.sub_m_short_name.to_alipay_dict()
            else:
                params['sub_m_short_name'] = self.sub_m_short_name
        if self.trade_amount:
            if hasattr(self.trade_amount, 'to_alipay_dict'):
                params['trade_amount'] = self.trade_amount.to_alipay_dict()
            else:
                params['trade_amount'] = self.trade_amount
        if self.trade_fund_list:
            if isinstance(self.trade_fund_list, list):
                for i in range(0, len(self.trade_fund_list)):
                    element = self.trade_fund_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.trade_fund_list[i] = element.to_alipay_dict()
            if hasattr(self.trade_fund_list, 'to_alipay_dict'):
                params['trade_fund_list'] = self.trade_fund_list.to_alipay_dict()
            else:
                params['trade_fund_list'] = self.trade_fund_list
        if self.trade_goods_list:
            if isinstance(self.trade_goods_list, list):
                for i in range(0, len(self.trade_goods_list)):
                    element = self.trade_goods_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.trade_goods_list[i] = element.to_alipay_dict()
            if hasattr(self.trade_goods_list, 'to_alipay_dict'):
                params['trade_goods_list'] = self.trade_goods_list.to_alipay_dict()
            else:
                params['trade_goods_list'] = self.trade_goods_list
        if self.trade_no:
            if hasattr(self.trade_no, 'to_alipay_dict'):
                params['trade_no'] = self.trade_no.to_alipay_dict()
            else:
                params['trade_no'] = self.trade_no
        if self.user_id:
            if hasattr(self.user_id, 'to_alipay_dict'):
                params['user_id'] = self.user_id.to_alipay_dict()
            else:
                params['user_id'] = self.user_id
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build an InvoiceTradeInfo from a response dict.

        List fields are assigned through their property setters, which
        convert plain dict elements into the matching model classes.
        """
        if not d:
            return None
        o = InvoiceTradeInfo()
        if 'alipay_trade_no' in d:
            o.alipay_trade_no = d['alipay_trade_no']
        if 'create_trade_date' in d:
            o.create_trade_date = d['create_trade_date']
        if 'einv_trade_id' in d:
            o.einv_trade_id = d['einv_trade_id']
        if 'goods_name' in d:
            o.goods_name = d['goods_name']
        if 'invoice_content' in d:
            o.invoice_content = d['invoice_content']
        if 'm_name' in d:
            o.m_name = d['m_name']
        if 'm_short_name' in d:
            o.m_short_name = d['m_short_name']
        if 'merchant_id' in d:
            o.merchant_id = d['merchant_id']
        if 'open_id' in d:
            o.open_id = d['open_id']
        if 'out_biz_no' in d:
            o.out_biz_no = d['out_biz_no']
        if 'payment_trade_date' in d:
            o.payment_trade_date = d['payment_trade_date']
        if 'real_amount' in d:
            o.real_amount = d['real_amount']
        if 'sub_m_name' in d:
            o.sub_m_name = d['sub_m_name']
        if 'sub_m_short_name' in d:
            o.sub_m_short_name = d['sub_m_short_name']
        if 'trade_amount' in d:
            o.trade_amount = d['trade_amount']
        if 'trade_fund_list' in d:
            o.trade_fund_list = d['trade_fund_list']
        if 'trade_goods_list' in d:
            o.trade_goods_list = d['trade_goods_list']
        if 'trade_no' in d:
            o.trade_no = d['trade_no']
        if 'user_id' in d:
            o.user_id = d['user_id']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InvoiceTradeInfo.py | InvoiceTradeInfo.py | py | 12,499 | python | en | code | 241 | github-code | 13 |
30138741392 | import unittest
import os
from PIL import Image
from werkzeug.datastructures import FileStorage
from zou.app import app
from zou.app.utils import thumbnail, fs
TEST_FOLDER = os.path.join("tests", "tmp")
class ThumbnailTestCase(unittest.TestCase):
    """Tests for zou.app.utils.thumbnail against fixture images.

    Every test copies a fixture PNG/JPG into a scratch folder under
    tests/tmp, runs a thumbnail helper on it, and inspects the result
    with PIL. setUp/tearDown create and remove the scratch folders.
    """
    def get_fixture_file_path(self, relative_path):
        """Return the absolute path of a fixture under tests/fixtures."""
        current_path = os.getcwd()
        file_path_fixture = os.path.join(
            current_path, "tests", "fixtures", relative_path
        )
        return file_path_fixture
    def setUp(self):
        """Create the scratch folder used by the tests."""
        super(ThumbnailTestCase, self).setUp()
        fs.mkdir_p(TEST_FOLDER)
        self.folder_name = os.path.join(TEST_FOLDER, "persons")
    def tearDown(self):
        """Remove every folder a test may have written to."""
        super(ThumbnailTestCase, self).tearDown()
        fs.rm_rf(self.folder_name)
        fs.rm_rf(TEST_FOLDER)
        fs.rm_rf(app.config["PREVIEW_FOLDER"])
    def test_turn_into_thumbnail(self):
        """Resizing keeps aspect ratio, then fits the requested size."""
        file_path_fixture = self.get_fixture_file_path("thumbnails/th01.png")
        full_path = os.path.join(
            TEST_FOLDER, thumbnail.get_file_name("instance-id")
        )
        fs.copyfile(file_path_fixture, full_path)
        thumbnail.turn_into_thumbnail(full_path)
        im = Image.open(full_path)
        (width, height) = im.size
        self.assertEqual(width, 180)
        self.assertEqual(height, 101)
        thumbnail.turn_into_thumbnail(full_path, thumbnail.RECTANGLE_SIZE)
        im = Image.open(full_path)
        (width, height) = im.size
        self.assertEqual(width, 150)
        self.assertEqual(height, 100)
    def test_convert_jpg_to_png(self):
        """JPG conversion writes a .png next to the source, metadata-free."""
        file_path_fixture = self.get_fixture_file_path("thumbnails/th04.jpg")
        file_name = "th04.jpg"
        file_path = os.path.join(TEST_FOLDER, file_name)
        fs.copyfile(file_path_fixture, file_path)
        im = Image.open(file_path)
        thumbnail.convert_jpg_to_png(file_path)
        result_path = os.path.join(TEST_FOLDER, "th04.png")
        im = Image.open(result_path)
        self.assertEqual(len(im.info.keys()), 0)
        self.assertTrue(os.path.exists(result_path))
    def test_save_file(self):
        """save_file stores an uploaded FileStorage; result can be resized."""
        file_path_fixture = self.get_fixture_file_path("thumbnails/th01.png")
        th_file = FileStorage(
            stream=open(file_path_fixture, "rb"), filename="th01.png"
        )
        full_path = thumbnail.save_file(TEST_FOLDER, "instance-id", th_file)
        thumbnail.turn_into_thumbnail(full_path, thumbnail.RECTANGLE_SIZE)
        im = Image.open(full_path)
        (width, height) = im.size
        self.assertEqual(width, 150)
        self.assertEqual(height, 100)
    def test_url_path(self):
        """url_path builds the public path; underscores become dashes."""
        url_path = thumbnail.url_path("shots", "instance-id")
        self.assertEqual(url_path, "pictures/thumbnails/shots/instance-id.png")
        url_path = thumbnail.url_path("working_files", "instance-id")
        self.assertEqual(
            url_path, "pictures/thumbnails/working-files/instance-id.png"
        )
    def test_flat(self):
        """flat truncates each float of a tuple to int."""
        flatten_tupple = thumbnail.flat(1.2, 3.1, 4.2)
        self.assertEqual(flatten_tupple, (1, 3, 4))
    def test_get_full_size_from_width(self):
        """Height is derived from the target width keeping aspect ratio."""
        file_path_fixture = self.get_fixture_file_path("thumbnails/th01.png")
        im = Image.open(file_path_fixture)
        size = thumbnail.get_full_size_from_width(im, 1200)
        self.assertEqual(size, (1200, 674))
    def test_prepare_image_for_thumbnail(self):
        """Images are cropped to the target ratio before resizing."""
        file_path_fixture = self.get_fixture_file_path("thumbnails/th01.png")
        im = Image.open(file_path_fixture)
        im = thumbnail.prepare_image_for_thumbnail(im, thumbnail.SQUARE_SIZE)
        self.assertEqual(im.size, (101, 101))
        file_path_fixture = self.get_fixture_file_path("thumbnails/th02.png")
        im = Image.open(file_path_fixture)
        im = thumbnail.prepare_image_for_thumbnail(
            im, thumbnail.RECTANGLE_SIZE
        )
        self.assertEqual(im.size, (152, 101))
        file_path_fixture = self.get_fixture_file_path("thumbnails/th03.png")
        im = Image.open(file_path_fixture)
        im = thumbnail.prepare_image_for_thumbnail(
            im, thumbnail.RECTANGLE_SIZE
        )
        self.assertEqual(im.size, (180, 120))
    def test_generate_preview_variants(self):
        """One original yields preview, rectangle and square variants."""
        preview_id = "123413-12312"
        file_path_fixture = self.get_fixture_file_path("thumbnails/th01.png")
        file_name = thumbnail.get_file_name(preview_id)
        original_path = os.path.join(TEST_FOLDER, file_name)
        fs.copyfile(file_path_fixture, original_path)
        thumbnail.generate_preview_variants(original_path, preview_id)
        file_path = os.path.join(TEST_FOLDER, "previews-%s.png" % preview_id)
        self.assertTrue(os.path.exists(file_path))
        self.assertTrue(Image.open(file_path).size, thumbnail.PREVIEW_SIZE)
        file_path = os.path.join(TEST_FOLDER, "thumbnails-%s.png" % preview_id)
        self.assertTrue(os.path.exists(file_path))
        self.assertTrue(Image.open(file_path).size, thumbnail.RECTANGLE_SIZE)
        file_path = os.path.join(
            TEST_FOLDER, "thumbnails-square-%s.png" % preview_id
        )
        self.assertTrue(os.path.exists(file_path))
        self.assertTrue(Image.open(file_path).size, thumbnail.SQUARE_SIZE)
| cgwire/zou | tests/utils/test_thumbnail.py | test_thumbnail.py | py | 5,232 | python | en | code | 152 | github-code | 13 |
19879049113 | #PyBank#
# PyBank: summarise monthly budget records (Date, Profit/Losses) from a CSV.
# Import libraries and dependencies
import csv
import pandas as pd
import numpy as np

# Paths for the input CSV and the text report.
file_path_input = ("budget_data.csv")
file_to_output = ("analysis.data.txt")

Budget_df = pd.read_csv(file_path_input, parse_dates=True)

# Number of monthly records in the CSV.
Months = Budget_df["Date"].count()

# Total amount of money captured in the data, formatted as currency.
Total_Funds = '${:.2f}'.format(Budget_df["Profit/Losses"].sum())

# Month-over-month change; the first row is NaN (no previous month).
Budget_df['Amount Changed'] = Budget_df["Profit/Losses"].diff()
# Bug fix: mean over all non-NaN changes instead of the hard-coded [1:86]
# slice, which silently gave wrong results for any dataset not 86 rows long.
AvgChange = '${:.2f}'.format(Budget_df['Amount Changed'].mean())

# Bug fix: the greatest increase/decrease dates must come from the extremes
# of 'Amount Changed'; sorting by 'Profit/Losses' reported the dates of the
# highest/lowest balances instead.
Greatest_Increase = '${:.2f}'.format(Budget_df["Amount Changed"].max())
Greatest_Increase_Date = Budget_df.loc[Budget_df['Amount Changed'].idxmax(), 'Date']
Greatest_Decrease = '${:.2f}'.format(Budget_df["Amount Changed"].min())
Greatest_Decrease_Date = Budget_df.loc[Budget_df['Amount Changed'].idxmin(), 'Date']

print("Financial Analysis")
print("----------------------------")
print("Total Months: %s" % (Months))
print("Total: %s" % (Total_Funds))
print("Average Change: %s" % (AvgChange))
print("Greatest Increase in Profits: %s %s" % (Greatest_Increase_Date, Greatest_Increase))
print("Greatest Decrease in Profits: %s %s" % (Greatest_Decrease_Date, Greatest_Decrease))

# Export the same report to the text file (formatting now matches stdout).
with open(file_to_output, "w") as txt_file:
    txt_file.write("Financial Analysis\n")
    txt_file.write("----------------------------\n")
    txt_file.write(f"Total Months: {Months}\n")
    txt_file.write(f"Total: {Total_Funds}\n")
    txt_file.write(f"Average Change: {AvgChange}\n")
    txt_file.write(f"Greatest Increase in Profits: {Greatest_Increase_Date} {Greatest_Increase}\n")
    txt_file.write(f"Greatest Decrease in Profits: {Greatest_Decrease_Date} {Greatest_Decrease}\n")
| KeepItOnTheDownload/PyBank | Pybank-Pandas.py | Pybank-Pandas.py | py | 2,142 | python | en | code | 1 | github-code | 13 |
43611299736 | import streamlit as st
import functions
# The order of functions, commands etc. in a webapp matters.
# The script will be executed from top to bottom.
todos = functions.get_todos()
def add_todo():
    """Append the text-input contents as a new todo and persist the list."""
    new_item = st.session_state["new_todo"] + "\n"
    todos.append(new_item)
    functions.write_todos(todos)
st.title("My Todo App")
st.subheader("This is my todo app.")
st.write("This app is for increasing your productivity.")
# st.balloons()
for index, todo in enumerate(todos):
checkbox = st.checkbox(todo, key=todo)
if checkbox:
todos.pop(index)
functions.write_todos(todos)
del st.session_state[todo]
st.experimental_rerun() # needed for checkboxes
st.text_input(label="Enter a todo:", placeholder="Add new todo",
on_change=add_todo, key='new_todo')
| Henkel204/my-todo-app | web.py | web.py | py | 808 | python | en | code | 0 | github-code | 13 |
1969198501 | # import concurency
import time
from concurrent.futures.process import ProcessPoolExecutor
import numpy as np
from tqdm import tqdm
def predict(input):
    """Simulate a slow model call: pause briefly, then reduce the matrix.

    Returns sum(input * input.T); *input* must therefore be square.
    (The parameter name shadows the builtin but is kept for compatibility.)
    """
    time.sleep(.05)
    product = input * input.T
    return np.sum(product)
def single_process(in_dataset):
    """Run predict over the dataset sequentially, with a progress bar."""
    return [predict(sample) for sample in tqdm(in_dataset)]
def multi_process(in_dataset, num_workers=5):
    """Run predict over the dataset with a pool of worker processes."""
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        return list(executor.map(predict, in_dataset))
def main():
    """Benchmark single- vs multi-process inference on random matrices."""
    dataset = [np.random.random((228, 228)) for _ in range(100)]

    started = time.time()
    single_process(dataset)
    tm = time.time() - started
    print(f"Single process evaluation takes {tm*1_000:.2f} ms")

    # Sweep worker counts to show where the pool overhead pays off.
    for num_w in range(1, 15):
        started = time.time()
        multi_process(dataset, num_w)
        tm = time.time() - started
        print(f"Multiple process ({num_w} workers) evaluation takes {tm*1_000:.2f} ms")
if __name__=="__main__":
main()
| VadyusikhLTD/prjctr-ML-in-Prod | week2/multiple-process-inference/multiple_process_inference.py | multiple_process_inference.py | py | 1,065 | python | en | code | 0 | github-code | 13 |
11408344026 | from http.client import HTTPResponse
from multiprocessing import context
from django.shortcuts import render, redirect
from django.http import HttpResponse
from pages.models import Ticket
from pages.forms import TicketForm, StatusForm
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'base.html')
def raiseticket(request):
    """Create a ticket from the submitted form and show its reference.

    GET renders an empty form; a valid POST saves the ticket, sends the
    notification, and shows the success page for the newest ticket; an
    invalid POST re-renders the form with its errors.
    """
    if request.method != 'POST':
        return render(request, 'raiseticket.html', {'form': TicketForm()})
    form = TicketForm(request.POST)
    if form.is_valid():
        ticket = form.save()
        form.send()
        tkt = Ticket.objects.latest('timestamp')
        return render(request, 'success.html', {'tkt': tkt})
    return render(request, 'raiseticket.html', {'form': form})
def viewticket(request):
    """Ask for a ticket id and redirect to that ticket's status page."""
    if request.method == 'POST':
        form = StatusForm(request.POST)
        if form.is_valid():
            return redirect('viewstatus', numid=request.POST.get('ticketid'))
    else:
        form = StatusForm()
    # GET, or an invalid POST (re-render with errors).
    return render(request, 'viewticket.html', {'form': form})
def viewstatus(request,numid):
    """Render the status page for one ticket.

    NOTE(review): numid is taken modulo 1000 before the primary-key lookup —
    presumably public ticket numbers are the db id plus an offset; confirm
    against how ticket numbers are generated.
    """
    num = int(numid)%1000
    status = Ticket.objects.get(id=num)
return render(request,'viewstatus.html',{'status': status}) | seepanas10/helpdesk-django | pages/views.py | views.py | py | 1,267 | python | en | code | 0 | github-code | 13 |
9070569930 |
# Of 9 candidate dwarfs, exactly 7 real ones have heights summing to 100.
# Find the two impostors, remove them, and print the rest sorted.
result_sum = 100
nums = []
nanjange_count = 9
for i in range(nanjange_count):
    nums.append(int(input()))
sum_heights = sum(nums)

# Bug fix: the original kept scanning after removing the matching pair,
# so the loop indexed past the shortened list and raised IndexError (and
# could remove a second pair). Stop at the first match instead.
fake1 = fake2 = None
for i in range(nanjange_count - 1):
    for j in range(i + 1, nanjange_count):
        if sum_heights - nums[i] - nums[j] == result_sum:
            fake1, fake2 = nums[i], nums[j]
            break
    if fake1 is not None:
        break

nums.remove(fake1)
nums.remove(fake2)
nums.sort()
for i in nums:
    print(i)
'''
1. 난장이 리스트를 2중포문을 돌아주는것이다ㅣ
2. 첫 포문은 기준 값을 잡고
3. 둘째 포문은 기준값 다음값부터 값을 잡아가면서
4. 첫 포문, 둘째 포문이 잡은 값을 입력받은 난쟁이 값에서 다 뺌
5. 여기서 100을 찾음
''' | mins1031/coding-test | baekjoon/CompleteSearch/SevenNanjange_2309.py | SevenNanjange_2309.py | py | 717 | python | ko | code | 0 | github-code | 13 |
8954248870 | import pickle
import requests
import streamlit as st
from pydantic import BaseModel
DATASET_INFO_PATH = "categorical_features_dict.pkl"
st.title("Car prediction app")
# when running from outside docker, replace api:8080 with localhost:8080
ENDPOINT_URL= f'http://api:8080/api/predict'
class CarInformationRequest(BaseModel):
    """Payload for POST /api/predict; field names must match the backend schema."""

    year: int  # model year of the car
    odometer: int  # mileage reading
    posting_date: str  # date the listing was posted, "YYYY-MM-DD"
    # The remaining fields are categorical choices loaded from the
    # dataset-info pickle (see DATASET_INFO_PATH).
    manufacturer: str
    condition: str
    cylinders: str
    fuel: str
    title_status: str
    transmission: str
    drive: str
    type: str  # body type; named `type` to match the dataset column
    paint_color: str
def send_to_backend(car_info):
    """POST the car description to the prediction API and return the price.

    Parameters
    ----------
    car_info : CarInformationRequest
        Validated form data, serialised to JSON as the request body.

    Returns
    -------
    The predicted price on success, otherwise shows a Streamlit error
    message and returns None.
    """
    car_info_json = car_info.json()
    try:
        # Bug fix: a timeout keeps a hung backend from blocking the
        # Streamlit worker forever; connection failures are surfaced to the
        # user instead of crashing the page. (Stray debug print removed.)
        response = requests.post(ENDPOINT_URL, data=car_info_json, timeout=30)
    except requests.RequestException:
        st.error("Error occurred during prediction.")
        return None
    if response.status_code == 200:
        prediction = response.json()
        return prediction["car_price"]
    st.error("Error occurred during prediction.")
    return None
def main():
    """Render the car-description form and show the predicted price.

    Loads the categorical feature choices from the pickled dataset info,
    lays the inputs out in two columns, and on submit sends the data to
    the backend API.
    """
    # (Dead `= None` pre-assignment removed; the with-block always binds it.)
    with open(DATASET_INFO_PATH, 'rb') as file:
        categorical_features_dict = pickle.load(file)
    col1, col2 = st.columns(2)
    with col1:
        year = st.number_input("Year", min_value=1930, max_value=2023)
        odometer = st.number_input("Odometer")
        posting_date = st.date_input("Posting Date (YYYY-MM-DD)")
        manufacturer = st.selectbox("Manufacturer", categorical_features_dict.get("manufacturer"))
        condition = st.selectbox("Condition", categorical_features_dict.get("condition"))
        cylinders = st.selectbox("Cylinders", categorical_features_dict.get("cylinders"))
    with col2:
        fuel = st.selectbox("Fuel", categorical_features_dict.get("fuel"))
        title_status = st.selectbox("Title Status", categorical_features_dict.get("title_status"))
        transmission = st.selectbox("Transmission", categorical_features_dict.get("transmission"))
        drive = st.selectbox("Drive", categorical_features_dict.get("drive"))
        car_type = st.selectbox("Car Type", categorical_features_dict.get("type"))
        paint_color = st.selectbox("Paint Color", categorical_features_dict.get("paint_color"))
    # Submit button
    if st.button("Submit"):
        car_info = CarInformationRequest(
            year=year,
            odometer=odometer,
            posting_date=str(posting_date),
            manufacturer=manufacturer,
            condition=condition,
            cylinders=cylinders,
            fuel=fuel,
            title_status=title_status,
            transmission=transmission,
            drive=drive,
            type=car_type,
            paint_color=paint_color
        )
        pred_price = send_to_backend(car_info)
        # Bug fix: send_to_backend returns None when the API call fails;
        # float(None) raised a TypeError and crashed the page.
        if pred_price is not None:
            st.info(f"Predicted car price is {float(pred_price):.2f}$")
# Script entry point (run via `streamlit run`).
if __name__ == "__main__":
    main()
74176775699 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 13:16:47 2019
@author: gregz
"""
import astropy.units as u
import numpy as np
import pickle
from input_utils import setup_logging
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.table import Table
from hetdex_api.extract import Extract
from hetdex_api.survey import Survey
# NOTE(review): flat analysis script — everything below runs at import time.
log = setup_logging('toy')
log.info('Loading Survey')
survey = Survey('hdr1')
# Survey metadata table (one row per shot: date, obsid, ...).
t = Table(survey.hdfile.root.Survey[:])
log.info('Loading External File')
# External MUSE-Wide source catalog to cross-match against HETDEX shots.
filename = '/work/03730/gregz/maverick/MUSE_WIDE_sources_for_hetdex.fits'
fitsfile = fits.open(filename)
bintable = fitsfile[1].data
ID = bintable['ID']
coords = SkyCoord(bintable['RA']*u.deg, bintable['DEC']*u.deg)
# Sources within this separation of a shot centre count as covered by it.
max_sep = 11.0 * u.arcminute
log.info('Finding shots of interest')
matched_sources = {}
shots_of_interest = []
E = Extract()
# Build aperture PSF for aperture extraction
fixed_aperture = 4.
# Using box size of 10.5 (length of box side) and pixel scale of 0.25
# To see documentation use: help(E.tophat_psf)
aperture = E.tophat_psf(fixed_aperture, 10.5, 0.25)
# One (initially empty) extraction list per catalog source id.
Sources = {}
for i in ID:
    Sources[i] = []
# First pass: record, per shot, which catalog sources fall within max_sep.
for i, coord in enumerate(survey.coords):
    dist = coords.separation(coord)
    sep_constraint = dist < max_sep
    name = '%sv%03d' % (t['date'][i], t['obsid'][i])
    idx = np.where(sep_constraint)[0]
    matched_sources[name] = idx
    if len(idx) > 0:
        shots_of_interest.append(name)
log.info('Number of shots of interest: %i' % len(shots_of_interest))
# Second pass: NOTE(review) this repeats the cross-match loop above —
# matched_sources could simply be reused. For each shot covering at least
# one source, extract an aperture spectrum per covered source.
for i, coord in enumerate(survey.coords):
    dist = coords.separation(coord)
    sep_constraint = dist < max_sep
    name = '%sv%03d' % (t['date'][i], t['obsid'][i])
    idx = np.where(sep_constraint)[0]
    matched_sources[name] = idx
    if len(idx) > 0:
        log.info('Working on shot: %s' % name)
        E.load_shot(name)
        for ind in idx:
            # Fiber-level data within 7" of the source position, or None
            # if the source has no fiber coverage in this shot.
            info_result = E.get_fiberinfo_for_coord(coords[ind], radius=7.)
            if info_result is not None:
                log.info('Extracting %i' % ID[ind])
                ifux, ifuy, xc, yc, ra, dec, data, error, mask = info_result
                # Per-fiber weights from the tophat aperture PSF.
                weights = E.build_weights(xc, yc, ifux, ifuy, aperture)
                result = E.get_spectrum(data, error, mask, weights)
                spectrum_aper, spectrum_aper_error = [res for res in result]
                # Store spectrum, its error, and the summed fiber weights.
                Sources[ID[ind]].append([spectrum_aper, spectrum_aper_error,
                                         weights.sum(axis=0)])
        E.fibers.close()
# NOTE(review): consider a with-statement here; the handle is never closed.
pickle.dump(Sources, open( "save.p", "wb" ))
log.info('Done.') | grzeimann/Panacea | toy.py | toy.py | py | 2,578 | python | en | code | 8 | github-code | 13 |
22467477403 | """
Продолжить работу над первым заданием. Разработать методы, отвечающие за приём оргтехники на склад и
передачу в определенное подразделение компании.
Для хранения данных о наименовании и количестве единиц оргтехники, а также других данных,
можно использовать любую подходящую структуру, например словарь.
"""
class OfficeEquipment:
    """Base record for one kind of office equipment.

    Values are stored verbatim in name-mangled private fields, matching
    the original design; there are no public accessors.
    """

    def __init__(self, name, brand, model, price):
        self.__price = price
        self.__model = model
        self.__brand = brand
        self.__name = name
class Printer(OfficeEquipment):
    # Printer record: fixes the base-class name field to 'Принтер'
    # ("printer") and adds a colour capability.
    def __init__(self, brand, model, price, color):
        super().__init__('Принтер', brand, model, price)
        # e.g. 'Черно-белый' (monochrome) or 'Цветной' (colour).
        self.__color = color
class Scanner(OfficeEquipment):
    # Scanner record with a scanning-technology type (e.g. drum scanner).
    # NOTE(review): parameter `type` shadows the builtin; kept unchanged
    # for backward compatibility with keyword callers.
    def __init__(self, brand, model, price, type):
        super().__init__('Сканер', brand, model, price)
        self.__type = type
class Xerox(OfficeEquipment):
    # Copier record with a toner/ink tank capacity.
    def __init__(self, brand, model, price, tank_size):
        super().__init__('Ксерокс', brand, model, price)
        self.__tank_size = tank_size
class StockOfficeEquipment:
    """Warehouse that files equipment items by company department."""

    def __init__(self, name):
        self.__name = name
        # department name -> list of OfficeEquipment items
        self.__items = {}

    def add(self, department: str, equipment):
        """File one piece of equipment under ``department``.

        Raises ValueError when the object is not an OfficeEquipment.
        """
        if not isinstance(equipment, OfficeEquipment):
            raise ValueError('Object must be a OfficeEquipment')
        bucket = self.__items.setdefault(department, [])
        bucket.append(equipment)

    def __str__(self):
        return str(self.__items)
if __name__ == '__main__':
    # Demo: create sample devices (labels intentionally in Russian) and
    # file them into the warehouse by department.
    printer1 = Printer('Canon', 'MP-12900', 12000, 'Черно-белый')
    printer2 = Printer('Samsung', 'SM-900', 15000, 'Цветной')
    scanner = Scanner('Sony', 'M-9300', 35000, 'Барабанный')
    xerox = Xerox('Ксерокс', 'VD-2250', 20000, 3000)
    stock = StockOfficeEquipment('Склад Эльдорадо')
    stock.add('Бухгалтерия', printer1)
    stock.add('Отдел кадров', printer2)
    stock.add('Бухгалтерия', scanner)
    stock.add('Отдел кадров', xerox)
    # Prints the raw dict of department -> equipment object reprs.
    print(stock)
| slavaprotogor/python_base | homeworks/lesson8/task5.py | task5.py | py | 2,300 | python | ru | code | 0 | github-code | 13 |
33526917443 | """
Character Class is the base class and used to create monsters.
Also could add a skill class so monsters could hit harder.
"""
from assests.items import Weapon
class Character:
    """Base class for every combatant (heroes and monsters)."""

    def __init__(self, name, hp, maxhp, mp, maxmp, atk, defence, inventory, exp):
        self.name = name
        # Health points; the character dies at 0.
        self.hp = hp
        self.maxhp = maxhp
        # Magic points.
        self.mp = mp
        self.maxmp = maxmp
        # Attack: damage dealt before the target's defence is applied.
        self.atk = atk
        # Defence blunts incoming attacks.
        self.defence = defence
        # List of items; monsters hand theirs over as loot on death.
        self.inventory = inventory
        # Experience awarded to (or held by) this character.
        self.exp = exp

    def remove_inventory(self):
        """Return this character's inventory (loot for the victor)."""
        spoil = self.inventory
        return spoil

    def attack(self, target):
        """Deal atk-minus-defence damage to ``target`` (never negative).

        Bug fix: the original permanently zeroed ``self.atk`` and
        ``target.defence`` whenever the target's defence exceeded the
        attack, crippling both combatants for the rest of the game.
        Damage is now computed locally without mutating either side's
        stats; the printed damage and resulting hp are unchanged.
        """
        dmg = max(0, self.atk - target.defence)
        target.hp = target.hp - dmg
        if target.hp <= 0:
            target.hp = 0
        print(f"\n{self.name} hits {target.name} for {dmg} \n")

    def death(self):
        """Announce death once hp has reached zero."""
        if self.hp <= 0:
            print(f'\n{self.name} has died')

    def stat(self):
        """Print a small status panel for this character."""
        print('\n')
        print(f"{'Menu':=^32}")
        print(f'Name: {self.name}' + f"{f'Atk: {self.atk}':>12} Def: {self.defence}")
        print('=' * 32)
        print(f"Hp: {self.hp}/{self.maxhp}" + f'{f"Mp: {self.mp}/{self.maxmp}":>19}')
        print('=' * 32)

    def __str__(self):
        return (
            f"{self.name}, {self.hp}, {self.maxhp}, {self.mp}, {self.maxmp}, {self.atk}, {self.defence}, {self.inventory}, {self.exp}")
"""
This creates the hero. Adding class with starter skills could an idea or a skill class.
"""
class Hero(Character):
    """Player character: adds levels, experience thresholds and equipment."""

    def __init__(self, name, hp, maxhp, mp, maxmp, atk, defence, inventory, lvl, exp, maxexp, equip):
        Character.__init__(self, name, hp, maxhp, mp, maxmp, atk, defence, inventory, exp)
        self.lvl = lvl
        # Experience needed to reach the next level.
        self.maxexp = maxexp
        # Equipped items; slot 0 is the weapon in hand.
        self.equip = equip

    def add_inventory(self, drop_item):
        """Pick up an item dropped by a defeated enemy."""
        self.inventory.append(drop_item)

    def equipped_weapon(self):
        """Add the held weapon's attack bonus onto the hero's attack."""
        self.atk = self.atk + self.equip[0].atk
        return self.atk

    def equip_on(self, hero, spoil):
        """Offer to swap the current weapon for ``spoil`` if it is stronger.

        Bug fix: the original compared the player's answer against
        ``"Y".lower()`` (i.e. the literal "y") instead of lower-casing the
        *input*, so typing "Y" never equipped the weapon.
        """
        if hero.equip[0].atk < spoil.atk:
            choice = input(f"{spoil.name} is stronger than your current weapon {self.equip[0].name}.\n"
                           f"Would you like to equip? Yes: Y or No: N")
            if choice.lower() == "y":
                self.equip.pop()
                # NOTE(review): resets attack to a hard-coded base of 10
                # before re-applying the weapon bonus — confirm 10 is the
                # intended unarmed attack value.
                self.atk = 10
                self.equip.append(spoil)
                hero.equipped_weapon()
                print(f"{hero.name} using his vast wisdom has equipped {self.equip}")
            else:
                print("You must live with your choice.")

    def run_away(self):
        # Not implemented yet.
        pass

    def gain_exp(self, target):
        """Absorb ``target``'s experience, levelling up past the threshold."""
        self.exp = self.exp + target.exp
        if self.exp >= self.maxexp:
            self.lvl = self.lvl + 1
            self.maxexp = self.maxexp * 2
            # Surplus experience is deliberately discarded on level-up
            # (was the odd double-assignment `self.exp = self.exp = 0`).
            self.exp = 0
            print(f'\nYou have LEVELed UP! You are now {self.lvl}')
        else:
            print(f'\nYou recieved {target.exp} exp')

    def stats_up(self):
        """Double every combat stat (called on level-up).

        Bug fixes: the original wrote ``self.max = self.hp * 2`` which
        left ``maxhp`` untouched and created a stray ``max`` attribute;
        the summary print also ran all four lines together, so newlines
        were inserted between them.
        """
        self.hp = self.hp * 2
        self.maxhp = self.maxhp * 2
        self.mp = self.mp * 2
        self.maxmp = self.maxmp * 2
        self.atk = self.atk * 2
        self.defence = self.defence * 2
        print(f"Your health is now: {self.hp}/{self.maxhp}\n"
              f"Your Mana pool is now: {self.mp}/{self.maxmp}\n"
              f"Your Attack is now: {self.atk}\n"
              f"Your Defense is now: {self.defence}")

    def stat(self):
        """Print the hero's status panel, including level, exp and gear."""
        print('\n')
        print(f"{'Menu':=^32}")
        print(f'Name: {self.name:15} / Lvl: {self.lvl}')
        print('=' * 32)
        print(f"{f'Hp: {self.hp}/{self.maxhp}':>15}" + " " * 5 + f'Mp: {self.mp}/{self.maxmp}')
        print('=' * 32)
        print(f'Atk: {self.atk} Def: {self.defence}' + ' ' * 5 + f'| Exp: {self.exp}/{self.maxexp}')
        print('=' * 32)
        print(f"{f'Equipment: {self.equip}':^15} | Inventory: {self.inventory}")
        print('=' * 32)
"""hero = Hero(name='Asarmir', hp=12, maxhp=12, mp=1, maxmp=1, atk=5, defence=10, inventory={None}, lvl=1, exp=0,maxexp= 25, equip={'Wooden Sword': 5})
monster = Character('Goblin', 100, 100, 5, 5, 20, 10, {'dagger': 1}, exp=25)
hero.stat()
hero.attack(monster)
monster.stat()"""
| Asarmir/HeroQuest | users/char.py | char.py | py | 4,684 | python | en | code | 0 | github-code | 13 |
13298265308 | """
Data.py provides command line convenience access to the modules in the housinginsights.sources
directory. Here is a brief explanation.
BRIEF EXPLANATION
-----------------
Use this script to download a csv file from an API. Specify the output file with the -o flag.
Specify whatever parameters you need with --params in key:value;k2:val2 format. For example,
if the find_location method requires a location, you would specify that location with:
--params "location: some street somewhere in dc".
Example usage:
bin/data.sh -o ~/csvfile --params "location:641 S St NW" mar find_location
Calling from this folder using Python directly (with your virtual environment activated):
python data.py mar find_location --output ../../data/interim/out.csv --params "location:617 Morton Street NW"
DETAILED EXPLANATION:
--------------------
data.py expects the user to supplies at least 4
things when it is run:
1. Output Type [--outtype]
2. Output File (optional) [--output, -o]
3. Api Module name
4. Api Method name
It then tries to import housinginsights.sources + whatever module you specified.
For example if the api module name supplied by the user is "mar", then it tries to import
"housinginsights.sources.mar". It then looks for a class with the module name + "ApiConn"
as a suffix. In this case it would be "MarApiConn". It then calls whatever method the user specied
from that ApiConn class. Whatever parameters specified by the user with the --params argument
are split and passed as keyword arguments (**kwargs) to the function.
The --outtype argument is added as output_type, and --o or --output is added as output_file.
Thus, each public function compatible with data.sh needs to have as a minimum those two parameters
(output_type and output_file). See the mar.py file for an example.
"""
from argparse import ArgumentParser
import importlib
from housinginsights.config.base import HousingInsightsConfig
# Package under which all source api modules live (see call_module).
API_MODULE = 'housinginsights.sources'
def main():
    """Build the CLI argument parser and dispatch to call_module."""
    description = "Get csv data from api(s)."
    output_help = "Path and filename of a csv file where you'd like to write the output. \
    Required if outtype=csv, omit if outtype=stdout"
    outtype_help = "Where the output should be written. Options: 'stdout', 'csv'"
    params_help = "Keyword variables that passed directly to the api method; \
    check the method for required parameters. Parameters are given in \
    semicolon separated key:value;key2:value2 format."
    parser = ArgumentParser(description=description)
    parser.add_argument("--config", "-c", help="Path to the configuration file. \
    [Not implemented YET!]")
    parser.add_argument("--output", "-o", help=output_help)
    parser.add_argument("--outtype", "-t", default="stdout", help=outtype_help)
    parser.add_argument("--params", help=params_help)
    # Positional arguments: which api module to load and which method to run.
    parser.add_argument("api", help="The name of the api module located in housinginsights.source.")
    parser.add_argument("method", help="Method of the api to call.")
    ns = parser.parse_args()
    # NOTE(review): `result` is currently unused; call_module prints errors
    # itself, so nothing more is done with the return value here.
    result = call_module(ns)
def call_module(ns):
    """Resolve and invoke the requested API method from parsed CLI args.

    ``ns.api`` names a module under housinginsights.sources whose
    ``<Api>ApiConn`` class is instantiated; ``ns.method`` is looked up on
    that instance and called with the parsed --params plus output options.

    Returns the api method's result, or None if anything failed (the
    error is printed rather than raised, as this is the CLI boundary).
    """
    try:
        kwargs = parse_params(ns.params)
        kwargs['output_type'] = ns.outtype
        kwargs['output_file'] = ns.output
        # Hack. for now: an explicit output file forces csv output.
        if ns.output:
            kwargs['output_type'] = 'csv'
        apimod = API_MODULE + '.' + ns.api
        # e.g. api "mar" -> class "MarApiConn" in housinginsights.sources.mar
        classname = ns.api[0].upper() + ns.api[1:] + 'ApiConn'
        module = importlib.import_module(apimod)
        api_class = getattr(module, classname)
        api = api_class()
        apifunc = getattr(api, ns.method)
        # Bug fix: the result was previously computed but never returned.
        return apifunc(**kwargs)
    except Exception as e:
        print('Your request failed. {0}'.format(e))
        return None
def parse_params(params):
    """Parse the --params CLI string into a kwargs dict.

    ``params`` is a semicolon-separated list of key:value pairs, e.g.
    "location:641 S St NW;radius:2". Returns {} for None or empty input.

    Bug fix: the value is split on the *first* colon only, so values that
    themselves contain colons (URLs, timestamps) survive intact — the
    original split on every colon and raised ValueError for such values.
    """
    kwargs = {}
    if not params:
        return kwargs
    for pair in params.split(';'):
        key, value = pair.split(':', 1)
        kwargs[key] = value
    return kwargs
# Allow invoking this module directly from the command line (see bin/data.sh).
if __name__ == '__main__':
    main()
| jgordo04/housinginsights_temp | python/cmd/data.py | data.py | py | 4,027 | python | en | code | 0 | github-code | 13 |
5891355685 | import sys
import numpy as np
from fwl.helpers import most_common
class KNN:
    """k-nearest-neighbours classifier using feature-weighted L2 distance."""

    def __init__(self, k: int = 1):
        self.k = k

    def fit(self, X: np.ndarray, y: np.ndarray, w: np.ndarray) -> None:
        """Memorise the training examples, their labels and feature weights."""
        self.X_train = X
        self.y_train = y
        self.w_train = w

    def predict(self, examples: np.ndarray) -> np.ndarray:
        """Classify each row of ``examples`` by its k nearest neighbours."""
        dist_matrix = self.compute_dists(examples)
        # Leave-one-out: when scoring the training set itself, stop each
        # example from being its own nearest neighbour.
        if examples.shape == self.X_train.shape:
            np.fill_diagonal(dist_matrix, sys.maxsize)
        if self.k == 1:
            # Fast path: the class of the single closest neighbour.
            return self.y_train[dist_matrix.argmin(axis=1)]
        # General path: the mode of the k closest neighbours' classes.
        voted = [
            most_common(self.y_train[np.argsort(row)[: self.k]])
            for row in dist_matrix
        ]
        return np.array(voted, dtype=np.float32)

    def compute_dists(self, examples):
        """Pairwise weighted Euclidean distances to the training set."""
        # https://sparrow.dev/pairwise-distance-in-numpy/
        gaps = examples[:, np.newaxis, :] - self.X_train[np.newaxis, :, :]
        return np.sqrt(np.sum(self.w_train * gaps ** 2, axis=-1))
| mayoras/FWL-Metaheuristic | fwl/knn.py | knn.py | py | 1,807 | python | en | code | 0 | github-code | 13 |
43896859182 | class Person:
    # NOTE(review): the "43896859182 | " prefix above is dataset junk fused
    # onto the class header; preserved verbatim.
    # Class attribute shared by every Person instance.
    country = "Bangladesh"

    def takeBreak(self):
        print('I am breathing...')
class Employee(Person):
    """An employed Person; overrides the break announcement."""

    # Default employer for plain employees.
    company = "Honda"

    def takeBreak(self):
        """Announce a break, employee-style (overrides Person.takeBreak)."""
        print('I am an Employee so I am breathing')

    def getSalary(self):
        """Print the salary stored on the instance (expects ``self.salary``)."""
        print(f"Salary is {self.salary}")
class Programmer(Employee):
    """An Employee who codes; overrides the employer and the salary rule."""

    company = "Fiverr"

    def getSalary(self):
        """Programmers here draw no salary (overrides Employee.getSalary)."""
        print('No salary to Programmer')
# Demo: exercise attribute lookup through the Person -> Employee ->
# Programmer chain (multilevel inheritance).
p = Person()
e = Employee()
pr = Programmer()
# Each call resolves to the closest takeBreak() in the MRO.
p.takeBreak()
e.takeBreak()
pr.takeBreak()
# print(p.company)  # AttributeError: only the subclasses define `company`
print(e.company)
print(pr.company)
print(pr.country) | inadia748/PythonByHarry | Inheritance/4-multilevelinheritance.py | 4-multilevelinheritance.py | py | 597 | python | en | code | 0 | github-code | 13
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.