are useless." % ",".join(memeber_set))
class AiAnalysisTaskClassificationOutput(AbstractModel):
"""智能分类结果信息
"""
def __init__(self):
"""
:param ClassificationSet: 视频智能分类列表。
:type ClassificationSet: list of MediaAiAnalysisClassificationItem
"""
self.ClassificationSet = None
def _deserialize(self, params):
if params.get("ClassificationSet") is not None:
self.ClassificationSet = []
for item in params.get("ClassificationSet"):
obj = MediaAiAnalysisClassificationItem()
obj._deserialize(item)
self.ClassificationSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
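# Usage sketch (hypothetical response payload; the field names of
# MediaAiAnalysisClassificationItem are assumed here for illustration only):
#   output = AiAnalysisTaskClassificationOutput()
#   output._deserialize({"ClassificationSet": [{"Classification": "Sports", "Confidence": 90}]})
#   output.ClassificationSet[0]  # -> a deserialized MediaAiAnalysisClassificationItem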
class AiAnalysisTaskClassificationResult(AbstractModel):
"""智能分类任务结果类型
"""
def __init__(self):
"""
:param Status: 任务状态,有 PROCESSING,SUCCESS 和 FAIL 三种。
:type Status: str
:param ErrCodeExt: 错误码,空字符串表示成功,其他值表示失败,取值请参考 [视频处理类错误码](https://cloud.tencent.com/document/product/266/50368#.E8.A7.86.E9.A2.91.E5.A4.84.E7.90.86.E7.B1.BB.E9.94.99.E8.AF.AF.E7.A0.81) 列表。
:type ErrCodeExt: str
:param ErrCode: 错误码,0 表示成功,其他值表示失败(该字段已不推荐使用,建议使用新的错误码字段 ErrCodeExt)。
:type ErrCode: int
:param Message: 错误信息。
:type Message: str
:param Input: 智能分类任务输入。
:type Input: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskClassificationInput`
:param Output: 智能分类任务输出。
注意:此字段可能返回 null,表示取不到有效值。
:type Output: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskClassificationOutput`
"""
self.Status = None
self.ErrCodeExt = None
self.ErrCode = None
self.Message = None
self.Input = None
self.Output = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCodeExt = params.get("ErrCodeExt")
self.ErrCode = params.get("ErrCode")
self.Message = params.get("Message")
if params.get("Input") is not None:
self.Input = AiAnalysisTaskClassificationInput()
self.Input._deserialize(params.get("Input"))
if params.get("Output") is not None:
self.Output = AiAnalysisTaskClassificationOutput()
self.Output._deserialize(params.get("Output"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskCoverInput(AbstractModel):
"""智能分类任务输入类型
"""
def __init__(self):
"""
:param Definition: 视频智能封面模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskCoverOutput(AbstractModel):
"""智能封面结果信息
"""
def __init__(self):
"""
:param CoverSet: 智能封面列表。
:type CoverSet: list of MediaAiAnalysisCoverItem
"""
self.CoverSet = None
def _deserialize(self, params):
if params.get("CoverSet") is not None:
self.CoverSet = []
for item in params.get("CoverSet"):
obj = MediaAiAnalysisCoverItem()
obj._deserialize(item)
self.CoverSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskCoverResult(AbstractModel):
"""智能封面结果类型
"""
def __init__(self):
"""
:param Status: 任务状态,有 PROCESSING,SUCCESS 和 FAIL 三种。
:type Status: str
:param ErrCodeExt: 错误码,空字符串表示成功,其他值表示失败,取值请参考 [视频处理类错误码](https://cloud.tencent.com/document/product/266/50368#.E8.A7.86.E9.A2.91.E5.A4.84.E7.90.86.E7.B1.BB.E9.94.99.E8.AF.AF.E7.A0.81) 列表。
:type ErrCodeExt: str
:param ErrCode: 错误码,0 表示成功,其他值表示失败(该字段已不推荐使用,建议使用新的错误码字段 ErrCodeExt)。
:type ErrCode: int
:param Message: 错误信息。
:type Message: str
:param Input: 智能封面任务输入。
:type Input: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskCoverInput`
:param Output: 智能封面任务输出。
注意:此字段可能返回 null,表示取不到有效值。
:type Output: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskCoverOutput`
"""
self.Status = None
self.ErrCodeExt = None
self.ErrCode = None
self.Message = None
self.Input = None
self.Output = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCodeExt = params.get("ErrCodeExt")
self.ErrCode = params.get("ErrCode")
self.Message = params.get("Message")
if params.get("Input") is not None:
self.Input = AiAnalysisTaskCoverInput()
self.Input._deserialize(params.get("Input"))
if params.get("Output") is not None:
self.Output = AiAnalysisTaskCoverOutput()
self.Output._deserialize(params.get("Output"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskFrameTagInput(AbstractModel):
"""智能按帧标签任务输入类型
"""
def __init__(self):
"""
:param Definition: 视频智能按帧标签模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskFrameTagOutput(AbstractModel):
"""智能按帧标签结果信息
"""
def __init__(self):
"""
:param SegmentSet: 视频按帧标签列表。
:type SegmentSet: list of MediaAiAnalysisFrameTagSegmentItem
"""
self.SegmentSet = None
def _deserialize(self, params):
if params.get("SegmentSet") is not None:
self.SegmentSet = []
for item in params.get("SegmentSet"):
obj = MediaAiAnalysisFrameTagSegmentItem()
obj._deserialize(item)
self.SegmentSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskFrameTagResult(AbstractModel):
"""智能按帧标签结果类型
"""
def __init__(self):
"""
:param Status: 任务状态,有 PROCESSING,SUCCESS 和 FAIL 三种。
:type Status: str
:param ErrCodeExt: 错误码,空字符串表示成功,其他值表示失败,取值请参考 [视频处理类错误码](https://cloud.tencent.com/document/product/266/50368#.E8.A7.86.E9.A2.91.E5.A4.84.E7.90.86.E7.B1.BB.E9.94.99.E8.AF.AF.E7.A0.81) 列表。
:type ErrCodeExt: str
:param ErrCode: 错误码,0 表示成功,其他值表示失败(该字段已不推荐使用,建议使用新的错误码字段 ErrCodeExt)。
:type ErrCode: int
:param Message: 错误信息。
:type Message: str
:param Input: 智能按帧标签任务输入。
:type Input: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskFrameTagInput`
:param Output: 智能按帧标签任务输出。
注意:此字段可能返回 null,表示取不到有效值。
:type Output: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskFrameTagOutput`
"""
self.Status = None
self.ErrCodeExt = None
self.ErrCode = None
self.Message = None
self.Input = None
self.Output = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCodeExt = params.get("ErrCodeExt")
self.ErrCode = params.get("ErrCode")
self.Message = params.get("Message")
if params.get("Input") is not None:
self.Input = AiAnalysisTaskFrameTagInput()
self.Input._deserialize(params.get("Input"))
if params.get("Output") is not None:
self.Output = AiAnalysisTaskFrameTagOutput()
self.Output._deserialize(params.get("Output"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskHighlightInput(AbstractModel):
"""智能精彩片段任务输入类型
"""
def __init__(self):
"""
:param Definition: 视频智能精彩片段模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskHighlightOutput(AbstractModel):
"""智能精彩片段结果信息
"""
def __init__(self):
"""
:param HighlightSet: 视频智能精彩片段列表。
:type HighlightSet: list of MediaAiAnalysisHighlightItem
"""
self.HighlightSet = None
def _deserialize(self, params):
if params.get("HighlightSet") is not None:
self.HighlightSet = []
for item in params.get("HighlightSet"):
obj = MediaAiAnalysisHighlightItem()
obj._deserialize(item)
self.HighlightSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskHighlightResult(AbstractModel):
"""智能精彩片段结果类型
"""
def __init__(self):
"""
:param Status: 任务状态,有 PROCESSING,SUCCESS 和 FAIL 三种。
:type Status: str
:param ErrCodeExt: 错误码,空字符串表示成功,其他值表示失败,取值请参考 [视频处理类错误码](https://cloud.tencent.com/document/product/266/50368#.E8.A7.86.E9.A2.91.E5.A4.84.E7.90.86.E7.B1.BB.E9.94.99.E8.AF.AF.E7.A0.81) 列表。
:type ErrCodeExt: str
:param ErrCode: 错误码,0 表示成功,其他值表示失败(该字段已不推荐使用,建议使用新的错误码字段 ErrCodeExt)。
:type ErrCode: int
:param Message: 错误信息。
:type Message: str
:param Input: 智能精彩片段任务输入。
:type Input: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskHighlightInput`
:param Output: 智能精彩片段任务输出。
注意:此字段可能返回 null,表示取不到有效值。
:type Output: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskHighlightOutput`
"""
self.Status = None
self.ErrCodeExt = None
self.ErrCode = None
self.Message = None
self.Input = None
self.Output = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCodeExt = params.get("ErrCodeExt")
self.ErrCode = params.get("ErrCode")
self.Message = params.get("Message")
if params.get("Input") is not None:
self.Input = AiAnalysisTaskHighlightInput()
self.Input._deserialize(params.get("Input"))
if params.get("Output") is not None:
self.Output = AiAnalysisTaskHighlightOutput()
self.Output._deserialize(params.get("Output"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskInput(AbstractModel):
"""AI 视频智能分析输入参数类型
"""
def __init__(self):
"""
:param Definition: 视频内容分析模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskTagInput(AbstractModel):
"""智能标签任务输入类型
"""
def __init__(self):
"""
:param Definition: 视频智能标签模板 ID。
:type Definition: int
"""
self.Definition = None
def _deserialize(self, params):
self.Definition = params.get("Definition")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskTagOutput(AbstractModel):
"""智能标签结果信息
"""
def __init__(self):
"""
:param TagSet: 视频智能标签列表。
:type TagSet: list of MediaAiAnalysisTagItem
"""
self.TagSet = None
def _deserialize(self, params):
if params.get("TagSet") is not None:
self.TagSet = []
for item in params.get("TagSet"):
obj = MediaAiAnalysisTagItem()
obj._deserialize(item)
self.TagSet.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiAnalysisTaskTagResult(AbstractModel):
"""智能标签结果类型
"""
def __init__(self):
"""
:param Status: 任务状态,有 PROCESSING,SUCCESS 和 FAIL 三种。
:type Status: str
:param ErrCodeExt: 错误码,空字符串表示成功,其他值表示失败,取值请参考 [视频处理类错误码](https://cloud.tencent.com/document/product/266/50368#.E8.A7.86.E9.A2.91.E5.A4.84.E7.90.86.E7.B1.BB.E9.94.99.E8.AF.AF.E7.A0.81) 列表。
:type ErrCodeExt: str
:param ErrCode: 错误码,0 表示成功,其他值表示失败(该字段已不推荐使用,建议使用新的错误码字段 ErrCodeExt)。
:type ErrCode: int
:param Message: 错误信息。
:type Message: str
:param Input: 智能标签任务输入。
:type Input: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskTagInput`
:param Output: 智能标签任务输出。
注意:此字段可能返回 null,表示取不到有效值。
:type Output: :class:`tencentcloud.vod.v20180717.models.AiAnalysisTaskTagOutput`
"""
self.Status = None
self.ErrCodeExt = None
self.ErrCode = None
self.Message = None
self.Input = None
self.Output = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrCodeExt = params.get("ErrCodeExt")
self.ErrCode = params.get("ErrCode")
self.Message = params.get("Message")
if params.get("Input") is not None:
self.Input = AiAnalysisTaskTagInput()
self.Input._deserialize(params.get("Input"))
if params.get("Output") is not None:
self.Output = AiAnalysisTaskTagOutput()
self.Output._deserialize(params.get("Output"))
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class AiContentReviewResult(AbstractModel):
"""内容审核结果
"""
def __init__(self):
"""
:param Type: 任务的类型,可以取的值有:
<li>Porn:图片鉴黄</li>
<li>Terrorism:图片鉴恐</li>
<li>Political:图片鉴政</li>
<li>Porn.Asr:Asr 文字鉴黄</li>
<li>Porn.Ocr:Ocr 文字鉴黄</li>
<li>Political.Asr:Asr 文字鉴政</li>
<li>Political.Ocr:Ocr 文字鉴政</li>
<li>Terrorism.Ocr:Ocr 文字鉴恐</li>
<li>Prohibited.Asr:Asr 文字鉴违禁</li>
<li>Prohibited.Ocr:Ocr 文字鉴违禁</li>
:type Type: str
:param PornTask: 视频内容审核智能画面鉴黄任务的查询结果,当任务类型为 Porn 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PornTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPornResult`
:param TerrorismTask: 视频内容审核智能画面鉴恐任务的查询结果,当任务类型为 Terrorism 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type TerrorismTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskTerrorismResult`
:param PoliticalTask: 视频内容审核智能画面鉴政任务的查询结果,当任务类型为 Political 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PoliticalTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPoliticalResult`
:param PornAsrTask: 视频内容审核 Asr 文字鉴黄任务的查询结果,当任务类型为 Porn.Asr 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PornAsrTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPornAsrResult`
:param PornOcrTask: 视频内容审核 Ocr 文字鉴黄任务的查询结果,当任务类型为 Porn.Ocr 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PornOcrTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPornOcrResult`
:param PoliticalAsrTask: 视频内容审核 Asr 文字鉴政任务的查询结果,当任务类型为 Political.Asr 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PoliticalAsrTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPoliticalAsrResult`
:param PoliticalOcrTask: 视频内容审核 Ocr 文字鉴政任务的查询结果,当任务类型为 Political.Ocr 时有效。
注意:此字段可能返回 null,表示取不到有效值。
:type PoliticalOcrTask: :class:`tencentcloud.vod.v20180717.models.AiReviewTaskPoliticalOcrResult`
:param TerrorismOcrTask: 视频内容审核 Ocr | |
import argparse
import cv2 as cv
import numpy as np
import os
"""
Link to original paper : https://arxiv.org/abs/1812.11703
Link to original repo : https://github.com/STVIR/pysot
You can download the pre-trained weights of the Tracker Model from https://drive.google.com/file/d/11bwgPFVkps9AH2NOD1zBDdpF_tQghAB-/view?usp=sharing
You can download the target net (target branch of SiamRPN++) from https://drive.google.com/file/d/1dw_Ne3UMcCnFsaD6xkZepwE4GEpqq7U_/view?usp=sharing
You can download the search net (search branch of SiamRPN++) from https://drive.google.com/file/d/1Lt4oE43ZSucJvze3Y-Z87CVDreO-Afwl/view?usp=sharing
You can download the head model (RPN Head) from https://drive.google.com/file/d/1zT1yu12mtj3JQEkkfKFJWiZ71fJ-dQTi/view?usp=sharing
"""
class ModelBuilder():
""" This class generates the SiamRPN++ Tracker Model by using Imported ONNX Nets
"""
def __init__(self, target_net, search_net, rpn_head):
super(ModelBuilder, self).__init__()
# Build the target branch
self.target_net = target_net
# Build the search branch
self.search_net = search_net
# Build RPN_Head
self.rpn_head = rpn_head
def template(self, z):
""" Takes the template of size (1, 1, 127, 127) as an input to generate kernel
"""
self.target_net.setInput(z)
outNames = self.target_net.getUnconnectedOutLayersNames()
self.zfs_1, self.zfs_2, self.zfs_3 = self.target_net.forward(outNames)
def track(self, x):
""" Takes the search of size (1, 1, 255, 255) as an input to generate classification score and bounding box regression
"""
self.search_net.setInput(x)
outNames = self.search_net.getUnconnectedOutLayersNames()
xfs_1, xfs_2, xfs_3 = self.search_net.forward(outNames)
self.rpn_head.setInput(np.stack([self.zfs_1, self.zfs_2, self.zfs_3]), 'input_1')
self.rpn_head.setInput(np.stack([xfs_1, xfs_2, xfs_3]), 'input_2')
outNames = self.rpn_head.getUnconnectedOutLayersNames()
cls, loc = self.rpn_head.forward(outNames)
return {'cls': cls, 'loc': loc}
class Anchors:
""" This class generate anchors.
"""
def __init__(self, stride, ratios, scales, image_center=0, size=0):
self.stride = stride
self.ratios = ratios
self.scales = scales
self.image_center = image_center
self.size = size
self.anchor_num = len(self.scales) * len(self.ratios)
self.anchors = self.generate_anchors()
def generate_anchors(self):
"""
generate anchors based on predefined configuration
"""
anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)
size = self.stride**2
count = 0
for r in self.ratios:
ws = int(np.sqrt(size * 1. / r))
hs = int(ws * r)
for s in self.scales:
w = ws * s
h = hs * s
anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]
count += 1
return anchors
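# Example (a quick sanity check, using the tracker defaults defined below):
#   Anchors(stride=8, ratios=[0.33, 0.5, 1, 2, 3], scales=[8]) produces
#   anchor_num = 5 base anchors, i.e. an array of shape (5, 4) holding one
#   (x1, y1, x2, y2) box per aspect ratio, all centred on the origin.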
class SiamRPNTracker:
def __init__(self, model):
super(SiamRPNTracker, self).__init__()
self.anchor_stride = 8
self.anchor_ratios = [0.33, 0.5, 1, 2, 3]
self.anchor_scales = [8]
self.track_base_size = 8
self.track_context_amount = 0.5
self.track_exemplar_size = 127
self.track_instance_size = 255
self.track_lr = 0.4
self.track_penalty_k = 0.04
self.track_window_influence = 0.44
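# score_size: cells per side of the correlation score map; with the defaults above,
# (255 - 127) // 8 + 1 + 8 = 25.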
self.score_size = (self.track_instance_size - self.track_exemplar_size) // \
self.anchor_stride + 1 + self.track_base_size
self.anchor_num = len(self.anchor_ratios) * len(self.anchor_scales)
hanning = np.hanning(self.score_size)
window = np.outer(hanning, hanning)
self.window = np.tile(window.flatten(), self.anchor_num)
self.anchors = self.generate_anchor(self.score_size)
self.model = model
def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):
"""
Args:
im: bgr based input image frame
pos: position of the center of the frame
model_sz: exemplar / target image size
original_sz: original / search image size
avg_chans: channel average
Return:
im_patch: sub_windows for the given image input
"""
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_h, im_w, im_d = im.shape
c = (original_sz + 1) / 2
cx, cy = pos
context_xmin = np.floor(cx - c + 0.5)
context_xmax = context_xmin + sz - 1
context_ymin = np.floor(cy - c + 0.5)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_w + 1))
bottom_pad = int(max(0., context_ymax - im_h + 1))
context_xmin += left_pad
context_xmax += left_pad
context_ymin += top_pad
context_ymax += top_pad
if any([top_pad, bottom_pad, left_pad, right_pad]):
size = (im_h + top_pad + bottom_pad, im_w + left_pad + right_pad, im_d)
te_im = np.zeros(size, np.uint8)
te_im[top_pad:top_pad + im_h, left_pad:left_pad + im_w, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + im_w, :] = avg_chans
if bottom_pad:
te_im[im_h + top_pad:, left_pad:left_pad + im_w, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, im_w + left_pad:, :] = avg_chans
im_patch = te_im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
else:
im_patch = im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
im_patch = cv.resize(im_patch, (model_sz, model_sz))
im_patch = im_patch.transpose(2, 0, 1)
im_patch = im_patch[np.newaxis, :, :, :]
im_patch = im_patch.astype(np.float32)
return im_patch
def generate_anchor(self, score_size):
"""
Args:
score_size: side length of the correlation score map (number of cells per side)
Return:
anchor: anchors for pre-determined values of stride, ratio, and scale
"""
anchors = Anchors(self.anchor_stride, self.anchor_ratios, self.anchor_scales)
anchor = anchors.anchors
x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
anchor = np.stack([(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], 1)
total_stride = anchors.stride
anchor_num = anchors.anchor_num
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
def _convert_bbox(self, delta, anchor):
"""
Args:
delta: raw localisation output of the RPN head (anchor-relative offsets)
anchor: anchors of pre-determined size
Return:
delta: predicted bounding boxes as (cx, cy, w, h), one per anchor
"""
delta_transpose = np.transpose(delta, (1, 2, 3, 0))
delta_contig = np.ascontiguousarray(delta_transpose)
delta = delta_contig.reshape(4, -1)
delta[0, :] = delta[0, :] * anchor[:, 2] + anchor[:, 0]
delta[1, :] = delta[1, :] * anchor[:, 3] + anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * anchor[:, 3]
return delta
def _softmax(self, x):
"""
Softmax in the direction of the depth of the layer
"""
x = x.astype(dtype=np.float32)
x_max = x.max(axis=1)[:, np.newaxis]
e_x = np.exp(x-x_max)
div = np.sum(e_x, axis=1)[:, np.newaxis]
y = e_x / div
return y
def _convert_score(self, score):
"""
Args:
score: raw classification output of the RPN head
Return:
score: softmaxed foreground (object) probability for each anchor
"""
score_transpose = np.transpose(score, (1, 2, 3, 0))
score_con = np.ascontiguousarray(score_transpose)
score_view = score_con.reshape(2, -1)
score = np.transpose(score_view, (1, 0))
score = self._softmax(score)
return score[:,1]
def _bbox_clip(self, cx, cy, width, height, boundary):
"""
Clamp the bounding box centre to the image boundary and enforce a minimum size of 10 px
"""
bbox_h, bbox_w = boundary
cx = max(0, min(cx, bbox_w))
cy = max(0, min(cy, bbox_h))
width = max(10, min(width, bbox_w))
height = max(10, min(height, bbox_h))
return cx, cy, width, height
def init(self, img, bbox):
"""
Args:
img(np.ndarray): bgr based input image frame
bbox: (x, y, w, h): bounding box
"""
x, y, w, h = bbox
self.center_pos = np.array([x + (w - 1) / 2, y + (h - 1) / 2])
self.h = h
self.w = w
w_z = self.w + self.track_context_amount * np.add(h, w)
h_z = self.h + self.track_context_amount * np.add(h, w)
s_z = round(np.sqrt(w_z * h_z))
self.channel_average = np.mean(img, axis=(0, 1))
z_crop = self.get_subwindow(img, self.center_pos, self.track_exemplar_size, s_z, self.channel_average)
self.model.template(z_crop)
def track(self, img):
"""
Args:
img(np.ndarray): BGR image
Return:
bbox(list):[x, y, width, height]
"""
w_z = self.w + self.track_context_amount * np.add(self.w, self.h)
h_z = self.h + self.track_context_amount * np.add(self.w, self.h)
s_z = np.sqrt(w_z * h_z)
scale_z = self.track_exemplar_size / s_z
s_x = s_z * (self.track_instance_size / self.track_exemplar_size)
x_crop = self.get_subwindow(img, self.center_pos, self.track_instance_size, round(s_x), self.channel_average)
outputs = self.model.track(x_crop)
score = self._convert_score(outputs['cls'])
pred_bbox = self._convert_bbox(outputs['loc'], self.anchors)
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
return np.sqrt((w + pad) * (h + pad))
# scale penalty
s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
(sz(self.w * scale_z, self.h * scale_z)))
# aspect ratio penalty
r_c = change((self.w / self.h) /
(pred_bbox[2, :] / pred_bbox[3, :]))
penalty = np.exp(-(r_c * s_c - 1) * self.track_penalty_k)
pscore = penalty * score
# window penalty
pscore = pscore * (1 - self.track_window_influence) + \
self.window * self.track_window_influence
best_idx = np.argmax(pscore)
bbox = pred_bbox[:, best_idx] / scale_z
lr = penalty[best_idx] * score[best_idx] * self.track_lr
cpx, cpy = self.center_pos
x,y,w,h = bbox
cx = x + cpx
cy = y + cpy
# smooth bbox
width = self.w * (1 - lr) + w * lr
height = self.h * (1 - lr) + h * lr
# clip boundary
cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])
# update state
self.center_pos = np.array([cx, cy])
self.w = width
self.h = height
bbox = [cx - width / 2, cy - height / 2, width, height]
import re
import math
import numpy as np
class UpstreamAUG:
def __init__(self, allow_ORF=True, verbose_output=False):
"""
Constructor
:param allow_ORF: bool, True by default, whether to check uORFs
:param verbose_output: bool, False by default, whether to return dictionaries in predict_on_sample() and predict_on_batch() methods or not
"""
self.allow_ORF = allow_ORF
self.verbose_output = verbose_output
pass
def predict_on_sample(self, seq):
"""
Predict_on_sample
:param seq: string, 5'UTR's sequence
:return: if verbose_output: dictionary:
first entry – 1 or 0 depending on whether the uAUG is in-frame or not
second – 1 or 0 depending on whether it corresponds to a uORF or not
else: NumPy array of 1s and 0s depending on whether the uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUGs, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
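# Usage sketch (hypothetical 5'UTR sequence; assumes verbose_output=True was passed to the constructor):
#   model = UpstreamAUG(allow_ORF=True, verbose_output=True)
#   out = model.predict_on_sample("GCCATGGCGTAAATGCC")
#   out["frame"]  # 1 where the uAUG is in-frame with the main ORF, else 0
#   out["uORF"]   # 1 where an in-frame stop codon follows, i.e. the uAUG opens a uORF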
def predict_on_sample_with_pos(self, seq):
"""
In comparison to predict_on_sample(), additionally returns the positions of AUGs
:param seq: string utr's sequence
:return: if verbose_output: dictionary
first entry – 1 or 0 depending on whether the uAUG is in-frame or not
second – 1 or 0 depending on whether it corresponds to a uORF or not
third – position of the ATG
else: NumPy array of 1s and 0s depending on whether the uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUGs, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0],
"pos": [38, 190, 438, 769, 981]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
ATG_pos = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
ATG_pos.append(ATG.start())
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF), "pos": np.array(ATG_pos)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
def predict_on_sample_with_pos_pandas(self, seq, result_dict, strand, start=None):
"""
In comparison to predict_on_sample(), additionally determines the positions of AUGs and appends everything to the \
dictionary passed to it
:param seq: string, UTR's sequence
:param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", "in-frame_no_uORF", "in-frame_uORF", to which the found values are appended
:param strand: string, '+' or '-', strand of the transcript
:param start: integer, position relative to the whole genome (in contrast to position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
else:
list_11.append(ATG.start() + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
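# On the '-' strand the sequence is presumably supplied 5'->3' on that strand, so an
# offset p in seq is mapped back to a plus-strand genome coordinate as
# start + (len(seq) - p) - 1 in the appends below.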
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
pass
def predict_on_sample_with_stop_pandas(self, seq, result_dict, strand, start=None):
"""
In comparison to predict_on_sample(), additionally determines the positions of AUGs (and of the uORF stop codons) and appends everything to the \
dictionary passed to it
:param seq: string, UTR's sequence
:param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", \
"in-frame_no_uORF", "in-frame_uORF", to which the found values are appended
:param strand: string, '+' or '-', strand of the transcript
:param start: integer, position relative to the whole genome (in contrast to position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
list_01.append(ORF + start)
else:
list_11.append(ATG.start() + start)
list_11.append(ORF + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
list_01.append(start + (len(seq) - ORF) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
list_11.append(start + (len(seq) - ORF) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
<gh_stars>1-10
"""
Steenrod algebra bases
AUTHORS:
- <NAME> (2008-07-30): version 0.9
- <NAME> (2010-06-30): version 1.0
- <NAME> (2011-10-25): Fix the use of cached functions
This package defines functions for computing various bases of the
Steenrod algebra, and for converting between the Milnor basis and
any other basis.
This package implements a number of different bases, at least at
the prime 2. The Milnor and Serre-Cartan bases are the most
familiar and most standard ones, and all of the others are defined
in terms of one of these. The bases are described in the
documentation for the function
:func:`steenrod_algebra_basis`; also see the papers by
Monks [Mon1998]_ and Wood [Woo1998]_ for more information about them. For
commutator bases, see the preprint by Palmieri and Zhang [PZ2008]_.
- 'milnor': Milnor basis.
- 'serre-cartan' or 'adem' or 'admissible': Serre-Cartan basis.
Most of the rest of the bases are only defined when `p=2`. The only
exceptions are the `P^s_t`-bases and the commutator bases, which are
defined at all primes.
- 'wood_y': Wood's Y basis.
- 'wood_z': Wood's Z basis.
- 'wall', 'wall_long': Wall's basis.
- 'arnon_a', 'arnon_a_long': Arnon's A basis.
- 'arnon_c': Arnon's C basis.
- 'pst', 'pst_rlex', 'pst_llex', 'pst_deg', 'pst_revz':
various `P^s_t`-bases.
- 'comm', 'comm_rlex', 'comm_llex', 'comm_deg', 'comm_revz',
or these with '_long' appended: various commutator bases.
The main functions provided here are
- :func:`steenrod_algebra_basis`. This computes a tuple representing
basis elements for the Steenrod algebra in a given degree, at a
given prime, with respect to a given basis. It is a cached function.
- :func:`convert_to_milnor_matrix`. This returns the change-of-basis
matrix, in a given degree, from any basis to the Milnor basis. It is
a cached function.
- :func:`convert_from_milnor_matrix`. This returns the inverse of the
previous matrix.
INTERNAL DOCUMENTATION:
If you want to implement a new basis for the Steenrod algebra:
In the file :file:`steenrod_algebra.py`:
For the class :class:`SteenrodAlgebra_generic
<sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic>`, add functionality to the
methods:
- :meth:`_repr_term <sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic._repr_term>`
- :meth:`degree_on_basis <sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic.degree_on_basis>`
- :meth:`_milnor_on_basis <sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic._milnor_on_basis>`
- :meth:`an_element <sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic.an_element>`
In the file :file:`steenrod_algebra_misc.py`:
- add functionality to :func:`get_basis_name
<sage.algebras.steenrod.steenrod_algebra_misc.get_basis_name>`: this
should accept as input various synonyms for the basis, and its
output should be a canonical name for the basis.
- add a function ``BASIS_mono_to_string`` like
:func:`milnor_mono_to_string
<sage.algebras.steenrod.steenrod_algebra_misc.milnor_mono_to_string>`
or one of the other similar functions.
In this file :file:`steenrod_algebra_bases.py`:
- add appropriate lines to :func:`steenrod_algebra_basis`.
- add a function to compute the basis in a given dimension (to be
called by :func:`steenrod_algebra_basis`).
- modify :func:`steenrod_basis_error_check` so it checks the new
basis.
If the basis has an intrinsic way of defining a product, implement it
in the file :file:`steenrod_algebra_mult.py` and also in the
:meth:`product_on_basis
<sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic.product_on_basis>`
method for :class:`SteenrodAlgebra_generic
<sage.algebras.steenrod.steenrod_algebra.SteenrodAlgebra_generic>` in
:file:`steenrod_algebra.py`.
"""
from __future__ import absolute_import, division
#*****************************************************************************
# Copyright (C) 2008-2010 <NAME> <<EMAIL>>
# Distributed under the terms of the GNU General Public License (GPL)
#*****************************************************************************
from sage.misc.cachefunc import cached_function
@cached_function
def convert_to_milnor_matrix(n, basis, p=2, generic='auto'):
r"""
Change-of-basis matrix, 'basis' to Milnor, in dimension
`n`, at the prime `p`.
INPUT:
- ``n`` - non-negative integer, the dimension
- ``basis`` - string, the basis from which to convert
- ``p`` - positive prime number (optional, default 2)
OUTPUT:
``matrix`` - change-of-basis matrix, a square matrix over ``GF(p)``
EXAMPLES::
sage: from sage.algebras.steenrod.steenrod_algebra_bases import convert_to_milnor_matrix
sage: convert_to_milnor_matrix(5, 'adem') # indirect doctest
[0 1]
[1 1]
sage: convert_to_milnor_matrix(45, 'milnor')
111 x 111 dense matrix over Finite Field of size 2 (use the '.str()' method to see the entries)
sage: convert_to_milnor_matrix(12,'wall')
[1 0 0 1 0 0 0]
[1 1 0 0 0 1 0]
[0 1 0 1 0 0 0]
[0 0 0 1 0 0 0]
[1 1 0 0 1 0 0]
[0 0 1 1 1 0 1]
[0 0 0 0 1 0 1]
The function takes an optional argument, the prime `p` over
which to work::
sage: convert_to_milnor_matrix(17,'adem',3)
[0 0 1 1]
[0 0 0 1]
[1 1 1 1]
[0 1 0 1]
sage: convert_to_milnor_matrix(48,'adem',5)
[0 1]
[1 1]
sage: convert_to_milnor_matrix(36,'adem',3)
[0 0 1]
[0 1 0]
[1 2 0]
"""
from sage.matrix.constructor import matrix
from sage.rings.all import GF
from .steenrod_algebra import SteenrodAlgebra
if generic == 'auto':
generic = False if p==2 else True
if n == 0:
return matrix(GF(p), 1, 1, [[1]])
milnor_base = steenrod_algebra_basis(n,'milnor',p, generic=generic)
rows = []
A = SteenrodAlgebra(basis=basis, p=p, generic=generic)
for poly in A.basis(n):
d = poly.milnor().monomial_coefficients()
for v in milnor_base:
entry = d.get(v, 0)
rows = rows + [entry]
d = len(milnor_base)
return matrix(GF(p),d,d,rows)
def convert_from_milnor_matrix(n, basis, p=2, generic='auto'):
r"""
Change-of-basis matrix, Milnor to 'basis', in dimension
`n`.
INPUT:
- ``n`` - non-negative integer, the dimension
- ``basis`` - string, the basis to which to convert
- ``p`` - positive prime number (optional, default 2)
OUTPUT: ``matrix`` - change-of-basis matrix, a square matrix over
GF(p)
.. note::
This is called internally. It is not intended for casual
users, so no error checking is made on the integer `n`, the
basis name, or the prime.
EXAMPLES::
sage: from sage.algebras.steenrod.steenrod_algebra_bases import convert_from_milnor_matrix, convert_to_milnor_matrix
sage: convert_from_milnor_matrix(12,'wall')
[1 0 0 1 0 0 0]
[0 0 1 1 0 0 0]
[0 0 0 1 0 1 1]
[0 0 0 1 0 0 0]
[1 0 1 0 1 0 0]
[1 1 1 0 0 0 0]
[1 0 1 0 1 0 1]
sage: convert_from_milnor_matrix(38,'serre_cartan')
72 x 72 dense matrix over Finite Field of size 2 (use the '.str()' method to see the entries)
sage: x = convert_to_milnor_matrix(20,'wood_y')
sage: y = convert_from_milnor_matrix(20,'wood_y')
sage: x*y
[1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
The function takes an optional argument, the prime `p` over
which to work::
sage: convert_from_milnor_matrix(17,'adem',3)
[2 1 1 2]
[0 2 0 1]
[1 2 0 0]
[0 1 0 0]
"""
mat = convert_to_milnor_matrix(n,basis,p,generic)
if mat.nrows() != 0:
return convert_to_milnor_matrix(n,basis,p,generic).inverse()
else:
return mat
@cached_function
def steenrod_algebra_basis(n, basis='milnor', p=2, **kwds):
r"""
Basis for the Steenrod algebra in degree `n`.
INPUT:
- ``n`` - non-negative integer
- ``basis`` - string, which basis to use (optional, default = 'milnor')
- ``p`` - positive prime number (optional, default = 2)
- ``profile`` - profile function (optional, default None). This
is just passed on to the functions :func:`milnor_basis` and
:func:`pst_basis`.
- ``truncation_type`` - truncation type, either 0 or Infinity
(optional, default Infinity if no profile function is specified,
0 otherwise). This is just passed on to the function
:func:`milnor_basis`.
- ``generic`` - boolean (optional, default = None)
OUTPUT:
Tuple of objects representing basis elements for the Steenrod algebra
in dimension n.
The choices for the string ``basis``
<filename>Tools/config.py<gh_stars>1-10
#=======================================================================
# Copyright <NAME> 2015.
# Distributed under the MIT License.
# (See accompanying file license.txt or copy at
# http://opensource.org/licenses/MIT)
#=======================================================================
#
# KiMony configuration file
#
from remote import *
from ir import *
from ui import *
from device import *
from activity import *
from remoteconfig import *
SCREEN_WIDTH = 240
SCREEN_HEIGHT = 320
BUTTON_COLUMNS = 4
BUTTON_ROWS = 6
BUTTON_WIDTH = SCREEN_WIDTH/BUTTON_COLUMNS
BUTTON_HEIGHT = SCREEN_HEIGHT/BUTTON_ROWS
BUTTON_GRID = [
[ 0x00400000, 0x00040000, 0x00200000, 0x00020000, 0x00100000, 0x00010000 ],
[ 0x00000000, 0x00000001, 0x00000010, 0x00000100, 0x00001000, 0x00000000 ],
[ 0x00000000, 0x00000002, 0x00000020, 0x00000200, 0x00002000, 0x00000000 ],
[ 0x00000000, 0x00000004, 0x00000040, 0x00000400, 0x00004000, 0x00000000 ],
[ 0x00000000, 0x00000008, 0x00000080, 0x00000800, 0x00008000, 0x00000000 ],
[ 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00080000 ],
]
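# BUTTON_GRID[row][col] appears to hold the bit mask of the physical key at that
# position of the keypad matrix; 0x00000000 entries are positions with no key.
# (Interpretation inferred from the create_button_mapping() calls further down.)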
remoteConfig = RemoteConfig()
sony_tv = Device("Sony TV")
sony_tv.create_action("powertoggle", [IrCode(IrEncoding_SIRC, 12, 0xA90), IrCode(IrEncoding_NOP, 0, 500)])
sony_tv.create_action("tvinput", [IrCode(IrEncoding_SIRC, 15, 0x58EE)])
sony_tv.create_action("hdmi1input", [IrCode(IrEncoding_SIRC, 15, 0x4D58)])
sony_tv.create_action("hdmi2input", [IrCode(IrEncoding_SIRC, 15, 0xCD58)])
sony_tv.create_action("hdmi3input", [IrCode(IrEncoding_SIRC, 15, 0x1D58)])
sony_tv.create_action("hdmi4input", [IrCode(IrEncoding_SIRC, 15, 0x5D58)])
sony_tv.create_action("nextinput", [IrCode(IrEncoding_SIRC, 12, 0xA50)])
sony_tv.create_action("numeric1", [IrCode(IrEncoding_SIRC, 12, 0x010)])
sony_tv.create_action("numeric2", [IrCode(IrEncoding_SIRC, 12, 0x810)])
sony_tv.create_action("numeric3", [IrCode(IrEncoding_SIRC, 12, 0x410)])
sony_tv.create_action("numeric4", [IrCode(IrEncoding_SIRC, 12, 0xC10)])
sony_tv.create_action("numeric5", [IrCode(IrEncoding_SIRC, 12, 0x210)])
sony_tv.create_action("numeric6", [IrCode(IrEncoding_SIRC, 12, 0xA10)])
sony_tv.create_action("numeric7", [IrCode(IrEncoding_SIRC, 12, 0x610)])
sony_tv.create_action("numeric8", [IrCode(IrEncoding_SIRC, 12, 0xE10)])
sony_tv.create_action("numeric9", [IrCode(IrEncoding_SIRC, 12, 0x110)])
sony_tv.create_action("numeric0", [IrCode(IrEncoding_SIRC, 12, 0x910)])
sony_tv.create_action("channel_up", [IrCode(IrEncoding_SIRC, 12, 0x090)])
sony_tv.create_action("channel_down", [IrCode(IrEncoding_SIRC, 12, 0x890)])
sony_tv.create_action("info", [IrCode(IrEncoding_SIRC, 12, 0x5D0)])
sony_tv.create_action("red", [IrCode(IrEncoding_SIRC, 15, 0x52E9)])
sony_tv.create_action("yellow", [IrCode(IrEncoding_SIRC, 15, 0x72E9)])
sony_tv.create_action("green", [IrCode(IrEncoding_SIRC, 15, 0x32E9)])
sony_tv.create_action("blue", [IrCode(IrEncoding_SIRC, 15, 0x12E9)])
sony_tv.create_action("guide", [IrCode(IrEncoding_SIRC, 15, 0x6D25)])
sony_tv.create_action("enter", [IrCode(IrEncoding_SIRC, 12, 0xA70)])
sony_tv.create_action("back", [IrCode(IrEncoding_SIRC, 12, 0xC70)])
sony_tv.create_action("home", [IrCode(IrEncoding_SIRC, 12, 0x070)])
sony_tv.create_action("options", [IrCode(IrEncoding_SIRC, 15, 0x36E9)])
sony_tv.create_action("up", [IrCode(IrEncoding_SIRC, 12, 0x2F0)])
sony_tv.create_action("down", [IrCode(IrEncoding_SIRC, 12, 0xAF0)])
sony_tv.create_action("left", [IrCode(IrEncoding_SIRC, 12, 0x2D0)])
sony_tv.create_action("right", [IrCode(IrEncoding_SIRC, 12, 0xCD0)])
sony_tv.create_action("tvpause", [IrCode(IrEncoding_SIRC, 15, 0x7358)])
sony_tv.create_action("pause", [IrCode(IrEncoding_SIRC, 15, 0x4CE9)])
sony_tv.create_action("play", [IrCode(IrEncoding_SIRC, 15, 0x2CE9)])
sony_tv.create_action("stop", [IrCode(IrEncoding_SIRC, 15, 0x0CE9)])
sony_tv.create_action("ffwd", [IrCode(IrEncoding_SIRC, 15, 0x1CE9)])
sony_tv.create_action("rewind", [IrCode(IrEncoding_SIRC, 15, 0x6CE9)])
sony_tv.create_option(name = "power", flags = Option_Cycled|Option_DefaultToZero|Option_ActionOnDefault, max_value = 1, change_action_names = ["powertoggle"])
sony_tv.create_option(name = "input", flags = Option_AlwaysSet, max_value = 4, change_action_names = ["tvinput", "hdmi1input", "hdmi2input", "hdmi3input", "hdmi4input"])
phillips_hts = Device("Phillips HTS")
phillips_hts.create_action("powertoggle", [IrCode(IrEncoding_NOP, 0, 250),
IrCode(IrEncoding_RC6, 21, 0xFFB38),
IrCode(IrEncoding_RC6, 21, 0xEFB38),
IrCode(IrEncoding_NOP, 0, 250)])
phillips_hts.create_action("volume_up", [IrCode(IrEncoding_RC6, 21, 0xEEFEF, 0x10000)])
phillips_hts.create_action("volume_down", [IrCode(IrEncoding_RC6, 21, 0xEEFEE, 0x10000)])
phillips_hts.create_action("mute", [IrCode(IrEncoding_RC6, 21, 0xEEFF2, 0x10000)])
phillips_hts.create_action("surround", [IrCode(IrEncoding_RC6, 21, 0xEEFAD, 0x10000), IrCode(IrEncoding_NOP, 0, 250)])
phillips_hts.create_action("source", [IrCode(IrEncoding_RC6, 21, 0xEEAC0, 0x10000)])
phillips_hts.create_option("power", Option_DefaultToZero|Option_ActionOnDefault, 1, ["powertoggle", "powertoggle"], post_delays = [0, 15000])
phillips_hts.create_option("surround", Option_Cycled|Option_DefaultToZero, 2, ["surround"], "surround")
# Note: the 20-bit SIRC codes below carry 8 constant post-data bits of 0x47
sony_bluray = Device("<NAME>")
sony_bluray.create_action("power-off", [IrCode(IrEncoding_SIRC, 20, 0xA8B47)])
sony_bluray.create_action("power-on", [IrCode(IrEncoding_SIRC, 20, 0xA8B47)])
sony_bluray.create_action("up", [IrCode(IrEncoding_SIRC, 20, 0x9CB47)])
sony_bluray.create_action("down", [IrCode(IrEncoding_SIRC, 20, 0x5CB47)])
sony_bluray.create_action("left", [IrCode(IrEncoding_SIRC, 20, 0xDCB47)])
sony_bluray.create_action("right", [IrCode(IrEncoding_SIRC, 20, 0x3CB47)])
sony_bluray.create_action("eject", [IrCode(IrEncoding_SIRC, 20, 0x68B47)])
sony_bluray.create_action("play", [IrCode(IrEncoding_SIRC, 20, 0x58B47)])
sony_bluray.create_action("stop", [IrCode(IrEncoding_SIRC, 20, 0x18B47)])
sony_bluray.create_action("ffwd", [IrCode(IrEncoding_SIRC, 20, 0x38B47)])
sony_bluray.create_action("rewind", [IrCode(IrEncoding_SIRC, 20, 0xD8B47)])
sony_bluray.create_action("home", [IrCode(IrEncoding_SIRC, 20, 0x42B47)])
sony_bluray.create_action("menu", [IrCode(IrEncoding_SIRC, 20, 0x34B47)])
sony_bluray.create_action("display", [IrCode(IrEncoding_SIRC, 20, 0x82B47)])
sony_bluray.create_option("power", Option_DefaultToZero|Option_ActionOnDefault, 1, ["power-off", "power-on"], post_delays = [5000, 25000])
sony_stereo = Device("<NAME>")
sony_stereo.create_action("power-on", [IrCode(IrEncoding_SIRC, 12, 0xF16)]) # Use tuner to turn on to force source to 0
sony_stereo.create_action("power-off", [IrCode(IrEncoding_SIRC, 12, 0xA81)])
sony_stereo.create_action("tuner", [IrCode(IrEncoding_SIRC, 12, 0xF16)])
sony_stereo.create_action("volume_up", [IrCode(IrEncoding_SIRC, 12, 0x481)])
sony_stereo.create_action("volume_down", [IrCode(IrEncoding_SIRC, 12, 0xC81)])
sony_stereo.create_action("function", [IrCode(IrEncoding_SIRC, 12, 0x966), IrCode(IrEncoding_NOP, 0, 125)])
sony_stereo.create_option("power", Option_DefaultToZero|Option_ActionOnDefault, 1, ["power-off", "power-on"], post_delays = [6000, 8000])
# values map to: tuner, md, cd, pc, opt, analog, tape
#sony_stereo.create_option("source", Option_Cycled|Option_AbsoluteFromZero|Option_AlwaysSet|Option_DefaultToZero, 6, ["tuner", "function"])
sony_stereo.create_option("source", Option_Cycled|Option_DefaultToZero, 6, ["function"])
remoteConfig.add_device(sony_tv)
remoteConfig.add_device(sony_bluray)
remoteConfig.add_device(sony_stereo)
remoteConfig.add_device(phillips_hts)
# Events
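# Each event either wraps a remote-level action (home/next/prev/download/power-off)
# or a single IR action on one of the devices registered above; the activities below
# bind these events to physical buttons, gestures and touch-button pages.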
home_activity_event = remoteConfig.create_event("home", Event_HOME)
next_event = remoteConfig.create_event("next", Event_NEXTPAGE)
prev_event = remoteConfig.create_event("prev", Event_PREVPAGE)
download_event = remoteConfig.create_event("download", Event_DOWNLOAD)
poweroff_event = remoteConfig.create_event("power-off", Event_POWEROFF)
numeric1_event = remoteConfig.create_ir_event("tv-1", sony_tv, "numeric1")
numeric2_event = remoteConfig.create_ir_event("tv-2", sony_tv, "numeric2")
numeric3_event = remoteConfig.create_ir_event("tv-3", sony_tv, "numeric3")
numeric4_event = remoteConfig.create_ir_event("tv-4", sony_tv, "numeric4")
numeric5_event = remoteConfig.create_ir_event("tv-5", sony_tv, "numeric5")
numeric6_event = remoteConfig.create_ir_event("tv-6", sony_tv, "numeric6")
numeric7_event = remoteConfig.create_ir_event("tv-7", sony_tv, "numeric7")
numeric8_event = remoteConfig.create_ir_event("tv-8", sony_tv, "numeric8")
numeric9_event = remoteConfig.create_ir_event("tv-9", sony_tv, "numeric9")
numeric0_event = remoteConfig.create_ir_event("tv-0", sony_tv, "numeric0")
volume_up_event = remoteConfig.create_ir_event("vol-up", phillips_hts, "volume_up")
volume_down_event = remoteConfig.create_ir_event("vol-down", phillips_hts, "volume_down")
mute_event = remoteConfig.create_ir_event("mute", phillips_hts, "mute")
surround_event = remoteConfig.create_ir_event("surround", phillips_hts, "surround")
source_event = remoteConfig.create_ir_event("source", phillips_hts, "source")
channel_up_event = remoteConfig.create_ir_event("ch-up", sony_tv, "channel_up")
channel_down_event = remoteConfig.create_ir_event("ch-down", sony_tv, "channel_down")
info_event = remoteConfig.create_ir_event("info", sony_tv, "info")
next_input_event = remoteConfig.create_ir_event("next-input", sony_tv, "nextinput")
red_event = remoteConfig.create_ir_event("red", sony_tv, "red")
yellow_event = remoteConfig.create_ir_event("yellow", sony_tv, "yellow")
green_event = remoteConfig.create_ir_event("green", sony_tv, "green")
blue_event = remoteConfig.create_ir_event("blue", sony_tv, "blue")
guide_event = remoteConfig.create_ir_event("guide", sony_tv, "guide")
enter_event = remoteConfig.create_ir_event("enter", sony_tv, "enter")
back_event = remoteConfig.create_ir_event("back", sony_tv, "back")
home_event = remoteConfig.create_ir_event("tv-home", sony_tv, "home")
options_event = remoteConfig.create_ir_event("options", sony_tv, "options")
tv_play_event = remoteConfig.create_ir_event("tv-play", sony_tv, "play")
tv_stop_event = remoteConfig.create_ir_event("tv-stop", sony_tv, "stop")
tv_pause_event = remoteConfig.create_ir_event("tv-pause", sony_tv, "tvpause")
tv_ffwd_event = remoteConfig.create_ir_event("tv-ffwd", sony_tv, "ffwd")
tv_rewind_event = remoteConfig.create_ir_event("tv-rewind", sony_tv, "rewind")
pause_event = remoteConfig.create_ir_event("pause", sony_tv, "pause")
up_event = remoteConfig.create_ir_event("tv-up", sony_tv, "up")
down_event = remoteConfig.create_ir_event("tv-down", sony_tv, "down")
left_event = remoteConfig.create_ir_event("tv-left", sony_tv, "left")
right_event = remoteConfig.create_ir_event("tv-right", sony_tv, "right")
br_up_event = remoteConfig.create_ir_event("br-up", sony_bluray, "up")
br_down_event = remoteConfig.create_ir_event("br-down", sony_bluray, "down")
br_left_event = remoteConfig.create_ir_event("br-left", sony_bluray, "left")
br_right_event = remoteConfig.create_ir_event("br-right", sony_bluray, "right")
br_eject_event = remoteConfig.create_ir_event("br-eject", sony_bluray, "eject")
br_play_event = remoteConfig.create_ir_event("br-play", sony_bluray, "play")
br_stop_event = remoteConfig.create_ir_event("br-stop", sony_bluray, "stop")
br_ffwd_event = remoteConfig.create_ir_event("br-ffwd", sony_bluray, "ffwd")
br_rewind_event = remoteConfig.create_ir_event("br-rewind", sony_bluray, "rewind")
br_home_event = remoteConfig.create_ir_event("br-home", sony_bluray, "home")
br_menu_event = remoteConfig.create_ir_event("br-menu", sony_bluray, "menu")
br_display_event = remoteConfig.create_ir_event("br-display", sony_bluray, "display")
st_volume_up_event = remoteConfig.create_ir_event("st-vol-up", sony_stereo, "volume_up")
st_volume_down_event = remoteConfig.create_ir_event("st-vol-down", sony_stereo, "volume_down")
# Activities, button mappings and touch button pages
watch_tv_activity = Activity(name = "watch-tv")
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][0], tv_pause_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][1], options_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][2], info_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][4], home_activity_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[0][5], home_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[1][2], numeric1_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[1][3], numeric2_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[1][4], numeric3_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[2][2], numeric4_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[2][3], numeric5_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[2][4], numeric6_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[3][2], numeric7_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[3][3], numeric8_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[3][4], numeric9_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[4][2], mute_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[4][3], numeric0_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[4][4], surround_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[3][1], volume_up_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[4][1], volume_down_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[1][1], channel_up_event)
watch_tv_activity.create_button_mapping(BUTTON_GRID[2][1], channel_down_event)
watch_tv_activity.create_gesture_mapping(Gesture_TAP, mute_event)
watch_tv_activity.create_gesture_mapping(Gesture_DRAGLEFT, volume_up_event)
watch_tv_activity.create_gesture_mapping(Gesture_DRAGRIGHT, volume_down_event)
watch_tv_activity.create_gesture_mapping(Gesture_SWIPELEFT, channel_up_event)
watch_tv_activity.create_gesture_mapping(Gesture_SWIPERIGHT, channel_down_event)
watch_tv_activity.create_touch_button_page([
TouchButton(guide_event, "Guide", 0, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(enter_event, "Enter", BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(back_event, "Back", 2*BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(home_event, "Home", 3*BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(red_event, None, 0, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf800, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Red"),
TouchButton(green_event, None, BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0x07e0, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Green"),
TouchButton(yellow_event, None, 2*BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xffe0, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Yellow"),
TouchButton(blue_event, None, 3*BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0x001f, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Blue"),
TouchButton(up_event, "U", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 2*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(down_event, "D", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 3*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(left_event, "L", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 - BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(right_event, "R", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 + BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_play_event, "Play", 0, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_stop_event, "Stop", BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(next_input_event, "Input", 2*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(source_event, "Srce", 3*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
])
watch_tv_activity.create_device_state(sony_tv, { "power": 1, "input": 0 })
watch_tv_activity.create_device_state(phillips_hts, { "power": 1, "surround": 2 })
watch_video_activity = Activity(name = "watch-video")
watch_video_activity.create_button_mapping(BUTTON_GRID[0][0], pause_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[0][4], home_activity_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[4][2], mute_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[4][4], surround_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[3][1], volume_up_event)
watch_video_activity.create_button_mapping(BUTTON_GRID[4][1], volume_down_event)
watch_video_activity.create_gesture_mapping(Gesture_TAP, pause_event)
watch_video_activity.create_gesture_mapping(Gesture_DRAGLEFT, volume_up_event)
watch_video_activity.create_gesture_mapping(Gesture_DRAGRIGHT, volume_down_event)
watch_video_activity.create_gesture_mapping(Gesture_SWIPELEFT, left_event)
watch_video_activity.create_gesture_mapping(Gesture_SWIPERIGHT, right_event)
watch_video_activity.create_touch_button_page([
TouchButton(guide_event, "Guide", 0, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(enter_event, "Enter", BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(back_event, "Back", 2*BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(home_event, "Home", 3*BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(red_event, None, 0, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf800, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Red"),
TouchButton(green_event, None, BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0x07e0, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Green"),
TouchButton(yellow_event, None, 2*BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xffe0, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Yellow"),
TouchButton(blue_event, None, 3*BUTTON_WIDTH, BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0x001f, TouchButton.FLAGS_PRESS_ACTIVATE, name = "Blue"),
TouchButton(up_event, "U", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 2*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(down_event, "D", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 3*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(left_event, "L", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 - BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(right_event, "R", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 + BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_play_event, "Play", 0, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_stop_event, "Stop", BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_rewind_event, "<<", 2*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(tv_ffwd_event, ">>", 3*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
])
watch_video_activity.create_device_state(sony_tv, { "power": 1, "input": 4 })
watch_video_activity.create_device_state(phillips_hts, { "power": 1, "surround": 2 })
watch_movie_activity = Activity(name = "watch-movie")
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][0], pause_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][1], br_menu_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][2], br_display_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][4], home_activity_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[0][5], br_home_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[3][1], volume_up_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[4][1], volume_down_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[4][4], surround_event)
watch_movie_activity.create_button_mapping(BUTTON_GRID[4][2], mute_event)
watch_movie_activity.create_gesture_mapping(Gesture_TAP, pause_event)
watch_movie_activity.create_gesture_mapping(Gesture_DRAGLEFT, volume_up_event)
watch_movie_activity.create_gesture_mapping(Gesture_DRAGRIGHT, volume_down_event)
watch_movie_activity.create_gesture_mapping(Gesture_SWIPELEFT, left_event)
watch_movie_activity.create_gesture_mapping(Gesture_SWIPERIGHT, right_event)
TRANSPORT_BUTTON_WIDTH = 55
TRANSPORT_BUTTON_HEIGHT = 55
watch_movie_activity.create_touch_button_page([
TouchButton(br_menu_event, "Menu", 0, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_stop_event, None, 1*BUTTON_WIDTH + ((BUTTON_WIDTH - TRANSPORT_BUTTON_WIDTH) / 2), 0, TRANSPORT_BUTTON_WIDTH, TRANSPORT_BUTTON_HEIGHT, 0x0000, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_NO_BORDER, "Resources/stop-button-3.png", "Resources/stop-button-3-pressed.png"),
TouchButton(br_play_event, None, 2*BUTTON_WIDTH + ((BUTTON_WIDTH - TRANSPORT_BUTTON_WIDTH) / 2), 0, TRANSPORT_BUTTON_WIDTH, TRANSPORT_BUTTON_HEIGHT, 0x0000, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_NO_BORDER, "Resources/play-button-3.png", "Resources/play-button-3-pressed.png"),
TouchButton(br_home_event, "Home", 3*BUTTON_WIDTH, 0, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_up_event, "U", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 2*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_down_event, "D", (SCREEN_WIDTH - BUTTON_WIDTH) / 2, 3*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_left_event, "L", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 - BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_right_event, "R", (SCREEN_WIDTH - BUTTON_WIDTH) / 2 + BUTTON_WIDTH, int(2.5*BUTTON_HEIGHT), BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(enter_event, "Enter", 0, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_rewind_event, "<<", BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_ffwd_event, ">>", 2*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(br_eject_event, "Eject", 3*BUTTON_WIDTH, 4*BUTTON_HEIGHT, BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_PRESS_ACTIVATE|TouchButton.FLAGS_CENTRE_TEXT),
])
watch_movie_activity.create_device_state(sony_tv, { "power": 1, "input": 1 })
watch_movie_activity.create_device_state(sony_bluray, { "power": 1 })
watch_movie_activity.create_device_state(phillips_hts, { "power": 1, "surround": 2 })
listen_cd_activity = Activity(name = "listen-cd")
listen_cd_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
listen_cd_activity.create_button_mapping(BUTTON_GRID[0][4], home_activity_event)
listen_cd_activity.create_button_mapping(BUTTON_GRID[3][1], st_volume_up_event)
listen_cd_activity.create_button_mapping(BUTTON_GRID[4][1], st_volume_down_event)
listen_cd_activity.create_device_state(sony_stereo, { "power": 1, "source": 2 })
listen_radio_activity = Activity(name = "listen-radio")
listen_radio_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
listen_radio_activity.create_button_mapping(BUTTON_GRID[0][4], home_activity_event)
listen_radio_activity.create_button_mapping(BUTTON_GRID[3][1], st_volume_up_event)
listen_radio_activity.create_button_mapping(BUTTON_GRID[4][1], st_volume_down_event)
listen_radio_activity.create_device_state(sony_stereo, { "power": 1, "source": 0 })
watch_tv_event = remoteConfig.create_activity_event("watch-tv", watch_tv_activity)
watch_video_event = remoteConfig.create_activity_event("watch-video", watch_video_activity)
watch_movie_event = remoteConfig.create_activity_event("watch-movie", watch_movie_activity)
listen_cd_event = remoteConfig.create_activity_event("listen-cd", listen_cd_activity)
listen_radio_event = remoteConfig.create_activity_event("listen-radio", listen_radio_activity)
home_activity = Activity(name = "home", flags = Activity_NoDevices)
home_activity.create_button_mapping(BUTTON_GRID[0][0], prev_event)
home_activity.create_button_mapping(BUTTON_GRID[0][5], next_event)
home_activity.create_button_mapping(BUTTON_GRID[0][3], poweroff_event)
home_activity.create_touch_button_page([
TouchButton(watch_tv_event, "Watch TV", 0, 0*BUTTON_HEIGHT, 4*BUTTON_WIDTH, BUTTON_HEIGHT, 0xf9e0, TouchButton.FLAGS_CENTRE_TEXT),
TouchButton(watch_video_event, "Watch Video", 0, 1*BUTTON_HEIGHT, 4*BUTTON_WIDTH,
r"""
=========================================================
Utilities Abaqus (:mod:`desicos.abaqus.abaqus_functions`)
=========================================================
.. currentmodule:: desicos.abaqus.abaqus_functions
Includes all utility functions that must be executed from Abaqus.
"""
from __future__ import absolute_import
import math
import numpy as np
from .constants import (TOL, FLOAT, COLORS, COLOR_WHINE, COLOR_DARK_BLUE,
COLOR_BLACK)
from . import utils
def configure_session():
"""Improve layout and colors of the current figures in visualization
"""
from abaqus import session
from abaqusConstants import (ON, OFF, SMALL, DASHED, OUTSIDE,
HOLLOW_CIRCLE, DECIMAL, INCREMENT)
plot_names=session.xyDataObjects.keys()
if not 'XYPlot-1' in session.xyPlots.keys():
xyp=session.XYPlot('XYPlot-1')
else:
xyp=session.xyPlots['XYPlot-1']
chartName=xyp.charts.keys()[0]
chart=xyp.charts[chartName]
tmp=session.xyDataObjects.keys()
if len(tmp)==0:
return
xy1=session.xyDataObjects[tmp[0]]
c1=session.Curve(xyData=xy1)
chart.setValues(curvesToPlot=(c1,),)
session.viewports['Viewport: 1'].setValues(displayedObject=xyp)
chart=session.charts['Chart-1']
chart.minorAxis1GridStyle.setValues(show=True)
chart.majorAxis1GridStyle.setValues(show=True)
chart.majorAxis1GridStyle.setValues(style=DASHED)
chart.minorAxis2GridStyle.setValues(show=True)
chart.majorAxis2GridStyle.setValues(show=True)
chart.majorAxis2GridStyle.setValues(style=DASHED)
chart.gridArea.style.setValues(fill=False)
chart.legend.setValues(show=False) # necessary to update legend values
chart.legend.setValues(show=True)
chart.legend.area.setValues(inset=True)
chart.legend.area.setValues(originOffset=(0.,0.))
chart.legend.area.style.setValues(fill=True)
chart.legend.textStyle.setValues(
font='-*-arial narrow-medium-r-normal-*-*-480-*-*-p-*-*-*')
for name in plot_names:
c=session.Curve(xyData=session.xyDataObjects[name])
chart=session.xyPlots['XYPlot-1'].charts['Chart-1']
chart.setValues(curvesToPlot=(c,))
chart.fitCurves(fitAxes1=True, fitAxes2=True)
curve=session.charts['Chart-1'].curves[name]
curve.curveOptions.setValues(showSymbol=ON)
curve.curveOptions.setValues(symbolSize=SMALL)
curve.lineStyle.setValues(thickness=1.6)
curve.symbolStyle.setValues(size=5,
marker=HOLLOW_CIRCLE)
ax=chart.axes1[0]
ay=chart.axes2[0]
ax.labelStyle.setValues(
font='-*-arial narrow-bold-r-normal-*-*-480-*-*-p-*-*-*',
color=COLOR_BLACK)
ax.titleStyle.setValues(
font='-*-arial narrow-bold-r-normal-*-*-480-*-*-p-*-*-*',
color=COLOR_BLACK)
ay.labelStyle.setValues(
font='-*-arial narrow-bold-r-normal-*-*-480-*-*-p-*-*-*',
color=COLOR_BLACK)
ay.titleStyle.setValues(
font='-*-arial narrow-bold-r-normal-*-*-480-*-*-p-*-*-*',
color=COLOR_BLACK)
ax.setValues(tickPlacement=OUTSIDE)
ax.axisData.setValues(labelFormat=DECIMAL,
labelNumDigits=0,
minorTickCount=4,)
ay.setValues(tickPlacement=OUTSIDE)
ay.axisData.setValues(labelFormat=DECIMAL,
labelNumDigits=0,)
if ax.axisData.title.find('ispl')>-1:
ax.axisData.setValues(labelNumDigits=1)
if name.find('circumference') > -1:
ax.axisData.setValues(tickMode=INCREMENT,
tickIncrement=20,
minorTickCount=0,
minAutoCompute=False,
minValue=-180,
maxAutoCompute=False,
maxValue=185)
#
if (name.find('FI_HSNFCCRT') > -1 or name.find('FI_HSNFTCRT') > -1
or name.find('FI_HSNMCCRT') > -1 or name.find('FI_HSNMTCRT') > -1
or name.find('FI_TSAIW') > -1):
ay.axisData.setValues(labelNumDigits=1,
minAutoCompute=False,
minValue=0,
maxAutoCompute=False,
maxValue=2)
curve.lineStyle.setValues(thickness=1.6,
color=COLOR_WHINE)
curve.curveOptions.setValues(showSymbol=OFF)
ay.titleStyle.setValues(color=COLOR_WHINE)
ay.labelStyle.setValues(color=COLOR_WHINE)
#
if (name.find('MS_HSNFCCRT') > -1 or name.find('MS_HSNFTCRT') > -1
or name.find('MS_HSNMCCRT') > -1 or name.find('MS_HSNMTCRT') > -1
or name.find('MS_TSAIW') > -1
or name.find('MS_MAX') > -1 or name.find('MS_MIN') > -1):
ay.axisData.setValues(labelNumDigits=1,
minAutoCompute=False,
minValue=-0.5,
maxAutoCompute=False,
maxValue=1.0)
curve.lineStyle.setValues(thickness=1.6,
color=COLOR_DARK_BLUE)
curve.curveOptions.setValues(showSymbol=OFF)
ay.titleStyle.setValues(color=COLOR_DARK_BLUE)
ay.labelStyle.setValues(color=COLOR_DARK_BLUE)
def print_png(filename):
"""Print a png file from the current viewport
Parameters
----------
filename : str
The name of the output png file.
"""
from abaqus import session
from abaqusConstants import PNG
viewport=session.viewports[session.currentViewportName]
session.printToFile(fileName=filename,
format=PNG,
canvasObjects=(viewport,))
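# Usage sketch (assumption: run from the Abaqus CAE Python console once the
# XY data objects of interest exist in the session; the file name below is
# purely illustrative):
#
#     from desicos.abaqus.abaqus_functions import configure_session, print_png
#     configure_session()              # restyle grids, legend and curves
#     print_png('buckling_curve.png')  # snapshot of the current viewport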
def set_default_view(cc):
"""Set a default view in order to compare figures from different models
Parameters
----------
cc : :class:`.ConeCyl` object
"""
from abaqus import session
from abaqusConstants import (USER_SPECIFIED, NODAL, COMPONENT, EXTRA_FINE,
FREE, UNIFORM, CONTINUOUS, ON, OFF)
odb=cc.attach_results()
if not odb:
print('No .odb file found for %s!' % cc.jobname)
return
dtm=odb.rootAssembly.datumCsyses[
'ASSEMBLY__T-INSTANCECYLINDER-CSYSCYLINDER']
viewport=session.viewports[session.currentViewportName]
viewport.odbDisplay.basicOptions.setValues(
averageElementOutput=False, transformationType=USER_SPECIFIED,
datumCsys=dtm)
viewport.odbDisplay.setPrimaryVariable(
variableLabel='U',
outputPosition=NODAL,
refinement=(COMPONENT, 'U1'),)
viewport.odbDisplay.basicOptions.setValues(averageElementOutput=True,
curveRefinementLevel=EXTRA_FINE)
viewport.odbDisplay.commonOptions.setValues(visibleEdges=FREE,
deformationScaling=UNIFORM,
uniformScaleFactor=5)
viewport.odbDisplay.contourOptions.setValues(contourStyle=CONTINUOUS)
viewport.restore()
viewport.viewportAnnotationOptions.setValues(compass=OFF)
viewport.viewportAnnotationOptions.setValues(triad=ON)
viewport.viewportAnnotationOptions.setValues(title=OFF)
viewport.viewportAnnotationOptions.setValues(state=OFF)
viewport.viewportAnnotationOptions.setValues(legend=ON)
viewport.viewportAnnotationOptions.setValues(legendTitle=OFF)
viewport.viewportAnnotationOptions.setValues(legendBox=OFF)
viewport.viewportAnnotationOptions.setValues(
legendFont='-*-arial narrow-bold-r-normal-*-*-140-*-*-p-*-*-*')
viewport.viewportAnnotationOptions.setValues(
legendFont='-*-arial narrow-bold-r-normal-*-*-180-*-*-p-*-*-*')
viewport.viewportAnnotationOptions.setValues(legendPosition=(1, 99))
viewport.viewportAnnotationOptions.setValues(legendDecimalPlaces=1)
viewport.setValues(origin=(0.0, -1.05833435058594),
height=188.030563354492,
width=203.452590942383)
viewport.view.setValues(viewOffsetX=-2.724,
viewOffsetY=-52.6898,
cameraUpVector=(-0.453666, -0.433365, 0.778705),
nearPlane=1192.17,
farPlane=2323.39,
width=750.942,
height=665.183,
cameraPosition=(1236.44, 1079.87, 889.94),
cameraTarget=(27.3027, -54.758, 306.503))
def edit_keywords(mod, text, before_pattern=None, insert=False):
"""Edit the keywords to add commands not available in Abaqus CAE
Parameters
----------
mod : Abaqus Model object
The model for which the keywords will be edited.
text : str
The text to be included.
before_pattern : str, optional
One pattern used to find where to put the given text.
insert : bool, optional
Insert the text instead of replacing it.
"""
mod.keywordBlock.synchVersions(storeNodesAndElements=False)
sieBlocks=mod.keywordBlock.sieBlocks
if before_pattern is None:
index=len(sieBlocks) - 2
else:
index=None
for i in range(len(sieBlocks)):
sieBlock=sieBlocks[i]
if sieBlock.find(before_pattern) > -1:
index=i-1
break
if index is None:
print('WARNING - *edit_keywords failed !')
print('    %s pattern not found !' % before_pattern)
#TODO better error handling here...
return
if insert:
mod.keywordBlock.insert(index, text)
else:
mod.keywordBlock.replace(index, text)
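# Usage sketch for edit_keywords. The model name, keyword text and pattern
# below are illustrative only; any valid Abaqus keyword block can be passed
# as `text`:
#
#     from abaqus import mdb
#     mod = mdb.models['Model-1']
#     # add a nodal output request just before the step ends
#     edit_keywords(mod, '*NODE FILE\nU,', before_pattern='*End Step',
#                   insert=True)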
def create_composite_layup(name, stack, plyts, mat_names, region, part,
part_csys, symmetric=False, scaling_factor=1.,
axis_normal=2):
r"""Creates a composite layup
Parameters
----------
name : str
Name of the new composite layup.
stack : list
Stacking sequence represented by a list of orientations in degrees.
The stacking sequence starts inwards and ends outwards. The 0 degree
angle is along the axial direction and the angles are measured using
the right-hand rule with the normal direction being normal to the
shell surface pointing outwards.
plyts : list
List containing the ply thicknesses.
mat_names : list
List containing the material name for each ply.
region : an Abaqus Region object
The region consisting of geometric faces, where this laminate will be
assigned to.
part : an Abaqus part Object
A part object where the layup will be created.
part_csys : a valid Datum object
The cylindrical coordinate system of the part object.
symmetric : bool, optional
A boolean telling whether the laminate is symmetric.
scaling_factor : float, optional
A scaling factor to be applied to each ply thickness. Used to apply
thickness imperfection in some cases.
axis_normal : int, optional
Reference axis (1, 2 or 3, mapped to ``AXIS_1``, ``AXIS_2`` or ``AXIS_3``)
used as the normal for the layup reference orientation. Defaults to 2.
"""
from abaqusConstants import (MIDDLE_SURFACE, FROM_SECTION, SHELL, ON, OFF,
DEFAULT, UNIFORM, SIMPSON, GRADIENT, SYSTEM, ROTATION_NONE,
AXIS_1, AXIS_2, AXIS_3, SPECIFY_THICKNESS, SPECIFY_ORIENT,
SINGLE_VALUE)
myLayup=part.CompositeLayup(name=name,
description='stack from inside to outside',
offsetType=MIDDLE_SURFACE,
symmetric=False,
thicknessAssignment=FROM_SECTION,
elementType=SHELL)
myLayup.Section(preIntegrate=OFF,
integrationRule=SIMPSON,
thicknessType=UNIFORM,
poissonDefinition=DEFAULT,
temperature=GRADIENT,
useDensity=OFF)
if axis_normal == 1:
axis = AXIS_1
elif axis_normal == 2:
axis = AXIS_2
elif axis_normal == 3:
axis = AXIS_3
else:
raise ValueError('Invalid value for `axis_normal`')
myLayup.ReferenceOrientation(orientationType=SYSTEM,
localCsys=part_csys,
fieldName='',
additionalRotationType=ROTATION_NONE,
angle=0.,
additionalRotationField='',
axis=axis)
#CREATING ALL PLIES
numIntPoints=3
if len(stack)==1:
numIntPoints=5
for i, angle in enumerate(stack):
plyt=plyts[i]
mat_name=mat_names[i]
myLayup.CompositePly(suppressed=False,
plyName='ply_%02d' % (i+1),
region=region,
material=mat_name,
thicknessType=SPECIFY_THICKNESS,
thickness=plyt*scaling_factor,
orientationValue=angle,
orientationType=SPECIFY_ORIENT,
numIntPoints=numIntPoints)
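# Usage sketch for create_composite_layup. The part, region, datum csys and
# material names below are hypothetical; in desicos they normally come from
# the ConeCyl model being built:
#
#     stack = [0., 45., -45., 90.]             # orientations, inside to outside
#     plyts = [0.125 for _ in stack]           # ply thicknesses
#     mat_names = ['CFRP_ply' for _ in stack]  # one material name per ply
#     create_composite_layup('Laminate-1', stack, plyts, mat_names,
#                            region, part, part_csys, axis_normal=2)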
def create_isotropic_section(name, mat_names, region, part, model, T, Sect_name, OFFTS):
"""Creates a homogeneous (isotropic) shell section and assigns it to a region
The section is created on ``model`` with thickness ``T`` and the first
material in ``mat_names``, then assigned to ``region`` of ``part`` under the
section name ``Sect_name``. A non-zero offset ``OFFTS`` is applied as a
single-value offset.
"""
from abaqusConstants import (MIDDLE_SURFACE, FROM_SECTION, SHELL, ON, OFF,
DEFAULT, UNIFORM, SIMPSON, GRADIENT, SYSTEM, ROTATION_NONE,
AXIS_1, AXIS_2, AXIS_3, SPECIFY_THICKNESS, SPECIFY_ORIENT,NO_IDEALIZATION,
SINGLE_VALUE)
model.HomogeneousShellSection(name=name,
preIntegrate=OFF, material=mat_names[0],
thicknessType=UNIFORM, thickness=T, thicknessField='',
idealization=NO_IDEALIZATION, poissonDefinition=DEFAULT,
thicknessModulus=None, temperature=GRADIENT, useDensity=OFF,
integrationRule=SIMPSON, numIntPts=5)
if OFFTS==0.0:
part.SectionAssignment(region=region, sectionName=Sect_name,
offset=OFFTS,offsetType=MIDDLE_SURFACE,
offsetField='',
thicknessAssignment=FROM_SECTION)
else:
part.SectionAssignment(region=region, sectionName=Sect_name,
offset=OFFTS,offsetType=SINGLE_VALUE,
offsetField='',
thicknessAssignment=FROM_SECTION)
def modify_composite_layup(part, layup_name, modify_func):
"""Modify plies within a composite layup
Directly modifying plies within a CompositeLayup is not possible, as
the plies are read-only after creation. This function emulates modifying,
by deleting and then re-creating plies, with modifications.
Parameters
----------
part : an Abaqus part object
The part that the to-be-modified layup is attached to.
layup_name : str
Name of the layup that is to be modified.
modify_func : function
Function that will be called for each ply. It should take as arguments
the ply index and a dictionary of keyword arguments. This dictionary
contains all keyword arguments that would re-create the original ply,
if passed to the ``CompositePly``-constructor. This function should
make the necessary changes to this dictionary and then return it.
The returned dictionary will then be used to create the new ply.
"""
from abaqusConstants import SPECIFY_ORIENT, CSYS
layup = part.compositeLayups[layup_name]
ply_data = []
STORE_PLY_ATTRS = ['additionalRotationField', 'additionalRotationType',
'angle', 'axis', 'material', 'numIntPoints', 'orientation',
'orientationType', 'orientationValue', 'plyName', 'region',
'suppressed', 'thickness', 'thicknessType']
for ply in layup.plies.values():
ply_data.append(dict((attr, getattr(ply, attr)) for attr in STORE_PLY_ATTRS))
layup.deletePlies()
for i, kwargs in enumerate(ply_data):
kwargs['region'] = part.sets[kwargs['region'][0]]
if kwargs['orientationType'] != SPECIFY_ORIENT:
kwargs.pop('orientationValue')
if kwargs['orientationType'] != CSYS:
kwargs.pop('orientation')
kwargs = modify_func(i, kwargs)
layup.CompositePly(**kwargs)
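# Example modify_func (sketch): scale every ply thickness by 10%, e.g. to
# emulate a uniform thickness imperfection. The layup name 'Laminate-1' is
# hypothetical.
def _scale_ply_thickness(index, kwargs):
    """Return the ply kwargs with its thickness increased by 10%."""
    kwargs['thickness'] *= 1.10
    return kwargs
# modify_composite_layup(part, 'Laminate-1', _scale_ply_thickness)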
def createDiscreteField(mod, odb, step_name, frame_num):
from abaqusConstants import (NODES, PRESCRIBEDCONDITION_DOF)
u=odb.steps[step_name].frames[frame_num].fieldOutputs['U']
ur=odb.steps[step_name].frames[frame_num].fieldOutputs['UR']
datas=[]
for u_value, ur_value in zip(u.values, ur.values):
id=u_value.nodeLabel
data=np.concatenate((u_value.data, ur_value.data))
datas.append([id, data])
datas.sort(key=lambda x: x[0])
list_ids=[]
list_dof_values=[]
for data in datas:
list_ids += [data[0] for i in range(6)]
for dof in range(1,7):
list_dof_values += [float(dof), data[1][dof-1]]
tuple_ids=tuple(list_ids)
tuple_dof_values=tuple(list_dof_values)
mod.DiscreteField(name='discreteField',
description='',
location=NODES,
fieldType=PRESCRIBEDCONDITION_DOF,
dataWidth=2,
defaultValues=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
data=(('', 2, tuple_ids, tuple_dof_values),))
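# Layout sketch of the data tuple assembled above, for two hypothetical nodes
# with labels 7 and 9: each node label is repeated once per DOF and the flat
# value list alternates (dof, value) pairs, which is what dataWidth=2 refers to,
#
#     tuple_ids        = (7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9)
#     tuple_dof_values = (1.0, U1_7, 2.0, U2_7, ..., 6.0, UR3_9)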
def create_sketch_plane(cc, entity):
"""Creates a sketch plane tangent to the shell surface
Parameters
----------
cc : :class:`.ConeCyl` object
entity : object
Any object with the attribute: ``thetadeg``, usually a
:class:`.Imperfection`.
Returns
-------
plane : :class:`.Plane` object
"""
from abaqus import mdb
from .utils import geom
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
for plane in cc.sketch_planes:
if abs(plane.thetadeg - entity.thetadeg) < TOL:
return plane
x1, y1, z1 = utils.cyl2rec(1.05*cc.r, entity.thetadeg, 0.)
v1 = np.array([x1, y1, z1], dtype=FLOAT)
x2, y2, z2 = utils.cyl2rec(1.05*cc.r2, entity.thetadeg, cc.h)
v2 = np.array([x2, y2, z2], dtype=FLOAT)
v3 = np.cross(v2, v1)
if abs(v3.max()) > abs(v3.min()):
v3 = v3/v3.max() * cc.h/2.
else:
v3 = v3/abs(v3.min()) * cc.h/2.
x3, y3, z3 = v2 + v3
pt = part.DatumPointByCoordinate(coords=(x1, y1, z1))
p1 = part.datums[pt.id]
pt = part.DatumPointByCoordinate(coords=(x2, y2, z2))
p2 = part.datums[pt.id]
pt = part.DatumPointByCoordinate(coords=(x3, y3, z3))
p3 = part.datums[pt.id]
plane = geom.Plane()
plane.p1 = p1
plane.p2 = p2
plane.p3 = p3
plane.part = part
plane.create()
plane.thetadeg = entity.thetadeg
cc.sketch_planes.append(plane)
return plane
def set_colors_ti(cc):
from abaqus import mdb, session
from abaqusConstants import ON
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
viewport = session.viewports[session.currentViewportName]
if viewport.displayedObject is None:
viewport.setValues(displayedObject=part)
cmap = viewport.colorMappings['Set']
viewport.setColor(colorMapping=cmap)
viewport.enableMultipleColors()
viewport.setColor(initialColor='#BDBDBD')
keys = part.sets.keys()
names = [k for k in keys if 'Set_measured_imp_t' in k]
# If there are not enough colors for all
# Copyright (c) 2018 Intel Corporation
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
import argparse
import itertools
from typing import List
from tqdm import tqdm
import math
import toml
from dataset import AudioToTextDataLayer
from helpers import process_evaluation_batch, process_evaluation_epoch, Optimization, add_blank_label, AmpOptimizations, print_dict
from decoders import RNNTGreedyDecoder
from model_rnnt import RNNT
from preprocessing import AudioPreprocessing
from parts.features import audio_from_file
import torch
import random
import numpy as np
import pickle
import time
import torchvision
def parse_args():
parser = argparse.ArgumentParser(description='Jasper')
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--batch_size", default=16, type=int, help='data batch size')
parser.add_argument("--steps", default=None, help='if not specified do evaluation on full dataset. otherwise only evaluates the specified number of iterations for each worker', type=int)
parser.add_argument("--model_toml", type=str, help='relative model configuration path given dataset folder')
parser.add_argument("--dataset_dir", type=str, help='absolute path to dataset folder')
parser.add_argument("--val_manifest", type=str, help='relative path to evaluation dataset manifest file')
parser.add_argument("--ckpt", default=None, type=str, required=True, help='path to model checkpoint')
parser.add_argument("--max_duration", default=None, type=float, help='maximum duration of sequences. if None uses attribute from model configuration file')
parser.add_argument("--pad_to", default=None, type=int, help="default is pad to value as specified in model configurations. if -1 pad to maximum duration. If > 0 pad batch to next multiple of value")
parser.add_argument("--fp16", action='store_true', help='use half precision')
parser.add_argument("--cudnn_benchmark", action='store_true', help="enable cudnn benchmark")
parser.add_argument("--save_prediction", type=str, default=None, help="if specified saves predictions in text form at this location")
parser.add_argument("--logits_save_to", default=None, type=str, help="if specified will save logits to path")
parser.add_argument("--seed", default=42, type=int, help='seed')
parser.add_argument("--wav", type=str, help='absolute path to .wav file (16KHz)')
parser.add_argument("--warm_up", help='warm up steps, will only measure the performance from step=warm_up to step=(steps-warm_up)', type=int, default=0)
parser.add_argument("--print-result", action='store_true', help='print prediction results', default=False)
parser.add_argument("--print_time", action='store_true', help='print encoder decoder time', default=False)
parser.add_argument("--ipex", action='store_true', help='use ipex', default=False)
parser.add_argument("--int8", action='store_true', help='use int8', default=False)
parser.add_argument("--jit", action='store_true', help='use jit', default=False)
parser.add_argument("--mix-precision", action='store_true', help='use bf16', default=False)
parser.add_argument("--profiling", action='store_true', help='do profiling', default=False)
parser.add_argument("--sort_by_duration", action='store_true', help='sort sequence by duration', default=False)
parser.add_argument('--calibration', action='store_true', default=False,
help='doing int8 calibration step')
parser.add_argument('--configure-dir', default='configure.json', type=str, metavar='PATH',
help = 'path to int8 configures, default file name is configure.json')
return parser.parse_args()
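# Example invocation (sketch): the script name, paths and configuration file
# below are placeholders, and only flags defined in parse_args() are used.
#
#   python inference.py --dataset_dir /data/LibriSpeech \
#       --val_manifest dev-clean-wav.json --model_toml configs/rnnt.toml \
#       --ckpt checkpoints/rnnt.pt --batch_size 16 --ipex --mix-precision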
def eval(
data_layer,
audio_processor,
encoderdecoder,
greedy_decoder,
labels,
multi_gpu,
args):
"""performs inference / evaluation
Args:
data_layer: data layer object that holds data loader
audio_processor: data processing module
encoderdecoder: acoustic model
greedy_decoder: greedy decoder
labels: list of labels as output vocabulary
multi_gpu: true if using multiple gpus
args: script input arguments
"""
if args.ipex:
import intel_extension_for_pytorch as ipex
logits_save_to=args.logits_save_to
encoderdecoder.eval()
with torch.no_grad():
_global_var_dict = {
'predictions': [],
'transcripts': [],
'logits' : [],
}
if args.wav:
# TODO unimplemented in ipex
assert False, "wav unsupported in ipex for now"
features, p_length_e = audio_processor(audio_from_file(args.wav))
# torch.cuda.synchronize()
t0 = time.perf_counter()
t_log_probs_e = encoderdecoder(features)
# torch.cuda.synchronize()
t1 = time.perf_counter()
t_predictions_e = greedy_decoder(log_probs=t_log_probs_e)
hypotheses = __ctc_decoder_predictions_tensor(t_predictions_e, labels=labels)
print("INFERENCE TIME\t\t: {} ms".format((t1-t0)*1000.0))
print("TRANSCRIPT\t\t:", hypotheses[0])
return
# Int8 Calibration
if args.ipex and args.int8 and args.calibration:
print("runing int8 calibration step\n")
conf = ipex.AmpConf(torch.int8)
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
t_predictions_e, conf = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
if args.steps is not None and it + 1 >= args.steps:
break
conf.save(args.configure_dir)
# Inference (vanilla cpu, dnnl fp32 or dnnl int8)
else:
if not args.ipex:
if args.warm_up > 0:
print("\nstart warm up, warmp_up steps = ", args.warm_up)
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
conf = None
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
if it + 1 >= args.warm_up:
break
print("\nstart measure performance, measure steps = ", args.steps)
total_time = 0
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
if args.profiling:
# with torch.autograd.profiler.profile(args.profiling) as prof:
with torch.profiler.profile(on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')) as prof:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
else:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
total_time += (t1 - t0)
values_dict = dict(
predictions=[t_predictions_e],
transcript=[t_transcript_e],
transcript_length=[t_transcript_len_e],
)
process_evaluation_batch(values_dict, _global_var_dict, labels=labels)
if args.steps is not None and it + 1 >= args.steps:
break
else:
if args.mix_precision:
with torch.cpu.amp.autocast():
# warm up
if args.warm_up > 0:
print("\nstart warm up, warmp_up steps = ", args.warm_up)
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
conf = None
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
if it + 1 >= args.warm_up:
break
# measure performance
print("\nstart measure performance, measure steps = ", args.steps)
total_time = 0
# with torch.autograd.profiler.profile(args.profiling) as prof:
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
if args.profiling:
# with torch.autograd.profiler.profile(args.profiling) as prof:
with torch.profiler.profile(on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')) as prof:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
else:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
total_time += (t1 - t0)
values_dict = dict(
predictions=[t_predictions_e],
transcript=[t_transcript_e],
transcript_length=[t_transcript_len_e],
)
process_evaluation_batch(values_dict, _global_var_dict, labels=labels)
if args.steps is not None and it + 1 >= args.steps:
break
else:
# warm up
if args.warm_up > 0:
print("\nstart warm up, warmp_up steps = ", args.warm_up)
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
conf = None
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
if it + 1 >= args.warm_up:
break
# measure performance
print("\nstart measure performance, measure steps = ", args.steps)
total_time = 0
# with torch.autograd.profiler.profile(args.profiling) as prof:
for it, data in enumerate(tqdm(data_layer.data_iterator)):
t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = audio_processor(data)
if args.profiling:
# with torch.autograd.profiler.profile(args.profiling) as prof:
with torch.profiler.profile(on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')) as prof:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
else:
conf = None
t0 = time.perf_counter()
t_predictions_e = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, args, conf)
t1 = time.perf_counter()
total_time += (t1 - t0)
values_dict = dict(
predictions=[t_predictions_e],
transcript=[t_transcript_e],
transcript_length=[t_transcript_len_e],
)
process_evaluation_batch(values_dict, _global_var_dict, labels=labels)
if args.steps is not None and it + 1 >= args.steps:
break
if args.print_result:
hypotheses = _global_var_dict['predictions']
references = _global_var_dict['transcripts']
nb = len(hypotheses)
print("print %d sample results: " % (min(len(hypotheses), nb)))
for i, item in enumerate(hypotheses):
print("hyp: ", hypotheses[i])
print("ref: ", references[i])
print()
if i > nb:
break
if args.profiling:
# print(prof.key_averages().table(sort_by="cpu_time_total"))
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
wer, _ = process_evaluation_epoch(_global_var_dict)
if (not multi_gpu or (multi_gpu and torch.distributed.get_rank() == 0)):
print("\n=========================>>>>>>")
print("Evaluation WER: {0}".format(wer))
print("Accuracy: {:.15f} ".format(1 - wer))
if args.save_prediction is not None:
with open(args.save_prediction, 'w') as fp:
fp.write('\n'.join(_global_var_dict['predictions']))
if logits_save_to is not None:
logits = []
for batch in _global_var_dict["logits"]:
for i in range(batch.shape[0]):
logits.append(batch[i].cpu().numpy())
with open(logits_save_to, 'wb') as f:
pickle.dump(logits, f, protocol=pickle.HIGHEST_PROTOCOL)
if args.steps:
if args.steps * args.batch_size > len(data_layer):
total_samples = len(data_layer)
else:
total_samples = args.steps * args.batch_size
else:
total_samples = len(data_layer)
print("total samples tested: ", total_samples)
print("total time (encoder + decoder, excluded audio processing): ", total_time, "s")
print("dataset size: ", len(data_layer))
perf = total_samples / total_time
print("Throughput: {:.3f} fps".format(perf))
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = args.cudnn_benchmark
multi_gpu = args.local_rank is not None
if multi_gpu:
print("DISTRIBUTED with ", torch.distributed.get_world_size())
if args.fp16:
optim_level = Optimization.mxprO3
else:
optim_level = Optimization.mxprO0
model_definition = toml.load(args.model_toml)
dataset_vocab = model_definition['labels']['labels']
ctc_vocab = add_blank_label(dataset_vocab)
val_manifest = args.val_manifest
featurizer_config = model_definition['input_eval']
featurizer_config["optimization_level"] = optim_level
if args.max_duration is not None:
featurizer_config['max_duration'] = args.max_duration
if args.pad_to is not None:
featurizer_config['pad_to'] = args.pad_to if args.pad_to >= 0 else "max"
print('model_config')
print_dict(model_definition)
print('feature_config')
print_dict(featurizer_config)
data_layer = None
if args.wav is None:
data_layer = AudioToTextDataLayer(
dataset_dir=args.dataset_dir,
featurizer_config=featurizer_config,
manifest_filepath=val_manifest,
# sampler='bucket',
sort_by_duration=args.sort_by_duration,
labels=dataset_vocab,
batch_size=args.batch_size,
pad_to_max=featurizer_config['pad_to'] == "max",
shuffle=False,
multi_gpu=multi_gpu)
audio_preprocessor = AudioPreprocessing(**featurizer_config)
#encoderdecoder = JasperEncoderDecoder(jasper_model_definition=jasper_model_definition, feat_in=1024, num_classes=len(ctc_vocab))
model = RNNT(
feature_config=featurizer_config,
rnnt=model_definition['rnnt'],
num_classes=len(ctc_vocab)
)
if args.ckpt is not None:
print("loading model from ", args.ckpt)
checkpoint = torch.load(args.ckpt, map_location="cpu")
model.load_state_dict(checkpoint['state_dict'], strict=False)
if args.ipex:
import intel_extension_for_pytorch as ipex
model.joint_net.eval()
data_type = torch.bfloat16 if args.mix_precision else torch.float32
model.joint_net = ipex.optimize(model.joint_net, dtype=data_type, auto_kernel_selection=True)
model.prediction["embed"] = model.prediction["embed"].to(data_type)
if args.jit:
print("running jit | |
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateCaptureRequest(AbstractModel):
"""CreateCapture请求参数结构体
"""
def __init__(self):
r"""
:param Data: 原始抓拍报文
:type Data: str
"""
self.Data = None
def _deserialize(self, params):
self.Data = params.get("Data")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateCaptureResponse(AbstractModel):
"""CreateCapture返回参数结构体
"""
def __init__(self):
r"""
:param RspData: 原始应答报文
注意:此字段可能返回 null,表示取不到有效值。
:type RspData: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RspData = None
self.RequestId = None
def _deserialize(self, params):
self.RspData = params.get("RspData")
self.RequestId = params.get("RequestId")
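# Illustrative use of the generated models (the payload below is made-up
# sample data, not a real API response):
#
#     resp = CreateCaptureResponse()
#     resp._deserialize({"RspData": "...", "RequestId": "req-1a2b3c"})
#     print(resp.RequestId)
#
# Keys present in a request payload but not defined on the model trigger the
# unused-fields warning emitted by the generated request _deserialize methods.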
class CreateMultiBizAlertRequest(AbstractModel):
"""CreateMultiBizAlert请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param ZoneId: 点位ID
:type ZoneId: int
:param CameraId: 摄像头ID
:type CameraId: int
:param CaptureTime: 时间戳,毫秒
:type CaptureTime: int
:param State: 状态:
1: 侵占
2: 消失
3: 即侵占又消失
:type State: int
:param Image: 图片base64字符串
:type Image: str
:param Warnings: 告警列表
:type Warnings: list of MultiBizWarning
"""
self.GroupCode = None
self.MallId = None
self.ZoneId = None
self.CameraId = None
self.CaptureTime = None
self.State = None
self.Image = None
self.Warnings = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
self.ZoneId = params.get("ZoneId")
self.CameraId = params.get("CameraId")
self.CaptureTime = params.get("CaptureTime")
self.State = params.get("State")
self.Image = params.get("Image")
if params.get("Warnings") is not None:
self.Warnings = []
for item in params.get("Warnings"):
obj = MultiBizWarning()
obj._deserialize(item)
self.Warnings.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateMultiBizAlertResponse(AbstractModel):
"""CreateMultiBizAlert返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateProgramStateRequest(AbstractModel):
"""CreateProgramState请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param ProgramStateItems: 进程监控信息列表
:type ProgramStateItems: list of ProgramStateItem
:param MallId: 商场ID
:type MallId: int
"""
self.GroupCode = None
self.ProgramStateItems = None
self.MallId = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
if params.get("ProgramStateItems") is not None:
self.ProgramStateItems = []
for item in params.get("ProgramStateItems"):
obj = ProgramStateItem()
obj._deserialize(item)
self.ProgramStateItems.append(obj)
self.MallId = params.get("MallId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateProgramStateResponse(AbstractModel):
"""CreateProgramState返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateServerStateRequest(AbstractModel):
"""CreateServerState请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param ServerStateItems: 服务器监控信息列表
:type ServerStateItems: list of ServerStateItem
:param MallId: 商场ID
:type MallId: int
:param ReportTime: 服务器监控信息上报时间戳,单位毫秒
:type ReportTime: int
"""
self.GroupCode = None
self.ServerStateItems = None
self.MallId = None
self.ReportTime = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
if params.get("ServerStateItems") is not None:
self.ServerStateItems = []
for item in params.get("ServerStateItems"):
obj = ServerStateItem()
obj._deserialize(item)
self.ServerStateItems.append(obj)
self.MallId = params.get("MallId")
self.ReportTime = params.get("ReportTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateServerStateResponse(AbstractModel):
"""CreateServerState返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteMultiBizAlertRequest(AbstractModel):
"""DeleteMultiBizAlert请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param ZoneId: 点位ID
:type ZoneId: int
:param CameraId: 摄像头ID
:type CameraId: int
:param ActionType: 消警动作:
1: 误报
2: 正报合规
3: 正报不合规,整改完成
:type ActionType: int
:param Image: 图片base64字符串
:type Image: str
"""
self.GroupCode = None
self.MallId = None
self.ZoneId = None
self.CameraId = None
self.ActionType = None
self.Image = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
self.ZoneId = params.get("ZoneId")
self.CameraId = params.get("CameraId")
self.ActionType = params.get("ActionType")
self.Image = params.get("Image")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteMultiBizAlertResponse(AbstractModel):
"""DeleteMultiBizAlert返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteTaskRequest(AbstractModel):
"""DeleteTask请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param TaskId: 任务ID
:type TaskId: int
"""
self.GroupCode = None
self.MallId = None
self.TaskId = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
self.TaskId = params.get("TaskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteTaskResponse(AbstractModel):
"""DeleteTask返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeCamerasRequest(AbstractModel):
"""DescribeCameras请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
"""
self.GroupCode = None
self.MallId = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeCamerasResponse(AbstractModel):
"""DescribeCameras返回参数结构体
"""
def __init__(self):
r"""
:param Cameras: 摄像头列表
:type Cameras: list of CameraZones
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Cameras = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Cameras") is not None:
self.Cameras = []
for item in params.get("Cameras"):
obj = CameraZones()
obj._deserialize(item)
self.Cameras.append(obj)
self.RequestId = params.get("RequestId")
class DescribeConfigRequest(AbstractModel):
"""DescribeConfig请求参数结构体
"""
def __init__(self):
r"""
:param SessionId: 会话ID
:type SessionId: str
:param CameraSign: 摄像头签名
:type CameraSign: str
:param CameraAppId: 摄像头app id
:type CameraAppId: str
:param CameraTimestamp: 摄像头时间戳,毫秒
:type CameraTimestamp: int
:param ServerMac: MAC地址,字母大写
:type ServerMac: str
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
"""
self.SessionId = None
self.CameraSign = None
self.CameraAppId = None
self.CameraTimestamp = None
self.ServerMac = None
self.GroupCode = None
self.MallId = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
self.CameraSign = params.get("CameraSign")
self.CameraAppId = params.get("CameraAppId")
self.CameraTimestamp = params.get("CameraTimestamp")
self.ServerMac = params.get("ServerMac")
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeConfigResponse(AbstractModel):
"""DescribeConfig返回参数结构体
"""
def __init__(self):
r"""
:param SessionId: 会话ID
:type SessionId: str
:param Version: 配置版本号
:type Version: int
:param Cameras: 摄像头列表
:type Cameras: list of CameraConfig
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SessionId = None
self.Version = None
self.Cameras = None
self.RequestId = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
self.Version = params.get("Version")
if params.get("Cameras") is not None:
self.Cameras = []
for item in params.get("Cameras"):
obj = CameraConfig()
obj._deserialize(item)
self.Cameras.append(obj)
self.RequestId = params.get("RequestId")
class DescribeImageRequest(AbstractModel):
"""DescribeImage请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param CameraId: 摄像头ID
:type CameraId: int
"""
self.GroupCode = None
self.MallId = None
self.CameraId = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
self.CameraId = params.get("CameraId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeImageResponse(AbstractModel):
"""DescribeImage返回参数结构体
"""
def __init__(self):
r"""
:param ImageUrl: cos 临时 url,异步上传图片,client需要轮询
:type ImageUrl: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ImageUrl = None
self.RequestId = None
def _deserialize(self, params):
self.ImageUrl = params.get("ImageUrl")
self.RequestId = params.get("RequestId")
class DescribeMultiBizBaseImageRequest(AbstractModel):
"""DescribeMultiBizBaseImage请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param CameraId: 摄像头ID
:type CameraId: int
:param ZoneId: 点位ID
:type ZoneId: int
"""
self.GroupCode = None
self.MallId = None
self.CameraId = None
self.ZoneId = None
def _deserialize(self, params):
self.GroupCode = params.get("GroupCode")
self.MallId = params.get("MallId")
self.CameraId = params.get("CameraId")
self.ZoneId = params.get("ZoneId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeMultiBizBaseImageResponse(AbstractModel):
"""DescribeMultiBizBaseImage返回参数结构体
"""
def __init__(self):
r"""
:param ImageUrl: cos 临时 url
:type ImageUrl: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ImageUrl = None
self.RequestId = None
def _deserialize(self, params):
self.ImageUrl = params.get("ImageUrl")
self.RequestId = params.get("RequestId")
class DescribeTasksRequest(AbstractModel):
"""DescribeTasks请求参数结构体
"""
def __init__(self):
r"""
:param GroupCode: 集团编码
:type GroupCode: str
:param MallId: 广场ID
:type MallId: int
:param TaskType: 任务类型:
1: 底图拉取
:type TaskType: int
"""
# Handle inputs and derive some useful parameters from them
log10T_in = np.log10(temperature_K)
T_in_keV = temperature_K / 11604518 # Convert temperature from K to keV.
# Get energy bins centers based on geometric mean.
energy_gmean_keV = stats.gmean(np.vstack((energy_edges_keV[:-1], energy_edges_keV[1:])))
# Mask Unwanted Abundances
abundance_mask = np.zeros(len(abundances))
abundance_mask[CONTINUUM_GRID["abundance index"]] = 1.
abundances *= abundance_mask
##### Calculate Continuum Intensity Summed Over All Elements
##### For Each Temperature as a function of Energy/Wavelength ######
# Before looping over temperatures, let's perform the calculations that are
# used over again in the for loop.
# 1. If many temperatures are input, convolve intensity grid with abundances for all
# temperatures here. If only a few temperatures are input, do this step only
# when looping over input temperatures. This minimizes computation.
n_tband = 3
n_t_grid = len(CONTINUUM_GRID["log10T"])
n_temperature_K = len(temperature_K)
n_thresh = n_temperature_K * n_tband
if n_thresh >= n_t_grid:
intensity_per_em_at_source_allT = np.zeros(CONTINUUM_GRID["intensity"].shape[1:])
for i in range(0, n_t_grid):
intensity_per_em_at_source_allT[i] = np.matmul(
abundances[CONTINUUM_GRID["sorted abundance index"]],
CONTINUUM_GRID["intensity"][:, i])
# 2. Add dummy axes to energy and temperature grid arrays for later vectorized operations.
repeat_E_grid = CONTINUUM_GRID["E_keV"][np.newaxis, :]
repeat_T_grid = CONTINUUM_GRID["T_keV"][:, np.newaxis]
dE_grid_keV = CONTINUUM_GRID["energy bin widths keV"][np.newaxis, :]
# 3. Identify the indices of the temperature bins containing each input temperature and
# the bins above and below them. For each input temperature, these three bins will
# act as a temperature band over which we'll interpolate the continuum emission.
selt = np.digitize(log10T_in, CONTINUUM_GRID["log10T"]) - 1
tband_idx = selt[:, np.newaxis] + np.arange(n_tband)[np.newaxis, :]
# Finally, loop over input temperatures and calculate continuum emission for each.
flux = np.zeros((n_temperature_K, len(energy_gmean_keV)))
for j, logt in enumerate(log10T_in):
# If not already done above, calculate continuum intensity summed over
# all elements as a function of energy/wavelength over the temperature band.
if n_thresh < n_t_grid:
element_intensities_per_em_at_source = CONTINUUM_GRID["intensity"][:, tband_idx[j]]
intensity_per_em_at_source = np.zeros(element_intensities_per_em_at_source.shape[1:])
for i in range(0, n_tband):
intensity_per_em_at_source[i] = np.matmul(
abundances[CONTINUUM_GRID["sorted abundance index"]],
element_intensities_per_em_at_source[:, i])
else:
intensity_per_em_at_source = intensity_per_em_at_source_allT[tband_idx[j]]
##### Calculate Continuum Intensity at Input Temperature ######
##### Do this by interpolating the normalized temperature component
##### of the intensity grid to input temperature(s) and then rescaling.
# Calculate normalized temperature component of the intensity grid.
exponent = (repeat_E_grid / repeat_T_grid[tband_idx[j]])
exponential = np.exp(np.clip(exponent, None, 80))
gaunt = intensity_per_em_at_source / dE_grid_keV * exponential
# Interpolate the normalized temperature component of the intensity grid to the
# input temperature.
flux[j] = _interpolate_continuum_intensities(
gaunt, CONTINUUM_GRID["log10T"][tband_idx[j]], CONTINUUM_GRID["E_keV"], energy_gmean_keV, logt)
# Rescale the interpolated intensity.
flux = flux * np.exp(-(energy_gmean_keV[np.newaxis, :] / T_in_keV[:, np.newaxis]))
# Put intensity into correct units.
return flux * CONTINUUM_GRID["intensity unit"]
def _line_emission(energy_edges_keV, temperature_K, abundances):
"""
Calculates emission-measure-normalized X-ray line spectrum at the source.
Output must be multiplied by emission measure and divided by 4*pi*observer_distance**2
to get physical values.
Parameters
----------
energy_edges_keV: 1-D array-like
Boundaries of contiguous spectral bins in units of keV.
temperature_K: 1-D array-like
The temperature(s) of the plasma in units of K. Must not be a scalar.
abundances: 1-D `numpy.array` of the same length as DEFAULT_ABUNDANCES.
The abundances of all the elements.
"""
n_energy_bins = len(energy_edges_keV)-1
n_temperatures = len(temperature_K)
# Find indices of lines within user input energy range.
energy_roi_indices = np.logical_and(LINE_GRID["line peaks keV"] >= energy_edges_keV.min(),
LINE_GRID["line peaks keV"] <= energy_edges_keV.max())
n_energy_roi_indices = energy_roi_indices.sum()
# If there are emission lines within the energy range of interest, compile spectrum.
if n_energy_roi_indices > 0:
# Mask Unwanted Abundances
abundance_mask = np.zeros(len(abundances))
abundance_mask[LINE_GRID["abundance index"]] = 1.
abundances *= abundance_mask
# Extract only the lines within the energy range of interest.
line_abundances = abundances[LINE_GRID["line atomic numbers"][energy_roi_indices] - 2]
# The magic number of -2 above is comprised of:
# a -1 to account for the fact that the index is the atomic number - 1, and
# another -1 because the line atomic numbers are offset from the abundance index by 1.
##### Calculate Line Intensities within the Input Energy Range #####
# Calculate abundance-normalized intensity of each line in energy range of
# interest as a function of energy and temperature.
line_intensity_grid = LINE_GRID["intensity"][energy_roi_indices]
line_intensities = _calculate_abundance_normalized_line_intensities(
np.log10(temperature_K), line_intensity_grid, LINE_GRID["log10T"])
# Scale line intensities by abundances to get true line intensities.
line_intensities *= line_abundances
##### Weight Line Emission So Peak Energies Maintained Within Input Energy Binning #####
# Split emission of each line between nearest neighboring spectral bins in
# proportion such that the line centroids appear at the correct energy
# when averaged over neighboring bins.
# This has the effect of appearing to double the number of lines as regards
# the dimensionality of the line_intensities array.
line_peaks_keV = LINE_GRID["line peaks keV"][energy_roi_indices]
split_line_intensities, line_spectrum_bins = _weight_emission_bins_to_line_centroid(
line_peaks_keV, energy_edges_keV, line_intensities)
#### Calculate Flux #####
# Use binned_statistic to determine which spectral bins contain
# components of line emission and sum over those line components
# to get the total emission in each spectral bin.
flux = stats.binned_statistic(line_spectrum_bins, split_line_intensities,
"sum", n_energy_bins, (0, n_energy_bins-1)).statistic
else:
flux = np.zeros((n_temperatures, n_energy_bins))
# Scale flux by spectral bin width and put into correct units.
# (Emission measure and observer distance are applied by the caller.)
energy_bin_widths = (energy_edges_keV[1:] - energy_edges_keV[:-1]) * u.keV
flux = (flux * LINE_GRID["intensity unit"] / energy_bin_widths)
return flux
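# The centroid-preserving split referenced above is implemented elsewhere by
# _weight_emission_bins_to_line_centroid (not shown here). As an illustration
# only -- a simplified sketch of the idea, not the function used above -- a
# single line whose peak energy falls between two bin centers can be split so
# that the intensity-weighted mean of the two bin centers recovers the true
# peak energy:
def _example_split_line_between_bins(line_energy_keV, bin_center_low_keV,
                                     bin_center_high_keV, line_intensity):
    """Return (intensity for lower bin, intensity for upper bin), preserving the centroid."""
    w_low = ((bin_center_high_keV - line_energy_keV)
             / (bin_center_high_keV - bin_center_low_keV))
    # w_low * low + (1 - w_low) * high == line_energy_keV by construction.
    return line_intensity * w_low, line_intensity * (1 - w_low)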
def _interpolate_continuum_intensities(data_grid, log10T_grid, energy_grid_keV, energy_keV, log10T):
# Determine valid range based on limits of intensity grid's spectral extent
# and the normalized temperature component of intensity.
n_tband = len(log10T_grid)
vrange, = np.where(data_grid[0] > 0)
for i in range(1, n_tband):
vrange_i, = np.where(data_grid[i] > 0)
if len(vrange) < len(vrange_i):
vrange = vrange_i
data_grid = data_grid[:, vrange]
energy_grid_keV = energy_grid_keV[vrange]
energy_idx, = np.where(energy_keV < energy_grid_keV.max())
# Interpolate temperature component of intensity and derive continuum intensity.
flux = np.zeros(energy_keV.shape)
if len(energy_idx) > 0:
energy_keV = energy_keV[energy_idx]
cont0 = interpolate.interp1d(energy_grid_keV, data_grid[0])(energy_keV)
cont1 = interpolate.interp1d(energy_grid_keV, data_grid[1])(energy_keV)
cont2 = interpolate.interp1d(energy_grid_keV, data_grid[2])(energy_keV)
# Calculate the continuum intensity as the weighted geometric mean
# of the interpolated values across the temperature band of the
# temperature component of intensity.
logelog10T = np.log(log10T)
x0, x1, x2 = np.log(log10T_grid)
flux[energy_idx] = np.exp(
np.log(cont0) * (logelog10T - x1) * (logelog10T - x2) / ((x0 - x1) * (x0 - x2)) +
np.log(cont1) * (logelog10T - x0) * (logelog10T - x2) / ((x1 - x0) * (x1 - x2)) +
np.log(cont2) * (logelog10T - x0) * (logelog10T - x1) / ((x2 - x0) * (x2 - x1)) )
return flux
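# For reference, the three-term expression above is quadratic Lagrange
# interpolation applied to the log of the intensity, with t = log(log10T) and
# x_k = log(log10T_grid[k]):
#   log f(t) = sum_k log(c_k) * prod_{j != k} (t - x_j) / (x_k - x_j),   k, j in {0, 1, 2}
# Exponentiating gives the weighted geometric mean of cont0, cont1 and cont2
# that is assigned to flux[energy_idx].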
def _calculate_abundance_normalized_line_intensities(logT, data_grid, line_logT_bins):
"""
Calculates normalized line intensities at a given temperature using interpolation.
Given a 2D array, say of line intensities, as a function of two parameters,
say energy and log10(temperature), and a log10(temperature) value,
interpolate the line intensities over the temperature axis and
extract the intensities as a function of energy at the input temperature.
Note that strictly speaking the code is agnostic to the physical properties
of the axes and values in the array. All that matters is that data_grid
is interpolated over the 2nd axis and the input value also corresponds to
somewhere along that same axis. That value does not have to exactly correspond to
the value of a column in the grid. This is accounted for by the interpolation.
Parameters
----------
logT: 1D `numpy.ndarray` of `float`.
The input value along the 2nd axis at which the line intensities are desired.
If multiple values given, the calculation is done for each and the
output array has an extra dimension.
data_grid: 2D `numpy.ndarray`
Some property, e.g. line intensity, as function two parameters,
e.g. energy (0th dimension) and log10(temperature in kelvin) (1st dimension).
line_logT_bins: 1D `numpy.ndarray`
The values of the 2nd axis over which data_grid is defined,
e.g. the log10(temperature in kelvin) bins of the grid.
Returns
-------
interpolated_data: 1D or 2D `numpy.ndarray`
The line intensities as a function of energy (1st dimension) at
each of the input temperatures (0th dimension).
Note that unlike the input line intensity table, energy here is the 0th axis.
If there is only one input temperature, interpolated_data is 1D.
"""
# Ensure input temperatures are in an array for consistent handling.
pbParam.velocityRatio = velocityRatio
pbParam.accelerationRatio = accelerationRatio
queuedCmdIndex = c_uint64(0)
# Special handling for the sliding rail
if slaveDevType == DevType.Magician:
while(True):
result = api.SetPTPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
while(True):
result = api.SetPTPCommonParams(c_int(masterId), c_int(-1), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
while(True):
result = api.SetPTPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
else:
while(True):
result = api.SetPTPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetPTPCommonParams(api):
pbParam = PTPCommonParams()
while(True):
result = api.GetPTPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam ))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.velocityRatio, pbParam.accelerationRatio]
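# The busy-wait retry pattern above is repeated for nearly every call in this
# module. A minimal sketch of a shared helper (hypothetical -- not part of the
# Dobot API; it relies only on dSleep and DobotCommunicate_NoError, both
# already used in this file) could look like:
def _retry_until_no_error(call, *args):
    """Keep issuing call(*args) until it returns DobotCommunicate_NoError."""
    while True:
        result = call(*args)
        if result != DobotCommunicate.DobotCommunicate_NoError:
            dSleep(5)
            continue
        break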
def SetPTPCmd(api, ptpMode, x, y, z, rHead, isQueued=0):
cmd = PTPCmd()
cmd.ptpMode=ptpMode
cmd.x=x
cmd.y=y
cmd.z=z
cmd.rHead=rHead
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetPTPCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
return [queuedCmdIndex.value]
def SetPTPWithLCmd(api, ptpMode, x, y, z, rHead, l, isQueued=0):
cmd = PTPWithLCmd()
cmd.ptpMode=ptpMode
cmd.x=x
cmd.y=y
cmd.z=z
cmd.rHead=rHead
cmd.l = l
queuedCmdIndex = c_uint64(0)
# Special handling for the sliding rail
if slaveDevType == DevType.Magician:
while(True):
result = api.SetPTPWithLCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
elif masterDevType == DevType.Conntroller and slaveDevType == DevType.MagicianLite:
cmd1 = PTPCmd()
cmd1.ptpMode = ptpMode
cmd1.x = x
cmd1.y = y
cmd1.z = z
cmd1.rHead = rHead
queuedCmdIndex1 = c_uint64(0)
while(True):
result = api.SetPTPWithLCmd(c_int(masterId), c_int(-1), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
while(True):
result = api.SetPTPCmd(c_int(masterId), c_int(slaveId), byref(cmd1), isQueued, byref(queuedCmdIndex1))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
else:
while(True):
result = api.SetPTPWithLCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
return [queuedCmdIndex.value]
def SetCPRHoldEnable(api, isEnable):
while(True):
result = api.SetCPRHoldEnable(c_int(masterId), c_int(slaveId), c_bool(isEnable))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
def GetCPRHoldEnable(api):
isEnable = c_bool(False)
while(True):
result = api.GetCPRHoldEnable(c_int(masterId), c_int(slaveId), byref(isEnable))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [isEnable.value]
def SetCPParams(api, planAcc, juncitionVel, acc, realTimeTrack = 0, isQueued=0):
parm = CPParams()
parm.planAcc = planAcc
parm.juncitionVel = juncitionVel
parm.acc = acc
parm.realTimeTrack = realTimeTrack
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCPParams(c_int(masterId), c_int(slaveId), byref(parm), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetCPParams(api):
parm = CPParams()
while(True):
result = api.GetCPParams(c_int(masterId), c_int(slaveId), byref(parm))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [parm.planAcc, parm.juncitionVel, parm.acc, parm.realTimeTrack]
def SetCPCmd(api, cpMode, x, y, z, velocity, isQueued=0):
cmd = CPCmd()
cmd.cpMode = cpMode
cmd.x = x
cmd.y = y
cmd.z = z
cmd.velocity = velocity
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCPCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
return [queuedCmdIndex.value]
def SetCP2Cmd(api, cpMode, x, y, z, isQueued=0):
cmd = CP2Cmd()
cmd.cpMode = cpMode
cmd.x = x
cmd.y = y
cmd.z = z
cmd.velocity = c_float(100)
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCP2Cmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
return [queuedCmdIndex.value]
def SetCPCommonParams(api, velocityRatio, accelerationRatio, isQueued=0):
pbParam = CPCommonParams()
pbParam.velocityRatio = velocityRatio
pbParam.accelerationRatio = accelerationRatio
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetCPCommonParams(api):
pbParam = CPCommonParams()
while(True):
result = api.GetCPCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam ))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.velocityRatio, pbParam.accelerationRatio]
def SetCPLECmd(api, cpMode, x, y, z, power, isQueued=0):
cmd = CPCmd()
cmd.cpMode = cpMode
cmd.x = x
cmd.y = y
cmd.z = z
cmd.velocity = power
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCPLECmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(2)
continue
break
return [queuedCmdIndex.value]
def SetARCParams(api, xyzVelocity, rVelocity, xyzAcceleration, rAcceleration, isQueued=0):
param = ARCParams()
param.xyzVelocity = xyzVelocity
param.rVelocity = rVelocity
param.xyzAcceleration = xyzAcceleration
param.rAcceleration = rAcceleration
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetARCParams(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetARCParams(api):
parm = ARCParams()
while(True):
result = api.GetARCParams(c_int(masterId), c_int(slaveId), byref(parm))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [parm.xyzVelocity, parm.rVelocity, parm.xyzAcceleration, parm.rAcceleration]
def SetARCCmd(api, cirPoint, toPoint, isQueued=0):
cmd = ARCCmd()
cmd.cirPoint.x = cirPoint[0];cmd.cirPoint.y = cirPoint[1];cmd.cirPoint.z = cirPoint[2];cmd.cirPoint.rHead = cirPoint[3]
cmd.toPoint.x = toPoint[0];cmd.toPoint.y = toPoint[1];cmd.toPoint.z = toPoint[2];cmd.toPoint.rHead = toPoint[3]
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetARCCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def SetCircleCmd(api, cirPoint, toPoint, isQueued=0):
cmd = CircleCmd()
cmd.cirPoint.x = cirPoint[0];cmd.cirPoint.y = cirPoint[1];cmd.cirPoint.z = cirPoint[2];cmd.cirPoint.rHead = cirPoint[3]
cmd.toPoint.x = toPoint[0];cmd.toPoint.y = toPoint[1];cmd.toPoint.z = toPoint[2];cmd.toPoint.rHead = toPoint[3]
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetCircleCmd(c_int(masterId), c_int(slaveId), byref(cmd), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def SetARCCommonParams(api, velocityRatio, accelerationRatio, isQueued=0):
pbParam = ARCCommonParams()
pbParam.velocityRatio = velocityRatio
pbParam.accelerationRatio = accelerationRatio
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetARCCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetARCCommonParams(api):
pbParam = ARCCommonParams()
while(True):
result = api.GetARCCommonParams(c_int(masterId), c_int(slaveId), byref(pbParam ))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [pbParam.velocityRatio, pbParam.accelerationRatio]
def SetWAITCmd(api, waitTime, isQueued=0):
param = WAITCmd()
param.waitTime = int(waitTime)
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetWAITCmd(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def SetTRIGCmd(api, address, mode, condition, threshold, isQueued=0):
param = TRIGCmd()
param.address = address
param.mode = mode
param.condition = condition
param.threshold = threshold
queuedCmdIndex = c_uint64(0)
while(True):
result = api.SetTRIGCmd(c_int(masterId), c_int(slaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def SetIOMultiplexing(api, address, multiplex, isQueued=0):
param = IOMultiplexing()
param.address = address
param.multiplex = multiplex
queuedCmdIndex = c_uint64(0)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.SetIOMultiplexing(c_int(masterId), c_int(tempSlaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetIOMultiplexing(api, addr):
param = IOMultiplexing()
param.address = addr
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.GetIOMultiplexing(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.multiplex]
def SetIODO(api, address, level, isQueued=0):
param = IODO()
param.address = address
param.level = level
queuedCmdIndex = c_uint64(0)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.SetIODO(c_int(masterId), c_int(tempSlaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetIODO(api, addr):
param = IODO()
param.address = addr
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.GetIODO(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.level]
def SetIOPWM(api, address, frequency, dutyCycle, isQueued=0):
param = IOPWM()
param.address = address
param.frequency = frequency
param.dutyCycle = dutyCycle
queuedCmdIndex = c_uint64(0)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.SetIOPWM(c_int(masterId), c_int(tempSlaveId), byref(param), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
def GetIOPWM(api, addr):
param = IOPWM()
param.address = addr
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.GetIOPWM(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.frequency, param.dutyCycle]
def GetIODI(api, addr):
param = IODI()
param.address = addr
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.GetIODI(c_int(masterId), c_int(tempSlaveId), byref(param))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [param.level]
def SetEMotor(api, index, isEnabled, speed, isQueued=0):
emotor = EMotor()
emotor.index = index
emotor.isEnabled = isEnabled
emotor.speed = speed
queuedCmdIndex = c_uint64(0)
if slaveDevType == DevType.Magician:
tempSlaveId = slaveId
elif masterDevType == DevType.Conntroller and (slaveDevType == DevType.MagicianLite or slaveDevType == DevType.Idle):
tempSlaveId = -1
else:
tempSlaveId = slaveId
while(True):
result = api.SetEMotor(c_int(masterId), c_int(tempSlaveId), byref(emotor), isQueued, byref(queuedCmdIndex))
if result != DobotCommunicate.DobotCommunicate_NoError:
dSleep(5)
continue
break
return [queuedCmdIndex.value]
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from collections import defaultdict
from time import time
from kafka import KafkaAdminClient, KafkaClient
from kafka import errors as kafka_errors
from kafka.protocol.offset import OffsetRequest, OffsetResetStrategy, OffsetResponse
from kafka.structs import TopicPartition
from six import string_types
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from .constants import BROKER_REQUESTS_BATCH_SIZE, CONTEXT_UPPER_BOUND, DEFAULT_KAFKA_TIMEOUT, KAFKA_INTERNAL_TOPICS
from .legacy_0_10_2 import LegacyKafkaCheck_0_10_2
class KafkaCheck(AgentCheck):
"""
Check the offsets and lag of Kafka consumers. This check also returns broker highwater offsets.
For details about the supported options, see the associated `conf.yaml.example`.
"""
__NAMESPACE__ = 'kafka'
def __new__(cls, name, init_config, instances):
"""
Determine whether to use old legacy KafkaClient implementation or the new KafkaAdminClient implementation.
The legacy version of this check uses the KafkaClient and handrolls things like looking up the GroupCoordinator,
crafting the offset requests, handling errors, etc.
The new implementation uses the KafkaAdminClient which lets us offload most of the Kafka-specific bits onto the
kafka-python library, which is used by many other tools and reduces our maintenance burden.
Unfortunately, the KafkaAdminClient requires brokers >= 0.10.0, so we split the check into legacy and new code.
Furthermore, we took the opportunity to simplify the new code by dropping support for:
1) Zookeeper-based offsets. These have been deprecated since Kafka 0.9.
2) Kafka brokers < 0.10.2. It is impossible to support monitor_unlisted_consumer_groups on these older brokers
because they do not provide a way to determine the mapping of consumer groups to topics. For details, see
KIP-88.
To clarify: This check still allows fetching offsets from zookeeper/older kafka brokers, it just uses the
legacy code path.
"""
instance = instances[0] # Tech debt from Agent v5. In Agent v6, instances always contains only one instance
if instance.get('zk_connect_str') is None:
# bury the kafka version check under the zookeeper check because if zookeeper then we should immediately use
# the legacy code path regardless of kafka version
kafka_version = cls._determine_kafka_version(init_config, instance)
if kafka_version >= (0, 10, 2):
return super(KafkaCheck, cls).__new__(cls)
return LegacyKafkaCheck_0_10_2(name, init_config, instances)
def __init__(self, name, init_config, instances):
super(KafkaCheck, self).__init__(name, init_config, instances)
self._context_limit = int(init_config.get('max_partition_contexts', CONTEXT_UPPER_BOUND))
self._custom_tags = self.instance.get('tags', [])
self._monitor_unlisted_consumer_groups = is_affirmative(
self.instance.get('monitor_unlisted_consumer_groups', False)
)
self._monitor_all_broker_highwatermarks = is_affirmative(
self.instance.get('monitor_all_broker_highwatermarks', False)
)
self._consumer_groups = self.instance.get('consumer_groups', {})
self._broker_requests_batch_size = self.instance.get('broker_requests_batch_size', BROKER_REQUESTS_BATCH_SIZE)
self._kafka_client = None
@property
def kafka_client(self):
if self._kafka_client is None:
# if `kafka_client_api_version` is not set, then kafka-python automatically probes the cluster for
# broker version during the bootstrapping process. Note that this returns the first version found, so in
# a mixed-version cluster this will be a non-deterministic result.
kafka_version = self.instance.get('kafka_client_api_version')
if isinstance(kafka_version, str):
kafka_version = tuple(map(int, kafka_version.split(".")))
self._kafka_client = self._create_kafka_admin_client(api_version=kafka_version)
return self._kafka_client
def check(self, instance):
"""The main entrypoint of the check."""
self._consumer_offsets = {} # Expected format: {(consumer_group, topic, partition): offset}
self._highwater_offsets = {} # Expected format: {(topic, partition): offset}
# For calculating consumer lag, we have to fetch both the consumer offset and the broker highwater offset.
# There's a potential race condition because whichever one we check first may be outdated by the time we check
# the other. Better to check consumer offsets before checking broker offsets because worst case is that
# overstates consumer lag a little. Doing it the other way can understate consumer lag to the point of having
# negative consumer lag, which just creates confusion because it's theoretically impossible.
# Fetch Kafka consumer offsets
try:
self._get_consumer_offsets()
except Exception:
self.log.exception("There was a problem collecting consumer offsets from Kafka.")
# don't raise because we might get valid broker offsets
# Fetch the broker highwater offsets
try:
if len(self._consumer_offsets) < self._context_limit:
self._get_highwater_offsets()
else:
self.warning("Context limit reached. Skipping highwater offset collection.")
except Exception:
self.log.exception("There was a problem collecting the highwater mark offsets.")
# Unlike consumer offsets, fail immediately because we can't calculate consumer lag w/o highwater_offsets
raise
total_contexts = len(self._consumer_offsets) + len(self._highwater_offsets)
if total_contexts >= self._context_limit:
self.warning(
"""Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
and partitions you wish to monitor.""",
total_contexts,
self._context_limit,
)
# Report the metrics
self._report_highwater_offsets(self._context_limit)
self._report_consumer_offsets_and_lag(self._context_limit - len(self._highwater_offsets))
self._collect_broker_metadata()
def _create_kafka_admin_client(self, api_version):
"""Return a KafkaAdminClient."""
kafka_connect_str = self.instance.get('kafka_connect_str')
# TODO accept None (which inherits kafka-python default of localhost:9092)
# TODO break this out into its own method for validation that can be re-used here and during initial probe
# Have to validate both places because probe is skipped when kafka_client_api_version is specified
if not isinstance(kafka_connect_str, (string_types, list)):
raise ConfigurationError("kafka_connect_str should be a string or list of strings")
kafka_admin_client = KafkaAdminClient(
bootstrap_servers=kafka_connect_str,
client_id='dd-agent',
request_timeout_ms=self.init_config.get('kafka_timeout', DEFAULT_KAFKA_TIMEOUT) * 1000,
api_version=api_version,
# While we check for SASL/SSL params, if not present they will default to the kafka-python values for
# plaintext connections
security_protocol=self.instance.get('security_protocol', 'PLAINTEXT'),
sasl_mechanism=self.instance.get('sasl_mechanism'),
sasl_plain_username=self.instance.get('sasl_plain_username'),
sasl_plain_password=self.instance.get('sasl_plain_password'),
sasl_kerberos_service_name=self.instance.get('sasl_kerberos_service_name', 'kafka'),
sasl_kerberos_domain_name=self.instance.get('sasl_kerberos_domain_name'),
ssl_cafile=self.instance.get('ssl_cafile'),
ssl_check_hostname=self.instance.get('ssl_check_hostname', True),
ssl_certfile=self.instance.get('ssl_certfile'),
ssl_keyfile=self.instance.get('ssl_keyfile'),
ssl_crlfile=self.instance.get('ssl_crlfile'),
ssl_password=self.instance.get('ssl_password'),
)
self.log.debug("KafkaAdminClient api_version: %s", kafka_admin_client.config['api_version'])
# Force initial population of the local cluster metadata cache
kafka_admin_client._client.poll(future=kafka_admin_client._client.cluster.request_update())
if kafka_admin_client._client.cluster.topics(exclude_internal_topics=False) is None:
raise RuntimeError("Local cluster metadata cache did not populate.")
return kafka_admin_client
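    # For reference, a minimal sketch of the instance fields read above (field
    # names taken from the .get() calls in this method; the values are
    # illustrative assumptions, not documented defaults):
    #
    #   instance:
    #     kafka_connect_str: localhost:9092
    #     security_protocol: PLAINTEXT
    #     kafka_client_api_version: "0.10.2"   # optional; skips the broker version probe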
def _get_highwater_offsets(self):
"""Fetch highwater offsets for topic_partitions in the Kafka cluster.
Do this for all partitions in the cluster because even if it has no consumers, we may want to measure whether
producers are successfully producing.
If monitor_all_broker_highwatermarks is True, will fetch for all partitions in the cluster. Otherwise highwater
mark offsets will only be fetched for topic partitions where this check run has already fetched a consumer
offset.
Internal Kafka topics like __consumer_offsets, __transaction_state, etc are always excluded.
Any partitions that don't currently have a leader will be skipped.
Sends one OffsetRequest per broker to get offsets for all partitions where that broker is the leader:
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI(AKAListOffset)
For speed, all the brokers are queried in parallel using callbacks. The callback flow is:
1. Issue an OffsetRequest to every broker
2. Attach a callback to each OffsetResponse that parses the response and saves the highwater offsets.
"""
highwater_futures = [] # No need to store on object because the callbacks don't create additional futures
# If we aren't fetching all broker highwater offsets, then construct the unique set of topic partitions for
# which this run of the check has at least one saved consumer offset. This is later used as a filter for
# excluding partitions.
if not self._monitor_all_broker_highwatermarks:
tps_with_consumer_offset = {(topic, partition) for (_, topic, partition) in self._consumer_offsets}
for batch in self.batchify(self.kafka_client._client.cluster.brokers(), self._broker_requests_batch_size):
for broker in batch:
broker_led_partitions = self.kafka_client._client.cluster.partitions_for_broker(broker.nodeId)
if broker_led_partitions is None:
continue
# Take the partitions for which this broker is the leader and group them by topic in order to construct
# the OffsetRequest while simultaneously filtering out partitions we want to exclude
partitions_grouped_by_topic = defaultdict(list)
for topic, partition in broker_led_partitions:
# No sense fetching highwater offsets for internal topics
if topic not in KAFKA_INTERNAL_TOPICS and (
self._monitor_all_broker_highwatermarks or (topic, partition) in tps_with_consumer_offset
):
partitions_grouped_by_topic[topic].append(partition)
# Construct the OffsetRequest
max_offsets = 1
request = OffsetRequest[0](
replica_id=-1,
topics=[
(topic, [(partition, OffsetResetStrategy.LATEST, max_offsets) for partition in partitions])
for topic, partitions in partitions_grouped_by_topic.items()
],
)
highwater_future = self.kafka_client._send_request_to_node(node_id=broker.nodeId, request=request)
highwater_future.add_callback(self._highwater_offsets_callback)
highwater_futures.append(highwater_future)
# Loop until all futures resolved.
self.kafka_client._wait_for_futures(highwater_futures)
def _highwater_offsets_callback(self, response):
"""Callback that parses an OffsetFetchResponse and saves it to the highwater_offsets dict."""
if type(response) not in OffsetResponse:
raise RuntimeError("response type should be OffsetResponse, but instead was %s." % type(response))
for topic, partitions_data in response.topics:
for partition, error_code, offsets in partitions_data:
error_type = kafka_errors.for_code(error_code)
if error_type is kafka_errors.NoError:
self._highwater_offsets[(topic, partition)] = offsets[0]
elif error_type is kafka_errors.NotLeaderForPartitionError:
self.log.warning(
"Kafka broker returned %s (error_code %s) for topic %s, partition: %s. This should only happen "
"if the broker that was the partition leader when kafka_admin_client last fetched metadata is "
"no longer the leader.",
error_type.message,
error_type.errno,
topic,
partition,
)
self.kafka_client._client.cluster.request_update() # force metadata update on next poll()
elif error_type is kafka_errors.UnknownTopicOrPartitionError:
self.log.warning(
"Kafka broker returned %s (error_code %s) for topic: %s, partition: %s. This | |
# nzekenovhw8.py
import tkinter
from tkinter import messagebox
import socket
def StartConnection (IPAddress, PortNumber):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.connect((IPAddress,PortNumber))
return server
#this function moves binary values to the left (part of hashing)
def leftrotate(x,c):
return ((x << c) & 0xFFFFFFFF) | ((x & 0xFFFFFFFF) >> (32 - c))
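# Quick sanity checks of the 32-bit rotate (illustrative):
#   leftrotate(0x80000000, 1) == 0x00000001
#   leftrotate(0x12345678, 4) == 0x23456781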
#sends username and receives challenge from server, then breaks it all into a list
def makeHash(s,username,password):
#send a command to server
s.send(b"LOGIN " + bytes(username, "utf-8")+ b"\n")
#receives challenge from the server
message = s.recv(500)
message = str(message,'utf-8')
message = message.split()[2]
#makes a text in given format
message = password+message
block = message
block += "1"
while len(block)!=509:
block = block + "0"
#append the message length as three decimal digits
block += str(len(message)).zfill(3)
#splits the text into 32-character chunks and stores the sum of their ASCII values
M=[]
for i in range(16):
strSum = 0
for j in range(32):
strSum += ord(block[i*32:((i+1)*32)][j])
M.append(strSum)
#returns list of ASCII values
return M
#store values of S (used for hashing)
def sValue():
S = []
S[0:15] = [7,12,17,22,7,12,17,22,7,12,17,22,7,12,17,22]
S[16:31] = [5,9,14,20,5,9,14,20,5,9,14,20,5,9,14,20]
S[32:47] = [4,11,16,23,4,11,16,23,4,11,16,23,4,11,16,23]
S[48:63] = [6,10,15,21,6,10,15,21,6,10,15,21,6,10,15,21]
return S
#store values of K (used for hashing)
def kValue():
K = []
K[0:3] = [0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee]
K[4:7] = [0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501]
K[8:11] = [0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be]
K[12:15] = [0x6b901122,0xfd987193,0xa679438e,0x49b40821]
K[16:19] = [0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa]
K[20:23] = [0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8]
K[24:27] = [0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed]
K[28:31] = [0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a]
K[32:35] = [0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c]
K[36:39] = [0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70]
K[40:43] = [0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05]
K[44:47] = [0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665]
K[48:51] = [0xf4292244,0x432aff97,0xab9423a7,0xfc93a039]
K[52:55] = [0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1]
K[56:59] = [0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1]
K[60:63] = [0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391]
return K
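# The table above matches the standard MD5 T-table. As a cross-check, the same
# 64 constants can be generated (illustrative; needs `import math`) with:
#   K = [int(abs(math.sin(i + 1)) * 2**32) & 0xFFFFFFFF for i in range(64)]
# e.g. int(abs(math.sin(1)) * 2**32) == 0xd76aa478, the first entry above.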
#store values of A,B,C,D,a0,b0,c0,d0 (used for hashing)
def abcdValues():
a0 = 0x67452301
b0 = 0xefcdab89
c0 = 0x98badcfe
d0 = 0x10325476
A = a0
B = b0
C = c0
D = d0
return a0,b0,c0,d0,A,B,C,D
#this is the loop where part of hashing goes on
def hashingLoop(A,B,C,D,K,M,S):
for i in range(64):
if 0<=i and i<=15:
F = (B & C) | ((~B) & D)
F = F & 0xFFFFFFFF
g = i
elif 16<=i and i<=31:
F = (D & B) | ((~D) & C)
F = F & 0xFFFFFFFF
g = (5*i + 1) % 16
elif 32<=i and i<=47:
F = B ^ C ^ D
F = F & 0xFFFFFFFF
g = (3*i + 5) % 16
elif 48<=i and i<=63:
F = C ^ (B | (~D))
F = F & 0xFFFFFFFF
g = (7*i) % 16
dTemp = D
D = C
C = B
B = B + leftrotate((A+F+K[i]+M[g]),S[i])
B = B & 0xFFFFFFFF
A = dTemp
return A,B,C,D
#logs in the server
def login (s, username, password):
M = makeHash(s,username,password)
#things that are not my level
S = sValue()
K = kValue()
a0,b0,c0,d0,A,B,C,D = abcdValues()
A,B,C,D = hashingLoop(A,B,C,D,K,M,S)
a0 = (a0 + A) & 0xFFFFFFFF
b0 = (b0 + B) & 0xFFFFFFFF
c0 = (c0 + C) & 0xFFFFFFFF
d0 = (d0 + D) & 0xFFFFFFFF
result = str(a0)+str(b0)+str(c0)+str(d0)
#end of hashing
#sends login and hashed password+challenge to server
s.send(b"LOGIN " + bytes(username+" "+result, "utf-8")+ b"\n")
#retrieve answer from server
message = s.recv(100)
message = str(message,'utf-8')
#if successfully logged in, welcome, else try again
if message[0:16] == "Login Successful":
return True
else:
return False
#retrieve the list of users of chat
def getUsers(s):
#sends request for the list of users
s.send(b"@users \n")
#retrieve the number of chars in answer from server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
#receive list of users and do some format work
message = s.recv(size-6)
message = str(message, "utf-8")
message = message.split("@")[3:]
return message
#retrieve the list of friends in chat
def getFriends(s):
#sends request for the list of friends
s.send(b"@friends \n")
#retrieve the number of chars in answer from server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
#receive list of friends and do some format work
message = s.recv(size-6)
message = str(message, "utf-8")
message = message.split("@")[3:]
return message
#sends request for being friends in chat
def sendFriendRequest(s, friend):
#calculate the size of request message
l = 22+len(friend)
size = str(l).zfill(5) #5-digit, zero-padded length for the "@NNNNN" header
#send a request with its length on the beginning
s.send(b"@"+bytes(size,"utf-8")+b"@request@friend@"+bytes(friend,"utf-8")+b"\n")
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
#show whether added successfully or not
if message[1:3]=="ok":
return True
return False
#accepts request for being friends in chat
def acceptFriendRequest(s, friend):
#calculate the size of accepting message
l = 22+len(friend)
size = str(l).zfill(5)
#send a message to accept with its length on the beginning and shown username
s.send(b"@"+bytes(size,"utf-8")+b"@accept@friend@"+bytes(friend,"utf-8")+ b"\n")
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
#show whether accepted successfully or not
if message[1:3]=="ok":
return True
return False
#sends your message to friend
def sendMessage(s, friend, message):
#calculate the size of your message to server
l = 17+len(friend)+len(message)
size = str(l).zfill(5)
byteString = bytes(friend,"utf-8")+b"@"+bytes(message,"utf-8")+ b"\n"
#send a message to server to send a message to friend
s.send(b"@"+bytes(size,"utf-8")+b"@sendmsg@"+byteString)
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
#show whether sent successfully or not
if message[1:3]=="ok":
return True
return False
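# Worked example of the framing above, derived from the arithmetic in
# sendMessage: for friend = "bob" and message = "hi", l = 17 + 3 + 2 = 22 and
# the bytes sent are b"@00022@sendmsg@bob@hi\n" -- a 6-byte "@NNNNN" length
# header followed by the command, recipient and payload separated by "@".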
def sendFile(s, friend, filename):
#opens file that needs to be sent
with open(filename,'r') as file:
a = file.read()
#calculate the size of your message to server
l = 18+len(friend)+len(filename)+len(a)
size = str(l).zfill(5)
bytedString = b"@"+bytes(filename,"utf-8")+b"@"+bytes(a,"utf-8")+b"\n"
#send a message to server to send a file to friend
s.send(b"@"+bytes(size,"utf-8")+b"@sendfile@"+bytes(friend,"utf-8")+bytedString)
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
#show whether sent successfully or not
if message[1:3]=="ok":
return True
return False
def getRequests(s):
#send a message to server to return requests
s.send(b"@rxrqst \n")
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
message = message.split("@")[2:]
#returns the list of requests
return message
def getNumber(s):
s.send(b"@rxmsg \n")
#receive an answer from the server
message = s.recv(6)
message = str(message, "utf-8")
message = message[1:]
size = int(message)
message = s.recv(size-6)
message = str(message, "utf-8")
#gets number of messages and files
number = int(message[1])
message = message.split("@")[2:]
return message,number
def checkMail(s):
message,number = getNumber(s)
return number,message
#returns all inbox files and messages
def getMail(s,message,number):
messages = []
files = []
#sort messages and files
for i in range(number):
if message[0] == "msg":
#make username,message tuple
a = (message[1],message[2])
messages.append(a)
message = message[3:]
elif message[0] == "file":
#make username,filename tuple
a = (message[1],message[2])
files.append(a)
file = message[3]
#save files in same folder
with open(message[2],"x") as fileout:
fileout.write(file)
message = message[4:]
#returns list received of messages and files
return (messages,files)
class loginWnd():
def __init__(self,root,socket):
self.root = root
self.socket = socket
self.mainFrame = tkinter.Frame(root)
self.mainFrame.grid(sticky = "wens")
self.lbl1 = tkinter.Label(self.mainFrame, text = "Login")
self.lbl2 = tkinter.Label(self.mainFrame, text = "Password")
self.btn1 = tkinter.Button(self.mainFrame, text="OK",command=self.bp)
self.box1 = tkinter.Entry(self.mainFrame)
self.box2 = tkinter.Entry(self.mainFrame, show = "*")
self.lbl1.grid(row = 0, column = 0)
self.lbl2.grid(row = 1, column = 0)
self.box1.grid(row = 0, column = 1)
self.box2.grid(row = 1, column = 1)
self.btn1.grid(row = 2, column = 1)
def bp(self):
#closes window if incorrect password/username entered
if not login(self.socket, self.box1.get(), self.box2.get()):
self.root.destroy()
return
#logs in
clogin = self.box1.get()
self.root.destroy()
self.root = tkinter.Tk()
self.root.title("Chat Client")
self.app = listsWnd(self.root, self.socket, clogin)
self.root.mainloop()
class listsWnd():
def __init__(self,root,socket,mylogin):
self.wnd = {}
self.root = root
self.socket = socket
self.mylogin = mylogin
self.mainFrame = tkinter.Frame(root)
self.mainFrame.grid(sticky = "wens")
self.lbl1 = tkinter.Label(self.mainFrame, text = "All Users")
self.lbl2 = tkinter.Label(self.mainFrame, text = "Your Friends")
self.lbl3 = tkinter.Label(self.mainFrame, text = "Pending Requests")
self.listbox1_entries = getUsers(self.socket)
self.listbox1_widget = tkinter.Listbox(self.mainFrame)
self.listbox2_entries = getFriends(self.socket)
self.windows = {}
for friend in self.listbox2_entries:
self.windows[friend] = False
self.listbox2_widget = tkinter.Listbox(self.mainFrame)
self.listbox3_entries = getRequests(self.socket)
self.listbox3_widget = tkinter.Listbox(self.mainFrame)
self.btn1 = tkinter.Button(self.mainFrame, text = "Send Request",command = self.sndRqst)
self.btn2 = tkinter.Button(self.mainFrame, text = "Start Chat" , command = self.strtCht)
self.btn3 = tkinter.Button(self.mainFrame, text = "Accept Request", command = self.accptRqst)
for user in self.listbox1_entries:
self.listbox1_widget.insert(tkinter.END, user)
for friend in
# repo: virdesai/stock-analysis-engine
"""
TA-Lib wrappers
"""
# for unittests, allow passing the mocks into the runtime if not found
try:
import talib as ta
except Exception:
import analysis_engine.mocks.mock_talib as ta
# end of loading talib or mocks
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
"""
Overlap
https://mrjbq7.github.io/ta-lib/func_groups/overlap_studies.html
"""
def BBANDS(
close,
timeperiod=5,
nbdevup=2,
nbdevdn=2,
matype=0,
verbose=False):
"""BBANDS
Wrapper for ta.BBANDS for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
(upperband,
middleband,
lowerband) = BBANDS(
close,
timeperiod=5,
nbdevup=2,
nbdevdn=2,
matype=0)
:return: upperband, middleband, lowerband
:param close: close prices
:param timeperiod: number of values
(default is ``5``)
:param nbdevup: float - standard deviation
to set the upper band
(default is ``2``)
:param nbdevdn: float - standard deviation
to set the lower band
(default is ``2``)
:param matype: moving average type
(default is ``0`` simple moving average)
:param verbose: show logs
"""
if verbose:
log.info(
'bbands - start')
return ta.BBANDS(
close,
timeperiod=timeperiod,
nbdevup=nbdevup,
nbdevdn=nbdevdn,
matype=matype)
# end of BBANDS
def EMA(
close,
timeperiod=30,
verbose=False):
"""EMA
Wrapper for ta.EMA for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = EMA(
close,
timeperiod=30)
:return: float
:param close: close prices
:param timeperiod: number of values
(default is ``30``)
:param verbose: show logs
"""
if verbose:
log.info(
'ema - start')
return ta.EMA(
close,
timeperiod=timeperiod)
# end of EMA
def WMA(
close,
timeperiod=30,
verbose=False):
"""WMA
Wrapper for ta.WMA for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = WMA(
close,
timeperiod=30)
:return: float
:param close: close prices
:param timeperiod: number of values
(default is ``5``)
:param verbose: show logs
"""
if verbose:
log.info(
'wma - start')
return ta.WMA(
close,
timeperiod=timeperiod)
# end of WMA
"""
Momentum
https://mrjbq7.github.io/ta-lib/func_groups/momentum_indicators.html
"""
def ADX(
high=None,
low=None,
close=None,
timeperiod=14,
verbose=False):
"""ADX
Wrapper for ta.ADX for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = ADX(
high,
low,
close,
timeperiod=14)
:param high: high list
:param low: low list
:param close: close list
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'adx - start')
return ta.ADX(
high,
low,
close,
timeperiod)
# end of ADX
def MACD(
close=None,
fast_period=12,
slow_period=26,
signal_period=9,
verbose=False):
"""MACD
Wrapper for ta.MACD for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
(macd,
macdsignal,
macdhist) = MACD(
close,
fastperiod=12,
slowperiod=26,
signalperiod=9)
:param close: close prices
:param fast_period: integer fast
line
:param slow_period: integer slow
line
:param signal_period: integer signal
line
:param verbose: show logs
"""
if verbose:
log.info(
'macd - start')
return ta.MACD(
close,
fastperiod=fast_period,
slowperiod=slow_period,
signalperiod=signal_period)
# end of MACD
def MFI(
high=None,
low=None,
close=None,
volume=None,
timeperiod=None,
verbose=False):
"""MFI
Wrapper for ta.MFI for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = MFI(
high,
low,
close,
volume,
timeperiod=14)
:param high: high list
:param low: low list
:param close: close list
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'mfi - start')
return ta.MFI(
high,
low,
close,
volume,
timeperiod)
# end of MFI
def MOM(
close=None,
timeperiod=None,
verbose=False):
"""MOM
Wrapper for ta.MOM for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = MOM(
close,
timeperiod=10)
:param close: close list
:param timeperiod: number of values
in ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'mom - start')
return ta.MOM(
close,
timeperiod)
# end of MOM
def ROC(
close=None,
timeperiod=None,
verbose=False):
"""ROC
Wrapper for ta.ROC for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = ROC(
close,
timeperiod=10)
:param close: close list
:param timeperiod: number of values
in ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'roc - start')
return ta.ROC(
close,
timeperiod)
# end of ROC
def RSI(
close=None,
timeperiod=None,
verbose=False):
"""RSI
Wrapper for ta.RSI for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = RSI(
close,
timeperiod=14)
:param close: close list
:param timeperiod: number of values
in ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'rsi - start')
return ta.RSI(
close,
timeperiod)
# end of RSI
def STOCH(
high=None,
low=None,
close=None,
fastk_period=None,
slowk_period=None,
slowk_matype=None,
slowd_period=None,
slowd_matype=0,
verbose=False):
"""STOCH
Wrapper for ta.STOCH for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
slowk, slowd = STOCH(
high,
low,
close,
fastk_period=5,
slowk_period=3,
slowk_matype=0,
slowd_period=3,
slowd_matype=0)
:param high: list of high values
:param low: list of low values
:param close: list of close values
:param fastk_period: integer num
of fast k sticks
:param slowk_period: integer num
of slow k sticks
:param slowk_matype: integer moving
average
(default is ``0``)
:param slowd_period: integer num
of slow d sticks
:param slowd_matype: integer moving
average
(default is ``0``)
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'stoch - start')
return ta.STOCH(
high=high,
low=low,
close=close,
fastk_period=fastk_period,
slowk_period=slowk_period,
slowk_matype=slowk_matype,
slowd_period=slowd_period,
slowd_matype=slowd_matype)
# end of STOCH
def STOCHF(
high=None,
low=None,
close=None,
fastk_period=None,
fastd_period=None,
fastd_matype=0,
verbose=False):
"""STOCHF
Wrapper for ta.STOCHF for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
fastk, fastd = STOCHF(
high,
low,
close,
fastk_period=5,
fastd_period=3,
fastd_matype=0)
:param high: list of high values
:param low: list of low values
:param close: list of close values
:param fastk_period: integer num
of fast k sticks
:param fastd_period: integer num
of fast d sticks
:param fastd_matype: integer moving
average
(default is ``0``)
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'stochf - start')
return ta.STOCHF(
high=high,
low=low,
close=close,
fastk_period=fastk_period,
fastd_period=fastd_period,
fastd_matype=fastd_matype)
# end of STOCHF
def WILLR(
high=None,
low=None,
close=None,
timeperiod=None,
verbose=False):
"""WILLR
Wrapper for ta.WILLR for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = WILLR(
high,
low,
close,
timeperiod=14)
:param high: high list
:param low: low list
:param close: close list
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'willr - start')
return ta.WILLR(
high,
low,
close,
timeperiod)
# end of WILLR
"""
Volume
https://mrjbq7.github.io/ta-lib/func_groups/volume_indicators.html
"""
def Chaikin(
high=None,
low=None,
close=None,
volume=None,
verbose=False):
"""Chaikin
Wrapper for ta.AD for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = AD(
high,
low,
close,
volume)
:param high: high list
:param low: low list
:param close: close list
:param volume: list of volume values
:param verbose: show logs
"""
if verbose:
log.info(
'chaikin - start')
return ta.AD(
high,
low,
close,
volume)
# end of Chaikin
def ChaikinADOSC(
high=None,
low=None,
close=None,
volume=None,
fast_period=3,
slow_period=10,
verbose=False):
"""ChaikinADOSC
Wrapper for ta.ADOSC for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = ADOSC(
high,
low,
close,
volume,
fastperiod=3,
slowperiod=10)
:param high: high list
:param low: low list
:param close: close list
:param volume: list of volume values
:param verbose: show logs
"""
if verbose:
log.info(
'chaikinadosc - start')
return ta.ADOSC(
high,
low,
close,
volume,
fast_period,
slow_period)
# end of ChaikinADOSC
def OBV(
value=None,
volume=None,
verbose=False):
"""OBV
Wrapper for ta.OBV for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = OBV(
close,
volume)
:param value: list of values
(default should be ``close``)
:param volume: list of volume values
:param verbose: show logs
"""
if verbose:
log.info(
'obv - start')
return ta.OBV(
value,
volume)
# end of OBV
"""
Volume
https://mrjbq7.github.io/ta-lib/func_groups/volatility_indicators.html
"""
def ATR(
high=None,
low=None,
close=None,
timeperiod=None,
verbose=False):
"""ATR
Wrapper for ta.ATR for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = ATR(
high,
low,
close,
timeperiod=14)
:param high: high list
:param low: low list
:param close: close list
:param timeperiod: number of values
in ``high``, ``low`` and ``close``
:param verbose: show logs
"""
if verbose:
log.info(
'atr - start')
return ta.ATR(
high,
low,
close,
timeperiod=timeperiod)
# end of ATR
def NATR(
high=None,
low=None,
close=None,
timeperiod=None,
verbose=False):
"""NATR
Wrapper for ta.NATR for running unittests
on ci/cd tools that do not provide talib
.. code-block:: python
real = NATR(
high,
low,
| |
# python3 -m pip install isort autoflake astpretty black
# requires python3.9 to run
import os
import argparse
import ast
import subprocess
import multiprocessing
from pathlib import Path
import astpretty
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--out_dir", type=str, default="python",
)
parser.add_argument("--verbose", "-v", action="store_true")
parser.add_argument("--debug", "-d", action="store_true")
parser.add_argument("--autoflake", "-a", action="store_true")
parser.add_argument("--black", "-b", action="store_true")
parser.add_argument("--isort", "-i", action="store_true")
parser.add_argument("--license", "-l", action="store_true")
parser.add_argument("--ast", action="store_true")
args = parser.parse_args()
OUT_PATH = Path(args.out_dir)
SHOULD_SAVE_AST = args.ast
COMPATIBLE_MODULE = "oneflow.compatible.single_client"
def dumpprint(node):
astpretty.pprint(node)
def is_decorator(d, name=None):
return (isinstance(d, ast.Name) and d.id == name) or (
isinstance(d, ast.Call) and isinstance(d.func, ast.Name) and d.func.id == name
)
def is_stable(node: ast.AST):
for d in node.decorator_list:
if is_decorator(d, "stable_api"):
return True
return False
def is_experimental(node: ast.AST):
for d in node.decorator_list:
if is_decorator(d, "experimental_api"):
return True
return False
def get_parent_module(value):
return ".".join(value.split(".")[0:-1])
def join_module(*args):
return ".".join([m for m in args if m])
def path_from_module(module, is_init=False):
if is_init:
return Path("/".join(module.split(".") + ["__init__.py"]))
else:
return Path("/".join(module.split(".")) + ".py")
def module_from_path(path: Path):
assert path.name.endswith(".py")
parts = path.parts
if parts[-1] == "__init__.py":
return ".".join(path.parts[0:-1])
else:
return ".".join(path.parts)[0:-3]
def is_compatible_root_module(module: str):
if module == COMPATIBLE_MODULE:
return True
assert module == "oneflow"
return False
class ReservedKeywordsVisitor(ast.NodeVisitor):
def __init__(self, keywords=None) -> None:
self.keywords = keywords
self.has_reserved_keyword = False
def visit_Name(self, node: ast.Name):
if node.id in self.keywords:
self.has_reserved_keyword = True
def replace_filename(name: str):
return name.replace("name_scope", "namescope")
def replace_str(name: str):
name = replace_filename(name)
name = name.replace("lib.core", "support")
name = name.replace("compatible.single_client.core", "core")
name = name.replace("enable_typing_check", "typing_check")
if name.startswith("oneflow.python."):
return name.replace("oneflow.python.", "oneflow.")
elif name == "oneflow.python":
return "oneflow"
elif "single_client.python." in name or name.endswith("single_client.python"):
return name.replace("single_client.python", "single_client")
else:
return name
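# Illustrative mapping implied by the rules above (examples assumed, not exhaustive):
#   replace_str("oneflow.python.ops.nn_ops") -> "oneflow.ops.nn_ops"
#   replace_str("oneflow.compatible.single_client.python.nn") -> "oneflow.compatible.single_client.nn"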
class ExportVisitor(ast.NodeTransformer):
def __init__(self, root_module="oneflow", src_target_module: str = None) -> None:
super().__init__()
self.staging_decorators = []
self.root_module = root_module
self.export_modules = {}
self.top_imports = []
self.src_target_module = src_target_module
def append_export(self, target_module=None, node=None):
if target_module not in self.export_modules:
module = ast.Module(body=[], type_ignores=[])
self.export_modules[target_module] = module
else:
module = self.export_modules[target_module]
if isinstance(node, list):
module.body += node
else:
module.body.append(node)
def visit_Expr(self, node):
if isinstance(node.value, ast.Constant) and isinstance(node.value.value, str):
if "Copyright 2020 The OneFlow Authors" in node.value.value:
return None
return node
def visit_ImportFrom(self, node):
for name in node.names:
if isinstance(name, ast.alias) and name.name == "oneflow_deprecate":
return ast.ImportFrom(
module="oneflow",
names=[ast.alias(name="oneflow_deprecate")],
level=0,
)
for name in node.names:
if not self.visit(name):
return None
if node.module:
if node.module == "__future__" or "oneflow_export" in node.module:
return None
node.module = replace_str(node.module)
self.top_imports.append(node)
return node
def visit_Import(self, node):
for name in node.names:
if not super().visit(name):
return None
self.top_imports.append(node)
return node
def visit_alias(self, node: ast.alias) -> ast.alias:
if node.name.startswith("oneflow.python."):
node.name = replace_str(node.name)
return node
elif node.name == "oneflow_export":
return None
elif "__export_symbols__" in node.name:
return None
else:
node.name = replace_str(node.name)
return node
def visit_Name(self, node: ast.AST):
if node.id == "oneflow_export":
return None
if node.id == "stable_api":
return None
if node.id == "experimental_api":
return None
return node
def visit_Call(self, node: ast.AST):
if not self.visit(node.func):
return None
return node
def visit_ClassDef(self, node):
node.body = [self.visit(n) for n in node.body]
return self.visit_FunctionDef(node)
def visit_FunctionDef(self, node):
is_compatible_and_experimental = is_compatible_root_module(
self.root_module
) and is_experimental(node)
if not is_compatible_root_module(self.root_module) and is_stable(node):
return None
compact_decorator_list = [self.visit(d) for d in node.decorator_list]
compact_decorator_list = [d for d in compact_decorator_list if d]
node.body = [self.visit(n) for n in node.body]
rkv = ReservedKeywordsVisitor(keywords=set({"int", "float"}))
rkv.visit(node)
has_reserved_keyword = rkv.has_reserved_keyword
is_deprecated = False
for d in node.decorator_list:
if is_decorator(d, name="oneflow_deprecate"):
is_deprecated = True
for d in node.decorator_list:
if is_decorator(d, name="register_tensor_op"):
import_src = ast.parse(f"import {self.src_target_module}")
self.append_export(target_module=self.root_module, node=import_src)
if is_decorator(d, name="oneflow_export"):
is_kept_in_src = (
True
or has_reserved_keyword
or self.src_target_module == target_module
or target_module in ["oneflow", "oneflow.scope", COMPATIBLE_MODULE]
)
arg0 = d.args[0]
experimental_module = None
if is_compatible_and_experimental:
experimental_module = "experimental"
target_module0 = join_module(
self.root_module, experimental_module, get_parent_module(arg0.value)
)
target_symbol0 = arg0.value.split(".")[-1]
if ".".join([target_module0, target_symbol0]) == self.src_target_module:
raise ValueError(
"[colition][both func and module]", self.src_target_module
)
if is_kept_in_src:
target_module = self.src_target_module
target_symbol = node.name
else:
target_module = target_module0
target_symbol = target_symbol0
# nth export: import from first export
for argN in d.args[1::]:
target_moduleN = join_module(
self.root_module,
experimental_module,
get_parent_module(argN.value),
)
target_nameN = argN.value.split(".")[-1]
assert arg0 != argN, {"arg0": arg0, "argN": argN}
import_from_first_export = ast.ImportFrom(
module=target_module,
names=[ast.alias(name=target_symbol, asname=target_nameN),],
level=0,
)
self.append_export(
target_module=target_moduleN, node=import_from_first_export
)
if is_deprecated:
import_oneflow_deprecate = ast.ImportFrom(
module="oneflow",
names=[ast.alias(name="oneflow_deprecate")],
level=0,
)
node.decorator_list = compact_decorator_list
if is_kept_in_src:
asname = target_symbol0
if node.name == target_symbol0:
asname = None
if target_module0 == target_module and node.name == target_symbol0:
# print("[skip]", target_module0, target_symbol0)
pass
else:
import_from_src = ast.ImportFrom(
module=self.src_target_module,
names=[ast.alias(name=node.name, asname=asname),],
level=0,
)
self.append_export(
target_module=target_module0, node=import_from_src
)
if is_deprecated:
return [import_oneflow_deprecate, node]
else:
return node
else:
if is_deprecated:
self.append_export(
target_module=target_module, node=import_oneflow_deprecate
)
# prepend imports in target module
self.append_export(
target_module=target_module, node=self.top_imports
)
if target_module != "oneflow":
import_star_from_src = ast.ImportFrom(
module=self.src_target_module,
names=[ast.alias(name="*")],
level=0,
)
# node.body.insert(0, import_star_from_src)
self.append_export(
target_module=target_module, node=import_star_from_src
)
# save func name for the src import before modifying node.name
src_asname = None
if node.name != target_symbol:
src_asname = node.name
# save first export in target module
node.name = target_symbol
self.append_export(target_module=target_module, node=node)
# src: import from first export
return ast.ImportFrom(
module=target_module,
names=[ast.alias(name=target_symbol, asname=src_asname),],
level=0,
)
if is_decorator(d, name="oneflow_export_value"):
assert len(node.body) == 2
assert len(d.args) == 1
target_module = join_module(
self.root_module, get_parent_module(d.args[0].value)
)
call = node.body[1].value
assign = ast.Assign(
targets=[
ast.Name(id=d.args[0].value.split(".")[-1], ctx=ast.Store())
],
value=call,
)
self.append_export(target_module=target_module, node=assign)
# TODO: the doc is not dumped properly
# doc = node.body[0]
# self.append_export(target_module=target_module, node=doc)
return None
node.decorator_list = compact_decorator_list
return node
class SrcFile:
def __init__(self, spec) -> None:
is_test = "is_test" in spec and spec["is_test"]
self.export_visitor = None
self.tree = None
self.dst = Path(spec["dst"])
self.src: Path = spec["src"]
self.target_module = module_from_path(self.dst)
self.target_module = replace_str(self.target_module)
if is_test and args.verbose:
print("[skip test]", self.src)
else:
txt = self.src.read_text()
self.tree = ast.parse(txt)
root_module = "oneflow"
if (
"compatible_single_client_python" in self.src.parts
or self.src.name == "single_client_init.py"
or self.src.name == "single_client_main.py"
):
root_module = COMPATIBLE_MODULE
self.export_visitor = ExportVisitor(
root_module=root_module, src_target_module=self.target_module
)
self.export_visitor.visit(self.tree)
if self.target_module == root_module:
self.export_visitor.append_export(
target_module=root_module,
node=ast.parse(f"from . import distributed"),
)
if self.target_module == "oneflow":
self.export_visitor.append_export(
target_module=root_module,
node=ast.parse(f"from . import saved_model"),
)
else:
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client",
node=ast.parse(
f"from . import env, scope, math, optimizer, losses, config, layers, summary, random, typing, train, data, profiler, sysconfig, checkpoint, distribute, util, model, image, tensorrt, saved_model, regularizers"
),
)
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client.experimental",
node=ast.parse(
f"""from . import scope
from oneflow.compatible.single_client import unittest
"""
),
)
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client.deprecated",
node=ast.parse(f"from . import nn"),
)
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client.config",
node=ast.parse(f"from . import collective_boxing"),
)
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client.unittest",
node=ast.parse(f"from . import env"),
)
self.export_visitor.append_export(
target_module="oneflow.compatible.single_client.optimizer",
node=ast.parse(
f"from . import warmup, loss_scale, grad_clipping"
),
)
# self.export_visitor.append_export(
# target_module=".".join([root_module, "lib.core"]), node=ast.parse(f"from . import async_util")
# )
def get_specs_under_python(python_path=None, dst_path=None):
specs = []
for p in Path(python_path).rglob("*.py"):
if p.name == "version.py":
continue
rel = p.relative_to(python_path)
dst = Path(dst_path).joinpath(rel)
dst = Path(replace_filename(str(dst)))
spec = {"src": p, "dst": dst}
if rel.parts[0] == "test":
spec["is_test"] = True
specs.append(spec)
return specs
def get_files():
srcs = (
get_specs_under_python(python_path="oneflow/python", dst_path="oneflow")
+ get_specs_under_python(
python_path="oneflow/compatible_single_client_python",
dst_path="oneflow/compatible/single_client",
)
+ [
{"src": Path("oneflow/init.py"), "dst": "oneflow/__init__.py"},
{"src": Path("oneflow/__main__.py"), "dst": "oneflow/__main__.py"},
{
"src": Path("oneflow/single_client_init.py"),
"dst": "oneflow/compatible/single_client/__init__.py",
},
{
"src": Path("oneflow/single_client_main.py"),
"dst": "oneflow/compatible/single_client/__main__.py",
},
]
)
srcs = list(filter(lambda x: ("oneflow_export" not in x["src"].name), srcs))
if args.debug:
srcs = [
{
"src": Path("oneflow/python/ops/nn_ops.py"),
"dst": "oneflow/ops/nn_ops.py",
},
{
"src": Path("oneflow/python/advanced/distribute_ops.py"),
"dst": "oneflow/advanced/distribute_ops.py",
},
]
pool = multiprocessing.Pool()
srcs = pool.map(SrcFile, srcs,)
pool.close()
return srcs
class ModuleNode:
def __init__(self, name=None, parent=None) -> None:
self.children = dict()
self.parent = parent
self.level = 0
if parent:
self.level = parent.level + 1
self.name = name
def add_or_get_child(self, name):
if name in self.children:
return self.children[name]
else:
self.children[name] = ModuleNode(name=name, parent=self)
return self.children[name]
@property
def is_leaf(self):
return len(self.children.keys()) == 0
def walk(self, cb):
cb(self)
for child in self.children.values():
child.walk(cb)
@property
def leafs(self):
ret = []
def add_leafs(node: ModuleNode):
if node.is_leaf:
ret.append(node)
self.walk(add_leafs)
return ret
@property
def full_name(self):
current_parent = self
ret = self.name
while current_parent.parent:
current_parent = current_parent.parent
ret = current_parent.name + "." + ret
return ret
def __str__(self) -> str:
return "\n".join(
[f"{self.full_name}"]
+ [child.__str__() for child in self.children.values()]
)
@staticmethod
def add_sub_module(root=None, module=None):
parts = module.split(".")
current_node = root
assert current_node.name == parts[0]
for part in parts[1::]:
current_node = current_node.add_or_get_child(part)
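# Small usage sketch (illustrative, names assumed):
#   root = ModuleNode(name="oneflow")
#   ModuleNode.add_sub_module(root=root, module="oneflow.nn.functional")
#   assert [leaf.full_name for leaf in root.leafs] == ["oneflow.nn.functional"]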
def save_trees(args=None):
dst: Path = args["dst"]
trees = args["trees"]
dst_full = OUT_PATH.joinpath(dst)
dst_full.parent.mkdir(parents=True, exist_ok=True)
dst_full.touch(exist_ok=False)
# TODO: append "doctest.testmod(raise_on_error=True)"
trees = [ast.fix_missing_locations(tree) for tree in trees]
if SHOULD_SAVE_AST:
new_txt = "\n".join([str(astpretty.pformat(tree)) for tree in trees])
new_txt = f"""from ast import *
{new_txt}
"""
dst_full.with_suffix(".ast.py").write_text(new_txt)
new_txt = ""
if | |
struct nx_action_sample */
NXAST_SET_MPLS_LABEL = 30, # /* struct nx_action_ttl */
NXAST_SET_MPLS_TC = 31 # /* struct nx_action_ttl */
)
nx_stats_subtype = enum('nx_stats_subtype', globals(), uint32,
NXST_FLOW = 0,
NXST_AGGREGATE = 1,
NXST_FLOW_MONITOR = 2
)
def create_ofs_nbits(ofs, n_bits):
return (ofs << 6) | (n_bits - 1)
'''
/* NXAST_MULTIPATH: Multipath link choice algorithm to apply.
*
* In the descriptions below, 'n_links' is max_link + 1. */
'''
nx_mp_algorithm = enum('nx_mp_algorithm', globals(), uint16,
#===========================================================================
# /* link = hash(flow) % n_links.
# *
# * Redistributes all traffic when n_links changes. O(1) performance. See
# * RFC 2992.
# *
# * Use UINT16_MAX for max_link to get a raw hash value. */
#===========================================================================
NX_MP_ALG_MODULO_N = 0,
#===========================================================================
# /* link = hash(flow) / (MAX_HASH / n_links).
# *
# * Redistributes between one-quarter and one-half of traffic when n_links
# * changes. O(1) performance. See RFC 2992.
# */
#===========================================================================
NX_MP_ALG_HASH_THRESHOLD = 1,
#===========================================================================
# /* for i in [0,n_links):
# * weights[i] = hash(flow, i)
# * link = { i such that weights[i] >= weights[j] for all j != i }
# *
# * Redistributes 1/n_links of traffic when n_links changes. O(n_links)
# * performance. If n_links is greater than a threshold (currently 64, but
# * subject to change), Open vSwitch will substitute another algorithm
# * automatically. See RFC 2992. */
#===========================================================================
NX_MP_ALG_HRW = 2, # /* Highest Random Weight. */
#===========================================================================
# /* i = 0
# * repeat:
# * i = i + 1
# * link = hash(flow, i) % arg
# * while link > max_link
# *
# * Redistributes 1/n_links of traffic when n_links changes. O(1)
# * performance when arg/max_link is bounded by a constant.
# *
# * Redistributes all traffic when arg changes.
# *
# * arg must be greater than max_link and for best performance should be no
# * more than approximately max_link * 2. If arg is outside the acceptable
# * range, Open vSwitch will automatically substitute the least power of 2
# * greater than max_link.
# *
# * This algorithm is specific to Open vSwitch.
# */
#===========================================================================
NX_MP_ALG_ITER_HASH = 3 # /* Iterative Hash. */
)
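# Minimal sketch (not part of the wire format) of NX_MP_ALG_MODULO_N as described
# in the comment above: the switch picks link = hash(flow) % n_links.
#   def _modulo_n_link(flow_hash, max_link):
#       return flow_hash % (max_link + 1)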
# Isn't it 0x7ff if it has 11 bits?
NX_LEARN_N_BITS_MASK = 0x3ff
NX_LEARN_SRC_MASK = (1 << 13)
NX_LEARN_DST_MASK = (3 << 11)
NX_LEARN_SRC_FIELD = (0 << 13) # /* Copy from field. */
NX_LEARN_DST_MATCH = (0 << 11) # /* Add match criterion. */
nx_flow_mod_spec_header = enum('nx_flow_mod_spec_header', globals(), uint16, True,
NX_LEARN_SRC_IMMEDIATE = (1 << 13), # /* Copy from immediate value. */
NX_LEARN_DST_LOAD = (1 << 11), # /* Add NXAST_REG_LOAD action. */
NX_LEARN_DST_OUTPUT = (2 << 11), # /* Add OFPAT_OUTPUT action. */
NX_LEARN_DST_RESERVED = (3 << 11) # /* Not yet defined. */
)
def NX_FLOWMODSPEC_SRC(x):
return x & NX_LEARN_SRC_MASK
def NX_FLOWMODSPEC_DST(x):
return x & NX_LEARN_DST_MASK
def NX_FLOWMODSPEC_NBITS(x):
return x & NX_LEARN_N_BITS_MASK
def _createdesc(descr):
def formatter(x):
x['_desc'] = descr(x)
return x
return formatter
_nx_flow_mod_spec_src = nstruct(
name = '_nx_flow_mod_spec_src',
padding = 1,
size = lambda x: (((NX_FLOWMODSPEC_NBITS(x.header) + 15) // 16 * 2) if NX_FLOWMODSPEC_SRC(x.header) else 6)
)
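# Size example (illustrative): a 48-bit immediate source occupies
# ((48 + 15) // 16) * 2 == 6 bytes, the same length as the field form
# (4-byte nxm_header + 2-byte offset).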
_nx_flow_mod_spec_dst = nstruct(
name = '_nx_flow_mod_spec_dst',
padding = 1,
size = lambda x: 0 if NX_FLOWMODSPEC_DST(x.header) == NX_LEARN_DST_OUTPUT else 6
)
_nx_flow_mod_spec_src_value = nstruct(
(raw, 'value'),
name = '_nx_flow_mod_spec_src_value',
base = _nx_flow_mod_spec_src,
criteria = lambda x: NX_FLOWMODSPEC_SRC(x.header)
)
_nx_flow_mod_spec_src_field = nstruct(
(nxm_header, 'src'),
(uint16, 'src_ofs'),
name = '_nx_flow_mod_spec_src_field',
base = _nx_flow_mod_spec_src,
criteria = lambda x: not NX_FLOWMODSPEC_SRC(x.header)
)
_nx_flow_mod_spec_dst_field = nstruct(
(nxm_header, 'dst'),
(uint16, 'dst_ofs'),
name = '_nx_flow_mod_spec_dst_field',
base = _nx_flow_mod_spec_dst,
criteria = lambda x: NX_FLOWMODSPEC_DST(x.header) == NX_LEARN_DST_MATCH or NX_FLOWMODSPEC_DST(x.header) == NX_LEARN_DST_LOAD
)
_nx_flow_mod_spec_dst_output = nstruct(
name = '_nx_flow_mod_spec_dst_output',
base = _nx_flow_mod_spec_dst,
criteria = lambda x: NX_FLOWMODSPEC_DST(x.header) == NX_LEARN_DST_OUTPUT
)
def _create_field(dst, ofs):
if NXM_HASMASK(dst):
raise ValueError('Must specify a nxm_header without mask')
return _nx_flow_mod_spec_dst_field.new(dst = dst, dst_ofs = ofs)._tobytes()
def _create_header(src, dst, n_bits):
return uint16.tobytes((src & NX_LEARN_SRC_MASK) | (dst & NX_LEARN_DST_MASK) | (n_bits & NX_LEARN_N_BITS_MASK))
'''
/* NXAST_BUNDLE: Bundle slave choice algorithm to apply.
*
* In the descriptions below, 'slaves' is the list of possible slaves in the
* order they appear in the OpenFlow action. */
'''
nx_bd_algorithm = enum('nx_bd_algorithm', globals(), uint16,
#===========================================================================
# /* Chooses the first live slave listed in the bundle.
# *
# * O(n_slaves) performance. */
#===========================================================================
NX_BD_ALG_ACTIVE_BACKUP = 0,
#===========================================================================
# /* for i in [0,n_slaves):
# * weights[i] = hash(flow, i)
# * slave = { slaves[i] such that weights[i] >= weights[j] for all j != i }
# *
# * Redistributes 1/n_slaves of traffic when a slave's liveness changes.
# * O(n_slaves) performance.
# *
# * Uses the 'fields' and 'basis' parameters. */
#===========================================================================
NX_BD_ALG_HRW = 1 # /* Highest Random Weight. */
)
nx_flow_format = enum('nx_flow_format', globals(), uint32,
NXFF_OPENFLOW10 = 0, # /* Standard OpenFlow 1.0 compatible. */
NXFF_NXM = 2 # /* Nicira extended match. */
)
'''
/* 'flags' bits in struct nx_flow_monitor_request. */
'''
nx_flow_monitor_flags = enum('nx_flow_monitor_flags', globals(), uint16, True,
# /* When to send updates. */
NXFMF_INITIAL = 1 << 0, #/* Initially matching flows. */
NXFMF_ADD = 1 << 1, #/* New matching flows as they are added. */
NXFMF_DELETE = 1 << 2, #/* Old matching flows as they are removed. */
NXFMF_MODIFY = 1 << 3, #/* Matching flows as they are changed. */
# /* What to include in updates. */
NXFMF_ACTIONS = 1 << 4, #/* If set, actions are included. */
NXFMF_OWN = 1 << 5, #/* If set, include own changes in full. */
)
'''
/* 'event' values in struct nx_flow_update_header. */
'''
nx_flow_update_event = enum('nx_flow_update_event', globals(), uint16,
# /* struct nx_flow_update_full. */
NXFME_ADDED = 0, # /* Flow was added. */
NXFME_DELETED = 1, # /* Flow was deleted. */
NXFME_MODIFIED = 2, # /* Flow (generally its actions) was changed. */
# /* struct nx_flow_update_abbrev. */
NXFME_ABBREV = 3, # /* Abbreviated reply. */
)
def create_extension(namespace, nicira_header, nx_action, nx_stats_request, nx_stats_reply,
msg_subtype, action_subtype, stats_subtype):
'''
/* This command enables or disables an Open vSwitch extension that allows a
* controller to specify the OpenFlow table to which a flow should be added,
* instead of having the switch decide which table is most appropriate as
* required by OpenFlow 1.0. Because NXM was designed as an extension to
* OpenFlow 1.0, the extension applies equally to ofp10_flow_mod and
* nx_flow_mod. By default, the extension is disabled.
*
* When this feature is enabled, Open vSwitch treats struct ofp10_flow_mod's
* and struct nx_flow_mod's 16-bit 'command' member as two separate fields.
* The upper 8 bits are used as the table ID, the lower 8 bits specify the
* command as usual. A table ID of 0xff is treated like a wildcarded table ID.
*
* The specific treatment of the table ID depends on the type of flow mod:
*
* - OFPFC_ADD: Given a specific table ID, the flow is always placed in that
* table. If an identical flow already exists in that table only, then it
* is replaced. If the flow cannot be placed in the specified table,
* either because the table is full or because the table cannot support
* flows of the given type, the switch replies with an OFPFMFC_TABLE_FULL
* error. (A controller can distinguish these cases by comparing the
* current and maximum number of entries reported in ofp_table_stats.)
*
* If the table ID is wildcarded, the switch picks an appropriate table
* itself. If an identical flow already exist in the selected flow table,
* then | |
2
xlVerbPrimary = 1
# XlCalculation
xlCalculationAutomatic = -4105
xlCalculationManual = -4135
xlCalculationSemiautomatic = 2
# XlFileAccess
xlReadOnly = 3
xlReadWrite = 2
# XlEditionType
xlPublisher = 1
xlSubscriber = 2
# XlObjectSize
xlFitToPage = 2
xlFullPage = 3
xlScreenSize = 1
# XlLookAt
xlPart = 2
xlWhole = 1
# XlMailSystem
xlMAPI = 1
xlNoMailSystem = 0
xlPowerTalk = 2
# XlLinkInfoType
xlLinkInfoOLELinks = 2
xlLinkInfoPublishers = 5
xlLinkInfoSubscribers = 6
# XlCVError
xlErrDiv0 = 2007
xlErrNA = 2042
xlErrName = 2029
xlErrNull = 2000
xlErrNum = 2036
xlErrRef = 2023
xlErrValue = 2015
# XlEditionFormat
xlBIFF = 2
xlPICT = 1
xlRTF = 4
xlVALU = 8
# XlLink
xlExcelLinks = 1
xlOLELinks = 2
xlPublishers = 5
xlSubscribers = 6
# XlCellType
xlCellTypeBlanks = 4
xlCellTypeConstants = 2
xlCellTypeFormulas = -4123
xlCellTypeLastCell = 11
xlCellTypeComments = -4144
xlCellTypeVisible = 12
xlCellTypeAllFormatConditions = -4172
xlCellTypeSameFormatConditions = -4173
xlCellTypeAllValidation = -4174
xlCellTypeSameValidation = -4175
# XlArrangeStyle
xlArrangeStyleCascade = 7
xlArrangeStyleHorizontal = -4128
xlArrangeStyleTiled = 1
xlArrangeStyleVertical = -4166
# XlMousePointer
xlIBeam = 3
xlDefault = -4143
xlNorthwestArrow = 1
xlWait = 2
# XlEditionOptionsOption
xlAutomaticUpdate = 4
xlCancel = 1
xlChangeAttributes = 6
xlManualUpdate = 5
xlOpenSource = 3
xlSelect = 3
xlSendPublisher = 2
xlUpdateSubscriber = 2
# XlAutoFillType
xlFillCopy = 1
xlFillDays = 5
xlFillDefault = 0
xlFillFormats = 3
xlFillMonths = 7
xlFillSeries = 2
xlFillValues = 4
xlFillWeekdays = 6
xlFillYears = 8
xlGrowthTrend = 10
xlLinearTrend = 9
# XlAutoFilterOperator
xlAnd = 1
xlBottom10Items = 4
xlBottom10Percent = 6
xlOr = 2
xlTop10Items = 3
xlTop10Percent = 5
# XlClipboardFormat
xlClipboardFormatBIFF = 8
xlClipboardFormatBIFF2 = 18
xlClipboardFormatBIFF3 = 20
xlClipboardFormatBIFF4 = 30
xlClipboardFormatBinary = 15
xlClipboardFormatBitmap = 9
xlClipboardFormatCGM = 13
xlClipboardFormatCSV = 5
xlClipboardFormatDIF = 4
xlClipboardFormatDspText = 12
xlClipboardFormatEmbeddedObject = 21
xlClipboardFormatEmbedSource = 22
xlClipboardFormatLink = 11
xlClipboardFormatLinkSource = 23
xlClipboardFormatLinkSourceDesc = 32
xlClipboardFormatMovie = 24
xlClipboardFormatNative = 14
xlClipboardFormatObjectDesc = 31
xlClipboardFormatObjectLink = 19
xlClipboardFormatOwnerLink = 17
xlClipboardFormatPICT = 2
xlClipboardFormatPrintPICT = 3
xlClipboardFormatRTF = 7
xlClipboardFormatScreenPICT = 29
xlClipboardFormatStandardFont = 28
xlClipboardFormatStandardScale = 27
xlClipboardFormatSYLK = 6
xlClipboardFormatTable = 16
xlClipboardFormatText = 0
xlClipboardFormatToolFace = 25
xlClipboardFormatToolFacePICT = 26
xlClipboardFormatVALU = 1
xlClipboardFormatWK1 = 10
# XlFileFormat
xlAddIn = 18
xlCSV = 6
xlCSVMac = 22
xlCSVMSDOS = 24
xlCSVWindows = 23
xlDBF2 = 7
xlDBF3 = 8
xlDBF4 = 11
xlDIF = 9
xlExcel2 = 16
xlExcel2FarEast = 27
xlExcel3 = 29
xlExcel4 = 33
xlExcel5 = 39
xlExcel7 = 39
xlExcel9795 = 43
xlExcel4Workbook = 35
xlIntlAddIn = 26
xlIntlMacro = 25
xlWorkbookNormal = -4143
xlSYLK = 2
xlTemplate = 17
xlCurrentPlatformText = -4158
xlTextMac = 19
xlTextMSDOS = 21
xlTextPrinter = 36
xlTextWindows = 20
xlWJ2WD1 = 14
xlWK1 = 5
xlWK1ALL = 31
xlWK1FMT = 30
xlWK3 = 15
xlWK4 = 38
xlWK3FM3 = 32
xlWKS = 4
xlWorks2FarEast = 28
xlWQ1 = 34
xlWJ3 = 40
xlWJ3FJ3 = 41
xlUnicodeText = 42
xlHtml = 44
xlWebArchive = 45
xlXMLSpreadsheet = 46
# XlApplicationInternational
xl24HourClock = 33
xl4DigitYears = 43
xlAlternateArraySeparator = 16
xlColumnSeparator = 14
xlCountryCode = 1
xlCountrySetting = 2
xlCurrencyBefore = 37
xlCurrencyCode = 25
xlCurrencyDigits = 27
xlCurrencyLeadingZeros = 40
xlCurrencyMinusSign = 38
xlCurrencyNegative = 28
xlCurrencySpaceBefore = 36
xlCurrencyTrailingZeros = 39
xlDateOrder = 32
xlDateSeparator = 17
xlDayCode = 21
xlDayLeadingZero = 42
xlDecimalSeparator = 3
xlGeneralFormatName = 26
xlHourCode = 22
xlLeftBrace = 12
xlLeftBracket = 10
xlListSeparator = 5
xlLowerCaseColumnLetter = 9
xlLowerCaseRowLetter = 8
xlMDY = 44
xlMetric = 35
xlMinuteCode = 23
xlMonthCode = 20
xlMonthLeadingZero = 41
xlMonthNameChars = 30
xlNoncurrencyDigits = 29
xlNonEnglishFunctions = 34
xlRightBrace = 13
xlRightBracket = 11
xlRowSeparator = 15
xlSecondCode = 24
xlThousandsSeparator = 4
xlTimeLeadingZero = 45
xlTimeSeparator = 18
xlUpperCaseColumnLetter = 7
xlUpperCaseRowLetter = 6
xlWeekdayNameChars = 31
xlYearCode = 19
# XlPageBreakExtent
xlPageBreakFull = 1
xlPageBreakPartial = 2
# XlCellInsertionMode
xlOverwriteCells = 0
xlInsertDeleteCells = 1
xlInsertEntireRows = 2
# XlFormulaLabel
xlNoLabels = -4142
xlRowLabels = 1
xlColumnLabels = 2
xlMixedLabels = 3
# XlHighlightChangesTime
xlSinceMyLastSave = 1
xlAllChanges = 2
xlNotYetReviewed = 3
# XlCommentDisplayMode
xlNoIndicator = 0
xlCommentIndicatorOnly = -1
xlCommentAndIndicator = 1
# XlFormatConditionType
xlCellValue = 1
xlExpression = 2
# XlFormatConditionOperator
xlBetween = 1
xlNotBetween = 2
xlEqual = 3
xlNotEqual = 4
xlGreater = 5
xlLess = 6
xlGreaterEqual = 7
xlLessEqual = 8
# XlEnableSelection
xlNoRestrictions = 0
xlUnlockedCells = 1
xlNoSelection = -4142
# XlDVType
xlValidateInputOnly = 0
xlValidateWholeNumber = 1
xlValidateDecimal = 2
xlValidateList = 3
xlValidateDate = 4
xlValidateTime = 5
xlValidateTextLength = 6
xlValidateCustom = 7
# XlIMEMode
xlIMEModeNoControl = 0
xlIMEModeOn = 1
xlIMEModeOff = 2
xlIMEModeDisable = 3
xlIMEModeHiragana = 4
xlIMEModeKatakana = 5
xlIMEModeKatakanaHalf = 6
xlIMEModeAlphaFull = 7
xlIMEModeAlpha = 8
xlIMEModeHangulFull = 9
xlIMEModeHangul = 10
# XlDVAlertStyle
xlValidAlertStop = 1
xlValidAlertWarning = 2
xlValidAlertInformation = 3
# XlChartLocation
xlLocationAsNewSheet = 1
xlLocationAsObject = 2
xlLocationAutomatic = 3
# XlPaperSize
xlPaper10x14 = 16
xlPaper11x17 = 17
xlPaperA3 = 8
xlPaperA4 = 9
xlPaperA4Small = 10
xlPaperA5 = 11
xlPaperB4 = 12
xlPaperB5 = 13
xlPaperCsheet = 24
xlPaperDsheet = 25
xlPaperEnvelope10 = 20
xlPaperEnvelope11 = 21
xlPaperEnvelope12 = 22
xlPaperEnvelope14 = 23
xlPaperEnvelope9 = 19
xlPaperEnvelopeB4 = 33
xlPaperEnvelopeB5 = 34
xlPaperEnvelopeB6 = 35
xlPaperEnvelopeC3 = 29
xlPaperEnvelopeC4 = 30
xlPaperEnvelopeC5 = 28
xlPaperEnvelopeC6 = 31
xlPaperEnvelopeC65 = 32
xlPaperEnvelopeDL = 27
xlPaperEnvelopeItaly = 36
xlPaperEnvelopeMonarch = 37
xlPaperEnvelopePersonal = 38
xlPaperEsheet = 26
xlPaperExecutive = 7
xlPaperFanfoldLegalGerman = 41
xlPaperFanfoldStdGerman = 40
xlPaperFanfoldUS = 39
xlPaperFolio = 14
xlPaperLedger = 4
xlPaperLegal = 5
xlPaperLetter = 1
xlPaperLetterSmall = 2
xlPaperNote = 18
xlPaperQuarto = 15
xlPaperStatement = 6
xlPaperTabloid = 3
xlPaperUser = 256
# XlPasteSpecialOperation
xlPasteSpecialOperationAdd = 2
xlPasteSpecialOperationDivide = 5
xlPasteSpecialOperationMultiply = 4
xlPasteSpecialOperationNone = -4142
xlPasteSpecialOperationSubtract = 3
# XlPasteType
xlPasteAll = -4104
xlPasteAllExceptBorders = 7
xlPasteFormats = -4122
xlPasteFormulas = -4123
xlPasteComments = -4144
xlPasteValues = -4163
xlPasteColumnWidths = 8
xlPasteValidation = 6
xlPasteFormulasAndNumberFormats = 11
xlPasteValuesAndNumberFormats = 12
# XlPhoneticCharacterType
xlKatakanaHalf = 0
xlKatakana = 1
xlHiragana = 2
xlNoConversion = 3
# XlPhoneticAlignment
xlPhoneticAlignNoControl = 0
xlPhoneticAlignLeft = 1
xlPhoneticAlignCenter = 2
xlPhoneticAlignDistributed = 3
# XlPictureAppearance
xlPrinter = 2
xlScreen = 1
# XlPivotFieldOrientation
xlColumnField = 2
xlDataField = 4
xlHidden = 0
xlPageField = 3
xlRowField = 1
# XlPivotFieldCalculation
xlDifferenceFrom = 2
xlIndex = 9
xlNoAdditionalCalculation = -4143
xlPercentDifferenceFrom = 4
xlPercentOf = 3
xlPercentOfColumn = 7
xlPercentOfRow = 6
xlPercentOfTotal = 8
xlRunningTotal = 5
# XlPlacement
xlFreeFloating = 3
xlMove = 2
xlMoveAndSize = 1
# XlPlatform
xlMacintosh = 1
xlMSDOS = 3
xlWindows = 2
# XlPrintLocation
xlPrintSheetEnd = 1
xlPrintInPlace = 16
xlPrintNoComments = -4142
# XlPriority
xlPriorityHigh = -4127
xlPriorityLow = -4134
xlPriorityNormal = -4143
# XlPTSelectionMode
xlLabelOnly = 1
xlDataAndLabel = 0
xlDataOnly = 2
xlOrigin = 3
xlButton = 15
xlBlanks = 4
xlFirstRow = 256
# XlRangeAutoFormat
xlRangeAutoFormat3DEffects1 = 13
xlRangeAutoFormat3DEffects2 = 14
xlRangeAutoFormatAccounting1 = 4
xlRangeAutoFormatAccounting2 = 5
xlRangeAutoFormatAccounting3 = 6
xlRangeAutoFormatAccounting4 = 17
xlRangeAutoFormatClassic1 = 1
xlRangeAutoFormatClassic2 = 2
xlRangeAutoFormatClassic3 = 3
xlRangeAutoFormatColor1 = 7
xlRangeAutoFormatColor2 = 8
xlRangeAutoFormatColor3 = 9
xlRangeAutoFormatList1 = 10
xlRangeAutoFormatList2 = 11
xlRangeAutoFormatList3 = 12
xlRangeAutoFormatLocalFormat1 = 15
xlRangeAutoFormatLocalFormat2 = 16
xlRangeAutoFormatLocalFormat3 = 19
xlRangeAutoFormatLocalFormat4 = 20
xlRangeAutoFormatReport1 = 21
xlRangeAutoFormatReport2 = 22
xlRangeAutoFormatReport3 = 23
xlRangeAutoFormatReport4 = 24
xlRangeAutoFormatReport5 = 25
xlRangeAutoFormatReport6 = 26
xlRangeAutoFormatReport7 = 27
xlRangeAutoFormatReport8 = 28
xlRangeAutoFormatReport9 = 29
xlRangeAutoFormatReport10 = 30
xlRangeAutoFormatClassicPivotTable = 31
xlRangeAutoFormatTable1 = 32
xlRangeAutoFormatTable2 = 33
xlRangeAutoFormatTable3 = 34
xlRangeAutoFormatTable4 = 35
xlRangeAutoFormatTable5 = 36
xlRangeAutoFormatTable6 = 37
xlRangeAutoFormatTable7 = 38
xlRangeAutoFormatTable8 = 39
xlRangeAutoFormatTable9 = 40
xlRangeAutoFormatTable10 = 41
xlRangeAutoFormatPTNone = 42
xlRangeAutoFormatNone = -4142
xlRangeAutoFormatSimple = -4154
# XlReferenceType
xlAbsolute = 1
xlAbsRowRelColumn = 2
xlRelative = 4
xlRelRowAbsColumn = 3
# XlLayoutFormType
xlTabular = 0
xlOutline = 1
# XlRoutingSlipDelivery
xlAllAtOnce = 2
xlOneAfterAnother = 1
# XlRoutingSlipStatus
xlNotYetRouted = 0
xlRoutingComplete = 2
xlRoutingInProgress = 1
# XlRunAutoMacro
xlAutoActivate = 3
xlAutoClose = 2
xlAutoDeactivate = 4
xlAutoOpen = 1
# XlSaveAction
xlDoNotSaveChanges = 2
xlSaveChanges = 1
# XlSaveAsAccessMode
xlExclusive = 3
xlNoChange = 1
xlShared = 2
# XlSaveConflictResolution
xlLocalSessionChanges = 2
xlOtherSessionChanges = 3
xlUserResolution = 1
# XlSearchDirection
xlNext = 1
xlPrevious = 2
# XlSearchOrder
xlByColumns = 2
xlByRows = 1
# XlSheetVisibility
xlSheetVisible = -1
xlSheetHidden = 0
xlSheetVeryHidden = 2
# XlSortMethod
xlPinYin = 1
xlStroke = 2
# XlSortMethodOld
xlCodePage = 2
xlSyllabary = 1
# XlSortOrder
xlAscending = 1
xlDescending = 2
# XlSortOrientation
xlSortRows = 2
xlSortColumns = 1
# XlSortType
xlSortLabels = 2
xlSortValues = 1
# XlSpecialCellsValue
xlErrors = 16
xlLogical = 4
xlNumbers = 1
xlTextValues = 2
# XlSubscribeToFormat
xlSubscribeToPicture = -4147
xlSubscribeToText = -4158
# XlSummaryRow
xlSummaryAbove = 0
xlSummaryBelow = 1
# XlSummaryColumn
xlSummaryOnLeft = -4131
xlSummaryOnRight = -4152
# XlSummaryReportType
xlSummaryPivotTable = -4148
xlStandardSummary = 1
# XlTabPosition
xlTabPositionFirst = 0
xlTabPositionLast = 1
# XlTextParsingType
xlDelimited = 1
xlFixedWidth = 2
# XlTextQualifier
xlTextQualifierDoubleQuote = 1
xlTextQualifierNone = -4142
xlTextQualifierSingleQuote = 2
# XlWBATemplate
xlWBATChart = -4109
xlWBATExcel4IntlMacroSheet = 4
xlWBATExcel4MacroSheet = 3
xlWBATWorksheet = -4167
# XlWindowView
xlNormalView = 1
xlPageBreakPreview = 2
# XlXLMMacroType
xlCommand = 2
xlFunction = 1
xlNotXLM = 3
# XlYesNoGuess
xlGuess = 0
xlNo = 2
xlYes = 1
# XlBordersIndex
xlInsideHorizontal = 12
xlInsideVertical = 11
xlDiagonalDown = 5
xlDiagonalUp = 6
xlEdgeBottom = 9
xlEdgeLeft = 7
xlEdgeRight = 10
xlEdgeTop = 8
# XlToolbarProtection
xlNoButtonChanges = 1
xlNoChanges = 4
xlNoDockingChanges = 3
xlToolbarProtectionNone = -4143
xlNoShapeChanges = 2
# XlBuiltInDialog
xlDialogOpen = 1
xlDialogOpenLinks = 2
xlDialogSaveAs = 5
xlDialogFileDelete = 6
xlDialogPageSetup = 7
xlDialogPrint = 8
xlDialogPrinterSetup = 9
xlDialogArrangeAll = 12
xlDialogWindowSize = 13
xlDialogWindowMove = 14
xlDialogRun = 17
xlDialogSetPrintTitles = 23
xlDialogFont = 26
xlDialogDisplay = 27
xlDialogProtectDocument = 28
xlDialogCalculation = 32
xlDialogExtract = 35
xlDialogDataDelete = 36
xlDialogSort = 39
xlDialogDataSeries = 40
xlDialogTable = 41
xlDialogFormatNumber = 42
xlDialogAlignment = 43
xlDialogStyle = 44
xlDialogBorder = 45
xlDialogCellProtection = 46
xlDialogColumnWidth = 47
xlDialogClear = 52
xlDialogPasteSpecial = 53
xlDialogEditDelete = 54
xlDialogInsert = 55
xlDialogPasteNames = 58
xlDialogDefineName = 61
xlDialogCreateNames = 62
xlDialogFormulaGoto = 63
xlDialogFormulaFind = 64
xlDialogGalleryArea = 67
xlDialogGalleryBar = 68
xlDialogGalleryColumn = 69
xlDialogGalleryLine = 70
xlDialogGalleryPie = 71
xlDialogGalleryScatter = 72
xlDialogCombination = 73
xlDialogGridlines = 76
xlDialogAxes = 78
xlDialogAttachText = 80
xlDialogPatterns = 84
xlDialogMainChart = 85
xlDialogOverlay = 86
xlDialogScale = 87
xlDialogFormatLegend = 88
xlDialogFormatText = 89
xlDialogParse = 91
xlDialogUnhide = 94
xlDialogWorkspace = 95
xlDialogActivate = 103
xlDialogCopyPicture = 108
xlDialogDeleteName = 110
xlDialogDeleteFormat = 111
xlDialogNew = 119
xlDialogRowHeight = 127
xlDialogFormatMove = 128
xlDialogFormatSize = 129
xlDialogFormulaReplace = 130
xlDialogSelectSpecial = 132
xlDialogApplyNames = 133
xlDialogReplaceFont = 134
xlDialogSplit = 137
xlDialogOutline = 142
xlDialogSaveWorkbook = 145
xlDialogCopyChart = 147
xlDialogFormatFont = 150
xlDialogNote = 154
xlDialogSetUpdateStatus = 159
xlDialogColorPalette = 161
xlDialogChangeLink = 166
xlDialogAppMove = 170
xlDialogAppSize = 171
xlDialogMainChartType = 185
xlDialogOverlayChartType = 186
xlDialogOpenMail = 188
xlDialogSendMail = 189
xlDialogStandardFont = 190
xlDialogConsolidate = 191
xlDialogSortSpecial = 192
xlDialogGallery3dArea = 193
xlDialogGallery3dColumn = 194
xlDialogGallery3dLine = 195
xlDialogGallery3dPie = 196
xlDialogView3d = 197
xlDialogGoalSeek = 198
xlDialogWorkgroup = 199
xlDialogFillGroup = 200
xlDialogUpdateLink = 201
xlDialogPromote = 202
xlDialogDemote = 203
xlDialogShowDetail = 204
xlDialogObjectProperties = 207
xlDialogSaveNewObject = 208
xlDialogApplyStyle = 212
xlDialogAssignToObject = 213
xlDialogObjectProtection = 214
xlDialogCreatePublisher = 217
xlDialogSubscribeTo = 218
xlDialogShowToolbar = 220
xlDialogPrintPreview = 222
xlDialogEditColor = 223
xlDialogFormatMain = 225
xlDialogFormatOverlay = 226
xlDialogEditSeries = 228
xlDialogDefineStyle = 229
xlDialogGalleryRadar = 249
xlDialogEditionOptions = 251
xlDialogZoom = 256
xlDialogInsertObject = 259
xlDialogSize = 261
xlDialogMove = 262
xlDialogFormatAuto = 269
xlDialogGallery3dBar = 272
xlDialogGallery3dSurface = 273
xlDialogCustomizeToolbar = 276
xlDialogWorkbookAdd = 281
xlDialogWorkbookMove = 282
xlDialogWorkbookCopy = | |
#!/usr/bin/env python3
# Author: <NAME>
import os
import os.path as osp
import time
from typing import Any, Dict, List, Optional
from sklearn.metrics import r2_score, explained_variance_score
import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_transformers import AdamW, WarmupCosineSchedule
from torch.utils import data
from src import (
get_model_class, is_learning_model, is_input_masked_model,
TensorboardWriter,
create_logger,
)
from src.utils import get_inverse_sqrt_schedule
from src.dataset import DATASET_MODES, SpikesDataset
from src.mask import Masker, UNMASKED_LABEL, DEFAULT_MASK_VAL
"""
Runner class for NDT
"""
def get_lightest_gpus(num_gpus):
# TODO update with better CUDA_VISIBLE_DEVICES support (or just use ray)
if torch.cuda.device_count() == 1:
return [0]
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return np.argsort(memory_available)[-num_gpus:].tolist()
def exp_smooth(new_metric, old_metric, mu=0.5):
r""" Higher mu is smoother """
return (1.0 - mu) * new_metric + mu * old_metric
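# Example (illustrative): exp_smooth(10.0, 0.0, mu=0.9) == 1.0, i.e. a high mu keeps
# the rolling value close to the old metric.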
def exp_smooth_dict(new_metrics, rolling_metrics, mu=0.5):
for m in new_metrics:
if m in rolling_metrics:
rolling_metrics[m] = exp_smooth(new_metrics[m], rolling_metrics[m], mu)
class Runner:
r"""
Two paths to inference.
A:
Have a config file.
Load device.
Load a checkpoint.
B:
Pass a checkpoint path (other steps automated)
We prioritize path A.
"""
def __init__(self, config=None, checkpoint_path=None):
assert config is not None or checkpoint_path is not None
self.flush_secs = 10
self.model = None
self.optimizer = None
self.lr_scheduler = None
self.device = None
self.num_neurons = 0
self.pth_time = 0
self.count_updates = 0
self.count_checkpoints = 0
self.num_gpus = 0
self.masker = None
self.rolling_metrics = {} # For PBT
if checkpoint_path is not None:
tmp_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ckpt_dict = torch.load(checkpoint_path, map_location=tmp_device)
config = ckpt_dict["config"]
self.config = config
if not osp.exists(config.LOG_DIR):
os.makedirs(config.LOG_DIR, exist_ok=True)
logfile_path = osp.join(config.LOG_DIR, f"{config.VARIANT}.log")
# if osp.exists(logfile_path):
# os.remove(logfile_path)
self.logger = create_logger()
self.logger.clear_filehandlers()
self.logger.add_filehandler(logfile_path)
if hasattr(config.TRAIN, "TUNE_MODE") and config.TRAIN.TUNE_MODE:
self.logger.clear_streamhandlers()
self.best_val = {
"value": 100,
"update": -1,
}
self.best_unmasked_val = {
"value": 100,
"update": -1,
}
self.best_R2 = {
"value": -100,
"update": -1,
}
if checkpoint_path is not None:
self.load_device()
self.load_checkpoint(checkpoint_path, map_location=self.device)
def setup_model(self, device):
r""" Creates model and assigns to device """
self.model = get_model_class(self.config.MODEL.NAME)(
self.config.MODEL,
self.trial_length,
self.num_neurons,
device,
max_spikes=self.max_spikes
)
num_hidden = self.model.get_hidden_size()
if self.num_gpus > 1:
if self.config.SYSTEM.GPU_AUTO_ASSIGN:
gpu_indices = get_lightest_gpus(self.num_gpus)
else:
gpu_indices = list(range(self.num_gpus))
if self.device_gpu in gpu_indices:
gpu_indices.remove(self.device_gpu)
else:
gpu_indices = gpu_indices[:-1]
gpu_indices = [self.device_gpu] + gpu_indices # Make sure our primary gpu is first
self.model = nn.DataParallel(self.model, device_ids=gpu_indices)
self.model = self.model.to(device)
return num_hidden
def _get_parameters(self):
return list(self.model.parameters())
def _do_log(self, update):
return (
update > 0 and update % self.config.TRAIN.LOG_INTERVAL == 0
)
def save_checkpoint(
self, file_name: str, extra_state: Optional[Dict] = None
) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
checkpoint = {
"state_dict": self.model.state_dict(),
"optim_state": None if self.optimizer is None else self.optimizer.state_dict(),
"lr_scheduler": None if self.lr_scheduler is None else self.lr_scheduler.state_dict(),
"config": self.config,
"best_val": self.best_val,
"best_unmasked_val": self.best_unmasked_val,
"best_r2": self.best_R2,
"max_spikes": self.max_spikes,
"num_neurons": self.num_neurons,
"trial_length": self.trial_length,
}
checkpoint["extra_state"] = dict( # metadata
update=self.count_updates,
checkpoint=self.count_checkpoints,
pth_time=self.pth_time,
max_spikes=self.max_spikes
)
if extra_state is not None:
checkpoint["extra_state"].update(extra_state)
if len(osp.split(file_name)[0]) > 0:
full_path = file_name
else:
os.makedirs(self.config.CHECKPOINT_DIR, exist_ok=True)
full_path = osp.join(self.config.CHECKPOINT_DIR, file_name)
#self.logger.info("Saving {} with val {}, dropout {}. Decoder weights: {}".format(
# full_path,
# self.best_val,
# self.config.MODEL.DROPOUT,
# self.model.state_dict()['decoder.0.bias'][:5]
# ))
torch.save(
checkpoint, full_path
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Will fully load model if not already configured. Expects runner devices to be set.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
ckpt_dict = torch.load(checkpoint_path, *args, **kwargs)
if "num_neurons" in ckpt_dict:
self.num_neurons = ckpt_dict["num_neurons"]
if "trial_length" in ckpt_dict:
self.trial_length = ckpt_dict["trial_length"]
if "max_spikes" in ckpt_dict:
self.max_spikes = ckpt_dict["max_spikes"]
if self.model is None:
self.setup_model(self.device)
self.model.load_state_dict(ckpt_dict["state_dict"])
if "optim_state" in ckpt_dict and self.optimizer is not None:
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
if "lr_scheduler" in ckpt_dict and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(ckpt_dict["lr_scheduler"])
if "best_val" in ckpt_dict:
self.best_val = ckpt_dict["best_val"]
if "best_unmasked_val" in ckpt_dict:
self.best_unmasked_val = ckpt_dict["best_unmasked_val"]
if "best_r2" in ckpt_dict:
self.best_R2 = ckpt_dict["best_r2"]
if "extra_state" in ckpt_dict:
self.count_updates = ckpt_dict["extra_state"]["update"]
self.logger.info("Update loaded -- {}".format(self.count_updates))
self.count_checkpoints = ckpt_dict["extra_state"]["checkpoint"]
self.pth_time = ckpt_dict["extra_state"]["pth_time"]
#self.logger.info("Loading {} with val {}, dropout {}. Decoder weight {}".format(
# checkpoint_path,
# self.best_val,
# self.config.MODEL.DROPOUT,
# self.model.state_dict()['decoder.0.bias'][:5]
# ))
return ckpt_dict
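# Typical resume flow (illustrative): call load_device() first, then
# load_checkpoint("ckpt.pth", map_location=self.device); the model is created on
# demand and optimizer/scheduler state is restored when present.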
def load_device(self):
if not torch.cuda.is_available():
self.device = torch.device("cpu")
else:
self.num_gpus = min(self.config.SYSTEM.NUM_GPUS, torch.cuda.device_count())
self.logger.info(f"Using {self.num_gpus} GPUs")
gpu_id = self.config.SYSTEM.TORCH_GPU_ID
if self.config.SYSTEM.GPU_AUTO_ASSIGN:
gpu_id = get_lightest_gpus(1)[0]
self.device = (
torch.device("cuda", gpu_id)
)
self.device_gpu = gpu_id
self.logger.info(f"Using {self.device}")
def update_config(self, config):
r""" Update config node and propagate through model. Used for pbt.
"""
# Diff LR
#self.logger.info(f"\n\n Updating config! {config.TRAIN.LR.SCHEDULE} \n\n")
if self.config.TRAIN.LR.INIT != config.TRAIN.LR.INIT and self.optimizer is not None:
for g in self.optimizer.param_groups:
g['lr'] = config.TRAIN.LR.INIT # Manual override of LR
self.config = config
if self.masker is not None:
self.masker.config = config.TRAIN
self.model.update_config(config.MODEL)
def load_train_val_data_and_masker(self):
training_set = SpikesDataset(self.config, self.config.DATA.TRAIN_FILENAME, mode=DATASET_MODES.train, logger=self.logger)
self.training_generator = data.DataLoader(training_set,
batch_size=self.config.TRAIN.BATCH_SIZE, shuffle=True
)
# We'll need this to embed spikes. Hoping max spikes for val/train isn't too far off
self.max_spikes = training_set.get_max_spikes() + 3
self.logger.info(f"Clipping all spikes to {self.max_spikes}.")
self.logger.info(f"Training on {len(training_set)} samples.")
if self.config.TRAIN.DO_VAL:
self.validation_set = SpikesDataset(self.config, self.config.DATA.VAL_FILENAME, mode=DATASET_MODES.val, logger=self.logger)
self.validation_set.clip_spikes(self.max_spikes)
# Typically this is small enough
# validation_generator = data.DataLoader(validation_set,
# batch_size=len(validation_set), shuffle=False,
# )
self.num_neurons = training_set.get_num_neurons()
self.trial_length = training_set.trial_length
self.masker = Masker(self.config.TRAIN, self.device)
def load_optimizer(self, num_hidden):
train_cfg = self.config.TRAIN
if is_learning_model(self.config.MODEL.NAME):
self.optimizer = AdamW(
list(filter(lambda p: p.requires_grad, self._get_parameters())),
lr=train_cfg.LR.INIT,
weight_decay=train_cfg.WEIGHT_DECAY,
eps=train_cfg.EPS,
)
self.logger.info(
"number of trainable parameters: {}".format(
sum(
param.numel()
for param in self.model.parameters()
if param.requires_grad
)
)
)
if self.optimizer is not None and train_cfg.LR.SCHEDULE:
if train_cfg.LR.SCHEDULER == "cosine":
self.lr_scheduler = WarmupCosineSchedule(
self.optimizer,
warmup_steps=train_cfg.LR.WARMUP,
t_total=train_cfg.NUM_UPDATES
)
else:
self.lr_scheduler = get_inverse_sqrt_schedule(
self.optimizer,
warmup_steps=train_cfg.LR.WARMUP,
lr_max=train_cfg.LR.INIT
)
def train(self, checkpoint_path=None) -> None:
r"""Main method for training model.
Args:
checkpoint_path: path of checkpoint to load
Returns:
None
"""
self.load_device()
train_cfg = self.config.TRAIN
self.load_train_val_data_and_masker()
num_hidden = self.setup_model(self.device)
self.load_optimizer(num_hidden)
if checkpoint_path is not None:
self.load_checkpoint(checkpoint_path, map_location="cpu")
start_updates = self.count_updates
for update in range(start_updates, train_cfg.NUM_UPDATES):
metrics = self.train_epoch()
if metrics["done"]:
break
if not metrics["done"]:
self.logger.info("Reached max updates without early stopping. Consider training some more.")
if not train_cfg.TUNE_MODE:
metrics_dict = {
"Loss": self.best_val["value"],
"Unmasked Loss": self.best_unmasked_val["value"],
}
if train_cfg.DO_R2:
metrics_dict.update({ "R2": self.best_R2["value"] })
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
writer.add_hparams(self.extract_hps_dict(), metrics_dict)
torch.cuda.empty_cache()
def train_epoch(self):
r"""
One (PBT) epoch of training. Model and data should be set up and on device at this point.
Note: LFADS runs an epoch every pass through the data. This may be too frequent for transformers,
i.e. we may need multiple passes through the data between reports. For now, we report once per pass through the data.
Returns:
metrics: Information about the epoch.
"done" -- should stop this run (e.g. due to early stopping). Keyword for Tune PBT.
"""
if self.training_generator is None:
raise Exception("No dataset generator set")
update = self.count_updates
#self.logger.info(f"update {update}")
train_cfg = self.config.TRAIN
expand_prob = min((update - train_cfg.MASK_SPAN_RAMP_START) / (train_cfg.MASK_SPAN_RAMP_END - train_cfg.MASK_SPAN_RAMP_START), 1)
self.model.train()
t_start = time.time()
for spikes, rates, heldout_spikes, forward_spikes in self.training_generator:
spikes = spikes.to(self.device)
rates = rates.to(self.device) if self.config.MODEL.REQUIRES_RATES else None
if self.training_generator.dataset.has_heldout:
heldout_spikes = heldout_spikes.to(self.device)
forward_spikes = forward_spikes.to(self.device)
else:
heldout_spikes = None
forward_spikes = None
masked_spikes, labels = self.masker.mask_batch(
spikes,
max_spikes=self.max_spikes,
should_mask=is_input_masked_model(self.config.MODEL.NAME),
expand_prob=expand_prob,
heldout_spikes=heldout_spikes,
forward_spikes=forward_spikes
)
mlm_loss, _, layer_outputs, *_ = self.model(
masked_spikes,
mask_labels=labels,
rates=rates,
return_outputs=False,
)
loss = mlm_loss.mean()
if self.optimizer is not None:
self.optimizer.zero_grad()
loss.backward()
params = self._get_parameters()
nn.utils.clip_grad_norm_(
params, train_cfg.MAX_GRAD_NORM
)
self.optimizer.step()
self.pth_time += time.time() - t_start
self.count_updates += 1
update = self.count_updates
if self.optimizer is not None and train_cfg.LR.SCHEDULE:
self.lr_scheduler.step()
if self._do_log(update):
# * Note we're only logging the loss of the last train step
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
if self.optimizer is not None and train_cfg.LR.SCHEDULE:
writer.add_scalar("lr", self.lr_scheduler.get_last_lr()[0])
self.logger.queue_stat("LR", self.lr_scheduler.get_last_lr()[0])
writer.add_scalar(
"loss", # train loss
loss,
update,
)
self.logger.queue_stat("loss", loss.item())
metrics_dict = dict(
done = False,
epoch = self.count_updates,
# r2 = self.best_r2["value"],
best_masked_loss = self.best_val["value"] # Tune will reference this value to select best model.
)
if (train_cfg.DO_VAL and update % train_cfg.VAL_INTERVAL == 0):
self.model.eval()
with torch.no_grad():
spikes, rates, heldout_spikes, forward_spikes = self.validation_set.get_dataset()
spikes = spikes.to(self.device)
rates = rates.to(self.device)
if self.validation_set.has_heldout:
heldout_spikes = heldout_spikes.to(self.device)
| |
<filename>ppq/IR/morph.py
from typing import Any, List
import numpy as np
import torch
from ppq.core import (DataType, TargetPlatform,
convert_any_to_python_primary_type,
convert_any_to_torch_tensor, ppq_warning)
from ppq.IR.quantize import DeviceSwitchOP
from ppq.IR.search import SearchableGraph
from ppq.scheduler import value_tracing_pattern
from .base.command import (GraphCommand, GraphCommandType,
ReplaceOperationCommand, ReplaceVariableCommand,
TruncateGraphCommand)
from .base.graph import Operation, Variable
from .processer import GraphCommandProcesser
class GraphReplacer(GraphCommandProcesser):
def process(self, command: GraphCommand) -> Any:
if command.command_type == GraphCommandType.REPLACE_OP:
assert isinstance(command, ReplaceOperationCommand), \
'Use ReplaceOperationCommand instead of GraphCommand'
return self.replace_op(command.op_name, command.replace_to)
if command.command_type == GraphCommandType.REPLACE_VAR:
assert isinstance(command, ReplaceVariableCommand), \
'Use ReplaceVariableCommand instead of GraphCommand'
return self.replace_var(command.op_name, command.replace_to)
def replace_op(self, op_name: str, replace_to: Operation):
if op_name not in self._graph.operations:
raise KeyError(f'Operation {op_name} is not in current graph')
operation = self._graph.operations[op_name]
replace_to.inputs.clear()
replace_to.inputs.extend(operation.inputs)
for input_var in operation.inputs:
dest_idx = input_var.dest_ops.index(operation)
input_var.dest_ops[dest_idx] = replace_to
replace_to.outputs.clear()
replace_to.outputs.extend(operation.outputs)
for output_var in operation.outputs:
output_var.source_op = replace_to
replace_to.parameters.clear()
replace_to.parameters.extend(operation.parameters)
self._graph.operations[op_name] = replace_to
def replace_var(self, var_name: str, replace_to: Variable):
if var_name not in self._graph.variables:
raise KeyError(f'Variable {var_name} is not in current graph')
variable = self._graph.variables[var_name]
replace_to.dest_ops.clear()
replace_to.dest_ops.extend(variable.dest_ops)
for dest_op in replace_to.dest_ops:
dest_idx = dest_op.inputs.index(variable)
dest_op.inputs[dest_idx] = replace_to
replace_to.source_op = variable.source_op
if variable.source_op is not None:
source_idx = variable.source_op.outputs.index(variable)
variable.source_op.outputs[source_idx] = replace_to
self._graph.variables[var_name] = replace_to
if var_name in self._graph.inputs:
self._graph.inputs[var_name] = replace_to
if var_name in self._graph.outputs:
self._graph.outputs[var_name] = replace_to
def _acceptable_command_types(self) -> List[GraphCommandType]:
return [
GraphCommandType.REPLACE_OP,
GraphCommandType.REPLACE_VAR,
]
class GraphFormatter(GraphCommandProcesser):
def _acceptable_command_types(self) -> List[GraphCommandType]:
return [
GraphCommandType.FORMAT_CLIP,
GraphCommandType.FORMAT_PAD,
GraphCommandType.FORMAT_GATHER,
GraphCommandType.FORMAT_CAST,
GraphCommandType.FORMAT_INT64_CONSTANT,
GraphCommandType.DELETE_ISOLATED,
GraphCommandType.REPLACE_SUB,
GraphCommandType.FORMAT_PARAMETERS,
GraphCommandType.FORMAT_CONSTANT_INPUT,
GraphCommandType.FORMAT_SLICE,
GraphCommandType.TRUNCATE_ON_VAR
]
def process(self, command: GraphCommand) -> Any:
if command.command_type == GraphCommandType.FORMAT_CLIP:
return self.format_clip()
if command.command_type == GraphCommandType.FORMAT_PAD:
return self.format_pad()
if command.command_type == GraphCommandType.FORMAT_GATHER:
return self.format_gather()
if command.command_type == GraphCommandType.FORMAT_CAST:
return self.format_cast()
if command.command_type == GraphCommandType.DELETE_ISOLATED:
return self.delete_isolated()
if command.command_type == GraphCommandType.FORMAT_INT64_CONSTANT:
return self.format_int64_constant()
if command.command_type == GraphCommandType.REPLACE_SUB:
return self.replace_substarction()
if command.command_type == GraphCommandType.FORMAT_PARAMETERS:
return self.format_parameter_variables()
if command.command_type == GraphCommandType.FORMAT_CONSTANT_INPUT:
return self.format_constant_input()
if command.command_type == GraphCommandType.FORMAT_SLICE:
return self.format_slice()
if command.command_type == GraphCommandType.TRUNCATE_ON_VAR:
assert isinstance(command, TruncateGraphCommand), f'Use TruncateGraphCommand here.'
return self.truncate_on_var(command.var, command.mark_as_output)
def format_slice(self) -> None:
"""
Slice: the opset-1 format differs from later opsets; this pass forcibly rewrites
opset-1 Slice ops into the opset-11 form (starts/ends/axes given as constant inputs).
"""
interested_ops = []
for operation in self.graph.operations.values():
if operation.type == 'Slice':
if 'starts' in operation.attributes:
assert 'starts' in operation.attributes and 'ends' in operation.attributes, (
f'Invalid Slice Operation Format, Slice operation is expected to have axes, '
'starts and ends attributes with opset 1, '
f'however your operation {operation.name} does not have complete attributes')
interested_ops.append(operation)
for slice in interested_ops:
assert isinstance(slice, Operation)
axes = slice.attributes.get('axes', None)
starts = slice.attributes['starts']
ends = slice.attributes['ends']
if axes is None: axes = [_ for _ in range(len(starts))]
slice.attributes.pop('starts')
slice.attributes.pop('ends')
if 'axes' in slice.attributes: slice.attributes.pop('axes')
self.__add_constant_input(slice, convert_any_to_torch_tensor(starts))
self.__add_constant_input(slice, convert_any_to_torch_tensor(ends))
self.__add_constant_input(slice, convert_any_to_torch_tensor(axes))
def format_pad(self) -> None:
"""
Depending on the model format, the Pad op comes in two input layouts:
1. the pads parameter is given by the second input variable
2. the pads parameter is stored in operation.attributes
This pass unifies Pad behaviour: the pads parameter is always moved into
operation.attributes.
When the padding mode is 'constant', the constant input (if present) is used
as the padding value and stored in the 'pads_value' attribute.
"""
interested_ops = []
for _, operation in self.graph.operations.items():
if operation.type == 'Pad': interested_ops.append(operation)
for operation in interested_ops:
assert isinstance(operation, Operation)
padding_value = operation.attributes.get('pads_value', 0)
padding_mode = operation.attributes.get('mode', 'constant')
if padding_mode == 'constant' and len(operation.inputs) == 3:
pads_variable = operation.inputs[1]
pads_constant_op = pads_variable.source_op
padding_value = pads_constant_op.attributes['value']
self.__delete_constant_input(operation, 1)
if len(operation.inputs) > 1:
# here exist a pattern: constant -> pad
pads_variable = operation.inputs[1]
pads_constant_op = pads_variable.source_op
pads = pads_constant_op.attributes['value']
self.__delete_constant_input(operation, 1)
operation.attributes['pads'] = convert_any_to_python_primary_type(pads)
if padding_mode == 'constant': operation.attributes['pads_value'] = padding_value
def format_clip(self) -> None:
"""
Depending on the model format, the Clip op comes in two input layouts:
1. min and max are given by the second and third input variables
2. min and max are given by attributes
This pass unifies Clip behaviour: min and max are always provided through the
second and third input variables.
If min or max is missing from the attributes, it is filled with -(2 << 30) or
+(2 << 30) so the resulting inputs are never empty.
A ValueError is raised when min and max are expected as input variables but
one of them is missing.
"""
interested_ops = []
for _, operation in self.graph.operations.items():
if operation.type == 'Clip' and ('min' in operation.attributes or 'max' in operation.attributes):
interested_ops.append(operation)
for op in interested_ops:
assert isinstance(op, Operation)
min = op.attributes.get('min', - 2 << 30)
max = op.attributes.get('max', + 2 << 30)
min_var = Variable(name=op.name + '_min', value=min, is_parameter=True, dest_ops=[op])
max_var = Variable(name=op.name + '_max', value=max, is_parameter=True, dest_ops=[op])
self.graph.append_variable(min_var)
self.graph.append_variable(max_var)
op.inputs.append(min_var)
op.inputs.append(max_var)
if 'min' in op.attributes: op.attributes.pop('min')
if 'max' in op.attributes: op.attributes.pop('max')
def format_gather(self) -> None:
"""
The index parameter of a Gather op may be given by an input variable, but it
must not be quantized and the backend needs it as a native Python value, so
this pass moves it into the attributes of the Gather op.
If the axis parameter is missing, it is forced to 0.
"""
interested_ops = []
for _, operation in self.graph.operations.items():
if operation.type == 'Gather': interested_ops.append(operation)
for operation in interested_ops:
assert isinstance(operation, Operation)
if len(operation.inputs) == 2:
index_op = operation.inputs[1].source_op
if index_op.type == 'Constant':
index = index_op.attributes['value']
self.__delete_constant_input(operation, 1)
operation.attributes['gather_index'] = convert_any_to_python_primary_type(index)
if 'axis' not in operation.attributes:
operation.attributes['axis'] = 0
if 'indices' in operation.attributes:
operation.attributes['gather_index'] = operation.attributes['indices']
operation.attributes.pop('indices')
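# Small sanity sketch (torch only): ONNX Gather with a constant index along `axis`
# behaves like index_select / plain indexing, which is why the pass above moves the
# constant index into the 'gather_index' attribute instead of keeping it as a
# quantizable input. Names below are illustrative only.
import torch
_data = torch.arange(12).reshape(3, 4)
_axis, _gather_index = 0, 2
_gathered = torch.index_select(_data, dim=_axis, index=torch.tensor([_gather_index]))
assert torch.equal(_gathered, _data[_gather_index].unsqueeze(0))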
def format_cast(self) -> None:
"""
cast op 的参数 to 默认为 int,使用该函数将其封装为 ppq.core.DataType
"""
interested_ops = []
for _, operation in self.graph.operations.items():
assert isinstance(operation, Operation)
if operation.type == 'Cast': interested_ops.append(operation)
for operation in interested_ops:
assert isinstance(operation, Operation)
assert 'to' in operation.attributes
operation.attributes['to'] = DataType.convert_from_numpy(operation.attributes['to'])
def format_int64_constant(self) -> None:
"""
convert all int64 constants to int32, check if direct dtype cast is available
将所有 int64 的 Constant 转换为 int32
将检查所有 Constant value, 如果 value 范围在 int32 表示范围内则执行转换。
"""
for operation in self.graph.operations.values():
if operation.type == 'Constant':
assert 'value' in operation.attributes
value = operation.attributes['value']
assert isinstance(value, torch.Tensor)
if value.dtype != torch.int64: continue
pvalue = convert_any_to_python_primary_type(value)
if not isinstance(pvalue, list): pvalue = [pvalue]
# only cast when every element fits into the int32 value range
check = [0x7FFFFFFF >= v >= -0x80000000 for v in pvalue]
if all(check): operation.attributes['value'] = value.int()
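# Standalone sketch of the range check used above (torch only): an int64 tensor is
# downcast to int32 only when every element fits into torch.int32.
import torch
def _safe_int32_cast(t: torch.Tensor) -> torch.Tensor:
    if t.dtype != torch.int64:
        return t
    info = torch.iinfo(torch.int32)
    if bool((t >= info.min).all()) and bool((t <= info.max).all()):
        return t.int()
    return t
assert _safe_int32_cast(torch.tensor([1, 2, 3])).dtype == torch.int32
assert _safe_int32_cast(torch.tensor([2 ** 40])).dtype == torch.int64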
def format_constant_input(self) -> None:
"""
部分部署平台不支持 Constant Op,在这种情况下我们使用这个 pass 把 Constant Op 的输入切换成 parameter variable 的形式
some backend platform doesn't support Constant Op, we use this pass to replace it by forcing its value to
be a parameter variable
"""
constant_ops = []
for operation in self.graph.operations.values():
if operation.type == 'Constant':
assert len(operation.outputs) == 1, (
f"Constant Operation {operation.name} has more than 1 output, is there a network parsing error?")
constant_ops.append(operation)
for operation in constant_ops:
assert isinstance(operation, Operation)
output_var = operation.outputs[0]
constant_value = operation.attributes['value']
output_var.value = constant_value
# force output variable to a parameter.
output_var._is_parameter = True
operation.outputs.clear()
output_var.source_op = None
self.graph.delete_operation(op_name=operation.name)
def truncate_on_var(self, var: Variable, mark_as_output: bool):
"""
从一个指定位置将图截断
Args:
var (Variable): the variable at which the graph is cut; every operation downstream of it is removed.
mark_as_output (bool): whether to mark var as a graph output after truncation.
Raises:
TypeError: if var is not a Variable instance.
KeyError: if var does not belong to the current graph.
"""
graph = self.graph
if not isinstance(var, Variable):
raise TypeError(f'Expected a Variable instance here, however {type(var)} was given.')
if var.name not in graph.variables:
raise KeyError(f'Can not find variable {var.name} in current graph')
mark_to_delete, delete_queue, didx = set(), [], 0
delete_queue.extend(var.dest_ops)
while didx < len(delete_queue):
first_op = delete_queue[didx]
if first_op not in mark_to_delete:
mark_to_delete.add(first_op)
delete_queue.extend(graph.get_downstream_operations(first_op))
didx += 1
for operation in mark_to_delete:
graph.remove_operation(operation)
if mark_as_output:
graph.mark_variable_as_graph_output(var)
self.delete_isolated()
def delete_isolated(self):
blacklist = [None]
while len(blacklist) > 0:
blacklist = []
# delete all operations which are not links to a valid graph output
for op in self.graph.operations.values():
if len(self.graph.get_downstream_operations(op)) == 0:
output_names = [var.name for var in op.outputs]
if all([name not in self.graph.outputs for name in output_names]):
blacklist.append(op)
for op in blacklist:
for var in op.outputs:
self.graph.remove_variable(var)
self.graph.remove_operation(op)
var_blacklist = [None]
while len(var_blacklist) > 0:
var_blacklist = set()
# delete all variables that links to invalid operations:
for var in self.graph.variables.values():
import sys
sys.path.insert(0, '../reports/code_blocks')
import ref_help as mv
def get_lgts(head, tail, sol, forcefield):
if forcefield == 'martini':
nc3 = {'N': 1, 'C': 5, head: 13}
po4 = {'P': 1, 'O': 4}
gl1 = {'C': 2, 'O': 2, tail: 2}
gl2 = {'C': 3, 'O': 2, tail: 3}
c1a = {'C': 5, tail: 10}
c2a = {'C': 4, tail: 8}
c3a = {'C': 4, tail: 8}
c4a = {'C': 4, tail: 9}
c1b = {'C': 5, tail: 10}
c2b = {'C': 4, tail: 8}
c3b = {'C': 4, tail: 8}
c4b = {'C': 4, tail: 9}
w = {'O': 2, sol: 4}
wp = {'O': 1, sol: 2}
wm = {'O': 1, sol: 2}
scat_lens = []
types = [nc3, po4, gl1, gl2, c1a, c2a, c3a, c4a, c1b, c2b, c3b, c4b]
types_strings = ['NC3', 'PO4', 'GL1', 'GL2', 'C1A', 'C2A', 'C3A', 'C4A', 'C1B', 'C2B', 'C3B', 'C4B', 'W', 'WP', 'WM']
for atom in types:
comp = (mv.get_scattering_length(atom, neutron=True))
scat_lens.append([comp.real, comp.imag])
if sol == 'acmw':
scat_lens.append([0, 0])
scat_lens.append([0, 0])
scat_lens.append([0, 0])
else:
types = [w, wp, wm]
for atom in types:
comp = (mv.get_scattering_length(atom, neutron=True))
scat_lens.append([comp.real, comp.imag])
elif forcefield == 'berger':
c1 = {'C': 1, head: 3}
c2 = {'C': 1, head: 3}
c3 = {'C': 1, head: 3}
n4 = {'N': 1}
c5 = {'C': 1, head: 2}
c6 = {'C': 1, head: 2}
o7 = {'O': 1}
p8 = {'P': 1}
o9 = {'O': 1}
o10 = {'O': 1}
o11 = {'O': 1}
c12 = {'C': 1, tail: 2}
c13 = {'C': 1, tail: 1}
o14 = {'O': 1}
c15 = {'C': 1}
o16 = {'O': 1}
c17 = {'C': 1, tail: 2}
c18 = {'C': 1, tail: 2}
c19 = {'C': 1, tail: 2}
c20 = {'C': 1, tail: 2}
c21 = {'C': 1, tail: 2}
c22 = {'C': 1, tail: 2}
c23 = {'C': 1, tail: 2}
c24 = {'C': 1, tail: 2}
c25 = {'C': 1, tail: 2}
c26 = {'C': 1, tail: 2}
c27 = {'C': 1, tail: 2}
c28 = {'C': 1, tail: 2}
c29 = {'C': 1, tail: 2}
c30 = {'C': 1, tail: 2}
c31 = {'C': 1, tail: 2}
c32 = {'C': 1, tail: 2}
c33 = {'C': 1, tail: 3}
c34 = {'C': 1, tail: 2}
o35 = {'O': 1}
c36 = {'C': 1}
o37 = {'O': 1}
c38 = {'C': 1, tail: 2}
c39 = {'C': 1, tail: 2}
c40 = {'C': 1, tail: 2}
c41 = {'C': 1, tail: 2}
c42 = {'C': 1, tail: 2}
c43 = {'C': 1, tail: 2}
c44 = {'C': 1, tail: 2}
c45 = {'C': 1, tail: 2}
c46 = {'C': 1, tail: 2}
c47 = {'C': 1, tail: 2}
c48 = {'C': 1, tail: 2}
c49 = {'C': 1, tail: 2}
c50 = {'C': 1, tail: 2}
c51 = {'C': 1, tail: 2}
c52 = {'C': 1, tail: 2}
c53 = {'C': 1, tail: 2}
c54 = {'C': 1, tail: 3}
ow = {'O': 1}
hw1 = {sol: 1}
hw2 = {sol: 1}
scat_lens = []
types = [c1, c2, c3, n4, c5, c6, o7, p8, o9, o10, o11, c12, c13, o14, c15, o16, c17, c18, c19,
c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, o35, c36,
o37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54]
types_strings = ['C1', 'C2', 'C3', 'N4', 'C5', 'C6', 'O7', 'P8', 'O9', 'O10', 'O11', 'C12', 'C13',
'O14', 'C15', 'O16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24',
'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32', 'C33', 'C34', 'O35',
'C36', 'O37', 'C38', 'C39', 'C40', 'C41', 'C42', 'C43', 'C44', 'C45', 'C46',
'C47', 'C48', 'C49', 'C50', 'C51', 'C52', 'C53', 'C54', 'OW', 'HW1', 'HW2']
for atom in types:
comp = (mv.get_scattering_length(atom, neutron=True))
scat_lens.append([comp.real, comp.imag])
if sol == 'acmw':
scat_lens.append([0, 0])
scat_lens.append([0, 0])
scat_lens.append([0, 0])
else:
types = [ow, hw1, hw2]
for atom in types:
comp = (mv.get_scattering_length(atom, neutron=True))
scat_lens.append([comp.real, comp.imag])
elif forcefield == 'slipids':
n = {'N': 1}
c13 = {'C': 1}
h13a = {head: 1}
h13b = {head: 1}
h13c = {head: 1}
c14 = {'C': 1}
h14a = {head: 1}
h14b = {head: 1}
h14c = {head: 1}
c15 = {'C': 1}
h15a = {head: 1}
h15b = {head: 1}
h15c = {head: 1}
c12 = {'C': 1}
h12a = {head: 1}
h12b = {head: 1}
c11 = {'C': 1}
h11a = {head: 1}
h11b = {head: 1}
p = {'P': 1}
o13 = {'O': 1}
o14 = {'O': 1}
o11 = {'O': 1}
o12 = {'O': 1}
c1 = {'C': 1}
ha = {tail: 1}
hb = {tail: 1}
c2 = {'C': 1}
hs = {tail: 1}
o21 = {'O': 1}
c21 = {'C': 1}
o22 = {'O': 1}
c22 = {'C': 1}
h2r = {tail: 1}
h2s = {tail: 1}
c3 = {'C': 1}
hx = {tail: 1}
hy = {tail: 1}
o31 = {'O': 1}
c31 = {'C': 1}
o32 = {'O': 1}
c32 = {'C': 1}
h2x = {tail: 1}
h2y = {tail: 1}
c23 = {'C': 1}
h3r = {tail: 1}
h3s = {tail: 1}
c24 = {'C': 1}
h4r = {tail: 1}
h4s = {tail: 1}
c25 = {'C': 1}
h5r = {tail: 1}
h5s = {tail: 1}
c26 = {'C': 1}
h6r = {tail: 1}
h6s = {tail: 1}
c27 = {'C': 1}
h7r = {tail: 1}
h7s = {tail: 1}
c28 = {'C': 1}
h8r = {tail: 1}
h8s = {tail: 1}
c29 = {'C': 1}
h9r = {tail: 1}
h9s = {tail: 1}
c210 = {'C': 1}
h10r = {tail: 1}
h10s = {tail: 1}
c211 = {'C': 1}
h11r = {tail: 1}
h11s = {tail: 1}
c212 = {'C': 1}
h12r = {tail: 1}
h12s = {tail: 1}
c213 = {'C': 1}
h13r = {tail: 1}
h13s = {tail: 1}
c214 = {'C': 1}
h14r = {tail: 1}
h14s = {tail: 1}
c215 = {'C': 1}
h15r = {tail: 1}
h15s = {tail: 1}
c216 = {'C': 1}
h16r = {tail: 1}
h16s = {tail: 1}
c217 = {'C': 1}
h17r = {tail: 1}
h17s = {tail: 1}
c218 = {'C': 1}
h18r = {tail: 1}
h18s = {tail: 1}
h18t = {tail: 1}
c33 = {'C': 1}
h3x = {tail: 1}
h3y = {tail: 1}
c34 = {'C': 1}
h4x = {tail: 1}
h4y = {tail: 1}
c35 = {'C': 1}
h5x = {tail: 1}
h5y = {tail: 1}
c36 = {'C': 1}
h6x = {tail: 1}
h6y = {tail: 1}
c37 = {'C': 1}
h7x = {tail: 1}
h7y = {tail: 1}
c38 = {'C': 1}
h8x = {tail: 1}
h8y = {tail: 1}
c39 = {'C': 1}
h9x = {tail: 1}
h9y = {tail: 1}
c310 = {'C': 1}
h10x = {tail: 1}
h10y = {tail: 1}
c311 = {'C': 1}
h11x = {tail: 1}
h11y = {tail: 1}
c312 = {'C': 1}
h12x = {tail: 1}
h12y = {tail: 1}
c313 = {'C': 1}
h13x = {tail: 1}
h13y = {tail: 1}
c314 = {'C': 1}
h14x = {tail: 1}
h14y = {tail: 1}
c315 = {'C': 1}
h15x = {tail: 1}
h15y = {tail: 1}
c316 = {'C': 1}
h16x = {tail: 1}
h16y = {tail: 1}
c317 = {'C': 1}
h17x = {tail: 1}
h17y = {tail: 1}
c318 = {'C': 1}
h18x = {tail: 1}
h18y = {tail: 1}
h18z = {tail: 1}
ow = {'O': 1}
hw1 = {sol: 1}
hw2 = {sol: 1}
scat_lens = []
types = [n, c13, h13a, h13b, h13c, c14, h14a, h14b, h14c, c15, h15a, h15b, h15c, c12, h12a, h12b,
c11, h11a, h11b, p, o13, o14, o11, o12, c1, ha, hb, c2, hs, o21, c21, o22, c22, h2r, h2s,
c3, hx, hy, o31, c31, o32,
from typing import Any, Callable, List, Optional
import torch
import torch.distributed as dist
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _OverlapStatus
from torch.nn.parallel.distributed import DistributedDataParallel
# Functional optimizers require passing a list of gradients to their `step()`
# method, and ZeRO requires a functional optimizer to overlap with DDP
# Passing a `None` instead of an actual gradient indicates to the optimizer
# to not update the corresponding parameter
_NO_PARAM_UPDATE = None
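# Sketch of how this sentinel is used further down: the per-parameter gradients list
# handed to `zero._local_step()` keeps `None` at positions whose parameters should not
# be updated for the current bucket. The helper name and sizes are made up for
# illustration; they are not part of this module's API.
def _example_gradients_list(num_local_optim_params, offset, bucket_grads):
    gradients = [_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]
    for i, grad in enumerate(bucket_grads):
        gradients[offset + i] = grad
    return gradients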
def hook_with_zero_step(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
ddp: DistributedDataParallel,
zero: ZeroRedundancyOptimizer,
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
r"""
Modifies the given ``hook`` to overlap the :class:`ZeroRedundancyOptimizer`
optimizer step with the :class:`DistributedDataParallel` backward pass,
where the optimizer step computation begins after the last gradient bucket
computation has finished.
This approach overlaps the optimizer computation and communication with the
backward communication. In particular, the backward computation proceeds
contiguously, and the optimizer computation follows, overlapping with
outstanding backward communication (i.e. all-reduces) and possibly other
optimizer communication (i.e. broadcasts).
This approach may be preferred over :meth:`hook_with_zero_step_interleaved`
if communication is relatively slow compared to computation.
Arguments:
hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook
to modify.
ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
instance to use.
zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
instance to use.
Returns:
The modified hook.
Raises:
ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
RuntimeError: if using any backend other than NCCL since currently
Gloo may hang.
.. warning::
Given the way that overlapping :class:`DistributedDataParallel` with
:class:`ZeroRedundancyOptimizer` is currently implemented, the first
two training iterations do not perform parameter updates in the
optimizer step. This is because it needs information about the gradient
bucketing strategy used by :class:`DistributedDataParallel`, which is
not finalized until the second forward pass.
"""
if not zero._overlap_with_ddp:
raise ValueError(
"ZeroRedundancyOptimizer must be constructed with "
"`overlap_with_ddp=True` to use this hook properly"
)
# NOTE: Gloo may hang with this overlapping approach, so we require
# NCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
if dist.get_backend() != dist.Backend.NCCL:
raise RuntimeError(
"Overlapping DDP with ZeRO using this approach currently requires "
"NCCL backend to avoid hangs"
)
def hook_with_zero_fn(
state: Any,
bucket: dist.GradBucket,
) -> torch.futures.Future[torch.Tensor]:
r"""
Returns a :class:`Future` that gives a gradient bucket tensor and
performs the equivalent of a :class:`ZeroRedundancyOptimizer`
:meth:`step` if ``bucket`` is the last gradient bucket.
The function performs additional computation on the iteration that
the :class:`DistributedDataParallel` buckets are rebuilt to collect
information used to implement the modified hook.
Arguments:
state (Any): any state for the hook.
bucket (dist.GradBucket): the :class:`DistributedDataParallel`
gradient bucket.
"""
fut = hook(state, bucket)
overlap_info = zero._overlap_info
bucket_index = bucket.index()
# Proceed as normal until the DDP buckets have been rebuilt
if not ddp._has_rebuilt_buckets:
assert overlap_info.status == _OverlapStatus.UNINITIALIZED
return fut
if overlap_info.status == _OverlapStatus.UNINITIALIZED:
overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS
rank = zero.global_rank
rank_to_update = zero._ddp_bucket_index_to_rank(bucket_index)
# Once DDP buckets have been rebuilt but ZeRO has not been
# properly initialized yet, collect the information needed
if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS:
bucket_params = bucket.parameters()
assert len(bucket_params) > 0, "Empty bucket"
params_per_rank = overlap_info.params_per_rank
params_per_bucket = overlap_info.params_per_bucket
if rank_to_update == rank:
overlap_info.offsets[bucket_index] = len(params_per_rank[rank_to_update])
params_per_rank[rank_to_update].extend(bucket_params)
params_per_bucket.append(bucket_params)
return fut
assert overlap_info.status == _OverlapStatus.INITIALIZED
# Save the bucket reference and all-reduce future for the final bucket
if rank_to_update == rank:
overlap_info.bucket_index_to_bucket[bucket_index] = bucket
overlap_info.bucket_index_to_future[bucket_index] = fut
# NOTE: The implementation from this point forward assumes that the
# buckets are indexed incrementally starting from 0 in the order of
# their autograd hooks firing
num_buckets = len(overlap_info.params_per_bucket)
is_last_bucket = bucket_index == num_buckets - 1
if not is_last_bucket:
return fut
# Perform partial optimizer step on all buckets after the final
# bucket has been computed
# NOTE: This should not be chained as a callback to the last bucket's
# all-reduce future since that would add synchronization that delays
# all optimizer computation to wait for that last all-reduce
for bucket_index in range(num_buckets):
rank_to_update = zero._ddp_bucket_index_to_rank(bucket_index)
num_local_optim_params = len(zero.optim.param_groups[0]["params"])
if rank_to_update == rank:
gradients: List[Optional[torch.Tensor]] = \
[_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]
assert bucket_index in overlap_info.offsets, \
f"Bucket index {bucket_index} was not assigned to rank {rank}"
offset = overlap_info.offsets[bucket_index]
# Ensure that the all-reduce completes before performing the
# parameter update
assert bucket_index in overlap_info.bucket_index_to_future, \
f"All-reduce future for bucket {bucket_index} not saved " \
f"on rank {rank}"
allreduce_future = overlap_info.bucket_index_to_future[bucket_index]
allreduce_future.wait()
bucket_gradients = overlap_info.bucket_index_to_bucket[bucket_index].gradients()
for i, grad in enumerate(bucket_gradients):
gradients[offset + i] = grad
zero._local_step(gradients)
device = overlap_info.params_per_bucket[bucket_index][0].device
device_index = zero._device_to_device_index[device]
assert bucket_index in zero._buckets[device_index][rank_to_update]
overlap_info.broadcast_handles.append(
dist.broadcast(
zero._buckets[device_index][rank_to_update][bucket_index],
src=rank_to_update,
async_op=True
)
)
# Ensure that all parameter updates are finished before the
# next forward pass
assert len(overlap_info.broadcast_handles) == num_buckets, \
f"Missing at least one broadcast handle on rank {rank}"
_ = list(map(lambda x: x.wait(), overlap_info.broadcast_handles))
overlap_info.broadcast_handles.clear()
# Reset per-iteration information
overlap_info.bucket_index_to_future.clear()
overlap_info.bucket_index_to_bucket.clear()
return fut
return hook_with_zero_fn
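# Usage sketch (assumes an initialized NCCL process group plus a local `model` and
# `device_id`, none of which are defined in this file): the returned callable is
# registered as a DDP communication hook on top of the stock all-reduce hook.
def _example_overlap_ddp_with_zero(model, device_id):
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    ddp_model = DistributedDataParallel(model, device_ids=[device_id])
    zero_optim = ZeroRedundancyOptimizer(
        ddp_model.parameters(),
        optimizer_class=torch.optim.Adam,
        overlap_with_ddp=True,
        lr=1e-3,
    )
    ddp_model.register_comm_hook(None, hook_with_zero_step(allreduce_hook, ddp_model, zero_optim))
    return ddp_model, zero_optim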
def hook_with_zero_step_interleaved(
hook: Callable[[Any, dist.GradBucket], torch.futures.Future],
ddp: DistributedDataParallel,
zero: ZeroRedundancyOptimizer,
) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]:
r"""
Modifies the given ``hook`` to overlap the :class:`ZeroRedundancyOptimizer`
optimizer step with the :class:`DistributedDataParallel` backward pass,
where the optimizer step computation interleaves with the backward
computation.
This approach overlaps the optimizer computation and communication with the
backward computation and communication. In particular, once a bucket's
gradients have been computed, the optimizer computation using those
gradients is launched (though the actual computation must wait for the
bucket's all-reduce to complete). This yields an interleaving of all-
reduces and broadcasts in the communication stream.
This approach may be preferred over :meth:`hook_with_zero_step` if
communication is relatively fast compared to computation.
Arguments:
hook (Callable[[Any, dist.GradBucket], torch.futures.Future]): the hook to
modify.
ddp (DistributedDataParallel): the :class:`DistributedDataParallel`
instance to use.
zero (ZeroRedundancyOptimizer): the :class:`ZeroRedundancyOptimizer`
instance to use.
Returns:
The modified hook.
Raises:
ValueError: if ``zero`` was constructed with ``overlap_with_ddp=False``.
RuntimeError: if using any backend other than NCCL since currently
Gloo may hang.
.. warning::
Given the way that overlapping :class:`DistributedDataParallel` with
:class:`ZeroRedundancyOptimizer` is currently implemented, the first
two training iterations do not perform parameter updates in the
optimizer step. This is because it needs information about the gradient
bucketing strategy used by :class:`DistributedDataParallel`, which is
not finalized until the second forward pass.
"""
if not zero._overlap_with_ddp:
raise ValueError(
"ZeroRedundancyOptimizer must be constructed with "
"`overlap_with_ddp=True` to use this hook properly"
)
# NOTE: Gloo may hang with this overlapping approach, so we require
# NCCL backend for now; see https://github.com/pytorch/pytorch/issues/62300
if dist.get_backend() != dist.Backend.NCCL:
raise RuntimeError(
"Overlapping DDP with ZeRO using this approach currently requires "
"NCCL backend to avoid hangs"
)
def hook_with_zero_interleaved_fn(
state,
bucket: dist.GradBucket,
) -> torch.futures.Future[torch.Tensor]:
r"""
Returns a :class:`Future` that gives a gradient bucket tensor and
performs a partial :class:`ZeroRedundancyOptimizer` :meth:`step` using
the gradients in that bucket.
Arguments:
state: any state for the hook.
bucket (dist.GradBucket): the :class:`DistributedDataParallel`
gradient bucket.
"""
fut = hook(state, bucket)
# Proceed as normal until the DDP buckets have been rebuilt
if not ddp._has_rebuilt_buckets:
assert zero._overlap_info.status == _OverlapStatus.UNINITIALIZED
return fut
def zero_step(fut: torch.futures.Future) -> torch.Tensor:
r"""
Performs a partial :class:`ZeroRedundancyOptimizer` :meth:`step`
using the gradients in the given :class:`DistributedDataParallel`
gradient bucket.
Returns:
A :class:`torch.Tensor` representing the contents of the
gradient bucket.
"""
assert ddp._has_rebuilt_buckets
bucket_index = bucket.index()
rank = zero.global_rank
overlap_info = zero._overlap_info
if overlap_info.status == _OverlapStatus.UNINITIALIZED:
overlap_info.status = _OverlapStatus.DDP_HAS_REBUILT_BUCKETS
bucket_params = bucket.parameters()
assert len(bucket_params) > 0, "Empty bucket"
rank_to_update = zero._ddp_bucket_index_to_rank(bucket_index)
# Once DDP buckets have been rebuilt but ZeRO has not been
# properly initialized yet, collect the information needed
if overlap_info.status == _OverlapStatus.DDP_HAS_REBUILT_BUCKETS:
params_per_rank = overlap_info.params_per_rank
params_per_bucket = overlap_info.params_per_bucket
if rank_to_update == rank:
overlap_info.offsets[bucket_index] = len(params_per_rank[rank_to_update])
params_per_rank[rank_to_update].extend(bucket_params)
params_per_bucket.append(bucket_params)
return bucket.get_tensor()
overlap_info.bucket_indices_seen.append(bucket_index)
if rank_to_update == rank:
assert len(zero.optim.param_groups) == 1, \
"Overlapping DDP with ZeRO only supports a single " \
"parameter group"
# Construct the `gradients` input for the local optimizer step,
# which expects `None` in a list position to indicate that the
# corresponding parameter should not be updated
num_local_optim_params = len(zero.optim.param_groups[0]["params"])
gradients: List[Optional[torch.Tensor]] = \
[_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]
assert bucket_index in overlap_info.offsets, \
f"Bucket index {bucket_index} was not assigned to rank " \
f"{rank}"
offset = overlap_info.offsets[bucket_index]
bucket_gradients = bucket.gradients()
for i, grad in enumerate(bucket_gradients):
gradients[offset + i] = grad
from rpython.annotator import model as annmodel, unaryop, binaryop, description
from rpython.flowspace.model import Constant
from rpython.rtyper.error import TyperError, MissingRTypeOperation
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType
from rpython.tool.pairtype import pairtype, extendabletype, pair
# initialization states for Repr instances
class setupstate(object):
NOTINITIALIZED = 0
INPROGRESS = 1
BROKEN = 2
FINISHED = 3
DELAYED = 4
class Repr(object):
""" An instance of Repr is associated with each instance of SomeXxx.
It defines the chosen representation for the SomeXxx. The Repr subclasses
generally follows the SomeXxx subclass hierarchy, but there are numerous
exceptions. For example, the annotator uses SomeIter for any iterator, but
we need different representations according to the type of container we are
iterating over.
"""
__metaclass__ = extendabletype
_initialized = setupstate.NOTINITIALIZED
__NOT_RPYTHON__ = True
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.lowleveltype)
def compact_repr(self):
return '%s %s' % (self.__class__.__name__.replace('Repr','R'), self.lowleveltype._short_name())
def setup(self):
""" call _setup_repr() and keep track of the initializiation
status to e.g. detect recursive _setup_repr invocations.
the '_initialized' attr has four states:
"""
if self._initialized == setupstate.FINISHED:
return
elif self._initialized == setupstate.BROKEN:
raise BrokenReprTyperError(
"cannot setup already failed Repr: %r" %(self,))
elif self._initialized == setupstate.INPROGRESS:
raise AssertionError(
"recursive invocation of Repr setup(): %r" %(self,))
elif self._initialized == setupstate.DELAYED:
raise AssertionError(
"Repr setup() is delayed and cannot be called yet: %r" %(self,))
assert self._initialized == setupstate.NOTINITIALIZED
self._initialized = setupstate.INPROGRESS
try:
self._setup_repr()
except TyperError:
self._initialized = setupstate.BROKEN
raise
else:
self._initialized = setupstate.FINISHED
def _setup_repr(self):
"For recursive data structure, which must be initialized in two steps."
def setup_final(self):
"""Same as setup(), called a bit later, for effects that are only
needed after the typer finished (as opposed to needed for other parts
of the typer itself)."""
if self._initialized == setupstate.BROKEN:
raise BrokenReprTyperError("cannot perform setup_final_touch "
"on failed Repr: %r" %(self,))
assert self._initialized == setupstate.FINISHED, (
"setup_final() on repr with state %s: %r" %
(self._initialized, self))
self._setup_repr_final()
def _setup_repr_final(self):
pass
def is_setup_delayed(self):
return self._initialized == setupstate.DELAYED
def set_setup_delayed(self, flag):
assert self._initialized in (setupstate.NOTINITIALIZED,
setupstate.DELAYED)
if flag:
self._initialized = setupstate.DELAYED
else:
self._initialized = setupstate.NOTINITIALIZED
def set_setup_maybe_delayed(self):
if self._initialized == setupstate.NOTINITIALIZED:
self._initialized = setupstate.DELAYED
return self._initialized == setupstate.DELAYED
def __getattr__(self, name):
# Assume that when an attribute is missing, it's because setup() needs
# to be called
if not (name[:2] == '__' == name[-2:]):
if self._initialized == setupstate.NOTINITIALIZED:
self.setup()
try:
return self.__dict__[name]
except KeyError:
pass
raise AttributeError("%s instance has no attribute %s" % (
self.__class__.__name__, name))
def _freeze_(self):
return True
def convert_desc_or_const(self, desc_or_const):
if isinstance(desc_or_const, description.Desc):
return self.convert_desc(desc_or_const)
elif isinstance(desc_or_const, Constant):
return self.convert_const(desc_or_const.value)
else:
raise TyperError("convert_desc_or_const expects a Desc"
"or Constant: %r" % desc_or_const)
def convert_const(self, value):
"Convert the given constant value to the low-level repr of 'self'."
if not self.lowleveltype._contains_value(value):
raise TyperError("convert_const(self = %r, value = %r)" % (
self, value))
return value
def get_ll_eq_function(self):
"""Return an eq(x,y) function to use to compare two low-level
values of this Repr.
This can return None to mean that simply using '==' is fine.
"""
raise TyperError('no equality function for %r' % self)
def get_ll_hash_function(self):
"""Return a hash(x) function for low-level values of this Repr.
"""
raise TyperError('no hashing function for %r' % self)
def get_ll_fasthash_function(self):
"""Return a 'fast' hash(x) function for low-level values of this
Repr. The function can assume that 'x' is already stored as a
key in a dict. get_ll_fasthash_function() should return None if
the hash should rather be cached in the dict entry.
"""
return None
def can_ll_be_null(self, s_value):
"""Check if the low-level repr can take the value 0/NULL.
The annotation s_value is provided as a hint because it may
contain more information than the Repr.
"""
return True # conservative
def get_ll_dummyval_obj(self, rtyper, s_value):
"""A dummy value is a special low-level value, not otherwise
used. It should not be the NULL value even if it is special.
This returns either None, or a hashable object that has a
(possibly lazy) attribute 'll_dummy_value'.
The annotation s_value is provided as a hint because it may
contain more information than the Repr.
"""
T = self.lowleveltype
if (isinstance(T, lltype.Ptr) and
isinstance(T.TO, (lltype.Struct,
lltype.Array,
lltype.ForwardReference))):
return DummyValueBuilder(rtyper, T.TO)
else:
return None
def rtype_bltn_list(self, hop):
raise TyperError('no list() support for %r' % self)
def rtype_unichr(self, hop):
raise TyperError('no unichr() support for %r' % self)
# default implementation of some operations
def rtype_getattr(self, hop):
s_attr = hop.args_s[1]
if s_attr.is_constant() and isinstance(s_attr.const, str):
attr = s_attr.const
s_obj = hop.args_s[0]
if s_obj.find_method(attr) is None:
raise TyperError("no method %s on %r" % (attr, s_obj))
else:
# implement methods (of a known name) as just their 'self'
return hop.inputarg(self, arg=0)
else:
raise TyperError("getattr() with a non-constant attribute name")
def rtype_str(self, hop):
[v_self] = hop.inputargs(self)
return hop.gendirectcall(self.ll_str, v_self)
def rtype_bool(self, hop):
try:
vlen = self.rtype_len(hop)
except MissingRTypeOperation:
if not hop.s_result.is_constant():
raise TyperError("rtype_bool(%r) not implemented" % (self,))
return hop.inputconst(Bool, hop.s_result.const)
else:
return hop.genop('int_is_true', [vlen], resulttype=Bool)
def rtype_hash(self, hop):
ll_hash = self.get_ll_hash_function()
v, = hop.inputargs(self)
return hop.gendirectcall(ll_hash, v)
def rtype_iter(self, hop):
r_iter = self.make_iterator_repr()
return r_iter.newiter(hop)
def make_iterator_repr(self, *variant):
raise TyperError("%s is not iterable" % (self,))
def rtype_hint(self, hop):
return hop.inputarg(hop.r_result, arg=0)
# hlinvoke helpers
def get_r_implfunc(self):
raise TyperError("%s has no corresponding implementation function representation" % (self,))
def get_s_callable(self):
raise TyperError("%s is not callable or cannot reconstruct a pbc annotation for itself" % (self,))
def ll_hash_void(v):
return 0
class CanBeNull(object):
"""A mix-in base class for subclasses of Repr that represent None as
'null' and true values as non-'null'.
"""
def rtype_bool(self, hop):
if hop.s_result.is_constant():
return hop.inputconst(Bool, hop.s_result.const)
else:
return hop.rtyper.type_system.check_null(self, hop)
class IteratorRepr(Repr):
"""Base class of Reprs of any kind of iterator."""
def rtype_iter(self, hop): # iter(iter(x)) <==> iter(x)
v_iter, = hop.inputargs(self)
return v_iter
def rtype_method_next(self, hop):
return self.rtype_next(hop)
class __extend__(annmodel.SomeIterator):
# NOTE: SomeIterator is for iterators over any container, not just list
def rtyper_makerepr(self, rtyper):
r_container = rtyper.getrepr(self.s_container)
if self.variant == ("enumerate",):
from rpython.rtyper.rrange import EnumerateIteratorRepr
r_baseiter = r_container.make_iterator_repr()
return EnumerateIteratorRepr(r_baseiter)
return r_container.make_iterator_repr(*self.variant)
def rtyper_makekey(self):
return self.__class__, self.s_container.rtyper_makekey(), self.variant
class __extend__(annmodel.SomeImpossibleValue):
def rtyper_makerepr(self, rtyper):
return impossible_repr
def rtyper_makekey(self):
return self.__class__,
# ____ generic binary operations _____________________________
class __extend__(pairtype(Repr, Repr)):
def rtype_is_((robj1, robj2), hop):
if hop.s_result.is_constant():
return inputconst(Bool, hop.s_result.const)
return hop.rtyper.type_system.generic_is(robj1, robj2, hop)
# default implementation for checked getitems
def rtype_getitem_idx((r_c1, r_o1), hop):
return pair(r_c1, r_o1).rtype_getitem(hop)
# ____________________________________________________________
def make_missing_op(rcls, opname):
attr = 'rtype_' + opname
if not hasattr(rcls, attr):
def missing_rtype_operation(self, hop):
raise MissingRTypeOperation("unimplemented operation: "
"'%s' on %r" % (opname, self))
setattr(rcls, attr, missing_rtype_operation)
for opname in unaryop.UNARY_OPERATIONS:
make_missing_op(Repr, opname)
for opname in binaryop.BINARY_OPERATIONS:
make_missing_op(pairtype(Repr, Repr), opname)
# not in BINARY_OPERATIONS
make_missing_op(pairtype(Repr, Repr), 'contains')
class __extend__(pairtype(Repr, Repr)):
def convert_from_to((r_from, r_to), v, llops):
return NotImplemented
# ____________________________________________________________
class VoidRepr(Repr):
lowleveltype = Void
def get_ll_eq_function(self): return None
def get_ll_hash_function(self): return ll_hash_void
get_ll_fasthash_function = get_ll_hash_function
def ll_str(self, nothing): raise AssertionError("unreachable code")
impossible_repr = VoidRepr()
class SimplePointerRepr(Repr):
"Convenience Repr for simple ll pointer types with no operation on them."
def __init__(self, lowleveltype):
self.lowleveltype = lowleveltype
def convert_const(self, value):
if value is not None:
raise TyperError("%r only supports None as prebuilt constant, "
"got %r" % (self, value))
return lltype.nullptr(self.lowleveltype.TO)
# ____________________________________________________________
def inputconst(reqtype, value):
"""Return a Constant with the given value, of the requested type,
which can be a Repr instance or a low-level type.
"""
if isinstance(reqtype, Repr):
value = reqtype.convert_const(value)
lltype = reqtype.lowleveltype
elif isinstance(reqtype, LowLevelType):
lltype = reqtype
else:
raise TypeError(repr(reqtype))
if not lltype._contains_value(value):
raise TyperError("inputconst(): expected a %r, got %r" %
(lltype, value))
c = Constant(value)
c.concretetype = lltype
return c
class BrokenReprTyperError(TyperError):
""" raised when trying to setup a Repr whose setup
has failed already.
"""
def mangle(prefix, name):
"""Make a unique identifier from the prefix and the name. The name
is allowed to start with $."""
if name.startswith('$'):
return '%sinternal_%s' % (prefix, name[1:])
else:
return '%s_%s' % (prefix, name)
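# Quick illustration of mangle(): ordinary names get a simple prefix, while names
# starting with '$' are flagged as internal. Example names are arbitrary.
assert mangle('pbc', 'append') == 'pbc_append'
assert mangle('pbc', '$memofield') == 'pbcinternal_memofield'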
# __________ utilities __________
def getgcflavor(classdef):
classdesc = classdef.classdesc
alloc_flavor = classdesc.read_attribute('_alloc_flavor_',
Constant('gc')).value
return alloc_flavor
def externalvsinternal(rtyper, item_repr): # -> external_item_repr, (internal_)item_repr
from rpython.rtyper import rclass
if (isinstance(item_repr, rclass.InstanceRepr) and
getattr(item_repr, 'gcflavor', 'gc') == 'gc'):
return item_repr, rclass.getinstancerepr(rtyper, None)
else:
return item_repr, item_repr
class DummyValueBuilder(object):
def __init__(self, rtyper, TYPE):
self.rtyper = rtyper
self.TYPE = TYPE
def _freeze_(self):
return True
def __hash__(self):
return hash(self.TYPE)
and not self.config.tie_word_embeddings:
old_lm_head = self.get_output_embeddings()
new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
self.set_output_embeddings(new_lm_head)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None
) -> nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
else:
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
if not isinstance(old_embeddings, nn.Embedding):
raise TypeError(
f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}."
f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
)
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(
self.device, dtype=old_embeddings.weight.dtype
)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
# numbers of tokens to copy
n = min(old_num_tokens, new_num_tokens)
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
if torch.distributed.get_rank() == 0:
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
else:
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
return new_embeddings
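# Usage sketch: _get_resized_embeddings is normally reached through the public
# resize_token_embeddings() API after growing the tokenizer vocabulary. The checkpoint
# name below is only an example and will download weights when actually run.
def _example_resize_embeddings():
    from transformers import AutoModel, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased")
    tokenizer.add_tokens(["<new_special_word>"])
    model.resize_token_embeddings(len(tokenizer))
    return model, tokenizer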
def _get_resized_lm_head(
self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
) -> nn.Linear:
"""
Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end
Args:
old_lm_head (:obj:`torch.nn.Linear`):
Old lm head linear layer to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Linear` module of the model without doing anything.
transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether ``old_lm_head`` is transposed or not. If True ``old_lm_head.size()`` is ``lm_head_dim,
vocab_size`` else ``vocab_size, lm_head_dim``.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_lm_head
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
old_num_tokens, old_lm_head_dim = (
old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
)
else:
old_num_tokens, old_lm_head_dim = (
old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
)
if old_num_tokens == new_num_tokens:
return old_lm_head
if not isinstance(old_lm_head, nn.Linear):
raise TypeError(
f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}."
f"You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}."
)
# Build new lm head
new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
has_new_lm_head_bias = old_lm_head.bias is not None
new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)
# initialize new lm head (in particular added tokens)
self._init_weights(new_lm_head)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
# XXX: put the long block of code in a wrapper
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=0):
if torch.distributed.get_rank() == 0:
# Copy old lm head weights to new lm head
if not transposed:
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
:num_tokens_to_copy, :
]
else:
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
:, :num_tokens_to_copy
]
# Copy bias weights to new lm head
if has_new_lm_head_bias:
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
else:
# Copy old lm head weights to new lm head
if not transposed:
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
else:
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]
# Copy bias weights to new lm head
if has_new_lm_head_bias:
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
return new_lm_head
def init_weights(self):
"""
If needed prunes and maybe initializes weights.
"""
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
if _init_weights:
# Initialize weights
self.apply(self._init_weights)
# Tie weights should be skipped when not initializing all weights
# since from_pretrained(...) calls tie weights anyways
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
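# Usage sketch mirroring the docstring example above: prune heads 0 and 2 on layer 1
# and heads 2 and 3 on layer 2 of an already instantiated `model` (assumed to exist).
def _example_prune_heads(model):
    model.prune_heads({1: [0, 2], 2: [2, 3]})
    return model.config.pruned_heads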
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
save_config: bool = True,
state_dict: Optional[dict] = None,
save_function: Callable = torch.save,
push_to_hub: bool = False,
**kwargs,
):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory to which to save. Will be created if it doesn't exist.
save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to save the config of the model. Useful for distributed training (e.g. on TPUs) when this
function needs to be called on all processes. In this case, set :obj:`save_config=True` only on the main
process to avoid race conditions.
state_dict (nested dictionary of :obj:`torch.Tensor`):
The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to
only save parts of the model or if special precautions need to be taken when recovering the state
dictionary of a model (like when using model parallelism).
save_function (:obj:`Callable`):
The function to use to save the state dictionary. Useful on distributed training like TPUs when one
needs to replace :obj:`torch.save` by another method.
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
Additional key word arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = unwrap_model(self)
# save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
# we currently don't use this setting automatically, but may start to use with v5
dtype = get_parameter_dtype(model_to_save)
model_to_save.config.torch_dtype = str(dtype).split(".")[1]
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save the config
if save_config:
model_to_save.config.save_pretrained(save_directory)
# Save the model
if state_dict is None:
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self._keys_to_ignore_on_save is not None:
state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
save_function(state_dict, output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model; it is up to you to train those weights with a downstream fine-tuning task.
"threads_per_core", value)
@pulumi.input_type
class InstanceCreditSpecificationArgs:
def __init__(__self__, *,
c_pu_credits: Optional[pulumi.Input[str]] = None):
if c_pu_credits is not None:
pulumi.set(__self__, "c_pu_credits", c_pu_credits)
@property
@pulumi.getter(name="cPUCredits")
def c_pu_credits(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "c_pu_credits")
@c_pu_credits.setter
def c_pu_credits(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "c_pu_credits", value)
@pulumi.input_type
class InstanceEbsArgs:
def __init__(__self__, *,
delete_on_termination: Optional[pulumi.Input[bool]] = None,
encrypted: Optional[pulumi.Input[bool]] = None,
iops: Optional[pulumi.Input[int]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
volume_size: Optional[pulumi.Input[int]] = None,
volume_type: Optional[pulumi.Input[str]] = None):
if delete_on_termination is not None:
pulumi.set(__self__, "delete_on_termination", delete_on_termination)
if encrypted is not None:
pulumi.set(__self__, "encrypted", encrypted)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if volume_size is not None:
pulumi.set(__self__, "volume_size", volume_size)
if volume_type is not None:
pulumi.set(__self__, "volume_type", volume_type)
@property
@pulumi.getter(name="deleteOnTermination")
def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "delete_on_termination")
@delete_on_termination.setter
def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "delete_on_termination", value)
@property
@pulumi.getter
def encrypted(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "encrypted")
@encrypted.setter
def encrypted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypted", value)
@property
@pulumi.getter
def iops(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "iops")
@iops.setter
def iops(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "iops", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter(name="volumeSize")
def volume_size(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "volume_size")
@volume_size.setter
def volume_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "volume_size", value)
@property
@pulumi.getter(name="volumeType")
def volume_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "volume_type")
@volume_type.setter
def volume_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_type", value)
@pulumi.input_type
class InstanceElasticGpuSpecificationArgs:
def __init__(__self__, *,
type: pulumi.Input[str]):
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class InstanceElasticInferenceAcceleratorArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
count: Optional[pulumi.Input[int]] = None):
pulumi.set(__self__, "type", type)
if count is not None:
pulumi.set(__self__, "count", count)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@pulumi.input_type
class InstanceEnclaveOptionsArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None):
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class InstanceHibernationOptionsArgs:
def __init__(__self__, *,
configured: Optional[pulumi.Input[bool]] = None):
if configured is not None:
pulumi.set(__self__, "configured", configured)
@property
@pulumi.getter
def configured(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "configured")
@configured.setter
def configured(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "configured", value)
@pulumi.input_type
class InstanceIpv6AddressArgs:
def __init__(__self__, *,
ipv6_address: pulumi.Input[str]):
pulumi.set(__self__, "ipv6_address", ipv6_address)
@property
@pulumi.getter(name="ipv6Address")
def ipv6_address(self) -> pulumi.Input[str]:
return pulumi.get(self, "ipv6_address")
@ipv6_address.setter
def ipv6_address(self, value: pulumi.Input[str]):
pulumi.set(self, "ipv6_address", value)
@pulumi.input_type
class InstanceLaunchTemplateSpecificationArgs:
def __init__(__self__, *,
version: pulumi.Input[str],
launch_template_id: Optional[pulumi.Input[str]] = None,
launch_template_name: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "version", version)
if launch_template_id is not None:
pulumi.set(__self__, "launch_template_id", launch_template_id)
if launch_template_name is not None:
pulumi.set(__self__, "launch_template_name", launch_template_name)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="launchTemplateId")
def launch_template_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "launch_template_id")
@launch_template_id.setter
def launch_template_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "launch_template_id", value)
@property
@pulumi.getter(name="launchTemplateName")
def launch_template_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "launch_template_name")
@launch_template_name.setter
def launch_template_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "launch_template_name", value)
@pulumi.input_type
class InstanceLicenseSpecificationArgs:
def __init__(__self__, *,
license_configuration_arn: pulumi.Input[str]):
pulumi.set(__self__, "license_configuration_arn", license_configuration_arn)
@property
@pulumi.getter(name="licenseConfigurationArn")
def license_configuration_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "license_configuration_arn")
@license_configuration_arn.setter
def license_configuration_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "license_configuration_arn", value)
@pulumi.input_type
class InstanceNetworkInterfaceArgs:
def __init__(__self__, *,
device_index: pulumi.Input[str],
associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
delete_on_termination: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
group_set: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ipv6_address_count: Optional[pulumi.Input[int]] = None,
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
private_ip_address: Optional[pulumi.Input[str]] = None,
private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['InstancePrivateIpAddressSpecificationArgs']]]] = None,
secondary_private_ip_address_count: Optional[pulumi.Input[int]] = None,
subnet_id: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "device_index", device_index)
if associate_public_ip_address is not None:
pulumi.set(__self__, "associate_public_ip_address", associate_public_ip_address)
if delete_on_termination is not None:
pulumi.set(__self__, "delete_on_termination", delete_on_termination)
if description is not None:
pulumi.set(__self__, "description", description)
if group_set is not None:
pulumi.set(__self__, "group_set", group_set)
if ipv6_address_count is not None:
pulumi.set(__self__, "ipv6_address_count", ipv6_address_count)
if ipv6_addresses is not None:
pulumi.set(__self__, "ipv6_addresses", ipv6_addresses)
if network_interface_id is not None:
pulumi.set(__self__, "network_interface_id", network_interface_id)
if private_ip_address is not None:
pulumi.set(__self__, "private_ip_address", private_ip_address)
if private_ip_addresses is not None:
pulumi.set(__self__, "private_ip_addresses", private_ip_addresses)
if secondary_private_ip_address_count is not None:
pulumi.set(__self__, "secondary_private_ip_address_count", secondary_private_ip_address_count)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="deviceIndex")
def device_index(self) -> pulumi.Input[str]:
return pulumi.get(self, "device_index")
@device_index.setter
def device_index(self, value: pulumi.Input[str]):
pulumi.set(self, "device_index", value)
@property
@pulumi.getter(name="associatePublicIpAddress")
def associate_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "associate_public_ip_address")
@associate_public_ip_address.setter
def associate_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "associate_public_ip_address", value)
@property
@pulumi.getter(name="deleteOnTermination")
def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "delete_on_termination")
@delete_on_termination.setter
def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "delete_on_termination", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="groupSet")
def group_set(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "group_set")
@group_set.setter
def group_set(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "group_set", value)
@property
@pulumi.getter(name="ipv6AddressCount")
def ipv6_address_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "ipv6_address_count")
@ipv6_address_count.setter
def ipv6_address_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ipv6_address_count", value)
@property
@pulumi.getter(name="ipv6Addresses")
def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]]:
return pulumi.get(self, "ipv6_addresses")
@ipv6_addresses.setter
def ipv6_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]]):
pulumi.set(self, "ipv6_addresses", value)
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "network_interface_id")
@network_interface_id.setter
def network_interface_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_interface_id", value)
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "private_ip_address")
@private_ip_address.setter
def private_ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip_address", value)
@property
@pulumi.getter(name="privateIpAddresses")
def private_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstancePrivateIpAddressSpecificationArgs']]]]:
return pulumi.get(self, "private_ip_addresses")
@private_ip_addresses.setter
def private_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstancePrivateIpAddressSpecificationArgs']]]]):
pulumi.set(self, "private_ip_addresses", value)
@property
@pulumi.getter(name="secondaryPrivateIpAddressCount")
def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "secondary_private_ip_address_count")
@secondary_private_ip_address_count.setter
def secondary_private_ip_address_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "secondary_private_ip_address_count", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class InstanceNoDeviceArgs:
def __init__(__self__):
pass
@pulumi.input_type
class InstancePrivateDnsNameOptionsArgs:
def __init__(__self__, *,
enable_resource_name_dns_aaaa_record: Optional[pulumi.Input[bool]] = None,
enable_resource_name_dns_a_record: Optional[pulumi.Input[bool]] = None,
hostname_type: Optional[pulumi.Input[str]] = None):
if enable_resource_name_dns_aaaa_record is not None:
pulumi.set(__self__, "enable_resource_name_dns_aaaa_record", enable_resource_name_dns_aaaa_record)
if enable_resource_name_dns_a_record is not None:
pulumi.set(__self__, "enable_resource_name_dns_a_record", enable_resource_name_dns_a_record)
if hostname_type is not None:
pulumi.set(__self__, "hostname_type", hostname_type)
@property
@pulumi.getter(name="enableResourceNameDnsAAAARecord")
def enable_resource_name_dns_aaaa_record(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_resource_name_dns_aaaa_record")
@enable_resource_name_dns_aaaa_record.setter
def enable_resource_name_dns_aaaa_record(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_resource_name_dns_aaaa_record", value)
@property
@pulumi.getter(name="enableResourceNameDnsARecord")
def enable_resource_name_dns_a_record(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_resource_name_dns_a_record")
@enable_resource_name_dns_a_record.setter
def enable_resource_name_dns_a_record(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_resource_name_dns_a_record", value)
@property
@pulumi.getter(name="hostnameType")
def hostname_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hostname_type")
@hostname_type.setter
def hostname_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname_type", value)
@pulumi.input_type
class InstancePrivateIpAddressSpecificationArgs:
def __init__(__self__, *,
primary: pulumi.Input[bool],
private_ip_address: pulumi.Input[str]):
pulumi.set(__self__, "primary", primary)
pulumi.set(__self__, "private_ip_address", private_ip_address)
@property
@pulumi.getter
def primary(self) -> pulumi.Input[bool]:
return pulumi.get(self, "primary")
@primary.setter
def primary(self, value: pulumi.Input[bool]):
pulumi.set(self, "primary", value)
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> pulumi.Input[str]:
return pulumi.get(self, "private_ip_address")
@private_ip_address.setter
def private_ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "private_ip_address", value)
@pulumi.input_type
class InstanceSsmAssociationArgs:
def __init__(__self__, *,
document_name: pulumi.Input[str],
association_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceAssociationParameterArgs']]]] = None):
pulumi.set(__self__, "document_name", document_name)
if association_parameters is not None:
pulumi.set(__self__, "association_parameters", association_parameters)
@property
@pulumi.getter(name="documentName")
def document_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "document_name")
@document_name.setter
def document_name(self, value: pulumi.Input[str]):
pulumi.set(self, "document_name", value)
@property
@pulumi.getter(name="associationParameters")
def association_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceAssociationParameterArgs']]]]:
return pulumi.get(self, "association_parameters")
@association_parameters.setter
def association_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceAssociationParameterArgs']]]]):
pulumi.set(self, "association_parameters", value)
@pulumi.input_type
class InstanceTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class InstanceVolumeArgs:
def __init__(__self__, *,
device: pulumi.Input[str],
volume_id: pulumi.Input[str]):
pulumi.set(__self__, "device", device)
pulumi.set(__self__, "volume_id", volume_id)
@property
@pulumi.getter
def device(self) -> pulumi.Input[str]:
return pulumi.get(self, "device")
@device.setter
def device(self, value: pulumi.Input[str]):
pulumi.set(self, "device", value)
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@pulumi.input_type
class InternetGatewayTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class KeyPairTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
A key-value pair to associate with a resource.
:param pulumi.Input[str] key: The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
# Repository: katerinazuzana/sign-language-dictionary, file: dictionary/drawing_canvas.py
import tkinter as tk
import math
import cmath
class DrawingCanvas(tk.Canvas):
"""A canvas used to select an eliptic area on the background picture.
An ellipse can be draw on the canvas. The ellipse can then be moved,
resized and rotated.
"""
def __init__(self, parent, **options):
super().__init__(parent, **options)
self.settings = {'width': 4, # ellipse settings
'outline': 'red',
'fill': 'black',
'stipple': 'gray25'}
self.markSize = 3 # half side of a square, or a radius
self.scMarkSettings = {'width': 1,
'outline': 'darkgreen',
'fill': 'green'}
self.rotMarkSettings = {'width': 2,
'outline': 'darkblue',
'fill': 'blue'}
self.markNames = ['r', 'tr', 't', 'tl', 'l', 'bl', 'b', 'br']
self.cursorShapes = ['right_side',
'top_right_corner',
'top_side',
'top_left_corner',
'left_side',
'bottom_left_corner',
'bottom_side',
'bottom_right_corner']
self.numShapes = len(self.cursorShapes)
self.markIds = {} # mark items drawn on canvas
        self.id = None  # id of the ellipse drawn on canvas
self.ellipse = None # ellipse object
self.drawMode = True
self.scaleMode = False
self.rotateMode = False
self.bind('<ButtonPress-1>', self.onPress)
self.bind('<B1-Motion>', self.onMotion)
self.bind('<ButtonRelease-1>', self.onRelease)
self.bind('<Double-Button-1>', self.onDoubleClick)
def onPress(self, event):
"""Remember the topleft corner of a rect. encapsulating the ellipse."""
if self.drawMode:
self.topLeft = Vect(event)
def onMotion(self, event):
"""Draw an ellipse according to the mouse movement."""
if self.drawMode:
# draw an ellipse
if self.id:
self.delete(self.id)
self.id = self.create_oval(self.topLeft.x,
self.topLeft.y,
event.x, event.y,
**self.settings,
tags='ellipse')
def onRelease(self, event):
"""Create an Ellipse object and set the ellipse bindings."""
if self.drawMode:
# create an ellipse object
bottomRight = Vect(event)
self.ellipse = Ellipse(self.topLeft, bottomRight)
# set cursor
self.tag_bind('ellipse', '<Enter>',
lambda ev: self.config(cursor='fleur'))
self.tag_bind('ellipse', '<Leave>',
lambda ev: self.config(cursor=''))
# binding that enables moving the ellipse
self.tag_bind('ellipse', '<ButtonPress-1>', self.startMove)
# change mode
self.drawMode = False
self.switchToScaleMode()
def onDoubleClick(self, event):
"""On ellipse double-click change the mode between scale and rotate."""
if self.id in self.find_withtag(tk.CURRENT):
# if mouse is over the ellipse:
if self.scaleMode:
self.switchToRotateMode()
else:
self.switchToScaleMode()
def switchToRotateMode(self):
"""Switch to rotate mode and redraw the marks accordingly."""
self.scaleMode = False
self.rotateMode = True
self.delete('marks')
self.drawMarks(mode='rotate')
def switchToScaleMode(self):
"""Switch to scale mode and redraw the marks accordingly."""
self.rotateMode = False
self.scaleMode = True
self.delete('marks')
self.drawMarks(mode='scale')
def drawMarks(self, mode):
"""Draw the scaling/rotation marks of the ellipse and
set the corresponding bindings.
Arguments:
            mode (str): takes a value of 'scale' or 'rotate' to differentiate
between scaling and rotating marks
"""
if mode == 'scale':
createFcn = self.create_rectangle
settings = self.scMarkSettings
# update the dict with cursor shapes according to current angle
self.calcShapesDict()
elif mode == 'rotate':
createFcn = self.create_oval
settings = self.rotMarkSettings
for mark, position in self.ellipse.markCoords.items():
# draw the mark
topLeft = position - (self.markSize, self.markSize)
bottomRight = position + (self.markSize, self.markSize)
markId = createFcn(topLeft.x, topLeft.y,
bottomRight.x, bottomRight.y,
**settings,
tags='marks')
self.markIds[mark] = markId
if mode == 'scale':
# change the cursor shape when over the mark
def onEnter(mark):
return lambda ev: self.config(
cursor=self.scMarkCursors[mark])
self.tag_bind(markId, '<Enter>', onEnter(mark))
# on mark press start scaling of the ellipse
def onPress(mark):
return lambda ev: self.startScale(ev, mark)
self.tag_bind(markId, '<ButtonPress-1>', onPress(mark))
if mode == 'rotate':
# set the cursor shape over the rotation marks
self.tag_bind('marks', '<Enter>',
lambda ev: self.config(cursor='exchange'))
# on mark press start rotation of the ellipse
self.tag_bind('marks', '<ButtonPress-1>', self.startRotate)
self.tag_bind('marks', '<Leave>', lambda ev: self.config(cursor=''))
def calcShapesDict(self):
"""Calculate the dictionary with cursor shapes according to
current angle of the ellipse.
These cursor shapes are used over the scaling marks.
There are 'self.numShapes'=8 different directions in which the
mouse cursor arrow can point. When the ellipse is in a rotated
position, the cursor shapes are rotated as well, so that for
example the top-most mark still gets the up-pointing cursor.
"""
# divide the full 2pi angle into 'self.numShapes' number of segments
# and find out in which segment the ellipse angle resides
segmentSize = 2*math.pi / self.numShapes # pi/4
segmentNumber = (self.ellipse.angle + segmentSize/2) // segmentSize
# (angle + pi/8) // (pi/4)
# create a shifted list of shapes, where all the shapes are shifted
# with respect to the initial 'self.cursorShapes' list by the number
# of positions equal to the segmentNumber
rotatedShapes = []
for i in range(self.numShapes):
shape = self.cursorShapes[
int((i - segmentNumber) % self.numShapes)]
rotatedShapes.append(shape)
self.scMarkCursors = dict(zip(self.markNames, rotatedShapes))
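    # Worked example of the shift above, with illustrative numbers: numShapes
    # is 8, so segmentSize is pi/4 (about 0.785). For an ellipse angle of
    # roughly 1.40 rad (about 80 degrees), segmentNumber is
    # (1.40 + 0.393) // 0.785 == 2.0, so each mark's cursor is taken two
    # positions earlier in cursorShapes: the 'r' mark gets 'bottom_side' and
    # the 'l' mark gets 'top_side'.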
def startMove(self, event):
"""Remember the start point and set bindings to move the ellipse."""
self.startPoint = Vect(event)
self.bind('<B1-Motion>', self.doMove)
self.bind('<ButtonRelease-1>', self.stopMove)
def doMove(self, event):
"""Recalculate the ellipse params and redraw the items on canvas."""
endPoint = Vect(event)
shift = endPoint - self.startPoint
self.ellipse.recalcCornersOnMove(shift)
self.ellipse.calcMarkCoords()
self.redrawItems()
self.startPoint = Vect(event)
def stopMove(self, event):
"""Reset the bindings to state before moving the ellipse."""
self.bind('<B1-Motion>', self.onMotion)
self.bind('<ButtonRelease-1>', self.onRelease)
def startScale(self, event, mark):
"""Remember the start point and the mark that is being dragged.
Set the bindings.
"""
self.startPoint = Vect(event)
self.movingMark = mark
# make cursor keep the same shape during whole scaling
# even when it moves out of the mark
self.config(cursor=self.scMarkCursors[mark])
self.tag_unbind(self.markIds[mark], '<Leave>')
self.bind('<B1-Motion>', self.doScale)
self.bind('<ButtonRelease-1>', self.stopScale)
def doScale(self, event):
"""Recalculate the ellipse params and redraw the items on canvas."""
endPoint = Vect(event)
mouseMove = endPoint - self.startPoint
self.ellipse.recalcCornersOnScale(mouseMove, self.movingMark)
self.ellipse.calcMarkCoords()
self.redrawItems()
self.startPoint = Vect(event)
def stopScale(self, event):
"""Reset cursor shape and bindings to state before scaling."""
# reset the cursor shape
self.config(cursor='')
self.tag_bind(self.markIds[self.movingMark], '<Leave>',
lambda ev: self.config(cursor=''))
# set the bindings to the initial ones
self.bind('<B1-Motion>', self.onMotion)
self.bind('<ButtonRelease-1>', self.onRelease)
def startRotate(self, event):
"""Remember the start point and set the bindings."""
self.startPoint = Vect(event)
# make the cursor keep the same shape during the rotation
# even when it moves out of the mark
self.config(cursor='exchange')
self.tag_unbind('marks', '<Leave>')
self.bind('<B1-Motion>', self.doRotate)
self.bind('<ButtonRelease-1>', self.stopRotate)
def doRotate(self, event):
"""Recalculate the ellipse params and redraw the items on canvas."""
# calculate the angle difference
startAngle = self.startPoint.getAngle(self.ellipse.center)
endAngle = Vect(event).getAngle(self.ellipse.center)
diffAngle = endAngle - startAngle
# update the parameters of the ellipse
self.ellipse.changeAngle(diffAngle)
self.ellipse.calcMarkCoords()
self.redrawItems()
self.startPoint = Vect(event)
def stopRotate(self, event):
"""Reset cursor shape and bindings to state before rotation."""
# reset the cursor shape
self.config(cursor='')
self.tag_bind('marks', '<Leave>', lambda ev: self.config(cursor=''))
# set the bindings to the initial ones
self.bind('<B1-Motion>', self.onMotion)
self.bind('<ButtonRelease-1>', self.onRelease)
def redrawItems(self):
"""Redraw the ellipse and the scaling/rotation marks.
If the angle of the ellipse rotation is zero (i.e. the ellipse
is horizontal) draw it as an oval, otherwise draw it as a polygon.
"""
# redraw the ellipse
self.delete(self.id)
if self.ellipse.angle == 0:
self.id = self.create_oval(self.ellipse.topLeft.x,
self.ellipse.topLeft.y,
self.ellipse.bottomRight.x,
self.ellipse.bottomRight.y,
**self.settings,
tags='ellipse')
else:
# get tuple of points covering (densely enough) the ellipse border
polygonPoints = self.getPolygonPoints()
# draw the rotated ellipse - as a polygon
self.id = self.create_polygon(polygonPoints,
**self.settings,
tags='ellipse')
# redraw the marks
if self.scaleMode:
mode = 'scale'
else:
mode = 'rotate'
self.delete('marks')
self.drawMarks(mode=mode)
def getPolygonPoints(self, steps=100):
"""Get coordinates of points placed around the ellipse border.
Arguments:
steps (int): the number of points (default 100)
Returns:
a tuple of points' coords: (x0, y0, x1, y1, ...)
"""
points = []
for i in range(steps):
# the angle for this step
theta = 2 * math.pi * float(i) / steps
point = (self.ellipse.center + math.cos(theta) * self.ellipse.a +
math.sin(theta) * self.ellipse.b)
points.append(point.x)
points.append(point.y)
return tuple(points)
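# The Vect helper used throughout DrawingCanvas is defined later in the
# original module, outside this excerpt. A minimal sketch that is consistent
# with how Vect is used above (construction from a Tk event or from x/y
# values, +, -, right scalar multiplication, and getAngle) could look like the
# class below; the name _VectSketch and its implementation are assumptions,
# not the original code.
class _VectSketch:
    def __init__(self, event_or_x, y=None):
        if y is None:
            # built from a Tk event that carries .x and .y
            self.x, self.y = event_or_x.x, event_or_x.y
        else:
            self.x, self.y = event_or_x, y
    def __add__(self, other):
        # supports both another vector and a plain (dx, dy) tuple,
        # as in `position + (self.markSize, self.markSize)`
        ox, oy = (other.x, other.y) if hasattr(other, 'x') else other
        return _VectSketch(self.x + ox, self.y + oy)
    def __sub__(self, other):
        ox, oy = (other.x, other.y) if hasattr(other, 'x') else other
        return _VectSketch(self.x - ox, self.y - oy)
    def __rmul__(self, scalar):
        # supports `math.cos(theta) * vector` as used in getPolygonPoints
        return _VectSketch(scalar * self.x, scalar * self.y)
    def getAngle(self, center):
        # angle of the vector from `center` to this point, in [0, 2*pi)
        return math.atan2(self.y - center.y, self.x - center.x) % (2 * math.pi)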
class Ellipse():
"""A class that keeps track of the parameters defining position and
size of an ellipse, as well as positions of the scale/rotate marks.
Attributes:
topLeft (Vect): top left point of rectangle encapsulating the ellipse
bottomRight (Vect): bottom right point of the rectangle
angle (float): angle of rotation of the ellipse (its major axis)
in the (x, y) coord system of the canvas;
takes values from interval [0, 2 pi)
center (Vect): the center of the ellipse
a (Vect): major semi-axis
b (Vect): minor semi-axis
markCoords (dict): a dictionary of form {mark-name: mark-position}
where: mark-name (str), mark-position (Vect)
"""
def __init__(self, topLeft, bottomRight):
"""Initialize an ellipse object.
At the time of creation, the ellipse is horizontal (the major
axis parallel to the x-axis of the canvas).
Arguments:
topLeft (Vect): top left point of rect. encapsulating the ellipse
bottomRight (Vect): bottom right point of the rectangle
"""
self.topLeft = topLeft
self.bottomRight = bottomRight
self.angle = 0
self.markCoords = {}
        # calculate initial values of parameters and positions of
an older version of
# Python that cannot validate certificates encrypted with SHA-2. If it is, then
# fall back on disabling the certificate validation and try again - unless the
# SHOTGUN_FORCE_CERTIFICATE_VALIDATION environment variable has been set by the
# user. In that case we simply raise the exception. Any other exceptions simply
# get raised as well.
#
# For more info see:
# http://blog.shotgunsoftware.com/2016/01/important-ssl-certificate-renewal-and.html
#
# SHA-2 errors look like this:
# [Errno 1] _ssl.c:480: error:0D0C50A1:asn1 encoding routines:ASN1_item_verify:
# unknown message digest algorithm
#
# Any other exceptions simply get raised.
if "unknown message digest algorithm" not in str(e) or \
"SHOTGUN_FORCE_CERTIFICATE_VALIDATION" in os.environ:
raise
if self.config.no_ssl_validation is False:
LOG.warning("SSL Error: this Python installation is incompatible with "
"certificates signed with SHA-2. Disabling certificate validation. "
"For more information, see http://blog.shotgunsoftware.com/2016/01/"
"important-ssl-certificate-renewal-and.html")
self._turn_off_ssl_validation()
# reload user agent to reflect that we have turned off ssl validation
req_headers["user-agent"] = "; ".join(self._user_agents)
self._close_connection()
if attempt == max_rpc_attempts:
raise
except Exception:
self._close_connection()
if attempt == max_rpc_attempts:
LOG.debug("Request failed. Giving up after %d attempts." % attempt)
raise
LOG.debug(
"Request failed, attempt %d of %d. Retrying in %.2f seconds..." %
(attempt, max_rpc_attempts, rpc_attempt_interval)
)
time.sleep(rpc_attempt_interval)
def _http_request(self, verb, path, body, headers):
"""
Make the actual HTTP request.
"""
url = urllib.parse.urlunparse((self.config.scheme, self.config.server, path, None, None, None))
LOG.debug("Request is %s:%s" % (verb, url))
LOG.debug("Request headers are %s" % headers)
LOG.debug("Request body is %s" % body)
conn = self._get_connection()
resp, content = conn.request(url, method=verb, body=body, headers=headers)
        # http response code is handled elsewhere
http_status = (resp.status, resp.reason)
resp_headers = dict(
(k.lower(), v)
for k, v in six.iteritems(resp)
)
resp_body = content
LOG.debug("Response status is %s %s" % http_status)
LOG.debug("Response headers are %s" % resp_headers)
LOG.debug("Response body is %s" % resp_body)
return (http_status, resp_headers, resp_body)
def _parse_http_status(self, status):
"""
Parse the status returned from the http request.
:param tuple status: Tuple of (code, reason).
:raises: RuntimeError if the http status is non success.
"""
error_code = status[0]
errmsg = status[1]
if status[0] >= 300:
headers = "HTTP error from server"
if status[0] == 503:
errmsg = "Shotgun is currently down for maintenance or too busy to reply. Please try again later."
raise ProtocolError(self.config.server,
error_code,
errmsg,
headers)
return
def _decode_response(self, headers, body):
"""
Decode the response from the server from the wire format to
a python data structure.
:param dict headers: Headers from the server.
:param str body: Raw response body from the server.
:returns: If the content-type starts with application/json or
text/javascript the body is json decoded. Otherwise the raw body is
returned.
:rtype: str
"""
if not body:
return body
ct = (headers.get("content-type") or "application/json").lower()
if ct.startswith("application/json") or ct.startswith("text/javascript"):
return self._json_loads(body)
return body
def _json_loads(self, body):
return json.loads(body)
def _json_loads_ascii(self, body):
"""
See http://stackoverflow.com/questions/956867
"""
def _decode_list(lst):
newlist = []
for i in lst:
if isinstance(i, six.text_type):
i = six.ensure_str(i)
elif isinstance(i, list):
i = _decode_list(i)
newlist.append(i)
return newlist
def _decode_dict(dct):
newdict = {}
for k, v in six.iteritems(dct):
if isinstance(k, six.text_type):
k = six.ensure_str(k)
if isinstance(v, six.text_type):
v = six.ensure_str(v)
elif isinstance(v, list):
v = _decode_list(v)
newdict[k] = v
return newdict
return json.loads(body, object_hook=_decode_dict)
def _response_errors(self, sg_response):
"""
Raise any API errors specified in the response.
:raises ShotgunError: If the server response contains an exception.
"""
# error code for authentication related problems
ERR_AUTH = 102
# error code when 2FA authentication is required but no 2FA token provided.
ERR_2FA = 106
# error code when SSO is activated on the site, preventing the use of username/password for authentication.
ERR_SSO = 108
# error code when Oxygen is activated on the site, preventing the use of username/password for authentication.
ERR_OXYG = 110
if isinstance(sg_response, dict) and sg_response.get("exception"):
if sg_response.get("error_code") == ERR_AUTH:
raise AuthenticationFault(sg_response.get("message", "Unknown Authentication Error"))
elif sg_response.get("error_code") == ERR_2FA:
raise MissingTwoFactorAuthenticationFault(
sg_response.get("message", "Unknown 2FA Authentication Error")
)
elif sg_response.get("error_code") == ERR_SSO:
raise UserCredentialsNotAllowedForSSOAuthenticationFault(
sg_response.get("message",
"Authentication using username/password is not "
"allowed for an SSO-enabled Shotgun site")
)
elif sg_response.get("error_code") == ERR_OXYG:
raise UserCredentialsNotAllowedForOxygenAuthenticationFault(
sg_response.get("message", "Authentication using username/password is not "
"allowed for an Autodesk Identity enabled Shotgun site")
)
else:
# raise general Fault
raise Fault(sg_response.get("message", "Unknown Error"))
return
def _visit_data(self, data, visitor):
"""
Walk the data (simple python types) and call the visitor.
"""
if not data:
return data
recursive = self._visit_data
if isinstance(data, list):
return [recursive(i, visitor) for i in data]
if isinstance(data, tuple):
return tuple(recursive(i, visitor) for i in data)
if isinstance(data, dict):
return dict(
(k, recursive(v, visitor))
for k, v in six.iteritems(data)
)
return visitor(data)
def _transform_outbound(self, data):
"""
Transform data types or values before they are sent by the client.
- changes timezones
- converts dates and times to strings
"""
if self.config.convert_datetimes_to_utc:
def _change_tz(value):
if value.tzinfo is None:
value = value.replace(tzinfo=SG_TIMEZONE.local)
return value.astimezone(SG_TIMEZONE.utc)
else:
_change_tz = None
local_now = datetime.datetime.now()
def _outbound_visitor(value):
if isinstance(value, datetime.datetime):
if _change_tz:
value = _change_tz(value)
return value.strftime("%Y-%m-%dT%H:%M:%SZ")
if isinstance(value, datetime.date):
# existing code did not tz transform dates.
return value.strftime("%Y-%m-%d")
if isinstance(value, datetime.time):
value = local_now.replace(
hour=value.hour,
minute=value.minute,
second=value.second,
microsecond=value.microsecond
)
if _change_tz:
value = _change_tz(value)
return value.strftime("%Y-%m-%dT%H:%M:%SZ")
# ensure return is six.text_type
if isinstance(value, six.string_types):
return six.ensure_text(value)
return value
return self._visit_data(data, _outbound_visitor)
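    # Illustrative behaviour of the outbound transform above (the values are
    # made up): with convert_datetimes_to_utc enabled, a naive
    # datetime.datetime(2021, 6, 1, 9, 30) is first tagged with the local
    # timezone, converted to UTC and serialized with "%Y-%m-%dT%H:%M:%SZ",
    # while datetime.date(2021, 6, 1) is sent as "2021-06-01" with no
    # timezone handling, as noted in _outbound_visitor.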
def _transform_inbound(self, data):
"""
Transforms data types or values after they are received from the server.
"""
# NOTE: The time zone is removed from the time after it is transformed
# to the local time, otherwise it will fail to compare to datetimes
# that do not have a time zone.
if self.config.convert_datetimes_to_utc:
def _change_tz(x):
return x.replace(tzinfo=SG_TIMEZONE.utc).astimezone(SG_TIMEZONE.local)
else:
_change_tz = None
def _inbound_visitor(value):
if isinstance(value, six.string_types):
if len(value) == 20 and self._DATE_TIME_PATTERN.match(value):
try:
# strptime was not on datetime in python2.4
value = datetime.datetime(
*time.strptime(value, "%Y-%m-%dT%H:%M:%SZ")[:6])
except ValueError:
return value
if _change_tz:
return _change_tz(value)
return value
return value
return self._visit_data(data, _inbound_visitor)
# ========================================================================
# Connection Functions
def _get_connection(self):
"""
Return the current connection or creates a new connection to the current server.
"""
if self._connection is not None:
return self._connection
if self.config.proxy_server:
pi = ProxyInfo(socks.PROXY_TYPE_HTTP, self.config.proxy_server,
self.config.proxy_port, proxy_user=self.config.proxy_user,
proxy_pass=self.config.proxy_pass)
self._connection = Http(timeout=self.config.timeout_secs, ca_certs=self.__ca_certs,
proxy_info=pi, disable_ssl_certificate_validation=self.config.no_ssl_validation)
else:
self._connection = Http(timeout=self.config.timeout_secs, ca_certs=self.__ca_certs,
proxy_info=None, disable_ssl_certificate_validation=self.config.no_ssl_validation)
return self._connection
def _close_connection(self):
"""
Close the current connection.
"""
if self._connection is None:
return
for conn in self._connection.connections.values():
try:
conn.close()
except Exception:
pass
self._connection.connections.clear()
self._connection = None
return
# ========================================================================
# Utility
def _parse_records(self, records):
"""
Parse 'records' returned from the api to do local modifications:
- Insert thumbnail urls
- Insert local file paths.
        - Revert &lt; html entities that may be the result of input sanitization
          mechanisms back to a literal < character.
:param records: List of records (dicts) to process or a single record.
:returns: A list of the records processed.
"""
if not records:
return []
if not isinstance(records, (list, tuple)):
records = [records, ]
for rec in records:
# skip results that aren't entity dictionaries
if not isinstance(rec, dict):
continue
# iterate over each item and check each field for possible injection
for k, v in six.iteritems(rec):
if not v:
continue
# Check for html entities in strings
if isinstance(v, str):
                    rec[k] = rec[k].replace("&lt;", "<")
# check for thumbnail for older version (<3.3.0) of shotgun
if k == "image" and self.server_caps.version and self.server_caps.version < (3, 3, 0):
rec["image"] = self._build_thumb_url(rec["type"], rec["id"])
continue
if isinstance(v, dict) and v.get("link_type") == "local" and self.client_caps.local_path_field in v:
local_path = v[self.client_caps.local_path_field]
v["local_path"] = local_path
v["url"] = "file://%s" % (local_path or "",)
return records
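    # Illustrative record shape for the local-file branch above (the path is
    # made up): a field value such as
    #   {"link_type": "local", <local_path_field>: "/mnt/projects/shot.mov"}
    # where <local_path_field> is whatever self.client_caps.local_path_field
    # names on this platform, gains "local_path" set to that path and "url"
    # set to "file:///mnt/projects/shot.mov".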
def _build_thumb_url(self, entity_type, entity_id):
"""
Return the URL for the thumbnail of an entity given the entity type and the entity id.
Note: This makes a call to the server for every thumbnail.
:param entity_type: Entity type the id is for.
:param entity_id: id of the entity to get the thumbnail for.
:returns: Fully qualified url to the thumbnail.
"""
# Example response from the end point
# curl "https://foo.com/upload/get_thumbnail_url?entity_type=Version&entity_id=1"
import random
from flask import Flask, request, redirect, render_template, session, flash
import cgi
from StringSigFigs import MakeNumber, RoundValue, CheckAnswer, CheckRounding, ApplySciNotation
from CalcsWithSigFigs import addValues, subtractValues, multiplyValues, divideValues, findDecimalPlaces, addWithPlaceholders, subtractWithPlaceholders
app = Flask(__name__)
app.config['DEBUG'] = True
app.secret_key = 'yrtsimehc'
@app.route('/')
def index():
session.clear()
return render_template('index.html',title="Sig Fig Practice")
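# The helpers imported from StringSigFigs are not part of this excerpt. From
# the way they are called below, MakeNumber(sigFigs, power) appears to return
# a decimal string with `sigFigs` significant digits whose leading digit sits
# at 10**power. The function below is a rough, assumed sketch of that
# behaviour (not the original implementation); note that for whole numbers
# with trailing zeros the significant-figure count is ambiguous, which is why
# the routes re-check values with CheckRounding.
def _make_number_sketch(sig_figs, power):
    digits = [str(random.randrange(1, 10))]                       # non-zero leading digit
    digits += [str(random.randrange(10)) for _ in range(sig_figs - 1)]
    mantissa = ''.join(digits)
    if power >= sig_figs - 1:
        return mantissa + '0' * (power - sig_figs + 1)            # e.g. '3400'
    if power >= 0:
        return mantissa[:power + 1] + '.' + mantissa[power + 1:]  # e.g. '34.07'
    return '0.' + '0' * (-power - 1) + mantissa                   # e.g. '0.0034'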
@app.route('/countingsf', methods=['POST', 'GET'])
def countingsf():
if request.method == 'POST':
answer = request.form['answer']
actualSigFigs = request.form['actualSigFigs']
value = request.form['value']
if answer==actualSigFigs:
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('countingSigFigs.html', value=value, sigFigs = actualSigFigs, answer = answer)
sigFigs = random.randrange(1,7)
power = random.randrange(-5,9)
value = MakeNumber(sigFigs,power)
return render_template('countingSigFigs.html',title="Counting Sig Figs", value=value, sigFigs = sigFigs)
@app.route('/roundingsf', methods=['POST', 'GET'])
def roundingsf():
if request.method == 'POST':
answer = request.form['answer']
origValue = request.form['value']
sigFigs = int(request.form['sigFigs'])
roundedValue = RoundValue(origValue, sigFigs)
if CheckAnswer(roundedValue, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('roundingSigFigs.html', value=origValue, sigFigs = sigFigs, answer = answer, roundedValue=roundedValue)
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-4,6)
value = MakeNumber(9,power)
result = RoundValue(value, sigFigs)
iffyValue = CheckRounding(result,sigFigs)
return render_template('roundingSigFigs.html',title="Rounding Sig Figs", value=value, sigFigs = sigFigs)
@app.route('/sfcalcs', methods=['POST', 'GET'])
def sfcalcs():
if request.method == 'POST':
answer = request.form['answer']
result = request.form['result']
value1 = request.form['value1']
value2 = request.form['value2']
operation = request.form['operation']
if CheckAnswer(result, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, result = result, answer = answer, operation=operation)
operators = ['+', '-', 'x', '/']
operation = random.randrange(4) #Randomly select +, -, * or / using integers 0 - 3, respectively.
if operation < 2: #For + and -, create 2 values between 0.001 and 90 with 1 - 6 sig figs.
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-3,2)
value = MakeNumber(sigFigs,power)
iffyValue = CheckRounding(value,sigFigs)
sigFigs1 = sigFigs
power1 = power
value1 = value
iffyValue = True
while iffyValue:
sigFigs = random.randrange(1,7)
power = random.randrange(-3,2)
value = MakeNumber(sigFigs,power)
iffyValue = CheckRounding(value,sigFigs)
sigFigs2 = sigFigs
power2 = power
value2 = value
else: #For * and /, create 2 values between 0.01 and 900 with 1 - 6 sig figs.
sigFigs1 = random.randrange(1,7)
power1 = random.randrange(-2,3)
value1 = MakeNumber(sigFigs1,power1)
sigFigs2 = random.randrange(1,7)
power2 = random.randrange(-2,3)
value2 = MakeNumber(sigFigs2,power2)
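    # In the branches below, the repeated condition checks whether an operand
    # is a whole number of 10 or more (no decimal point) whose significant
    # figures are fewer than its digit count, i.e. it carries trailing
    # placeholder zeros; those cases are routed to the *WithPlaceholders
    # helpers instead of the plain addValues/subtractValues helpers.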
if operation == 0:
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = addWithPlaceholders(value1,value2)
else:
result = addValues(value1,value2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
    elif operation == 1 and float(value1) > float(value2):
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = subtractWithPlaceholders(value1,value2)
else:
result = subtractValues(value1,value2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
elif operation == 1 and float(value1) < float(value2):
if (float(value1)>=10 and value1.find('.') == -1 and sigFigs1 < len(value1)) or (float(value2)>=10 and value2.find('.') == -1 and sigFigs2 < len(value2)):
result = subtractWithPlaceholders(value2,value1)
else:
result = subtractValues(value2,value1)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value2, value2 = value1, operation = operators[operation], result = result)
elif operation == 2:
result = multiplyValues(value1,sigFigs1,value2,sigFigs2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
elif float(value1)/float(value2)<1e-4:
result = divideValues(value2,sigFigs2,value1,sigFigs1)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value2, value2 = value1, operation = operators[operation], result = result)
else:
result = divideValues(value1,sigFigs1,value2,sigFigs2)
return render_template('sfCalcs.html',title="Calculations with Sig Figs", value1 = value1, value2 = value2, operation = operators[operation], result = result)
@app.route('/scinotation', methods=['POST', 'GET'])
def scinotation():
if request.method == 'POST':
sciNot = request.form['sciNot']
if sciNot=='True': #Given a value in sci notation, the user enters a number in standard notation.
answer = request.form['answer']
result = request.form['value']
sciValue = request.form['sciValue']
power = request.form['power']
if CheckAnswer(result, answer):
flash('Correct! :-)', 'correct')
else:
flash('Try again, or click here to reveal the answer.', 'error')
return render_template('scientificNotation.html',title="Scientific Notation", value = result, sciValue=sciValue, power = power, sciNot = True, answer = answer)
else: #Given a value in standard notation, the user enters a number in sci notation.
answer = request.form['answer']
result = request.form['value']
sciValue = request.form['sciValue']
power = request.form['power']
exponent = request.form['exponent']
if CheckAnswer(power, exponent) and CheckAnswer(sciValue,answer):
flash('Correct! :-)', 'correct')
elif CheckAnswer(power, exponent) and not CheckAnswer(sciValue,answer):
flash('Correct power. Wrong decimal value.', 'error')
elif CheckAnswer(sciValue,answer) and not CheckAnswer(power, exponent):
flash('Correct decimal value. Wrong power.', 'error')
else:
flash('Both entries are incorrect. Try again, or click to reveal the answer.', 'error')
return render_template('scientificNotation.html',title="Scientific Notation", value = result, sciValue=sciValue, power = power, sciNot = False, answer = answer, exponent = exponent)
sigFigs = random.randrange(1,5)
power = random.randrange(-5,9)
value = MakeNumber(sigFigs,power)
sciValue = ApplySciNotation(value, sigFigs)
if random.randrange(2) == 0: #Flip a coin: If '0', ask the user to change sci notation into standard notation.
return render_template('scientificNotation.html',title="Scientific Notation", value = value, sciValue=sciValue, power = power, sciNot = True)
else: #Otherwise ('1'), ask the user to change standard notation into sci notation.
return render_template('scientificNotation.html',title="Scientific Notation", value=value, sciValue=sciValue, power = power, sciNot = False)
@app.route('/sftutorial1', methods=['POST', 'GET'])
def sftutorial1():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
else:
displayText=1
return render_template('sftutorial1.html',title="Sig Fig Tutorial", page = 1, displayText=displayText)
@app.route('/sftutorial2', methods=['POST', 'GET'])
def sftutorial2():
if request.method == 'POST':
firstZeroRule = request.form['firstZeroRule']
session['firstZeroRule'] = firstZeroRule
secondHalf = True
if firstZeroRule == '':
flash('Please enter a response.', 'error')
secondHalf = False
return render_template('sftutorial2.html', answer = firstZeroRule, page = 2, secondHalf = secondHalf)
return render_template('sftutorial2.html',title="Sig Fig Tutorial", page = 2, secondHalf=False)
@app.route('/sftutorial3', methods=['POST', 'GET'])
def sftutorial3():
if request.method == 'POST':
firstZeroRule = session.get('firstZeroRule', None)
secondZeroRule = request.form['secondZeroRule']
session['secondZeroRule'] = secondZeroRule
secondHalf = True
if secondZeroRule == '':
flash('Please enter a response.', 'error')
secondHalf = False
return render_template('sftutorial3.html', firstZeroRule = firstZeroRule, secondZeroRule = secondZeroRule, page = 3, secondHalf = secondHalf)
firstZeroRule = session.get('firstZeroRule', None)
return render_template('sftutorial3.html',title="Sig Fig Tutorial", page = 3, firstZeroRule = firstZeroRule, secondHalf=False)
@app.route('/sftutorial4', methods=['POST', 'GET'])
def sftutorial4():
firstZeroRule = session.get('firstZeroRule', None)
secondZeroRule = session.get('secondZeroRule', None)
return render_template('sftutorial4.html',title="Sig Fig Tutorial", page = 4, firstZeroRule=firstZeroRule, secondZeroRule=secondZeroRule)
@app.route('/sftutorial5', methods=['POST', 'GET'])
def sftutorial5():
return render_template('sftutorial5.html',title="Sig Fig Tutorial", page = 5)
@app.route('/roundingtutorial1', methods=['POST', 'GET'])
def roundingtutorial1():
return render_template('roundingtutorial1.html',title="Rounding Tutorial", page = 1)
@app.route('/roundingtutorial2', methods=['POST', 'GET'])
def roundingtutorial2():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
roundedAnswer = request.form['5SigFigs']
answers = []
numCorrect = 0
if displayText == 4 and roundedAnswer != '12.386':
flash('Not quite correct. Try again.', 'error')
displayText = 3
elif displayText>5:
correctAnswers = ['0.00798','0.0080','0.008']
for x in range(3):
answers.append(request.form[str(3-x)+'SigFigs'])
if CheckAnswer(correctAnswers[x],answers[x]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again.', 'error')
else:
displayText=1
roundedAnswer = ''
answers = []
numCorrect = 0
return render_template('roundingtutorial2.html',title="Rounding Tutorial", page = 2, displayText=displayText, roundedAnswer = roundedAnswer, answers = answers, numCorrect = numCorrect)
@app.route('/roundingtutorial3', methods=['POST', 'GET'])
def roundingtutorial3():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
example3 = request.form['example3']
answers = []
numCorrect = 0
if displayText == 2 and example3 != '2380':
flash('Not quite correct. Try again.', 'error')
displayText = 1
elif displayText > 3:
correctAnswers = ['0.0998','0.10','0.1']
for x in range(3):
answers.append(request.form[str(3-x)+'SigFigs'])
if CheckAnswer(correctAnswers[x],answers[x]):
flash('Correct! :-)', 'correct')
numCorrect += 1
else:
flash('Try again.', 'error')
else:
displayText=1
example3 = ''
answers = []
numCorrect = 0
return render_template('roundingtutorial3.html',title="Rounding Tutorial", page = 3, displayText=displayText, answers = answers, example3 = example3, numCorrect = numCorrect)
@app.route('/roundingtutorial4', methods=['POST', 'GET'])
def roundingtutorial4():
return render_template('roundingtutorial4.html',title="Rounding Tutorial", page = 4)
@app.route('/scinottutorial1', methods=['POST', 'GET'])
def scinottutorial1():
if request.method == 'POST':
displayText = int(request.form['displayText'])
displayText += 1
if displayText == 2:
decimal = request.form['decimal']
power = request.form['exponent']
decimals = ['1.5', '15', '150', '1500']
powers = ['3','2','1','0']
if decimal in decimals:
index = decimals.index(decimal)
if power != powers[index]:
flash('Incorrect power. Try again.', 'error')
displayText = 1
else:
flash('Incorrect decimal value. Try again.', 'error')
displayText = 1
else:
decimal = ''
power = ''
else:
displayText=1
decimal = ''
power = ''
return render_template('scinottutorial1.html',title="Scientific Notation Tutorial", page = 1, displayText = displayText, decimal = decimal, exponent=power)
@app.route('/scinottutorial2', methods=['POST', 'GET'])
def scinottutorial2():
if request.method == 'POST':
        decimals
import argparse
import base64
import datetime
import json
import logging
import sys
import uuid
import flask
import jwt
from flask import Response
from flask_babel import lazy_gettext as _
from jwt.algorithms import HMACAlgorithm
from sqlalchemy.orm.session import Session
from api.base_controller import BaseCirculationManagerController
from api.registration.constants import RegistrationConstants
from core.app_server import url_for
from core.model import (
ConfigurationSetting,
Credential,
DataSource,
DelegatedPatronIdentifier,
ExternalIntegration,
Library,
Patron,
create,
get_one,
)
from core.scripts import Script
from core.util.datetime_helpers import datetime_utc, utc_now
from core.util.problem_detail import ProblemDetail
from core.util.xmlparser import XMLParser
from .config import CannotLoadConfiguration, Configuration
from .problem_details import *
class AdobeVendorIDController(object):
"""Flask controllers that implement the Account Service and
Authorization Service portions of the Adobe Vendor ID protocol.
"""
def __init__(self, _db, library, vendor_id, node_value, authenticator):
self._db = _db
self.library = library
self.request_handler = AdobeVendorIDRequestHandler(vendor_id)
self.model = AdobeVendorIDModel(_db, library, authenticator, node_value)
def create_authdata_handler(self, patron):
"""Create an authdata token for the given patron.
This controller method exists only for backwards compatibility
with older client applications. Newer applications are
expected to understand the DRM Extensions for OPDS.
"""
__transaction = self._db.begin_nested()
credential = self.model.create_authdata(patron)
__transaction.commit()
return Response(credential.credential, 200, {"Content-Type": "text/plain"})
def signin_handler(self):
"""Process an incoming signInRequest document."""
__transaction = self._db.begin_nested()
output = self.request_handler.handle_signin_request(
flask.request.data, self.model.standard_lookup, self.model.authdata_lookup
)
__transaction.commit()
return Response(output, 200, {"Content-Type": "application/xml"})
def userinfo_handler(self):
"""Process an incoming userInfoRequest document."""
output = self.request_handler.handle_accountinfo_request(
flask.request.data, self.model.urn_to_label
)
return Response(output, 200, {"Content-Type": "application/xml"})
def status_handler(self):
return Response("UP", 200, {"Content-Type": "text/plain"})
class DeviceManagementProtocolController(BaseCirculationManagerController):
"""Implementation of the DRM Device ID Management Protocol.
The code that does the actual work is in DeviceManagementRequestHandler.
"""
DEVICE_ID_LIST_MEDIA_TYPE = "vnd.librarysimplified/drm-device-id-list"
PLAIN_TEXT_HEADERS = {"Content-Type": "text/plain"}
@property
def link_template_header(self):
"""Generate the Link Template that explains how to deregister
a specific DRM device ID.
"""
library = flask.request.library
url = url_for(
"adobe_drm_device",
library_short_name=library.short_name,
device_id="{id}",
_external=True,
)
# The curly brackets in {id} were escaped. Un-escape them to
# get a Link Template.
url = url.replace("%7Bid%7D", "{id}")
return {"Link-Template": '<%s>; rel="item"' % url}
def _request_handler(self, patron):
"""Create a DeviceManagementRequestHandler for the appropriate
Credential of the given Patron.
:return: A DeviceManagementRequestHandler
"""
if not patron:
return INVALID_CREDENTIALS.detailed(_("No authenticated patron"))
credential = AdobeVendorIDModel.get_or_create_patron_identifier_credential(
patron
)
return DeviceManagementRequestHandler(credential)
def device_id_list_handler(self):
"""Manage the list of device IDs associated with an Adobe ID."""
handler = self._request_handler(flask.request.patron)
if isinstance(handler, ProblemDetail):
return handler
device_ids = self.DEVICE_ID_LIST_MEDIA_TYPE
if flask.request.method == "GET":
# Serve a list of device IDs.
output = handler.device_list()
if isinstance(output, ProblemDetail):
return output
headers = self.link_template_header
headers["Content-Type"] = device_ids
return Response(output, 200, headers)
elif flask.request.method == "POST":
# Add a device ID to the list.
incoming_media_type = flask.request.headers.get("Content-Type")
if incoming_media_type != device_ids:
return UNSUPPORTED_MEDIA_TYPE.detailed(
_("Expected %(media_type)s document.", media_type=device_ids)
)
output = handler.register_device(flask.request.get_data(as_text=True))
if isinstance(output, ProblemDetail):
return output
return Response(output, 200, self.PLAIN_TEXT_HEADERS)
return METHOD_NOT_ALLOWED.detailed(_("Only GET and POST are supported."))
def device_id_handler(self, device_id):
"""Manage one of the device IDs associated with an Adobe ID."""
handler = self._request_handler(getattr(flask.request, "patron", None))
if isinstance(handler, ProblemDetail):
return handler
if flask.request.method != "DELETE":
return METHOD_NOT_ALLOWED.detailed(_("Only DELETE is supported."))
# Delete the specified device ID.
output = handler.deregister_device(device_id)
if isinstance(output, ProblemDetail):
return output
return Response(output, 200, self.PLAIN_TEXT_HEADERS)
class AdobeVendorIDRequestHandler(object):
"""Standalone class that can be tested without bringing in Flask or
the database schema.
"""
SIGN_IN_RESPONSE_TEMPLATE = """<signInResponse xmlns="http://ns.adobe.com/adept">
<user>%(user)s</user>
<label>%(label)s</label>
</signInResponse>"""
ACCOUNT_INFO_RESPONSE_TEMPLATE = """<accountInfoResponse xmlns="http://ns.adobe.com/adept">
<label>%(label)s</label>
</accountInfoResponse>"""
AUTH_ERROR_TYPE = "AUTH"
ACCOUNT_INFO_ERROR_TYPE = "ACCOUNT_INFO"
ERROR_RESPONSE_TEMPLATE = '<error xmlns="http://ns.adobe.com/adept" data="E_%(vendor_id)s_%(type)s %(message)s"/>'
TOKEN_FAILURE = "Incorrect token."
AUTHENTICATION_FAILURE = "Incorrect barcode or PIN."
URN_LOOKUP_FAILURE = "Could not identify patron from '%s'."
def __init__(self, vendor_id):
self.vendor_id = vendor_id
def handle_signin_request(self, data, standard_lookup, authdata_lookup):
parser = AdobeSignInRequestParser()
try:
data = parser.process(data)
except Exception as e:
logging.error("Error processing %s", data, exc_info=e)
return self.error_document(self.AUTH_ERROR_TYPE, str(e))
user_id = label = None
if not data:
return self.error_document(
self.AUTH_ERROR_TYPE, "Request document in wrong format."
)
if not "method" in data:
return self.error_document(self.AUTH_ERROR_TYPE, "No method specified")
if data["method"] == parser.STANDARD:
user_id, label = standard_lookup(data)
failure = self.AUTHENTICATION_FAILURE
elif data["method"] == parser.AUTH_DATA:
authdata = data[parser.AUTH_DATA]
user_id, label = authdata_lookup(authdata)
failure = self.TOKEN_FAILURE
if user_id is None:
return self.error_document(self.AUTH_ERROR_TYPE, failure)
else:
return self.SIGN_IN_RESPONSE_TEMPLATE % dict(user=user_id, label=label)
def handle_accountinfo_request(self, data, urn_to_label):
parser = AdobeAccountInfoRequestParser()
label = None
try:
data = parser.process(data)
if not data:
return self.error_document(
self.ACCOUNT_INFO_ERROR_TYPE, "Request document in wrong format."
)
if not "user" in data:
return self.error_document(
self.ACCOUNT_INFO_ERROR_TYPE,
"Could not find user identifer in request document.",
)
label = urn_to_label(data["user"])
except Exception as e:
return self.error_document(self.ACCOUNT_INFO_ERROR_TYPE, str(e))
if label:
return self.ACCOUNT_INFO_RESPONSE_TEMPLATE % dict(label=label)
else:
return self.error_document(
self.ACCOUNT_INFO_ERROR_TYPE, self.URN_LOOKUP_FAILURE % data["user"]
)
def error_document(self, type, message):
return self.ERROR_RESPONSE_TEMPLATE % dict(
vendor_id=self.vendor_id, type=type, message=message
)
class DeviceManagementRequestHandler(object):
"""Handle incoming requests for the DRM Device Management Protocol."""
def __init__(self, credential):
self.credential = credential
def device_list(self):
return "\n".join(
sorted(x.device_identifier for x in self.credential.drm_device_identifiers)
)
def register_device(self, data):
device_ids = data.split("\n")
if len(device_ids) > 1:
return PAYLOAD_TOO_LARGE.detailed(
_("You may only register one device ID at a time.")
)
for device_id in device_ids:
if device_id:
self.credential.register_drm_device_identifier(device_id)
return "Success"
def deregister_device(self, device_id):
self.credential.deregister_drm_device_identifier(device_id)
return "Success"
class AdobeRequestParser(XMLParser):
NAMESPACES = {"adept": "http://ns.adobe.com/adept"}
def process(self, data):
requests = list(self.process_all(data, self.REQUEST_XPATH, self.NAMESPACES))
if not requests:
return None
# There should only be one request tag, but if there's more than
# one, only return the first one.
return requests[0]
def _add(self, d, tag, key, namespaces, transform=None):
v = self._xpath1(tag, "adept:" + key, namespaces)
if v is not None:
v = v.text
if v is not None:
v = v.strip()
if transform is not None:
v = transform(v)
if isinstance(v, bytes):
v = v.decode("utf-8")
d[key] = v
class AdobeSignInRequestParser(AdobeRequestParser):
REQUEST_XPATH = "/adept:signInRequest"
STANDARD = "standard"
AUTH_DATA = "authData"
def process_one(self, tag, namespaces):
method = tag.attrib.get("method")
if not method:
raise ValueError("No signin method specified")
data = dict(method=method)
if method == self.STANDARD:
self._add(data, tag, "username", namespaces)
self._add(data, tag, "password", namespaces)
elif method == self.AUTH_DATA:
self._add(data, tag, self.AUTH_DATA, namespaces, base64.b64decode)
else:
raise ValueError("Unknown signin method: %s" % method)
return data
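# Illustrative input for the sign-in parser above (the credentials are made
# up): a standard-method request such as
#
#   <signInRequest xmlns="http://ns.adobe.com/adept" method="standard">
#       <username>1234567890</username>
#       <password>0987</password>
#   </signInRequest>
#
# is processed into {"method": "standard", "username": "1234567890",
# "password": "0987"}, which AdobeVendorIDRequestHandler.handle_signin_request
# then hands to the standard_lookup callback.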
class AdobeAccountInfoRequestParser(AdobeRequestParser):
REQUEST_XPATH = "/adept:accountInfoRequest"
def process_one(self, tag, namespaces):
method = tag.attrib.get("method")
data = dict(method=method)
self._add(data, tag, "user", namespaces)
return data
class AdobeVendorIDModel(object):
"""Implement Adobe Vendor ID within the Simplified database
model.
"""
AUTHDATA_TOKEN_TYPE = "Authdata for Adobe Vendor ID"
VENDOR_ID_UUID_TOKEN_TYPE = "Vendor ID UUID"
def __init__(
self, _db, library, authenticator, node_value, temporary_token_duration=None
):
self.library = library
self._db = _db
self.authenticator = authenticator
self.temporary_token_duration = temporary_token_duration or datetime.timedelta(
minutes=10
)
if isinstance(node_value, (bytes, str)):
node_value = int(node_value, 16)
self.node_value = node_value
@property
def data_source(self):
return DataSource.lookup(self._db, DataSource.ADOBE)
def uuid_and_label(self, patron):
"""Create or retrieve a Vendor ID UUID and human-readable Vendor ID
label for the given patron.
This code is semi-deprecated, which accounts for the varying
paths and the code that tries to migrate patrons to the new
system. In the future everyone will send JWTs as authdata and
we will always go from the JWT to a DelegatedPatronIdentifier.
This code always ends up at a DelegatedPatronIdentifier, but
it might pick up the final value from somewhere else along the way.
The _reason_ this code is semi-deprecated is that it only
works for a library that has its own Adobe Vendor ID.
"""
if not patron:
return None, None
# First, find or create a Credential containing the patron's
# anonymized key into the DelegatedPatronIdentifier database.
adobe_account_id_patron_identifier_credential = (
self.get_or_create_patron_identifier_credential(patron)
)
# Look up a Credential containing the patron's Adobe account
# ID created under the old system. We don't use
# Credential.lookup because we don't want to create a
# Credential if it doesn't exist.
old_style_adobe_account_id_credential = get_one(
self._db,
Credential,
patron=patron,
data_source=self.data_source,
type=self.VENDOR_ID_UUID_TOKEN_TYPE,
)
if old_style_adobe_account_id_credential:
# The value of the old-style credential will become the
# default value of the DelegatedPatronIdentifier, assuming
# we have to create one.
def new_value():
return old_style_adobe_account_id_credential.credential
else:
# There is no old-style credential. If we have to create a
# new DelegatedPatronIdentifier we will give it a value
# using the default mechanism.
new_value = None
# Look up or create a DelegatedPatronIdentifier using the
# anonymized patron identifier we just looked up or created.
utility = AuthdataUtility.from_config(patron.library, self._db)
return self.to_delegated_patron_identifier_uuid(
utility.library_uri,
adobe_account_id_patron_identifier_credential.credential,
value_generator=new_value,
)
def create_authdata(self, patron):
credential, is_new = Credential.persistent_token_create(
self._db, self.data_source, self.AUTHDATA_TOKEN_TYPE, patron
)
return credential
def standard_lookup(self, authorization_data):
"""Look up a patron by authorization header. Return their Vendor ID
# File: collect.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import json
import csv
from datetime import date, datetime
from hashlib import sha1
import botocore
import boto3
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# ch.setLevel(logging.INFO)
account_id = None
logger.addHandler(ch)
node_fields = [
'type',
'name',
'description',
'weight',
'counter'
]
edge_fields = [
'from_type',
'from_name',
'edge',
'to_type',
'to_name',
'weight'
]
def make_dirs(folder):
''' Make directories and subdirectories for a location '''
if not os.path.exists(folder):
os.makedirs(folder)
def write_json_file(filename, obj):
''' Save an object as a json file '''
with open(filename, 'w+') as file:
file.write(json.dumps(obj, default=json_serial))
logger.debug("wrote file: %s", filename)
def read_json_file(filename):
''' Read a json file back as an object '''
with open(filename, 'r') as file:
obj = json.loads(file.read())
logger.debug("read file: %s", filename)
return obj
def write_csv(nodes, filename, fieldnames):
''' Write out all the nodes to a CSV File '''
with open(filename, 'w+', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(nodes)
logger.debug("wrote file: %s", filename)
def json_serial(obj):
"""
JSON serializer for objects not serializable by default json code
"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError('Type {} not serializable'.format(type(obj)))
def fmt_dns(name):
''' Remove any trailing dots from a dns name and format to lower case '''
return name.lower().rstrip('.').replace('dualstack.', '')
def new_node(**kwargs):
''' Creates a new node and will populate the fields that match in kwargs '''
return {key: kwargs.get(key, None) for key in node_fields}
def new_edge(**kwargs):
''' Creates a new edge and will populate the fields that match in kwargs '''
return {key: kwargs.get(key, None) for key in edge_fields}
def add_update_node(existing_nodes,node):
'''
Adds a node to the array - or increments the weight if a duplicate
'''
node_name = node['type']+'_'+node['name']
if node_name in existing_nodes:
existing_nodes[node_name]['counter'] += 1
else:
existing_nodes[node_name] = node
existing_nodes[node_name]['counter'] = 1
def get_aws_account_id():
global account_id
if account_id is None:
account_id = boto3.client("sts").get_caller_identity()["Account"]
return account_id
def query_aws(api, method, region, cached=True, **kwargs):
'''
Query AWS API using api and method to call for a given region
Cache the results to the filesystem for faster re-run. Can flush cache
with flag when required
'''
# build up the filename
filename = [api, method, region]
# add kwargs as a hash to the filename
filename.append(sha1(str(kwargs).encode()).hexdigest())
# construct filename and add path
filename = os.path.join('cache', '-'.join(filename)) + '.json'
try:
# look for a cache file, return result if found
if cached:
return read_json_file(filename)
except IOError:
pass
# connect to AWS and grab the data
client = boto3.client(api, region_name=region)
if api == 's3' and method == 'list_buckets':
# s3 list_buckets has no paginator. :/
records = client.list_buckets().get('Buckets', [])
elif api == 's3' and method == 'get_bucket_location':
# s3 get_bucket_location has no paginator. :/
records = client.get_bucket_location(Bucket=kwargs['Bucket']).get('LocationConstraint',None)
# Format the us-east-1 (aka VA) buckets to be correct.
if records is None:
records = "us-east-1"
elif api == 's3' and method == 'get_bucket_website':
        # s3 get_bucket_website has no paginator. :/
try:
records = client.get_bucket_website(Bucket=kwargs['Bucket'])
except botocore.exceptions.ClientError as error:
            # NoSuchWebsiteConfiguration: the bucket has no website configured
records = {}
elif api == 'sqs' and method == 'list_queues':
# sqs list_queues has no paginator. :/
records = client.list_queues().get('QueueUrls', [])
elif api == 'opensearch' and method == 'list_domain_names':
        # opensearch list_domain_names has no paginator. :/
records = client.list_domain_names().get('DomainNames', [])
elif api == 'opensearch' and method == 'describe_domains':
# opensearch describe_domains has no paginator. :/
records = client.describe_domains(DomainNames=kwargs['DomainNames']).get('DomainStatusList', [])
elif api == 'elbv2' and method == 'describe_target_health':
# elbv2 describe_target_health has no paginator. :/
records = client.describe_target_health(TargetGroupArn=kwargs['TargetGroupArn'])
else:
# just use paginator for the method call
paginator = client.get_paginator(method)
# get all records as we might overflow maxitems
records = paginator.paginate(**kwargs).build_full_result()
write_json_file(filename, records)
return records
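# Hedged usage sketch: a call like the one below would list Route 53 hosted
# zones in us-east-1, reading or writing the JSON cache under ./cache (the
# directory must exist first, e.g. via make_dirs('cache')). AWS credentials
# are required at runtime; this is illustrative only.
def _example_query_aws():
    make_dirs('cache')
    zones = query_aws('route53', 'list_hosted_zones', 'us-east-1')
    return zones.get('HostedZones', [])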
def check_external_service(dns_name):
'''
Checks for known external services
'''
    # TODO: Make this a proper dictionary lookup.
if dns_name.endswith('pardot.com'):
return "pardot.com"
if dns_name.endswith('zendesk.com'):
return "Zendesk.com"
# Certs
if dns_name.endswith('acm-validations.aws'):
return "AWS Certs"
if dns_name.endswith('comodoca.com'):
return "Comodo CA"
if dns_name.endswith('sectigo.com'):
return "Sectigo CA"
# Google
if dns_name.endswith('ghs.google.com'):
return "Google Hosted"
if dns_name.endswith('googlehosted.com'):
return "Google Hosted"
if dns_name.endswith('dkim.amazonses.com'):
return "dkim.amazonses.com"
if dns_name.endswith('azurewebsites.net'):
return "azurewebsites.net"
if dns_name.endswith('stspg-customer.com'):
return "Status Page"
return None
def process_dns_records(zone_id, region, nodes, edges):
"""
Find nodes and edges in the DNS records
"""
# get records for zone_id and region
records = query_aws('route53', 'list_resource_record_sets', region,
HostedZoneId=zone_id).get('ResourceRecordSets', [])
for record in records:
name = fmt_dns(record['Name'])
ns_type = record['Type']
ns_value = fmt_dns(
record.get('ResourceRecords', [{}])[0].get('Value', '')
)
ns_alias = fmt_dns(
record.get('AliasTarget', {}).get('DNSName', '')
)
logger.debug(' - %s %s',name, ns_type)
if ns_type in ['CNAME', 'A']:
# add name node to the nodes
add_update_node(
nodes,
new_node(
type='dns',
name=name,
description=ns_type,
)
)
# Clamp dns weights to 1 or 0
weight = 1 if record.get('Weight', 1) > 0 else 0
# add the edge value for the CNAME
edges.append(
new_edge(
from_type='dns',
from_name=name,
edge='depends',
to_type='dns',
to_name=ns_alias if ns_alias else ns_value,
weight=weight
)
)
            # Check whether this points at a known external service (returns None if not found)
external_service_name = check_external_service(ns_value)
# If an external service create a node for it.
if external_service_name is not None:
add_update_node(
nodes,
new_node(
type='dns',
name=ns_value,
description=ns_type,
)
)
add_update_node(
nodes,
new_node(
type='externalservice',
name=external_service_name,
description=external_service_name
)
)
edges.append(
new_edge(
from_type='dns',
from_name=ns_value,
edge='depends',
to_type='externalservice',
to_name=external_service_name,
weight=1
)
)
if ns_type == 'A':
# add edge node
add_update_node(
nodes,
new_node(
type='dns',
name=ns_alias if ns_alias else ns_value,
description=ns_type,
)
)
# check node is alive
if ns_alias:
# TODO: check alias
pass
if ns_value:
# TODO: check value
pass
def process_cloudfront(region, nodes, edges):
"""
Find all the cloudfront CDN nodes
"""
records = query_aws('cloudfront', 'list_distributions', region)
records = records.get('DistributionList', {}).get('Items', {})
for instance in records:
# add a cloudfront node
add_update_node(
nodes,
new_node(
type='cloudfront',
name=fmt_dns(instance['DomainName']),
description=" ".join([instance['Id'], instance['HttpVersion']]),
region='global'
)
)
# Add the DNS node for it that is auto created
add_update_node(
nodes,
new_node(
type='dns',
name=fmt_dns(instance['DomainName']),
description='A',
)
)
# add an edge for the DNS cloudfront
edges.append(
new_edge(
from_type='dns',
from_name=fmt_dns(instance['DomainName']),
edge='depends',
to_type='cloudfront',
to_name=fmt_dns(instance['DomainName']),
weight=1
)
)
# Loop through each of the origins and link them to endpoints
for origin in instance['Origins']['Items']:
edges.append(
new_edge(
from_type='cloudfront',
from_name=fmt_dns(instance['DomainName']),
edge='depends',
to_type='dns',
to_name=fmt_dns(origin['DomainName']),
weight=1
)
)
def process_ec2s(region, nodes, edges):
"""
Find all the EC2 instances in the given region
"""
# we're only interested in running instances
records = query_aws(
'ec2',
'describe_instances',
region,
Filters=[
{'Name': 'instance-state-name', 'Values': ['running']}
]
)
records = records.get('Reservations', [])
for resv in records:
        for instance in resv.get('Instances', []):
            # collect the instance tags into a dict
            inst_tags = {t['Key']: t['Value'] for t in instance.get('Tags', [])}
description = ' '.join(
[
inst_tags.get('Name', ''),
inst_tags.get('InstRole', ''),
instance.get('InstanceType', '')
]
)
# add ec2 node
add_update_node(
nodes,
new_node(
type='ec2',
name=instance['InstanceId'],
description=description,
region=region
)
)
# Removed Private IP - as not adding much value at this point
# add_update_node(
# nodes,
# new_node(
# type='IP',
# name=instance['PrivateIpAddress'],
# description=description,
# region=region
# )
# )
# # add the private ip edge
# edges.append(
# new_edge(
# from_type='ec2',
# from_name=instance['InstanceId'],
# edge='depends',
# to_type='IP',
# to_name=instance['PrivateIpAddress'],
# weight=1
# )
# )
if instance.get('PublicIpAddress'):
# Add node for the public IP address
add_update_node(
nodes,
new_node(
type='dns',
name=instance['PublicIpAddress'],
description=description,
region=region
)
)
# add the public ip edge
edges.append(
new_edge(
from_type='ec2',
from_name=instance['InstanceId'],
edge='depends',
to_type='dns',
to_name=instance['PublicIpAddress'],
weight=1
)
)
def process_elbs(region, nodes, edges):
"""
Find all the ELB nodes
"""
records = query_aws('elb', 'describe_load_balancers', region)
for elb in records['LoadBalancerDescriptions']:
name = fmt_dns(elb['DNSName'])
add_update_node(
nodes,
new_node(
type='elb',
name=name,
description=elb['LoadBalancerName'],
region=region
)
)
# Add Edges - of dependent instances
for instances in elb['Instances']:
edges.append(
new_edge(
from_type='elb',
from_name=name,
edge='depends',
to_type='ec2',
to_name=instances['InstanceId'],
weight=1
)
)
def process_elbsv2(region, nodes, edges):
"""
Find all the ELB nodes
"""
records = query_aws('elbv2', 'describe_load_balancers', region, cached=False)
for elb in records['LoadBalancers']:
name = fmt_dns(elb['DNSName'])
add_update_node(
nodes,
new_node(
type='elb',
name=name,
description=elb['LoadBalancerName'],
region=region
)
)
# Add the DNS node for it
add_update_node(
nodes,
new_node(
type='dns',
name=fmt_dns(elb['DNSName']),
description='A',
)
)
        # add an edge for the DNS to ELB link
edges.append(
new_edge(
from_type='dns',
from_name=fmt_dns(elb['DNSName']),
edge='depends',
to_type='elb',
to_name=name,
weight=1
)
)
        # TODO - this can likely be pulled out into its own top-level function
target_groups = query_aws('elbv2',
'describe_target_groups',
region,
LoadBalancerArn=elb['LoadBalancerArn']
)
for target_group in target_groups['TargetGroups']:
# Loop over each target
target_healths = query_aws('elbv2',
'describe_target_health',
region,
TargetGroupArn=target_group['TargetGroupArn']
)
for target in target_healths['TargetHealthDescriptions']:
                # Connect the ELB to the registered target instance
                edges.append(
                    new_edge(
                        from_type='elb',
                        from_name=name,
                        edge='depends',
                        to_type='ec2',
                        to_name=target['Target']['Id'],
                        weight=1
                    )
                )
<filename>pandas/tests/reshape/concat/test_datetimes.py
import datetime as dt
from datetime import datetime
import dateutil
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
to_timedelta,
)
import pandas._testing as tm
class TestDatetimeConcat:
def test_concat_datetime64_block(self):
from pandas.core.indexes.datetimes import date_range
rng = date_range("1/1/2000", periods=10)
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_datetime_datetime64_frame(self):
# GH#2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), "hi"])
df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({"date": ind, "test": range(10)})
# it works!
concat([df1, df2_obj])
def test_concat_datetime_timezone(self):
# GH 18523
idx1 = date_range("2011-01-01", periods=3, freq="H", tz="Europe/Paris")
idx2 = date_range(start=idx1[0], end=idx1[-1], freq="H")
df1 = DataFrame({"a": [1, 2, 3]}, index=idx1)
df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
result = concat([df1, df2], axis=1)
exp_idx = (
DatetimeIndex(
[
"2011-01-01 00:00:00+01:00",
"2011-01-01 01:00:00+01:00",
"2011-01-01 02:00:00+01:00",
],
freq="H",
)
.tz_convert("UTC")
.tz_convert("Europe/Paris")
)
expected = DataFrame(
[[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
)
tm.assert_frame_equal(result, expected)
idx3 = date_range("2011-01-01", periods=3, freq="H", tz="Asia/Tokyo")
df3 = DataFrame({"b": [1, 2, 3]}, index=idx3)
result = concat([df1, df3], axis=1)
exp_idx = DatetimeIndex(
[
"2010-12-31 15:00:00+00:00",
"2010-12-31 16:00:00+00:00",
"2010-12-31 17:00:00+00:00",
"2010-12-31 23:00:00+00:00",
"2011-01-01 00:00:00+00:00",
"2011-01-01 01:00:00+00:00",
]
)
expected = DataFrame(
[
[np.nan, 1],
[np.nan, 2],
[np.nan, 3],
[1, np.nan],
[2, np.nan],
[3, np.nan],
],
index=exp_idx,
columns=["a", "b"],
)
tm.assert_frame_equal(result, expected)
# GH 13783: Concat after resample
result = concat([df1.resample("H").mean(), df2.resample("H").mean()], sort=True)
expected = DataFrame(
{"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
index=idx1.append(idx1),
)
tm.assert_frame_equal(result, expected)
def test_concat_datetimeindex_freq(self):
# GH 3232
# Monotonic index result
dr = date_range("01-Jan-2013", periods=100, freq="50L", tz="UTC")
data = list(range(100))
expected = DataFrame(data, index=dr)
result = concat([expected[:50], expected[50:]])
tm.assert_frame_equal(result, expected)
# Non-monotonic index result
result = concat([expected[50:], expected[:50]])
expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
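        # appending the two 50-row halves out of order breaks the regular "50L"
        # spacing, so the expected index can no longer carry an inferred freq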
expected.index._data.freq = None
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_datetime_object_index(self):
# https://github.com/pandas-dev/pandas/issues/11058
idx = Index(
[dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)],
dtype="object",
)
s = Series(
["a", "b"],
index=MultiIndex.from_arrays(
[
[1, 2],
idx[:-1],
],
names=["first", "second"],
),
)
s2 = Series(
["a", "b"],
index=MultiIndex.from_arrays(
[[1, 2], idx[::2]],
names=["first", "second"],
),
)
mi = MultiIndex.from_arrays(
[[1, 2, 2], idx],
names=["first", "second"],
)
assert mi.levels[1].dtype == object
expected = DataFrame(
[["a", "a"], ["b", np.nan], [np.nan, "b"]],
index=mi,
)
result = concat([s, s2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_NaT_series(self):
# GH 11693
# test for merging NaT series with datetime series.
x = Series(
date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
)
y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT with tz
expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
result = concat([y, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# without tz
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h"))
y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h"))
y[:] = pd.NaT
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT without tz
x[:] = pd.NaT
expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_concat_NaT_dataframes(self, tz):
# GH 12396
first = DataFrame([[pd.NaT], [pd.NaT]])
first = first.apply(lambda x: x.dt.tz_localize(tz))
second = DataFrame(
[[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]],
index=[2, 3],
)
expected = DataFrame(
[
pd.NaT,
pd.NaT,
Timestamp("2015/01/01", tz=tz),
Timestamp("2016/01/01", tz=tz),
]
)
result = concat([first, second], axis=0)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
@pytest.mark.parametrize("s", [pd.NaT, Timestamp("20150101")])
def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):
# GH 12396
# tz-naive
first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1))
second = DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2))
result = concat([first, second], axis=0)
expected = DataFrame(Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))
expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
if tz1 != tz2:
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
# GH 12396
first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
expected = DataFrame(
{
0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz1", [None, "UTC"])
@pytest.mark.parametrize("tz2", [None, "UTC"])
def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
# GH 12396
# tz-naive
first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
second = DataFrame(
[
[Timestamp("2015/01/01", tz=tz2)],
[Timestamp("2016/01/01", tz=tz2)],
],
index=[2, 3],
)
expected = DataFrame(
[
pd.NaT,
pd.NaT,
Timestamp("2015/01/01", tz=tz2),
Timestamp("2016/01/01", tz=tz2),
]
)
if tz1 != tz2:
expected = expected.astype(object)
result = concat([first, second])
tm.assert_frame_equal(result, expected)
class TestTimezoneConcat:
def test_concat_tz_series(self):
# gh-11755: tz and no tz
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
y = Series(date_range("2012-01-01", "2012-01-02"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# gh-11887: concat tz and object
x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
y = Series(["a", "b"])
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# see gh-12217 and gh-12306
# Concatenating two UTC times
first = DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("UTC")
second = DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize("UTC")
result = concat([first, second])
assert result[0].dtype == "datetime64[ns, UTC]"
# Concatenating two London times
first = DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
# Concatenating 2+1 London times
first = DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = DataFrame([[datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
# Concat'ing 1+2 London times
first = DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize("Europe/London")
second = DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize("Europe/London")
result = concat([first, second])
assert result[0].dtype == "datetime64[ns, Europe/London]"
def test_concat_tz_series_tzlocal(self):
# see gh-13583
x = [
Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
]
y = [
Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
]
result = concat([Series(x), Series(y)], ignore_index=True)
tm.assert_series_equal(result, Series(x + y))
assert result.dtype == "datetime64[ns, tzlocal()]"
def test_concat_tz_series_with_datetimelike(self):
# see gh-12620: tz and timedelta
x = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-02-01", tz="US/Eastern"),
]
y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
result = concat([Series(x), Series(y)], ignore_index=True)
tm.assert_series_equal(result, Series(x + y, dtype="object"))
# tz and period
y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
result = concat([Series(x), Series(y)], ignore_index=True)
tm.assert_series_equal(result, Series(x + y, dtype="object"))
def test_concat_tz_frame(self):
df2 = DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
},
index=range(5),
)
# concat
df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
tm.assert_frame_equal(df2, df3)
def test_concat_multiple_tzs(self):
# GH#12467
# combining datetime tz-aware and naive DataFrames
ts1 = Timestamp("2015-01-01", tz=None)
ts2 = Timestamp("2015-01-01", tz="UTC")
ts3 = Timestamp("2015-01-01", tz="EST")
df1 = DataFrame({"time": [ts1]})
df2 = DataFrame({"time": [ts2]})
df3 = DataFrame({"time": [ts3]})
results = concat([df1, df2]).reset_index(drop=True)
expected = DataFrame({"time": [ts1, ts2]}, dtype=object)
tm.assert_frame_equal(results, expected)
results = concat([df1, df3]).reset_index(drop=True)
expected = DataFrame({"time": [ts1, ts3]}, dtype=object)
tm.assert_frame_equal(results, expected)
results = concat([df2, df3]).reset_index(drop=True)
expected = DataFrame({"time": [ts2, ts3]})
tm.assert_frame_equal(results, expected)
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame(
{
"dt": [
datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3),
],
"b": ["A", "B", "C"],
"c": [1, 2, 3],
"d": [4, 5, 6],
}
)
df["dt"] = df["dt"].apply(lambda d: Timestamp(d, tz="US/Pacific"))
df = df.set_index(["dt", "b"])
exp_idx1 = DatetimeIndex(
["2014-01-01", "2014-01-02", "2014-01-03"] * 2, tz="US/Pacific", name="dt"
)
exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame(
{"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
)
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_tz_not_aligned(self):
# GH#22796
ts = pd.to_datetime([1, 2]).tz_localize("UTC")
a = DataFrame({"A": ts})
b = DataFrame({"A": ts, "B": ts})
result = concat([a, b], sort=True, ignore_index=True)
expected = DataFrame(
{"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"t1",
[
"2015-01-01",
pytest.param(
pd.NaT,
marks=pytest.mark.xfail(
reason="GH23037 incorrect dtype when concatenating"
),
),
],
)
def test_concat_tz_NaT(self, t1):
# GH#22796
# Concatenating tz-aware multicolumn DataFrames
ts1 = Timestamp(t1, tz="UTC")
ts2 = Timestamp("2015-01-01", tz="UTC")
ts3 = Timestamp("2015-01-01", tz="UTC")
df1 = DataFrame([[ts1, ts2]])
df2 = DataFrame([[ts3]])
result = concat([df1, df2])
expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
tm.assert_frame_equal(result, expected)
def test_concat_tz_with_empty(self):
# GH 9188
result = concat(
[DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()]
)
        expected = DataFrame(date_range("2000", periods=1, tz="UTC"))
        tm.assert_frame_equal(result, expected)
<gh_stars>1-10
from hityper.tdg import *
from hityper.typeobject import *
from hityper import logger
from hityper.stdtypes import builtin_method_properties, stdtypes, inputtypemap
import ast
from copy import deepcopy, copy
import sys, getopt
from pycg.pycg import CallGraphGenerator
from pycg import formats
from pycg.utils.constants import CALL_GRAPH_OP
import json
logger.name = __name__
# A map from ast nodes to op strings
AST2Op = {}
AST2Op[ast.Add] = "+"
AST2Op[ast.Sub] = "-"
AST2Op[ast.Mult] = "*"
AST2Op[ast.Div] = "/"
AST2Op[ast.FloorDiv] = "//"
AST2Op[ast.Mod] = "%"
AST2Op[ast.Pow] = "**"
AST2Op[ast.LShift] = "<<"
AST2Op[ast.RShift] = ">>"
AST2Op[ast.BitOr] = "|"
AST2Op[ast.BitXor] = "^"
AST2Op[ast.BitAnd] = "&"
AST2Op[ast.MatMult] = "@"
AST2Op[ast.UAdd] = "+"
AST2Op[ast.USub] = "-"
AST2Op[ast.Not] = "not"
AST2Op[ast.Invert] = "~"
AST2Op[ast.And] = "and"
AST2Op[ast.Or] = "or"
AST2Op[ast.Eq] = "=="
AST2Op[ast.NotEq] = "!="
AST2Op[ast.Lt] = "<"
AST2Op[ast.LtE] = "<="
AST2Op[ast.Gt] = ">"
AST2Op[ast.GtE] = ">="
AST2Op[ast.Is] = "is"
AST2Op[ast.IsNot] = "isnot"
AST2Op[ast.In] = "in"
AST2Op[ast.NotIn] = "not in"
def transformConstant(node):
if not isinstance(node, ast.Constant):
raise ValueError("Only Support Constant AST node.")
if isinstance(node.value, str):
return TypeObject("Text", 0)
elif isinstance(node.value, bytes):
return TypeObject("bytes", 0)
elif isinstance(node.value, bool):
return TypeObject("bool", 0)
elif isinstance(node.value, float):
return TypeObject("float", 0)
elif isinstance(node.value, int):
return TypeObject("int", 0)
    elif node.value is None:
        return TypeObject("None", 0)
    elif node.value is Ellipsis:
        return None
    else:
        raise TypeError("Currently we do not support constant of type: " + str(type(node.value)))
def Attribute2Str(node):
if isinstance(node, ast.Attribute):
return Attribute2Str(node.value) + "_@_" + node.attr
elif isinstance(node, ast.Name):
return node.id
elif isinstance(node, ast.Constant):
return transformConstant(node).type
elif isinstance(node,ast.Call):
return Attribute2Str(node.func) + "()"
else:
return "<Other>"
def Attribute2Str_Call(node):
temp1 = ''
temp1 += Attribute2Str(node.func) + "("
for argsnode in node.args:
temp1 += (Attribute2Str(argsnode)+"_&")
temp1 += ")"
return temp1
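# Hedged illustration: attribute chains and calls are flattened into the string
# keys used throughout the alias/type graphs, e.g.
#   self.model.weight -> "self_@_model_@_weight"
#   foo(a, b)         -> "foo(a_&b_&)"
def _example_attribute_strings():
    attr = ast.parse("self.model.weight", mode="eval").body
    call = ast.parse("foo(a, b)", mode="eval").body
    return Attribute2Str(attr), Attribute2Str_Call(call)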
class AliasAnalyzer(ast.NodeVisitor):
def __init__(self, aliasgraph):
self.aliasgraph = aliasgraph
self.curfunc = None
self.curclass = None
def visit_ClassDef(self, node):
if self.curclass == None:
self.curclass = node.name
self.generic_visit(node)
self.curclass = None
def visit_FunctionDef(self, node):
if self.curfunc == None:
self.curfunc = node.name
self.generic_visit(node)
self.curfunc = None
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node)
    # check assignments like self.xxx = xxx or cls.xxx = xxx, which may create aliases
def visit_Assign(self, node):
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
if type(node.value) == ast.Attribute:
attrstr = Attribute2Str(node.value)
if "<Other>" in attrstr:
logger.warning("Failed to initialize attribute " + attrstr)
else:
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
for t in node.targets:
targetstr = Attribute2Str(t)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
elif type(node.value) == ast.Name:
attrtargets = []
for t in node.targets:
if type(t) == ast.Attribute or type(t) == ast.Name:
attrtargets.append(t)
attrstr = Attribute2Str(node.value)
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
for t in attrtargets:
targetstr = Attribute2Str(t)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
self.generic_visit(node)
def visit_AnnAssign(self, node):
if node.value != None:
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
if type(node.value) == ast.Attribute:
attrstr = Attribute2Str(node.value)
if "<Other>" in attrstr:
logger.warning("Failed to initialize attribute " + attrstr)
else:
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
targetstr = Attribute2Str(node.target)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
elif type(node.value) == ast.Name:
attrstr = Attribute2Str(node.value)
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
targetstr = Attribute2Str(node.target)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
self.generic_visit(node)
def visit_Attribute(self, node):
attrstr = Attribute2Str(node)
if "<Other>" in attrstr:
logger.warning("Unsupported attribute: " + attrstr)
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if p == None:
logger.warning("Failed to initialize attribute ", attrstr)
def run(self, node):
self.visit(node)
return self.aliasgraph
class TDGGenerator(ast.NodeVisitor):
def __init__(self, filename, optimize, locations, usertypes, alias = 0, repo = None):
#usertypes
self.usertypes = self.processUserTypes(usertypes)
#type graph
self.GlobalTG = GlobalTypeGraph(filename, self.usertypes)
self.filename = filename
self.tgstack = []
#stacks and corresponding cusors
self.curfunc = -1
self.funcstack = []
self.curclass = -1
self.classstack = []
self.curop = -1
self.opstack = []
#variable maps
self.localvar2id = []
self.lastlocalvar = []
self.globalvar2id = {}
self.lastglobalvar = {}
self.attribute2id = []
self.lastattribute = []
#other info
self.modules = []
self.classnames = []
if isinstance(locations, list):
self.locations = locations
elif locations == None:
self.locations = locations
else:
logger.error("Invalid locations for generating TDGs.")
raise ValueError("Invalid locations for generating TDGs.")
self.visitedfuncs = []
self.withitemnames = []
self.withpos = []
#flags
self.asifcond = False
self.subscriptwrite = False
self.augassignread = False
self.forin = False
self.optimized = optimize
self.alias = alias
self.repo = repo
logger.info("Handling file #"+ filename)
def processUserTypes(self, usertype):
usertypes = []
for t in usertype["direct"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["indirect"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["unrecognized"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["init"]:
if t not in usertypes:
usertypes.append(t[0])
return usertypes
def addNode(self, node):
if self.curclass == -1 and self.curfunc == -1:
self.GlobalTG.addNode(node)
elif self.curfunc != -1:
self.tgstack[self.curfunc].addNode(node)
def searchNode(self, nodetype, nodename, nodepos):
if self.curclass == -1 and self.curfunc == -1:
for node in self.GlobalTG.globalnodes:
if node.nodetype == nodetype and node.lineno == nodepos[0] and node.columnno == nodepos[1] and node.columnend == nodepos[2]:
if nodetype == "TypeGen" and node.op == nodename:
return node
elif nodetype == "Symbol" and node.symbol == nodename:
return node
elif self.curfunc != -1:
for node in self.tgstack[self.curfunc].nodes:
if node.nodetype == nodetype and node.lineno == nodepos[0] and node.columnno == nodepos[1] and node.columnend == nodepos[2]:
if nodetype == "TypeGen" and node.op == nodename:
return node
elif nodetype == "Symbol" and node.symbol == nodename:
return node
def extractTypeCondition(self, node, inverse = 1):
'''
if type(node) == ast.BoolOp and type(node.op) == ast.And:
return self.extractTypeCondition(node.values[0], inverse) + self.extractTypeCondition(node.values[1], inverse)
elif type(node) == ast.UnaryOp and type(node.op) == ast.Not:
return self.extractTypeCondition(node.operand, inverse * -1)
'''
# type(x) == y
if (type(node) == ast.Compare and type(node.left) == ast.Call and type(node.left.func) == ast.Name
and node.left.func.id == "type" and len(node.left.args) == 1 and len(node.ops) == 1 and type(node.ops[0]) in [ast.Eq, ast.NotEq]
and len(node.comparators) == 1 and type(node.comparators[0]) in [ast.Name, ast.Attribute]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.left.args[0])
typestr = Attribute2Str(node.comparators[0])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if type(node.ops[0]) == ast.NotEq:
inverse = inverse * -1
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
# x is y
elif (type(node) == ast.Compare and (type(node.left) == ast.Name or type(node.left) == ast.Attribute) and len(node.ops) == 1
and type(node.ops[0]) in [ast.Is, ast.IsNot] and len(node.comparators) == 1 and type(node.comparators[0]) in [ast.Name, ast.Attribute, ast.Constant]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.left)
if type(node.comparators[0]) == ast.Constant:
typeobject = transformConstant(node.comparators[0])
else:
typestr = Attribute2Str(node.comparators[0])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if type(node.ops[0]) == ast.IsNot:
inverse = inverse * -1
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
# isinstance(x,y)
elif (type(node) == ast.Call and type(node.func) == ast.Name and node.func.id == "isinstance"
and len(node.args) == 2 and type(node.args[1]) in [ast.Name, ast.Attribute]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.args[0])
typestr = Attribute2Str(node.args[1])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
else:
if type(node) != ast.Constant:
self.asifcond = True
self.visit(node)
self.asifcond = False
return []
def visitfield(self, field):
for node in field:
if node != None:
self.visit(node)
def buildmergename(self, nodes):
namestr = ""
for n in nodes:
if isinstance(n, SymbolNode):
if n.scope == "local":
namestr = namestr + str(n.symbol) + str(n.order)
elif n.scope == "attribute":
namestr = namestr + str(n.classname) + "." + str(n.symbol) + str(n.order)
elif isinstance(n, MergeNode):
namestr = namestr + "(" + str(n.mergevar) + ")"
elif isinstance(n, BranchNode):
namestr = namestr + "(" + "branch | |
<reponame>mit-han-lab/torchquantum
import torch
import torch.nn as nn
import torchquantum as tq
import torchquantum.functional as tqf
import numpy as np
from enum import IntEnum
from torchquantum.functional import mat_dict
from torchquantum.quantization.clifford_quantization import CliffordQuantizer
from abc import ABCMeta
from .macro import C_DTYPE, F_DTYPE
from torchpack.utils.logging import logger
from typing import Iterable
__all__ = [
'op_name_dict',
'Operator',
'Operation',
'DiagonalOperation',
'Observable',
'Hadamard',
'SHadamard',
'PauliX',
'PauliY',
'PauliZ',
'I',
'S',
'T',
'SX',
'CNOT',
'CZ',
'CY',
'RX',
'RY',
'RZ',
'RXX',
'RYY',
'RZZ',
'RZX',
'SWAP',
'SSWAP',
'CSWAP',
'Toffoli',
'PhaseShift',
'Rot',
'MultiRZ',
'CRX',
'CRY',
'CRZ',
'CRot',
'U1',
'U2',
'U3',
'CU1',
'CU2',
'CU3',
'QubitUnitary',
'QubitUnitaryFast',
'TrainableUnitary',
'TrainableUnitaryStrict',
'MultiCNOT',
'MultiXCNOT',
'Reset',
]
class WiresEnum(IntEnum):
"""Integer enumeration class
to represent the number of wires
an operation acts on"""
AnyWires = -1
AllWires = 0
class NParamsEnum(IntEnum):
    """Integer enumeration class
    to represent the number of parameters
    an operation accepts"""
AnyNParams = -1
AnyNParams = NParamsEnum.AnyNParams
AllWires = WiresEnum.AllWires
"""IntEnum: An enumeration which represents all wires in the
subsystem. It is equivalent to an integer with value 0."""
AnyWires = WiresEnum.AnyWires
"""IntEnum: An enumeration which represents any wires in the
subsystem. It is equivalent to an integer with value -1."""
class Operator(tq.QuantumModule):
fixed_ops = [
'Hadamard',
'SHadamard',
'PauliX',
'PauliY',
'PauliZ',
'I',
'S',
'T',
'SX',
'CNOT',
'CZ',
'CY',
'SWAP',
'SSWAP',
'CSWAP',
'Toffoli',
'MultiCNOT',
'MultiXCNOT',
'Reset',
]
parameterized_ops = [
'RX',
'RY',
'RZ',
'RXX',
'RYY',
'RZZ',
'RZX',
'PhaseShift',
'Rot',
'MultiRZ',
'CRX',
'CRY',
'CRZ',
'CRot',
'U1',
'U2',
'U3',
'CU1',
'CU2',
'CU3',
'QubitUnitary',
'QubitUnitaryFast',
'TrainableUnitary',
'TrainableUnitaryStrict',
]
@property
def name(self):
"""String for the name of the operator."""
return self._name
@name.setter
def name(self, value):
self._name = value
def __init__(self,
has_params: bool = False,
trainable: bool = False,
init_params=None,
n_wires=None,
wires=None):
super().__init__()
self.params = None
# number of wires of the operator
# n_wires is used in gates that can be applied to arbitrary number
# of qubits such as MultiRZ
self.n_wires = n_wires
# wires that the operator applies to
self.wires = wires
self._name = self.__class__.__name__
# for static mode
self.static_matrix = None
self.inverse = False
self.clifford_quantization = False
try:
assert not (trainable and not has_params)
except AssertionError:
has_params = True
logger.warning(f"Module must have parameters to be trainable; "
f"Switched 'has_params' to True.")
self.has_params = has_params
self.trainable = trainable
if self.has_params:
self.params = self.build_params(trainable=self.trainable)
self.reset_params(init_params)
@classmethod
def _matrix(cls, params):
raise NotImplementedError
@property
def matrix(self):
return self._matrix(self.params)
@classmethod
def _eigvals(cls, params):
raise NotImplementedError
@property
def eigvals(self):
return self._eigvals(self.params)
def _get_unitary_matrix(self):
return self.matrix
def set_wires(self, wires):
self.wires = [wires] if isinstance(wires, int) else wires
def forward(self, q_device: tq.QuantumDevice, wires=None, params=None,
inverse=False):
# try:
# assert self.name in self.fixed_ops or \
# self.has_params ^ (params is not None)
# except AssertionError as err:
# logger.exception(f"Parameterized gate either has its "
# f"own parameters or has input as parameters")
# raise err
# try:
# assert not (self.wires is None and wires is None)
# except AssertionError as err:
# logger.exception(f"Need to specify the wires either when "
# f"initialize or when forward")
# raise err
if params is not None:
self.params = params
if self.params is not None:
self.params = self.params.unsqueeze(-1) if self.params.dim() == 1 \
else self.params
if wires is not None:
# update the wires
wires = [wires] if isinstance(wires, int) else wires
self.wires = wires
self.inverse = inverse
if self.static_mode:
self.parent_graph.add_op(self)
return
# non-parameterized gate
if self.params is None:
if self.n_wires is None:
self.func(q_device, self.wires, inverse=inverse)
else:
self.func(q_device, self.wires, n_wires=self.n_wires,
inverse=inverse)
else:
if isinstance(self.noise_model_tq, tq.NoiseModelTQPhase):
params = self.noise_model_tq.add_noise(self.params)
else:
params = self.params
if self.clifford_quantization:
params = CliffordQuantizer.quantize_sse(params)
if self.n_wires is None:
self.func(q_device, self.wires, params=params,
inverse=inverse)
else:
self.func(q_device, self.wires, params=params,
n_wires=self.n_wires, inverse=inverse)
if self.noise_model_tq is not None and \
self.noise_model_tq.is_add_noise:
noise_ops = self.noise_model_tq.sample_noise_op(self)
if len(noise_ops):
for noise_op in noise_ops:
noise_op(q_device)
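# Hedged usage sketch (illustrative only, relying on the torchquantum API used
# in this module): a parameterized gate is instantiated once and then applied
# to a QuantumDevice, optionally overriding wires/params at call time.
def _example_apply_rx():
    q_dev = tq.QuantumDevice(n_wires=2)
    rx = RX(has_params=True, trainable=True)   # RX is defined later in this file
    rx(q_dev, wires=0)                         # forward() dispatches to tqf.rx
    return q_dev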
class Observable(Operator, metaclass=ABCMeta):
def __init__(self,
has_params: bool = False,
trainable: bool = False,
init_params=None,
n_wires=None,
wires=None):
super().__init__(
has_params=has_params,
trainable=trainable,
init_params=init_params,
n_wires=n_wires,
wires=wires
)
self.return_type = None
def diagonalizing_gates(self):
raise NotImplementedError
class Operation(Operator, metaclass=ABCMeta):
def __init__(self,
has_params: bool = False,
trainable: bool = False,
init_params=None,
n_wires=None,
wires=None):
super().__init__(
has_params=has_params,
trainable=trainable,
init_params=init_params,
n_wires=n_wires,
wires=wires
)
@property
def matrix(self):
op_matrix = self._matrix(self.params)
return op_matrix
@property
def eigvals(self):
op_eigvals = self._eigvals(self.params)
return op_eigvals
def init_params(self):
raise NotImplementedError
def build_params(self, trainable):
parameters = nn.Parameter(torch.empty([1, self.num_params],
dtype=F_DTYPE))
parameters.requires_grad = True if trainable else False
self.register_parameter(f"{self.name}_params", parameters)
return parameters
def reset_params(self, init_params=None):
if init_params is not None:
if isinstance(init_params, Iterable):
for k, init_param in enumerate(init_params):
torch.nn.init.constant_(self.params[:, k], init_param)
else:
torch.nn.init.constant_(self.params, init_params)
else:
torch.nn.init.uniform_(self.params, -np.pi, np.pi)
class DiagonalOperation(Operation, metaclass=ABCMeta):
@classmethod
def _eigvals(cls, params):
raise NotImplementedError
@property
def eigvals(self):
return super().eigvals
@classmethod
def _matrix(cls, params):
return torch.diag(cls._eigvals(params))
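# Note: a subclass that only supplies _eigvals inherits its matrix from the
# diagonal above, e.g. eigenvalues (1, 1j) give torch.diag(...) == [[1, 0], [0, 1j]];
# the concrete diagonal gates below (S, T, CZ, ...) still override matrix/_matrix
# with entries taken from mat_dict.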
class Hadamard(Observable, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, -1], dtype=C_DTYPE)
matrix = mat_dict['hadamard']
func = staticmethod(tqf.hadamard)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
def diagonalizing_gates(self):
return [tq.RY(has_params=True,
trainable=False,
init_params=-np.pi / 4)]
class SHadamard(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 1
matrix = mat_dict['shadamard']
func = staticmethod(tqf.shadamard)
@classmethod
def _matrix(cls, params):
return cls.matrix
class PauliX(Observable, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, -1], dtype=C_DTYPE)
matrix = mat_dict['paulix']
func = staticmethod(tqf.paulix)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
def diagonalizing_gates(self):
return [tq.Hadamard()]
class PauliY(Observable, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, -1], dtype=C_DTYPE)
matrix = mat_dict['pauliy']
func = staticmethod(tqf.pauliy)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
def diagonalizing_gates(self):
return [tq.PauliZ(), tq.S(), tq.Hadamard()]
class PauliZ(Observable, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, -1], dtype=C_DTYPE)
matrix = mat_dict['pauliz']
func = staticmethod(tqf.pauliz)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
def diagonalizing_gates(self):
return []
class I(Observable, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, 1], dtype=C_DTYPE)
matrix = mat_dict['i']
func = staticmethod(tqf.i)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
def diagonalizing_gates(self):
return []
class S(DiagonalOperation, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, 1j], dtype=C_DTYPE)
matrix = mat_dict['s']
func = staticmethod(tqf.s)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
class T(DiagonalOperation, metaclass=ABCMeta):
num_params = 0
num_wires = 1
    eigvals = torch.tensor([1, np.exp(1j * np.pi / 4)], dtype=C_DTYPE)  # T = diag(1, e^{i*pi/4})
matrix = mat_dict['t']
func = staticmethod(tqf.t)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
class SX(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 1
eigvals = torch.tensor([1, 1j], dtype=C_DTYPE)
matrix = mat_dict['sx']
func = staticmethod(tqf.sx)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
class CNOT(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 2
matrix = mat_dict['cnot']
func = staticmethod(tqf.cnot)
@classmethod
def _matrix(cls, params):
return cls.matrix
class CZ(DiagonalOperation, metaclass=ABCMeta):
num_params = 0
num_wires = 2
    eigvals = torch.tensor([1, 1, 1, -1], dtype=C_DTYPE)
matrix = mat_dict['cz']
func = staticmethod(tqf.cz)
@classmethod
def _matrix(cls, params):
return cls.matrix
@classmethod
def _eigvals(cls, params):
return cls.eigvals
class CY(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 2
matrix = mat_dict['cy']
func = staticmethod(tqf.cy)
@classmethod
def _matrix(cls, params):
return cls.matrix
class SWAP(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 2
matrix = mat_dict['swap']
func = staticmethod(tqf.swap)
@classmethod
def _matrix(cls, params):
return cls.matrix
class SSWAP(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 2
matrix = mat_dict['sswap']
func = staticmethod(tqf.sswap)
@classmethod
def _matrix(cls, params):
return cls.matrix
class CSWAP(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 3
matrix = mat_dict['cswap']
func = staticmethod(tqf.cswap)
@classmethod
def _matrix(cls, params):
return cls.matrix
class Toffoli(Operation, metaclass=ABCMeta):
num_params = 0
num_wires = 3
matrix = mat_dict['toffoli']
func = staticmethod(tqf.toffoli)
@classmethod
def _matrix(cls, params):
return cls.matrix
class RX(Operation, metaclass=ABCMeta):
num_params = 1
num_wires = 1
func = staticmethod(tqf.rx)
@classmethod
def _matrix(cls, params):
return tqf.rx_matrix(params)
class RY(Operation, metaclass=ABCMeta):
num_params = 1
num_wires = 1
func = staticmethod(tqf.ry)
@classmethod
def _matrix(cls, params):
return tqf.ry_matrix(params)
class RZ(DiagonalOperation, metaclass=ABCMeta):
num_params = 1
num_wires = 1
func = staticmethod(tqf.rz)
@classmethod
def _matrix(cls, params):
return tqf.rz_matrix(params)
class PhaseShift(DiagonalOperation, metaclass=ABCMeta):
num_params = 1
num_wires = 1
func = staticmethod(tqf.phaseshift)
@classmethod
def _matrix(cls, params):
return tqf.phaseshift_matrix(params)
class Rot(Operation, metaclass=ABCMeta):
num_params = 3
num_wires = 1
func = staticmethod(tqf.rot)
@classmethod
def _matrix(cls, params):
return tqf.rot_matrix(params)
class MultiRZ(DiagonalOperation, metaclass=ABCMeta):
num_params = 1
num_wires = AnyWires
func = staticmethod(tqf.multirz)
@classmethod
def _matrix(cls, params, n_wires):
return tqf.multirz_matrix(params, n_wires)
class RXX(Operation, metaclass=ABCMeta):
num_params = 1
num_wires = 2
func = staticmethod(tqf.rxx)
@classmethod
def _matrix(cls, params):
return tqf.rxx_matrix(params)
class RYY(Operation, metaclass=ABCMeta):
    num_params = 1
    num_wires = 2
    func = staticmethod(tqf.ryy)

    @classmethod
    def _matrix(cls, params):
        return tqf.ryy_matrix(params)
msg.raw[:msglen.value]
def crypto_sign_verify_detached(sig, msg, pk):
if None in (sig, msg, pk):
raise ValueError
if len(sig) != crypto_sign_BYTES:
raise ValueError("invalid sign")
if not (len(pk) == crypto_sign_PUBLICKEYBYTES): raise ValueError('Truncated public key')
__check(sodium.crypto_sign_verify_detached(sig, msg, ctypes.c_ulonglong(len(msg)), pk))
# crypto_sign_init(crypto_sign_state *state);
@sodium_version(1, 0, 12)
def crypto_sign_init():
state = CryptoSignState()
__check(sodium.crypto_sign_init(ctypes.byref(state)))
return state
# crypto_sign_update(crypto_sign_state *state, const unsigned char *m, unsigned long long mlen);
@sodium_version(1, 0, 12)
def crypto_sign_update(state, m):
if(not isinstance(state, CryptoSignState)):
raise TypeError("state is not CryptoSignState")
if m is None:
raise ValueError("invalid parameters")
__check(sodium.crypto_sign_update(ctypes.byref(state), m, ctypes.c_ulonglong(len(m))))
# crypto_sign_final_create(crypto_sign_state *state, unsigned char *sig, unsigned long long *siglen_p, const unsigned char *sk);
@sodium_version(1, 0, 12)
def crypto_sign_final_create(state, sk):
if(not isinstance(state, CryptoSignState)):
raise TypeError("state is not CryptoSignState")
if sk is None:
raise ValueError("invalid parameters")
if len(sk) != crypto_sign_SECRETKEYBYTES:
raise ValueError("invalid secret key")
buf = ctypes.create_string_buffer(crypto_sign_BYTES)
__check(sodium.crypto_sign_final_create(ctypes.byref(state), buf, ctypes.c_void_p(0), sk))
return buf.raw
# crypto_sign_final_verify(crypto_sign_state *state, unsigned char *sig, const unsigned char *sk);
@sodium_version(1, 0, 12)
def crypto_sign_final_verify(state, sig, pk):
if(not isinstance(state, CryptoSignState)):
raise TypeError("state is not CryptoSignState")
if None in (sig, pk):
raise ValueError("invalid parameters")
if len(sig) != crypto_sign_BYTES:
raise ValueError("invalid signature")
if len(pk) != crypto_sign_PUBLICKEYBYTES:
raise ValueError("invalid public key")
__check(sodium.crypto_sign_final_verify(ctypes.byref(state), sig, pk))
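# Hedged usage sketch (assumes crypto_sign_keypair from this module, available
# elsewhere in the file): multi-part signing with the init/update/final API
# introduced in libsodium 1.0.12.
def _example_sign_multipart():
    pk, sk = crypto_sign_keypair()
    st = crypto_sign_init()
    crypto_sign_update(st, b"part one")
    crypto_sign_update(st, b"part two")
    sig = crypto_sign_final_create(st, sk)
    st = crypto_sign_init()
    crypto_sign_update(st, b"part one")
    crypto_sign_update(st, b"part two")
    crypto_sign_final_verify(st, sig, pk)   # raises on failure
    return sig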
# int crypto_stream_salsa20(unsigned char *c, unsigned long long clen,
# const unsigned char *n, const unsigned char *k);
def crypto_stream(cnt, nonce=None, key=None):
res = ctypes.create_string_buffer(cnt)
if not nonce:
nonce = randombytes(crypto_stream_NONCEBYTES)
if not key:
key = randombytes(crypto_stream_KEYBYTES)
__check(sodium.crypto_stream(res, ctypes.c_ulonglong(cnt), nonce, key))
return res.raw
# crypto_stream_salsa20_xor(unsigned char *c, const unsigned char *m, unsigned long long mlen,
# const unsigned char *n, const unsigned char *k)
def crypto_stream_xor(msg, cnt, nonce, key):
res = ctypes.create_string_buffer(cnt)
if len(nonce) != crypto_stream_NONCEBYTES: raise ValueError("invalid nonce")
if len(key) != crypto_stream_KEYBYTES: raise ValueError("invalid key")
__check(sodium.crypto_stream_xor(res, msg, ctypes.c_ulonglong(cnt), nonce, key))
return res.raw
def crypto_sign_pk_to_box_pk(pk):
if pk is None:
raise ValueError
if not (len(pk) == crypto_sign_PUBLICKEYBYTES): raise ValueError('Truncated public key')
res = ctypes.create_string_buffer(crypto_box_PUBLICKEYBYTES)
__check(sodium.crypto_sign_ed25519_pk_to_curve25519(ctypes.byref(res), pk))
return res.raw
def crypto_sign_sk_to_box_sk(sk):
if sk is None:
raise ValueError
if not (len(sk) == crypto_sign_SECRETKEYBYTES): raise ValueError('Truncated secret key')
res = ctypes.create_string_buffer(crypto_box_SECRETKEYBYTES)
__check(sodium.crypto_sign_ed25519_sk_to_curve25519(ctypes.byref(res), sk))
return res.raw
def crypto_sign_sk_to_seed(sk):
if sk is None:
raise ValueError
if not (len(sk) == crypto_sign_SECRETKEYBYTES): raise ValueError('Truncated secret key')
seed = ctypes.create_string_buffer(crypto_sign_SEEDBYTES)
__check(sodium.crypto_sign_ed25519_sk_to_seed(ctypes.byref(seed), sk))
return seed.raw
# int crypto_pwhash(unsigned char * const out,
# unsigned long long outlen,
# const char * const passwd,
# unsigned long long passwdlen,
# const unsigned char * const salt,
# unsigned long long opslimit,
# size_t memlimit, int alg);
@sodium_version(1, 0, 9)
@encode_strings
def crypto_pwhash(outlen, passwd, salt, opslimit, memlimit, alg=crypto_pwhash_ALG_DEFAULT):
if None in (outlen, passwd, salt, opslimit, memlimit):
raise ValueError("invalid parameters")
if len(salt) != crypto_pwhash_SALTBYTES: raise ValueError("invalid salt")
if not (crypto_pwhash_BYTES_MIN <= outlen <= crypto_pwhash_BYTES_MAX): raise ValueError("invalid hash len")
if not (crypto_pwhash_PASSWD_MIN <= len(passwd) <= crypto_pwhash_PASSWD_MAX): raise ValueError("invalid passwd len")
if not (crypto_pwhash_OPSLIMIT_MIN <= opslimit <= crypto_pwhash_OPSLIMIT_MAX): raise ValueError("invalid opslimit")
if not (crypto_pwhash_MEMLIMIT_MIN <= memlimit <= crypto_pwhash_MEMLIMIT_MAX): raise ValueError("invalid memlimit")
out = ctypes.create_string_buffer(outlen)
__check(sodium.crypto_pwhash(ctypes.byref(out), ctypes.c_ulonglong(outlen), passwd, ctypes.c_ulonglong(len(passwd)), salt, ctypes.c_ulonglong(opslimit), ctypes.c_size_t(memlimit), ctypes.c_int(alg)))
return out.raw
# int crypto_pwhash_str(char out[crypto_pwhash_STRBYTES],
# const char * const passwd,
# unsigned long long passwdlen,
# unsigned long long opslimit,
# size_t memlimit);
@sodium_version(1, 0, 9)
@encode_strings
def crypto_pwhash_str(passwd, opslimit, memlimit):
if None in (passwd, opslimit, memlimit):
raise ValueError("invalid parameters")
if not (crypto_pwhash_PASSWD_MIN <= len(passwd) <= crypto_pwhash_PASSWD_MAX): raise ValueError("invalid passwd len")
if not (crypto_pwhash_OPSLIMIT_MIN <= opslimit <= crypto_pwhash_OPSLIMIT_MAX): raise ValueError("invalid opslimit")
if not (crypto_pwhash_MEMLIMIT_MIN <= memlimit <= crypto_pwhash_MEMLIMIT_MAX): raise ValueError("invalid memlimit")
out = ctypes.create_string_buffer(crypto_pwhash_STRBYTES)
__check(sodium.crypto_pwhash_str(ctypes.byref(out), passwd, ctypes.c_ulonglong(len(passwd)), ctypes.c_ulonglong(opslimit), ctypes.c_size_t(memlimit)))
return out.raw
# int crypto_pwhash_str_verify(const char str[crypto_pwhash_STRBYTES],
# const char * const passwd,
# unsigned long long passwdlen);
@sodium_version(1, 0, 9)
@encode_strings
def crypto_pwhash_str_verify(pstr, passwd):
if None in (pstr, passwd) or len(pstr) != crypto_pwhash_STRBYTES:
raise ValueError("invalid parameters")
    if not (crypto_pwhash_PASSWD_MIN <= len(passwd) <= crypto_pwhash_PASSWD_MAX): raise ValueError("invalid passwd len")
return sodium.crypto_pwhash_str_verify(pstr, passwd, ctypes.c_ulonglong(len(passwd))) == 0
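# Hedged usage sketch (assumes the INTERACTIVE opslimit/memlimit constants are
# exported by this module, as in libsodium): hash a password for storage and
# verify it later.
def _example_pwhash_str():
    stored = crypto_pwhash_str(b"correct horse battery staple",
                               crypto_pwhash_OPSLIMIT_INTERACTIVE,
                               crypto_pwhash_MEMLIMIT_INTERACTIVE)
    return crypto_pwhash_str_verify(stored, b"correct horse battery staple")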
# int crypto_pwhash_scryptsalsa208sha256(unsigned char * const out,
# unsigned long long outlen,
# const char * const passwd,
# unsigned long long passwdlen,
# const unsigned char * const salt,
# unsigned long long opslimit,
# size_t memlimit);
def crypto_pwhash_scryptsalsa208sha256(outlen, passwd, salt, opslimit, memlimit):
if None in (outlen, passwd, salt, opslimit, memlimit):
raise ValueError
if len(salt) != crypto_pwhash_scryptsalsa208sha256_SALTBYTES: raise ValueError("invalid salt")
if not (crypto_pwhash_scryptsalsa208sha256_BYTES_MIN <= outlen <= crypto_pwhash_scryptsalsa208sha256_BYTES_MAX): raise ValueError("invalid hash len")
if not (crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN <= len(passwd) <= crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX): raise ValueError("invalid passwd len")
if not (crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN <= opslimit <= crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX): raise ValueError("invalid opslimit")
if not (crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN <= memlimit <= crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX): raise ValueError("invalid memlimit")
out = ctypes.create_string_buffer(outlen)
__check(sodium.crypto_pwhash_scryptsalsa208sha256(out, ctypes.c_ulonglong(outlen), passwd, ctypes.c_ulonglong(len(passwd)), salt, ctypes.c_ulonglong(opslimit), ctypes.c_size_t(memlimit)))
return out.raw
# int crypto_pwhash_scryptsalsa208sha256_str(char out[crypto_pwhash_scryptsalsa208sha256_STRBYTES],
# const char * const passwd,
# unsigned long long passwdlen,
# unsigned long long opslimit,
# size_t memlimit);
def crypto_pwhash_scryptsalsa208sha256_str(passwd, opslimit, memlimit):
if None in (passwd, opslimit, memlimit):
raise ValueError
if not (crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN <= len(passwd) <= crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX): raise ValueError("invalid passwd len")
if not (crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN <= opslimit <= crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX): raise ValueError("invalid opslimit")
if not (crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN <= memlimit <= crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX): raise ValueError("invalid memlimit")
out = ctypes.create_string_buffer(crypto_pwhash_scryptsalsa208sha256_STRBYTES)
__check(sodium.crypto_pwhash_scryptsalsa208sha256_str(out, passwd, ctypes.c_ulonglong(len(passwd)), ctypes.c_ulonglong(opslimit), ctypes.c_size_t(memlimit)))
return out.raw
#int crypto_pwhash_scryptsalsa208sha256_str_verify(const char str[crypto_pwhash_scryptsalsa208sha256_STRBYTES],
# const char * const passwd,
# unsigned long long passwdlen);
def crypto_pwhash_scryptsalsa208sha256_str_verify(stored, passwd):
if stored is None or passwd is None:
raise ValueError
if not (crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN <= len(passwd) <= crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX): raise ValueError("invalid passwd len")
if len(stored) != crypto_pwhash_scryptsalsa208sha256_STRBYTES: raise ValueError('invalid str size')
__check(sodium.crypto_pwhash_scryptsalsa208sha256_str_verify(stored, passwd, ctypes.c_ulonglong(len(passwd))))
# int crypto_sign_ed25519_sk_to_pk(unsigned char *pk, const unsigned char *sk)
def crypto_sign_sk_to_pk(sk):
if sk is None or len(sk) != crypto_sign_ed25519_SECRETKEYBYTES:
raise ValueError
res = ctypes.create_string_buffer(crypto_sign_ed25519_PUBLICKEYBYTES)
__check(sodium.crypto_sign_ed25519_sk_to_pk(ctypes.byref(res), sk))
return res.raw
# int crypto_hash_sha256(unsigned char *out, const unsigned char *in,
# unsigned long long inlen);
def crypto_hash_sha256(message):
if message is None:
raise ValueError("invalid parameters")
out = ctypes.create_string_buffer(crypto_hash_sha256_BYTES)
__check(sodium.crypto_hash_sha256(out, message, ctypes.c_ulonglong(len(message))))
return out.raw
# int crypto_hash_sha512(unsigned char *out, const unsigned char *in,
# unsigned long long inlen);
def crypto_hash_sha512(message):
if message is None:
raise ValueError("invalid parameters")
out = ctypes.create_string_buffer(crypto_hash_sha512_BYTES)
__check(sodium.crypto_hash_sha512(out, message, ctypes.c_ulonglong(len(message))))
return out.raw
# int crypto_hash_sha512_init(crypto_hash_sha512_state *state)
def crypto_hash_sha512_init():
state = ctypes.create_string_buffer(crypto_hash_sha512_STATEBYTES)
__check(sodium.crypto_hash_sha512_init(state))
return state
# int crypto_hash_sha512_update(crypto_hash_sha512_state *state, const unsigned char *in, unsigned long long inlen)
def crypto_hash_sha512_update(state, data):
__check(sodium.crypto_hash_sha512_update(state,data,ctypes.c_ulonglong(len(data))))
return state
# int crypto_hash_sha512_final(crypto_hash_sha512_state *state, unsigned char *out)
def crypto_hash_sha512_final(state):
out = ctypes.create_string_buffer(crypto_hash_sha512_BYTES)
__check(sodium.crypto_hash_sha512_final(state, out))
return out.raw
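# Hedged usage sketch: incremental SHA-512 over several chunks matches hashing
# the concatenated message in one call.
def _example_sha512_incremental():
    st = crypto_hash_sha512_init()
    crypto_hash_sha512_update(st, b"hello ")
    crypto_hash_sha512_update(st, b"world")
    assert crypto_hash_sha512_final(st) == crypto_hash_sha512(b"hello world")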
# int crypto_kx_keypair(unsigned char pk[crypto_kx_PUBLICKEYBYTES],
# unsigned char sk[crypto_kx_SECRETKEYBYTES]);
@sodium_version(1, 0, 12)
def crypto_kx_keypair():
pk = ctypes.create_string_buffer(crypto_kx_PUBLICKEYBYTES)
sk = ctypes.create_string_buffer(crypto_kx_SECRETKEYBYTES)
__check(sodium.crypto_kx_keypair(pk, sk))
return pk.raw, sk.raw
# int crypto_kx_client_session_keys(unsigned char rx[crypto_kx_SESSIONKEYBYTES],
# unsigned char tx[crypto_kx_SESSIONKEYBYTES],
# const unsigned char client_pk[crypto_kx_PUBLICKEYBYTES],
# const unsigned char client_sk[crypto_kx_SECRETKEYBYTES],
# const unsigned char server_pk[crypto_kx_PUBLICKEYBYTES]);
@sodium_version(1, 0, 12)
def crypto_kx_client_session_keys(client_pk, client_sk, server_pk):
if None in (client_pk, client_sk, server_pk):
raise ValueError("invalid parameters")
if not (len(client_pk) == crypto_kx_PUBLICKEYBYTES): raise ValueError("Invalid client public key")
if not (len(client_sk) == crypto_kx_SECRETKEYBYTES): raise ValueError("Invalid client secret key")
if not (len(server_pk) == crypto_kx_PUBLICKEYBYTES): raise ValueError("Invalid server public key")
rx = ctypes.create_string_buffer(crypto_kx_SESSIONKEYBYTES)
tx = ctypes.create_string_buffer(crypto_kx_SESSIONKEYBYTES)
__check(sodium.crypto_kx_client_session_keys(rx, tx, client_pk, client_sk, server_pk))
return rx.raw, tx.raw
# int crypto_kx_server_session_keys(unsigned char rx[crypto_kx_SESSIONKEYBYTES],
# unsigned char tx[crypto_kx_SESSIONKEYBYTES],
# const unsigned char server_pk[crypto_kx_PUBLICKEYBYTES],
# const unsigned char server_sk[crypto_kx_SECRETKEYBYTES],
# const unsigned char client_pk[crypto_kx_PUBLICKEYBYTES]);
@sodium_version(1, 0, 12)
def crypto_kx_server_session_keys(server_pk, server_sk, client_pk):
if None in (server_pk, server_sk, client_pk):
raise ValueError("invalid parameters")
if not (len(server_pk) == crypto_kx_PUBLICKEYBYTES): raise ValueError("Invalid server public key")
if not (len(server_sk) == crypto_kx_SECRETKEYBYTES): raise ValueError("Invalid server secret key")
if not (len(client_pk) == crypto_kx_PUBLICKEYBYTES): raise ValueError("Invalid client public key")
rx = ctypes.create_string_buffer(crypto_kx_SESSIONKEYBYTES)
tx = ctypes.create_string_buffer(crypto_kx_SESSIONKEYBYTES)
__check(sodium.crypto_kx_server_session_keys(rx, tx, server_pk, server_sk, client_pk))
return rx.raw, tx.raw
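# --- illustrative usage (added sketch; not part of the original bindings) ---
# How the crypto_kx wrappers above combine into a client/server session-key
# handshake (libsodium >= 1.0.12). Wrapped in a helper so importing the module
# has no side effects; the function name is hypothetical.
def _example_kx_handshake():
    client_pk, client_sk = crypto_kx_keypair()
    server_pk, server_sk = crypto_kx_keypair()
    c_rx, c_tx = crypto_kx_client_session_keys(client_pk, client_sk, server_pk)
    s_rx, s_tx = crypto_kx_server_session_keys(server_pk, server_sk, client_pk)
    # the client's transmit key is the server's receive key, and vice versa
    assert c_tx == s_rx and c_rx == s_tx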
# void sodium_increment(unsigned char *n, const size_t nlen)
@sodium_version(1, 0, 4)
def sodium_increment(n):
sodium.sodium_increment(n, ctypes.c_size_t(len(n)))
# int crypto_core_ristretto255_is_valid_point(const unsigned char *p);
@sodium_version(1, 0, 18)
def crypto_core_ristretto255_is_valid_point(p):
return sodium.crypto_core_ristretto255_is_valid_point(p) == 1
# int crypto_core_ristretto255_from_hash(unsigned char *p, const unsigned char *r);
@sodium_version(1, 0, 18)
def crypto_core_ristretto255_from_hash(r):
if len(r) != crypto_core_ristretto255_HASHBYTES: raise ValueError("Invalid parameter, must be {} bytes".format(crypto_core_ristretto255_HASHBYTES))
p = ctypes.create_string_buffer(crypto_core_ristretto255_BYTES)
__check(sodium.crypto_core_ristretto255_from_hash(p,r))
return p.raw
# int crypto_scalarmult_ristretto255(unsigned char *q, const unsigned char *n, const unsigned char *p);
@sodium_version(1, 0, 18)
def crypto_scalarmult_ristretto255(n, p):
if None in (n,p):
raise ValueError("invalid parameters")
if len(n) != crypto_core_ristretto255_SCALARBYTES: raise ValueError("truncated scalar")
if len(p) != crypto_core_ristretto255_BYTES: raise ValueError("truncated point")
buf = ctypes.create_string_buffer(crypto_core_ristretto255_BYTES)
__check(sodium.crypto_scalarmult_ristretto255(buf, n, p))
return buf.raw
# int crypto_scalarmult_ristretto255_base(unsigned char *q, const unsigned char *n);
@sodium_version(1, 0, 18)
def crypto_scalarmult_ristretto255_base(n):
if n is None:
raise ValueError("invalid parameters")
if len(n) != crypto_core_ristretto255_SCALARBYTES: raise ValueError("truncated scalar")
buf = ctypes.create_string_buffer(crypto_core_ristretto255_BYTES)
__check(sodium.crypto_scalarmult_ristretto255_base(buf, n))
return buf.raw
# void crypto_core_ristretto255_scalar_random(unsigned char *r);
@sodium_version(1, 0, 18)
def crypto_core_ristretto255_scalar_random():
r = ctypes.create_string_buffer(crypto_core_ristretto255_SCALARBYTES)
sodium.crypto_core_ristretto255_scalar_random(r)
return r.raw
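# --- illustrative usage (added sketch; not part of the original bindings) ---
# A minimal Diffie-Hellman-style exchange over ristretto255 using the wrappers
# above (libsodium >= 1.0.18). Wrapped in a helper so importing the module has
# no side effects; the function name is hypothetical.
def _example_ristretto255_dh():
    a = crypto_core_ristretto255_scalar_random()  # Alice's secret scalar
    b = crypto_core_ristretto255_scalar_random()  # Bob's secret scalar
    A = crypto_scalarmult_ristretto255_base(a)    # Alice's public point
    B = crypto_scalarmult_ristretto255_base(b)    # Bob's public point
    # both sides derive the same shared point: a*(b*G) == b*(a*G)
    assert crypto_scalarmult_ristretto255(a, B) == crypto_scalarmult_ristretto255(b, A)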
# int crypto_core_ristretto255_scalar_invert(unsigned char *recip, const unsigned char *s);
@sodium_version(1, 0, 18)
def crypto_core_ristretto255_scalar_invert(s):
if not s or len(s)!=crypto_core_ristretto255_SCALARBYTES: raise ValueError("Invalid param, must be {} bytes".format(crypto_core_ristretto255_SCALARBYTES))
r = ctypes.create_string_buffer(crypto_core_ristretto255_SCALARBYTES)
__check(sodium.crypto_core_ristretto255_scalar_invert(r,s))
return r.raw
# void crypto_core_ristretto255_scalar_reduce(unsigned char *r, const unsigned char *s);
@sodium_version(1, 0, 18)
def crypto_core_ristretto255_scalar_reduce(s):
if not s or len(s)!=crypto_core_ristretto255_NONREDUCEDSCALARBYTES: raise ValueError("Invalid parameter: must be {} bytes".format(crypto_core_ristretto255_NONREDUCEDSCALARBYTES))
    r = ctypes.create_string_buffer(crypto_core_ristretto255_SCALARBYTES)
    sodium.crypto_core_ristretto255_scalar_reduce(r, s)
    return r.raw
from zeropdk.layout import insert_shape
from zeropdk.layout.geometry import cross_prod, project, rotate90
import klayout.db as kdb
def box(point1, point3, ex, ey):
"""Returns a polygon of a box defined by point1, point3 and orientation ex.
p2 ----- p3
| |
p1 ----- p4
ex --->
"""
point2 = project(point3 - point1, ey, ex) * ey + point1
point4 = point1 + point3 - point2
return kdb.DSimplePolygon([point1, point2, point3, point4])
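# Worked example (added for clarity; not part of the original module): with
# axis-aligned orientations ex=(1, 0) and ey=(0, 1), point1=(0, 0) and
# point3=(10, 5) should give point2=(0, 5) and point4=(10, 0), i.e. the
# axis-aligned rectangle spanned by point1 and point3, matching the sketch in
# the docstring above.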
def layout_box(cell, layer, point1, point3, ex):
"""Lays out a box
Args:
point1: bottom-left point
point3: top-right point
"""
ey = rotate90(ex)
polygon = box(point1, point3, ex, ey)
insert_shape(cell, layer, polygon)
return polygon
def rectangle(center, width, height, ex, ey):
"""
returns the polygon of a rectangle centered at center,
aligned with ex, with width and height in microns
Args:
center: pya.DPoint (um units)
width (x axis): float (um units)
height (y axis): float (um unit)
ex: orientation of x axis
ey: orientation of y axis
"""
    if cross_prod(ex, ey) == 0:
        raise RuntimeError("ex={} and ey={} are parallel (linearly dependent).".format(repr(ex), repr(ey)))
point1 = center - width / 2 * ex - height / 2 * ey
point3 = center + width / 2 * ex + height / 2 * ey
return box(point1, point3, ex=ex, ey=ey)
def square(center, width, ex, ey):
"""
returns the polygon of a square centered at center,
aligned with ex, with width in microns
Args:
center: pya.DPoint (um units)
width: float (um units)
ex: orientation
"""
return rectangle(center, width, width, ex=ex, ey=ey)
def layout_square(cell, layer, center, width, ex=None):
"""Lays out a square in a layer
Args:
center: pya.DPoint (um units)
width: float (um units)
ex: orientation
"""
if ex is None:
ex = pya.DPoint(1, 0)
ey = rotate90(ex)
shape = square(center, width, ex, ey)
insert_shape(cell, layer, shape)
return shape
def layout_rectangle(cell, layer, center, width, height, ex):
"""Lays out a rectangle
Args:
center: pya.DPoint (um units)
width: float (um units)
height: float (um unit)
ex: orientation
"""
ey = rotate90(ex)
shape = rectangle(center, width, height, ex, ey)
insert_shape(cell, layer, shape)
return shape
# TODO: Reorganize later
pya = kdb
import numpy as np
from math import pi
def layout_path(cell, layer, point_iterator, w):
""" Simple wrapper for pya.DPath."""
path = pya.DPath(list(point_iterator), w, 0, 0).to_itype(cell.layout().dbu)
cell.shapes(layer).insert(pya.Path.from_dpath(path))
def layout_path_with_ends(cell, layer, point_iterator, w):
""" Simple wrapper for pya.DPath."""
dpath = pya.DPath(list(point_iterator), w, w / 2, w / 2)
cell.shapes(layer).insert(dpath)
def append_relative(points, *relative_vectors):
"""Appends to list of points in relative steps:
It takes a list of points, and adds new points to it in relative coordinates.
For example, if you call append_relative([A, B], C, D), the result will be [A, B, B+C, B+C+D].
"""
try:
if len(points) > 0:
origin = points[-1]
except TypeError:
raise TypeError("First argument must be a list of points")
for vector in relative_vectors:
points.append(origin + vector)
origin = points[-1]
return points
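# Illustrative usage (added sketch; not part of the original module), assuming
# the relative steps are given as DVectors so that DPoint + DVector is valid:
#   pts = append_relative([pya.DPoint(0, 0), pya.DPoint(10, 0)],
#                         pya.DVector(0, 5), pya.DVector(-10, 0))
#   # -> [(0, 0), (10, 0), (10, 5), (0, 5)]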
from zeropdk.layout.algorithms import sample_function
def layout_ring(cell, layer, center, r, w):
"""
function to produce the layout of a ring
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint
r: radius
w: waveguide width
units in microns
"""
# outer arc
# optimal sampling
assert r - w / 2 > 0
radius = r + w / 2
arc_function = lambda t: np.array([radius * np.cos(t), radius * np.sin(t)])
t, coords = sample_function(arc_function, [0, 2 * pi], tol=0.002 / radius)
    # create original waveguide polygon prior to clipping and rotation
points_hull = [center + pya.DPoint(x, y) for x, y in zip(*coords)]
del points_hull[-1]
radius = r - w / 2
arc_function = lambda t: np.array([radius * np.cos(t), radius * np.sin(t)])
t, coords = sample_function(arc_function, [0, 2 * pi], tol=0.002 / radius)
    # create original waveguide polygon prior to clipping and rotation
points_hole = [center + pya.DPoint(x, y) for x, y in zip(*coords)]
del points_hole[-1]
dpoly = pya.DPolygon(list(reversed(points_hull)))
dpoly.insert_hole(points_hole)
dpoly.compress(True)
insert_shape(cell, layer, dpoly)
return dpoly
def layout_circle(cell, layer, center, r,
ex=None,
x_bounds=(-np.inf, np.inf),
y_bounds=(-np.inf, np.inf)
):
"""
function to produce the layout of a filled circle
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint
r: radius
x_bounds and y_bounds relative to the center, before rotation by ex.
units in microns
optimal sampling
"""
arc_function = lambda t: np.array([r * np.cos(t), r * np.sin(t)])
t, coords = sample_function(arc_function, [0, 2 * np.pi - 0.001], tol=0.002 / r)
# dbu = cell.layout().dbu
dpolygon = pya.DSimplePolygon([pya.DPoint(x, y) for x, y in zip(*coords)])
# clip dpolygon to bounds
dpolygon.clip(x_bounds=x_bounds, y_bounds=y_bounds)
# Transform points (translation + rotation)
dpolygon.transform_and_rotate(center, ex)
dpolygon.compress(True)
insert_shape(cell, layer, dpolygon)
return dpolygon
layout_disk = layout_circle
def layout_donut(cell, layer, center, r1, r2):
"""Layout donut shape.
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint (not affected by ex)
r1: internal radius
r2: external radius
"""
assert r2 > r1
arc_function = lambda t: np.array([center.x + r2 * np.cos(t), center.y + r2 * np.sin(t)])
t, coords = sample_function(arc_function, [0, 2 * np.pi - 0.001], tol=0.002 / r2)
external_points = [pya.DPoint(x, y) for x, y in zip(*coords)]
arc_function = lambda t: np.array([center.x + r1 * np.cos(-t), center.y + r1 * np.sin(-t)])
t, coords = sample_function(arc_function, [0, 2 * np.pi - 0.001], tol=0.002 / r1)
internal_points = [pya.DPoint(x, y) for x, y in zip(*coords)]
dpoly = pya.DPolygon(external_points)
dpoly.insert_hole(internal_points)
insert_shape(cell, layer, dpoly)
return dpoly
def layout_section(
cell,
layer,
center,
r2,
theta_start,
theta_end,
ex=None,
x_bounds=(-np.inf, np.inf),
y_bounds=(-np.inf, np.inf),
):
"""Layout section of a circle.
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint (not affected by ex)
r2: radius
theta_start, theta_end: angle in radians
x_bounds and y_bounds relative to the center, before rotation by ex.
units in microns
returns a dpolygon
"""
assert r2 > 0
# optimal sampling
arc_function = lambda t: np.array([r2 * np.cos(t), r2 * np.sin(t)])
t, coords = sample_function(arc_function, [theta_start, theta_end], tol=0.002 / r2)
# # This yields a better polygon
if theta_end < theta_start:
theta_start, theta_end = theta_end, theta_start
coords = np.insert(
coords, 0, arc_function(theta_start - 0.001), axis=1
) # start the waveguide a little bit before
coords = np.append(
coords, np.atleast_2d(arc_function(theta_end + 0.001)).T, axis=1
) # finish the waveguide a little bit after
    # create original waveguide polygon prior to clipping and rotation
dpoints_list = [pya.DPoint(x, y) for x, y in zip(*coords)]
dpolygon = pya.DSimplePolygon(dpoints_list + [pya.DPoint(0, 0)])
# clip dpolygon to bounds
dpolygon.clip(x_bounds=x_bounds, y_bounds=y_bounds)
# Transform points (translation + rotation)
dpolygon.transform_and_rotate(center, ex)
dpolygon.compress(True)
dpolygon.layout(cell, layer)
return dpolygon
def layout_arc(
cell,
layer,
center,
r,
w,
theta_start,
theta_end,
ex=None,
x_bounds=(-np.inf, np.inf),
y_bounds=(-np.inf, np.inf),
):
"""function to produce the layout of an arc
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint (not affected by ex)
r: radius
w: waveguide width
theta_start, theta_end: angle in radians
x_bounds and y_bounds relative to the center, before rotation by ex.
units in microns
returns a dpolygon
"""
# fetch the database parameters
if r <= 0:
raise RuntimeError(f"Please give me a positive radius. Bad r={r}")
# optimal sampling
if theta_end < theta_start:
theta_start, theta_end = theta_end, theta_start
arc_function = lambda t: np.array([r * np.cos(t), r * np.sin(t)])
t, coords = sample_function(arc_function, [theta_start, theta_end], tol=0.002 / r)
dt = 0.0001
# # This yields a better polygon
insert_at = np.argmax(theta_start + dt < t)
t = np.insert(t, insert_at, theta_start + dt)
coords = np.insert(
coords, insert_at, arc_function(theta_start + dt), axis=1
) # start the second point a little bit after the first
insert_at = np.argmax(theta_end - dt < t)
t = np.insert(t, insert_at, theta_end - dt)
coords = np.insert(
coords, insert_at, arc_function(theta_end - dt), axis=1
) # start the second to last point a little bit before the final
    # create original waveguide polygon prior to clipping and rotation
dpoints_list = [pya.DPoint(x, y) for x, y in zip(*coords)]
from zeropdk.layout import waveguide_dpolygon
dpolygon = waveguide_dpolygon(dpoints_list, w, cell.layout().dbu)
# clip dpolygon to bounds
dpolygon.clip(x_bounds=x_bounds, y_bounds=y_bounds)
# Transform points (translation + rotation)
dpolygon.transform_and_rotate(center, ex)
dpolygon.compress(True)
dpolygon.layout(cell, layer)
return dpolygon
def layout_arc_degree(
cell,
layer,
center,
r,
w,
theta_start,
theta_end,
ex=None,
    x_bounds=(-np.inf, np.inf),
# repo: arcticmatter/pipresents-beep
"""
HTML parser
"""
import os
import webbrowser
import tkinter as tk
from tkinter import font
from copy import deepcopy
from PIL import Image, ImageTk
from html.parser import HTMLParser
from collections import OrderedDict
#__________________________________________________________________________________________________
class Defs():
DEFAULT_TEXT_FONT_FAMILY = ("Segoe ui", "Calibri", "Helvetica", "TkTextFont")
FONT_SIZE = 9
PREFORMATTED_FONT_FAMILY = ("Courier", "DejaVu Sans Mono", "TkFixedFont")
HEADINGS_FONT_SIZE = {
'h1':21,
'h2':16,
'h3':12,
'h4':11,
'h5':8,
'h6':7,
}
class HTML:
#----------------------------------------------------------------------------------------------
"""
List of supported HTML tags and attrs
"""
class Tag():
BR = 'br'
UL = 'ul'
OL = 'ol'
LI = 'li'
IMG = 'img'
A = 'a'
B = 'b'
STRONG = 'strong'
I = 'i'
EM = 'em'
U = 'u'
MARK = 'mark'
SPAN = 'span'
DIV = 'div'
P = 'p'
PRE = 'pre'
CODE = 'code'
H1 = 'h1'
H2 = 'h2'
H3 = 'h3'
H4 = 'h4'
H5 = 'h5'
H6 = 'h6'
class Attrs():
STYLE = 'style'
HREF ='href'
SRC = 'src'
WIDTH = 'width'
HEIGHT = 'height'
TYPE = 'type'
class TypeOrderedList():
_1 = '1'
a = 'a'
A = 'A'
class Style():
COLOR = 'color'
BACKGROUD_COLOR = 'background-color'
FONT_FAMILY = 'font-family'
FONT_SIZE = 'font-size'
TEXT_ALIGN = 'text-align'
TEXT_DECORATION = 'text-decoration'
class StyleTextDecoration():
UNDERLINE = 'underline'
LINE_THROUGH = 'line-through'
HEADING_TAGS = (
Tag.H1,
Tag.H2,
Tag.H3,
Tag.H4,
Tag.H5,
Tag.H6,
)
TEXT_ALIGN_TAGS = HEADING_TAGS + (
Tag.UL,
Tag.OL,
Tag.LI,
Tag.DIV,
Tag.P,
Tag.PRE,
Tag.CODE,
)
NEW_LINE_TAGS = HEADING_TAGS + (
Tag.UL,
Tag.OL,
Tag.DIV,
Tag.P,
Tag.PRE,
Tag.CODE,
)
STYLE_TAGS = TEXT_ALIGN_TAGS + (
Tag.A,
Tag.B,
Tag.STRONG,
Tag.I,
Tag.EM,
Tag.U,
Tag.MARK,
Tag.SPAN,
)
#--------------------------------------------------------------------------------------------------
# Text widget defs
class WCfg():
KEY = "config"
BACKGROUND = "background"
FOREGROUND = "foreground"
JUSTIFY = "justify"
TABS = "tabs"
class Fnt():
KEY = "font"
FAMILY = "family"
SIZE = "size"
WEIGHT = "weight"
SLANT = "slant"
UNDERLINE = "underline"
OVERSTRIKE = "overstrike"
class Bind():
KEY = "bind"
LINK = "link"
IMAGE = "image"
class WTag():
START_INDEX = "start_index"
END_INDEX = "end_index"
DEFAULT_STACK = {
WCfg.KEY:{
WCfg.BACKGROUND:[],
WCfg.FOREGROUND:[("__DEFAULT__", "black")],
WCfg.JUSTIFY:[("__DEFAULT__", 'left')],
WCfg.TABS:[("__DEFAULT__", ())],
},
Fnt.KEY:{
Fnt.FAMILY:[],
Fnt.SIZE:[("__DEFAULT__", Defs.FONT_SIZE)],
Fnt.WEIGHT:[("__DEFAULT__", 'normal')],
Fnt.SLANT:[("__DEFAULT__", 'roman')],
Fnt.UNDERLINE:[("__DEFAULT__", False)],
Fnt.OVERSTRIKE:[("__DEFAULT__", False)],
},
Bind.KEY:{
Bind.LINK:[("__DEFAULT__", None)],
},
}
#__________________________________________________________________________________________________
# functions
def get_existing_font(font_families):
#------------------------------------------------------------------------------------------
try:
return next(filter(lambda f: f.lower() in (f.lower() for f in font.families()), font_families))
except:
return "TkTextFont"
#__________________________________________________________________________________________________
# classes
class HLinkSlot():
#----------------------------------------------------------------------------------------------
def __init__(self, w, tag_name, url):
#------------------------------------------------------------------------------------------
self._w = w
self.tag_name = tag_name
self.URL = url
def call(self, event):
#------------------------------------------------------------------------------------------
webbrowser.open(self.URL)
self._w.tag_config(self.tag_name, foreground="purple")
def enter(self, event):
#------------------------------------------------------------------------------------------
self._w.config(cursor="hand2")
def leave(self, event):
#------------------------------------------------------------------------------------------
self._w.config(cursor="")
class ListTag():
#----------------------------------------------------------------------------------------------
def __init__(self, ordered:bool, list_type=None):
#------------------------------------------------------------------------------------------
self.ordered = ordered
self.type = list_type
self.index = 0
def add(self):
#------------------------------------------------------------------------------------------
if self.ordered:
self.index += 1
def line_index(self):
#------------------------------------------------------------------------------------------
if self.ordered:
if self.type == HTML.TypeOrderedList._1:
return str(self.index)
elif self.type == HTML.TypeOrderedList.a:
return self._index_to_str(self.index).lower()
elif self.type == HTML.TypeOrderedList.A:
return self._index_to_str(self.index).upper()
else:
return chr(8226)
def _index_to_str(self, index):
#------------------------------------------------------------------------------------------
prefix = ""
if index > 26:
prefix = self._index_to_str(index // 26)
index = index % 26
return prefix + chr(0x60 + index)
class HTMLTextParser(HTMLParser):
#----------------------------------------------------------------------------------------------
def __init__(self):
#------------------------------------------------------------------------------------------
super().__init__()
# set list tabs
self.cached_images = {}
self.DEFAULT_TEXT_FONT_FAMILY = get_existing_font(Defs.DEFAULT_TEXT_FONT_FAMILY)
self.PREFORMATTED_FONT_FAMILY = get_existing_font(Defs.PREFORMATTED_FONT_FAMILY)
def _parse_attrs(self, attrs):
#------------------------------------------------------------------------------------------
attrs_dict = {
HTML.Attrs.STYLE:{},
HTML.Attrs.HREF:None,
HTML.Attrs.SRC:None,
HTML.Attrs.WIDTH:None,
HTML.Attrs.HEIGHT:None,
HTML.Attrs.TYPE:None,
}
for k, v in attrs:
k = k.lower()
if k == HTML.Attrs.STYLE:
for p in v.split(";"):
try:
p_key = p.split(":")[0].strip().lower()
p_value = p.split(":")[1].strip().lower()
attrs_dict[HTML.Attrs.STYLE][p_key] = p_value
except:
pass
elif k in (HTML.Attrs.HREF, HTML.Attrs.SRC, HTML.Attrs.WIDTH, HTML.Attrs.HEIGHT, HTML.Attrs.TYPE):
attrs_dict[k] = v
return attrs_dict
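    # Worked example (added for clarity; not part of the original file): for
    # attrs = [("style", "color: red; font-size: 12px"), ("href", "https://example.com")]
    # this returns {"style": {"color": "red", "font-size": "12px"},
    # "href": "https://example.com", "src": None, "width": None, "height": None,
    # "type": None}.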
def _w_tags_add(self):
#------------------------------------------------------------------------------------------
tag = {
WCfg.KEY:{},
Fnt.KEY:{},
Bind.KEY:{}
}
for k1 in (WCfg.KEY, Fnt.KEY, Bind.KEY):
for k2 in DEFAULT_STACK[k1]:
tag[k1][k2] = self.stack[k1][k2][-1][1]
self._w_tags[self._w.index("end-1c")] = tag
def _stack_get_main_key(self, key):
#------------------------------------------------------------------------------------------
if key in WCfg.__dict__.values():
main_key = WCfg.KEY
elif key in Fnt.__dict__.values():
main_key = Fnt.KEY
elif key in Bind.__dict__.values():
main_key = Bind.KEY
else:
raise ValueError("key %s doesn't exists" % key)
return main_key
def _stack_add(self, tag, key, value=None):
#------------------------------------------------------------------------------------------
main_key = self._stack_get_main_key(key)
if value is None:
# if value is none, add the previous value
value = self.stack[main_key][key][-1][1]
self.stack[main_key][key].append((tag, value))
def _stack_index(self, tag, key):
#------------------------------------------------------------------------------------------
main_key = self._stack_get_main_key(key)
index = None
for i, v in enumerate(self.stack[main_key][key]):
if v[0] == tag:
index = i
return index
def _stack_pop(self, tag, key):
#------------------------------------------------------------------------------------------
main_key = self._stack_get_main_key(key)
index = None
if len(self.stack[main_key][key]) > 1:
index = self._stack_index(tag, key)
if index is not None:
return self.stack[main_key][key].pop(index)[1]
def _parse_styles(self, tag, attrs):
#------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------- [ COLOR ]
if HTML.Style.COLOR in attrs[HTML.Attrs.STYLE].keys():
self._stack_add(tag, WCfg.FOREGROUND, attrs[HTML.Attrs.STYLE][HTML.Style.COLOR])
elif tag == HTML.Tag.A and attrs[HTML.Attrs.HREF]:
self._stack_add(tag, WCfg.FOREGROUND, "blue")
else:
self._stack_add(tag, WCfg.FOREGROUND)
#---------------------------------------------------------------------- [ BACKGROUD_COLOR ]
if HTML.Style.BACKGROUD_COLOR in attrs[HTML.Attrs.STYLE].keys():
self._stack_add(tag, WCfg.BACKGROUND, attrs[HTML.Attrs.STYLE][HTML.Style.BACKGROUD_COLOR])
elif tag == HTML.Tag.MARK:
self._stack_add(tag, WCfg.BACKGROUND, "yellow")
else:
self._stack_add(tag, WCfg.BACKGROUND)
#-------------------------------------------------------------------------- [ FONT_FAMILY ]
# font family
if HTML.Style.FONT_FAMILY in attrs[HTML.Attrs.STYLE].keys():
font_family = Defs.DEFAULT_TEXT_FONT_FAMILY
for f in attrs[HTML.Attrs.STYLE][HTML.Style.FONT_FAMILY].split(","):
f = f.strip()
if f in map(lambda f: f.lower(), font.families()):
font_family = f
break
self._stack_add(tag, Fnt.FAMILY, font_family)
elif tag in (HTML.Tag.PRE, HTML.Tag.CODE):
self._stack_add(tag, Fnt.FAMILY, self.PREFORMATTED_FONT_FAMILY)
else:
self._stack_add(tag, Fnt.FAMILY)
#---------------------------------------------------------------------------- [ FONT_SIZE ]
if HTML.Style.FONT_SIZE in attrs[HTML.Attrs.STYLE].keys():
size = Defs.FONT_SIZE
if attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE].endswith("px"):
if attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE][:-2].isdigit():
size = int(attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE][:-2])
elif attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE].endswith(r"%"):
if attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE][:-1].isdigit():
size = int((int(attrs[HTML.Attrs.STYLE][HTML.Style.FONT_SIZE][:-1]) * Defs.FONT_SIZE)/100)
self._stack_add(tag, Fnt.SIZE, size)
elif tag.startswith('h') and len(tag) == 2:
self._stack_add(tag, Fnt.SIZE, Defs.HEADINGS_FONT_SIZE[tag])
else:
self._stack_add(tag, Fnt.SIZE)
#--------------------------------------------------------------------------- [ TEXT_ALIGN ]
if HTML.Style.TEXT_ALIGN in attrs[HTML.Attrs.STYLE].keys() and tag in HTML.TEXT_ALIGN_TAGS:
self._stack_add(tag, WCfg.JUSTIFY, attrs[HTML.Attrs.STYLE][HTML.Style.TEXT_ALIGN])
else:
self._stack_add(tag, WCfg.JUSTIFY)
#---------------------------------------------------------------------- [ TEXT_DECORATION ]
if HTML.Style.TEXT_DECORATION in attrs[HTML.Attrs.STYLE].keys():
if tag == HTML.Tag.STRONG:
self._stack_add(tag, Fnt.UNDERLINE, False)
self._stack_add(tag, Fnt.OVERSTRIKE, False)
elif HTML.StyleTextDecoration.UNDERLINE in attrs[HTML.Attrs.STYLE][HTML.Style.TEXT_DECORATION]:
self._stack_add(tag, Fnt.UNDERLINE, True)
self._stack_add(tag, Fnt.OVERSTRIKE, False)
elif HTML.StyleTextDecoration.LINE_THROUGH in attrs[HTML.Attrs.STYLE][HTML.Style.TEXT_DECORATION]:
self._stack_add(tag, Fnt.UNDERLINE, False)
self._stack_add(tag, Fnt.OVERSTRIKE, True)
else:
self._stack_add(tag, Fnt.UNDERLINE)
self._stack_add(tag, Fnt.OVERSTRIKE)
else:
if tag == HTML.Tag.A and attrs[HTML.Attrs.HREF]:
self._stack_add(tag, Fnt.UNDERLINE, True)
self._stack_add(tag, Fnt.OVERSTRIKE, False)
elif tag == HTML.Tag.U:
self._stack_add(tag, Fnt.UNDERLINE, True)
self._stack_add(tag, Fnt.OVERSTRIKE, False)
else:
self._stack_add(tag, Fnt.UNDERLINE)
self._stack_add(tag, Fnt.OVERSTRIKE)
def handle_starttag(self, tag, attrs):
#------------------------------------------------------------------------------------------
tag = tag.lower()
attrs = self._parse_attrs(attrs)
if tag in HTML.STYLE_TAGS:
#---------------------------------------------------------------------- [ STYLED_TAGS ]
self._parse_styles(tag, attrs)
if tag == HTML.Tag.B or tag == HTML.Tag.STRONG or tag in HTML.HEADING_TAGS:
self._stack_add(tag, Fnt.WEIGHT, "bold")
elif tag == HTML.Tag.I or tag == HTML.Tag.EM:
self._stack_add(tag, Fnt.SLANT, "italic")
elif tag == HTML.Tag.A:
self._stack_add(tag, Bind.LINK, attrs[HTML.Attrs.HREF])
elif tag == HTML.Tag.OL:
#---------------------------------------------------------------- [ ORDERED_LISTS ]
if attrs[HTML.Attrs.TYPE] and attrs[HTML.Attrs.TYPE] in HTML.TypeOrderedList.__dict__.values():
list_type = attrs[HTML.Attrs.TYPE]
else:
list_type = HTML.TypeOrderedList._1
self.list_tags.append(ListTag(ordered=True, list_type=list_type))
tabs = []
for i in range(len(self.list_tags)):
offset = 30 * (i + 1)
tabs += [offset, tk.RIGHT, offset+5, tk.LEFT ]
self._stack_add(tag, WCfg.TABS, tabs)
elif tag == HTML.Tag.UL:
#-------------------------------------------------------------- [ UNORDERED_LISTS ]
self.list_tags.append(ListTag(ordered=False))
tabs = []
for i in range(len(self.list_tags)):
offset = 30 * (i + 1)
tabs += [offset, tk.RIGHT, offset+5, tk.LEFT ]
self._stack_add(tag, WCfg.TABS, tabs)
elif tag == HTML.Tag.LI:
#------------------------------------------------------------------ [ LISTS_LINES ]
level = len(self.list_tags)
if level:
self.list_tags[-1].add()
if self.strip:
self._insert_new_line()
line_index = self.list_tags[-1].line_index()
if self.list_tags[-1].ordered:
line_index = "\t" + "\t\t" * (level-1) + line_index + ".\t"
else:
line_index = "\t" + "\t\t" * (level-1) + line_index + "\t"
self._stack_add(tag, Fnt.UNDERLINE, False)
self._stack_add(tag, Fnt.OVERSTRIKE, False)
self._w_tags_add()
self._w.insert(tk.INSERT, line_index)
self._stack_pop(tag, Fnt.UNDERLINE)
self._stack_pop(tag, Fnt.OVERSTRIKE)
elif tag == HTML.Tag.IMG and attrs[HTML.Attrs.SRC]:
#-------------------------------------------------------------------- [ UNSTYLED_TAGS ]
image = None
#krt add PP relative paths
if attrs[HTML.Attrs.SRC] in self.cached_images.keys():
image = deepcopy(self.cached_images[attrs[HTML.Attrs.SRC]])
elif os.path.exists(self.complete_path(attrs[HTML.Attrs.SRC])):
image = Image.open(self.complete_path(attrs[HTML.Attrs.SRC]))
self.cached_images[attrs[HTML.Attrs.SRC]] = deepcopy(image)
if image:
width = image.size[0]
height = image.size[1]
resize = False
if str(attrs[HTML.Attrs.WIDTH]).isdigit():
width = int(attrs[HTML.Attrs.WIDTH])
resize = True
if str(attrs[HTML.Attrs.HEIGHT]).isdigit():
height = int(attrs[HTML.Attrs.HEIGHT])
resize = True
if resize:
image = image.resize((width, height), Image.ANTIALIAS)
self.images.append(ImageTk.PhotoImage(image))
self._w.image_create(tk.INSERT, image=self.images[-1])
if self.strip:
#------------------------------------------------------------------------ [ NEW_LINES ]
if tag == HTML.Tag.BR:
self._insert_new_line()
else:
self.html_tags.append(tag)
if tag in HTML.NEW_LINE_TAGS and self.strip and self._w.index("end-1c") != "1.0":
if tag in (HTML.Tag.DIV,):
self._insert_new_line()
elif tag in (HTML.Tag.UL, HTML.Tag.OL):
if len(self.list_tags) == 1:
self._insert_new_line(double=True)
else:
self._insert_new_line(double=False)
else:
self._insert_new_line(double=True)
self._w_tags_add()
def handle_charref(self, data):
#------------------------------------------------------------------------------------------
try:
char = chr(int(data))
self._w.insert(tk.INSERT, char)
except:
pass
def _insert_new_line(self, double=False):
#------------------------------------------------------------------------------------------
self._remove_last_space()
if self._w.get("end-3c", "end-1c") == "\n\n":
pass
elif self._w.get("end-2c", "end-1c") == "\n":
if double:
self._w.insert(tk.INSERT, "\n")
else:
if double:
self._w.insert(tk.INSERT, "\n\n")
else:
self._w.insert(tk.INSERT, "\n")
def _text_rstrip(self):
#------------------------------------------------------------------------------------------
for _ in range(3):
if self._w.get("end-2c", "end-1c") in (" ", "\n"):
self._w.delete("end-2c", "end-1c")
def _remove_last_space(self):
#------------------------------------------------------------------------------------------
if self._w.get("end-2c", "end-1c") == " ":
self._w.delete("end-2c", "end-1c")
    def _remove_multi_spaces(self,
'''IndexedFasta.py - fast random access in fasta files
===================================================
This module provides fast random access to :term:`fasta` formatted
files that have been previously indexed. The indexing can be done
either through the samtools faidx tool (accessible through pysam_) or
using the in-house methods implemented in this module.
The main class is :class:`IndexedFasta`. This is a factory function
that provides transparent access to both samtools or CGAT indexed
fasta files. The basic usage to retrieve the sequence spanning the
region chr12:10,000-10,100 is::
from IndexedFasta import IndexedFasta
fasta = IndexedFasta("hg19")
fasta.getSequence("chr12", "+", 10000, 10100)
To index a file, use the :mod:`scripts/index_fasta` command line utility or the
:func:`createDatabase` function::
> python index_fasta.py hg19 chr*.fa
This module has some useful utility functions:
:func:`splitFasta`
split a :term:`fasta` formatted file into smaller pieces.
:func:`parseCoordinates`
parse a coordinate string in various formats
but otherwise the module contains a multitude of additional functions that are
only of internal use.
Reference
----------
'''
import os
import sys
import array
import string
import re
import struct
import math
import tarfile
import random
import zlib
import gzip
import tempfile
import io
from CGAT import Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Genomics as Genomics
from CGAT.AString import AString
import pysam
from future.moves import dbm
from six import StringIO
IS_PY3 = sys.version_info.major >= 3
class Uncompressor:
def __init__(self, filename, unmangler):
self.mFile = open(filename, "rb")
self.mUnMangler = unmangler
def read(self, block_size, indices, start, end):
"""read an uncompressed block from start:end.
The compressed chunk starts at first_pos.
NOTE: This is poorly implemented - do better.
"""
# skip over uncompressed blocks
d = int(math.floor(float(start) / block_size))
r = start % block_size
assert(d < len(indices))
self.mFile.seek(indices[d])
# read x bytes of compressed data, at least one full chunk.
nchunks = int(math.ceil(float((r + end - start)) / block_size))
fragments = []
for x in range(d, d + nchunks):
s = self.mFile.read(indices[x + 1] - indices[x])
fragments.append(self.mUnMangler(s))
u = "".join(fragments)
assert len(u) >= end - start, \
"fragment smaller than requested size: %i > %i-%i=%i" %\
(len(u), end, start, end - start)
return u[r:r + end - start]
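    # Worked example (added for clarity; not part of the original module): with
    # block_size=100, start=250 and end=320, d=2 and r=50, so nchunks =
    # ceil((50 + 70) / 100) = 2 compressed chunks (indices[2:4]) are read and
    # decompressed (~200 characters) and the slice u[50:120] is returned.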
def writeFragments(outfile_fasta,
outfile_index,
fragments,
mangler, size,
write_all=False):
"""write mangled fragments to *outfile_fasta* in chunks of *size*
updating *outfile_index*.
    returns the part of the last fragment that has not been written and is
    shorter than *size*; if nothing is left over, an empty string is returned.
If *write_all* is True, all of the fragments are written to
the file and the last file position is added to *outfile_index*
as well.
"""
s = "".join(fragments)
rest = len(s) % size
if len(s) > size:
for x in range(0, len(s) - rest, size):
outfile_index.write("\t%i" % outfile_fasta.tell())
outfile_fasta.write(mangler(s[x:x + size]))
if rest:
if write_all:
outfile_index.write("\t%i" % outfile_fasta.tell())
outfile_fasta.write(mangler(s[-rest:]))
outfile_index.write("\t%i" % outfile_fasta.tell())
return ""
else:
return s[-rest:]
else:
return ""
def gzip_mangler(s):
xfile = StringIO()
gzipfile = gzip.GzipFile(fileobj=xfile, mode="wb")
gzipfile.write(s)
gzipfile.close()
m = xfile.getvalue()
xfile.close()
# write identifier
return m
def gzip_demangler(s):
if sys.version_info.major >= 3:
gzipfile = io.TextIOWrapper(
gzip.GzipFile(fileobj=io.BytesIO(s), mode="rb"),
encoding="ascii")
else:
gzipfile = gzip.GzipFile(fileobj=io.BytesIO(s), mode="rb")
m = gzipfile.readline()
return m
class Translator:
"""translate a sequence."""
def __init__(self):
self.mRegEx = re.compile(" +")
def __call__(self, sequence):
return "".join(self.mMapScore2Char[self.mMapScore2Score[int(x)]]
for x in self.mRegEx.split(sequence.strip()))
def translate(self, sequence):
raise NotImplementedError("translate not implemented.")
class TranslatorPhred(Translator):
"""translate phred quality scores."""
def __init__(self, *args, **kwargs):
Translator.__init__(self, *args, **kwargs)
self.mMapScore2Char = [chr(33 + x) for x in range(0, 93)]
self.mMapScore2Score = list(range(0, 93))
def translate(self, sequence):
return array.array("I", (ord(x) - 33 for x in sequence))
class TranslatorSolexa(Translator):
"""translate solexa quality scores."""
def __init__(self, *args, **kwargs):
Translator.__init__(self, *args, **kwargs)
self.mMapScore2Char = [chr(64 + x) for x in range(0, 128)]
self.mMapScore2Score = [
int(10.0 * math.log(1.0 + 10 ** (x / 10.0)) / math.log(10) + .499)
for x in range(-64, 65)]
def translate(self, sequence):
raise NotImplementedError("translate not implemented.")
# return array.array("i", (ord(x) - 64))
class TranslatorRange200(Translator):
"""translate pcap quality scores.
For example for PCAP scores.
These scores range from 0 to 100 and are the
"a weighted sum of input base quality values
(Huang and Madan 1999)
The numerical values from 0 to 200 are stored
as values form 33 to 233
"
"""
def __init__(self, *args, **kwargs):
Translator.__init__(self, *args, **kwargs)
self.mMapScore2Char = [chr(33 + x) for x in range(0, 200)]
def __call__(self, sequence):
try:
return "".join(self.mMapScore2Char[int(x)]
for x in self.mRegEx.split(sequence.strip()))
except ValueError as msg:
raise ValueError(msg + " parsing error in fragment: %s" % sequence)
def translate(self, sequence):
return array.array("I", (ord(x) - 33 for x in sequence))
class TranslatorBytes(Translator):
"""output binary values as bytes permitting values from 0 to 255
Note the resulting file will not be iterable as newline is not
a record-separator any more.
"""
def __init__(self, *args, **kwargs):
Translator.__init__(self, *args, **kwargs)
def __call__(self, sequence):
try:
return "".join(chr(int(x)) for x in
self.mRegEx.split(sequence.strip()))
except ValueError as msg:
print("parsing error in line: %s" % sequence)
print("message=%s" % str(msg))
return ""
def translate(self, sequence):
return array.array("I", (ord(x) for x in sequence))
class MultipleFastaIterator:
def __init__(self,
filenames,
regex_identifier=None,
format="auto"):
if isinstance(filenames, str):
self.filenames = [filenames]
else:
self.filenames = filenames
self.regexIdentifier = regex_identifier
self.iterator = self._iterate()
self.format = format
def __iter__(self):
return self
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
return None
def next(self):
try:
return next(self.iterator)
except StopIteration:
return None
def _iterate(self):
"""iterate over muliple files."""
def _iter(infile):
identifier = None
is_new = False
for line in infile:
if line.startswith("#"):
continue
if line.startswith(">"):
if self.regexIdentifier:
try:
identifier = re.search(
self.regexIdentifier, line[1:-1]).groups()[0]
except AttributeError:
raise ValueError(
"could not parse identifier from line %s "
"- check the input" % line[1:-1])
else:
identifier = re.split("\s", line[1:-1])[0]
is_new = True
else:
if not identifier:
raise ValueError(
"refusing to emit sequence without identifier "
"- check the input")
yield is_new, identifier, line.strip()
is_new = False
for filename in self.filenames:
if self.format == "tar.gz" or self.format == "tar" or \
(self.format == "auto" and filename.endswith("tar.gz")):
if filename == "-":
if IS_PY3:
tf = tarfile.open(fileobj=sys.stdin.buffer, mode="r|*")
else:
tf = tarfile.open(fileobj=sys.stdin, mode="r|*")
else:
tf = tarfile.open(filename, mode="r")
for f in tf:
b, ext = os.path.splitext(f.name)
if ext.lower() in (".fasta", ".fa"):
E.info("extracting %s" % f.name)
if sys.version_info.major >= 3:
infile = io.TextIOWrapper(
tf.extractfile(f),
encoding="ascii")
else:
infile = tf.extractfile(f)
for x in _iter(infile):
yield x
else:
E.info("skipping %s" % f.name)
if tf != sys.stdin:
tf.close()
continue
elif self.format == "fasta.gz" or (self.format == "auto" and
filename.endswith(".gz")):
infile = IOTools.openFile(filename, "r")
elif filename == "-":
infile = sys.stdin
else:
infile = IOTools.openFile(filename, "r")
for x in _iter(infile):
yield x
if filename != "-":
infile.close()
        return  # end of generator (PEP 479: raising StopIteration here becomes a RuntimeError in Python 3.7+)
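# Illustrative usage (added sketch; not part of the original module). Note that
# the iterator returns None when exhausted instead of raising StopIteration, so
# callers typically loop explicitly; the filename below is hypothetical:
#   it = MultipleFastaIterator(["chr1.fa"])
#   while True:
#       rec = it.next()
#       if rec is None:
#           break
#       is_new, identifier, seq_line = rec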
def createDatabase(db, iterator,
force=False,
synonyms=None,
compression=None,
random_access_points=None,
regex_identifier=None,
clean_sequence=False,
ignore_duplicates=False,
allow_duplicates=False,
translator=None):
"""index files in filenames to create database.
Two new files are created - db.fasta and db_name.idx
If compression is enabled, provide random access points
every # bytes.
Dictzip is treated as an uncompressed file.
regex_identifier: pattern to extract identifier from description line.
If None, the part until the first white-space character is used.
translator: specify a translator
"""
if db.endswith(".fasta"):
db = db[:-len(".fasta")]
if compression:
if compression == "lzo":
import lzo
def lzo_mangler(s):
return lzo.compress(s, 9)
mangler = lzo_mangler
db_name = db + ".lzo"
write_chunks = True
elif compression == "zlib":
def zlib_mangler(s):
return zlib.compress(s, 9)
mangler = zlib_mangler
db_name = db + ".zlib"
write_chunks = True
elif compression == "gzip":
mangler = gzip_mangler
db_name = db + ".gz"
write_chunks = True
elif compression == "dictzip":
from . import dictzip
def mangler(x):
return x
db_name = db + ".dz"
write_chunks = False
elif compression == "bzip2":
import bz2
def bzip_mangler(x):
return bz2.compress(x, 9)
mangler = bzip_mangler
db_name = db + ".bz2"
write_chunks = True
elif compression == "debug":
def mangler(x):
return x
db_name = db + ".debug"
write_chunks = True
elif compression == "rle":
from . import RLE
mangler = RLE.compress
db_name = db + ".rle"
write_chunks = True
else:
raise ValueError("unknown compression library: %s" % compression)
index_name = db + ".cdx"
        if write_chunks and (random_access_points is None or
                             random_access_points <= 0):
            raise ValueError("specify chunksize in --random-access-points")
else:
def mangler(x):
return x
db_name = db + ".fasta"
write_chunks = False
index_name = db + ".idx"
if os.path.exists(db_name) and not force:
raise ValueError("database %s already exists." % db_name)
if os.path.exists(index_name) and not force:
raise ValueError("database index %s already exists." % index_name)
    outfile_index
g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTopClientIpSumInfoList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTopClientIpSumInfoListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeTopClientIpSumInfoList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyPullStreamStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyPullStreamStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyPullStreamStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveRecordTemplates(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeLiveRecordTemplatesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeLiveRecordTemplates(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyLiveDomainCert(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyLiveDomainCertRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyLiveDomainCert(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVisitTopSumInfoList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVisitTopSumInfoListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVisitTopSumInfoList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyLiveRecordTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyLiveRecordTemplateRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyLiveRecordTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveDomainCert(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeLiveDomainCertRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeLiveDomainCert(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAddLiveWatermark(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AddLiveWatermarkRequest()
model.from_json_string(json.dumps(args))
rsp = client.AddLiveWatermark(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAreaBillBandwidthAndFluxList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeAreaBillBandwidthAndFluxListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeAreaBillBandwidthAndFluxList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLiveWatermarkRule(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteLiveWatermarkRuleRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteLiveWatermarkRule(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDeliverBandwidthList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDeliverBandwidthListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeDeliverBandwidthList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLiveCallbackRule(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteLiveCallbackRuleRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteLiveCallbackRule(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyPullStreamConfig(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyPullStreamConfigRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyPullStreamConfig(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLiveSnapshotTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateLiveSnapshotTemplateRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateLiveSnapshotTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveStreamOnlineList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeLiveStreamOnlineListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeLiveStreamOnlineList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLiveCallbackTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteLiveCallbackTemplateRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteLiveCallbackTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLivePushAuthKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeLivePushAuthKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeLivePushAuthKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeletePullStreamConfig(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region],
self.get_nhg_id('group2')
# Test scenario:
# - update the route created in `remove_referenced_nhg_test` to a routeOrch-owned NHG with the same details as the
# previous one and assert a new NHG and 2 new NHGMs are added
# - update the route to point back to the original NHG and assert the routeOrch's owned NHG is deleted
def routeorch_nhgorch_interop_test():
rt_id = self.get_route_id('2.2.2.0/24')
assert rt_id is not None
# Update the route with routeOrch's owned next hop group
nhgid = self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID']
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'),
('ifname', 'Ethernet0,Ethernet4')])
self.rt_ps.set('2.2.2.0/24', fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 2)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 4)
# Assert the next hop group ID changed
time.sleep(1)
assert self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID'] != nhgid
nhgid = self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID']
# Update the route to point back to group2
fvs = swsscommon.FieldValuePairs([('nexthop_group', 'group2')])
self.rt_ps.set('2.2.2.0/24', fvs)
# The routeOrch's owned next hop group should get deleted
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2)
# Assert the route points back to group2
assert self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID'] != nhgid
# Test scenario:
# - create a new NHG with the same details as the previous NHG and assert a new NHG and 2 new NHGMs are created
# - update the route to point to the new NHG and assert its SAI NHG ID changes
def identical_nhgs_test():
rt_id = self.get_route_id('2.2.2.0/24')
assert rt_id is not None
# Create a new group with the same members as group2
nhgid = self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID']
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'),
('ifname', 'Ethernet0,Ethernet4')])
self.nhg_ps.set("group1", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 2)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 4)
# Update the route to point to the new group
fvs = swsscommon.FieldValuePairs([('nexthop_group', 'group1')])
self.rt_ps.set('2.2.2.0/24', fvs)
time.sleep(1)
# Assert the next hop group ID changed
assert self.asic_db.get_entry(self.ASIC_RT_STR, rt_id)['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID'] != nhgid
# Test scenario:
# - create a route referencing a NHG that does not exist and assert it is not created
def create_route_inexistent_nhg_test():
# Add a route with a NHG that does not exist
fvs = swsscommon.FieldValuePairs([('nexthop_group', 'group3')])
self.rt_ps.set('2.2.3.0/24', fvs)
time.sleep(1)
assert self.get_route_id('2.2.3.0/24') is None
# Remove the pending route
self.rt_ps._del('2.2.3.0/24')
self.init_test(dvs, 3)
remove_inexistent_nhg_test()
nhg_members_validation_test()
remove_referenced_nhg_test()
routeorch_nhgorch_interop_test()
identical_nhgs_test()
create_route_inexistent_nhg_test()
# Cleanup
# Remove the route
self.rt_ps._del('2.2.2.0/24')
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count)
# Remove the groups
self.nhg_ps._del('group1')
self.nhg_ps._del('group2')
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count)
def test_nhgorch_nh_group(self, dvs, testlog):
# Test scenario:
# - create NHG 'group1' and assert it is being added to ASIC DB along with its members
def create_nhg_test():
# create next hop group in APPL DB
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3,10.0.0.5'),
("ifname", "Ethernet0,Ethernet4,Ethernet8")])
self.nhg_ps.set("group1", fvs)
# check if group was propagated to ASIC DB
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
assert self.nhg_exists('group1')
# check if members were propagated to ASIC DB
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 3)
assert len(self.get_nhgm_ids('group1')) == 3
# Test scenario:
# - create a route pointing to `group1` and assert it is being added to ASIC DB and pointing to its SAI ID
# - delete the route and assert it is being removed
def create_route_nhg_test():
# create route in APPL DB
fvs = swsscommon.FieldValuePairs([("nexthop_group", "group1")])
self.rt_ps.set("2.2.2.0/24", fvs)
# check if route was propagated to ASIC DB
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count + 1)
k = self.get_route_id('2.2.2.0/24')
assert k is not None
# assert the route points to next hop group
fvs = self.asic_db.get_entry(self.ASIC_RT_STR, k)
assert fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] == self.get_nhg_id('group1')
# Remove route 2.2.2.0/24
self.rt_ps._del("2.2.2.0/24")
self.asic_db.wait_for_n_keys(self.ASIC_RT_STR, self.asic_rts_count)
# Test scenario:
# - bring the links down one by one and assert the group1's members are subsequently removed and the group
# still exists
# - bring the links up one by one and assert the group1's members are subsequently added back
def link_flap_test():
# bring links down one-by-one
for i in [0, 1, 2]:
self.flap_intf(i, 'down')
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2 - i)
assert len(self.get_nhgm_ids('group1')) == 2 - i
assert self.nhg_exists('group1')
# bring links up one-by-one
for i in [0, 1, 2]:
self.flap_intf(i, 'up')
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + i + 1)
assert len(self.get_nhgm_ids('group1')) == i + 1
# Test scenario:
# - bring a link down and assert a NHGM of `group1` is removed
# - create NHG `group2` which has a member pointing to the link being down and assert the group gets created
# but the member referencing the link is not added
# - update `group1` by removing a member while having another member referencing the link which is down and
# assert it'll only have a member added in ASIC DB
# - bring the link back up and assert the missing 2 members of `group1` and `group2` are added
# - remove `group2` and assert it and its members are removed
def validate_invalidate_group_member_test():
# Bring an interface down
self.flap_intf(1, 'down')
# One group member will get deleted
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2)
# Create a group that contains a NH that uses the down link
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'),
("ifname", "Ethernet0,Ethernet4")])
self.nhg_ps.set('group2', fvs)
# The group should get created, but it will not contain the NH whose
# link is down
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 2)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 3)
assert len(self.get_nhgm_ids('group2')) == 1
# Update the NHG with one interface down
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.3,10.0.0.1'),
("ifname", "Ethernet4,Ethernet0")])
self.nhg_ps.set("group1", fvs)
# Wait for group members to update - the group will contain only the
# members that have their links up
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2)
assert len(self.get_nhgm_ids('group1')) == 1
# Bring the interface up
self.flap_intf(1, 'up')
# Check that the missing member of group1 and group2 is being added
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 4)
# Remove group2
self.nhg_ps._del('group2')
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2)
# Test scenario:
# - create NHG `group2` with a NH that does not exist and assert it isn't created
# - update `group1` to contain the invalid NH and assert it remains only with the unremoved members
# - configure the invalid NH's interface and assert `group2` gets created and `group1`'s NH is added
# - delete `group2` and assert it is being removed
def inexistent_group_member_test():
# Create group2 with a NH that does not exist
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.3,10.0.0.63'),
("ifname", "Ethernet4,Ethernet124")])
self.nhg_ps.set("group2", fvs)
# The groups should not be created
time.sleep(1)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
# Update group1 with a NH that does not exist
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.3,10.0.0.63'),
("ifname", "Ethernet4,Ethernet124")])
self.nhg_ps.set("group1", fvs)
# The update should fail, leaving group1 with only the unremoved
# members
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 1)
assert len(self.get_nhgm_ids('group1')) == 1
# Configure the missing NH's interface
self.config_intf(31)
# A couple more routes will be added to ASIC DB
self.asic_rts_count += 2
# Group2 should get created and group1 should be updated
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 2)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 4)
assert len(self.get_nhgm_ids('group1')) == 2
assert len(self.get_nhgm_ids('group2')) == 2
# Delete group2
self.nhg_ps._del('group2')
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
# Test scenario:
# - update `group1` to have 4 members and assert they are all added
# - update `group1` to have only 1 member and assert the other 3 are removed
# - update `group1` to have 2 members and assert a new one is added
def update_nhgm_count_test():
# Update the NHG, adding two new members
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3,10.0.0.5,10.0.0.7'),
("ifname", "Ethernet0,Ethernet4,Ethernet8,Ethernet12")])
self.nhg_ps.set("group1", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 4)
assert len(self.get_nhgm_ids('group1')) == 4
# Update the group to one NH only
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), ("ifname", "Ethernet0")])
self.nhg_ps.set("group1", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 1)
assert len(self.get_nhgm_ids('group1')) == 1
# Update the group to 2 NHs
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), ("ifname", "Ethernet0,Ethernet4")])
self.nhg_ps.set("group1", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2)
assert len(self.get_nhgm_ids('group1')) == 2
self.init_test(dvs, 4)
create_nhg_test()
create_route_nhg_test()
link_flap_test()
validate_invalidate_group_member_test()
inexistent_group_member_test()
update_nhgm_count_test()
# Cleanup
# Remove group1
self.nhg_ps._del("group1")
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count)
def test_nhgorch_label_route(self, dvs, testlog):
self.init_test(dvs, 4)
# create next hop group in APPL DB
fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3,10.0.0.5'),
("ifname", "Ethernet0,Ethernet4,Ethernet8")])
self.nhg_ps.set("group1", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1)
self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 3)
# create label route in APPL DB pointing to the NHG
fvs = swsscommon.FieldValuePairs([("nexthop_group", "group1")])
self.lr_ps.set("20", fvs)
self.asic_db.wait_for_n_keys(self.ASIC_INSEG_STR, self.asic_insgs_count + 1)
k = self.get_inseg_id('20')
assert k is not None
# assert the route points to next
rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
try: mpaa = item['certification']
except: mpaa = '0'
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
plot = item['overview']
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try: tagline = item['tagline']
except: tagline = None
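# No tagline from the API: fall back to the first sentence of the plot,
# splitting on '.', '!' or '?' followed by whitespace and a capital letter.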
if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
elif tagline == None: tagline = '0'
tagline = client.replaceHTMLCodes(tagline)
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_list(self, url, idx=True):
try:
if url == self.imdbwatchlist_link:
def imdb_watchlist_id(url):
return re.compile('/export[?]list_id=(ls\d*)').findall(client.request(url))[0]
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
result = str(client.request(url,headers=self.en_headers))
try:
if idx == True: raise Exception()
pages = client.parseDOM(result, 'div', attrs = {'class': 'desc'})[0]
pages = re.compile('Page \d+? of (\d*)').findall(pages)[0]
for i in range(1, int(pages)):
u = url.replace('&start=1', '&start=%s' % str(i*100+1))
result += str(client.request(u, headers=self.en_headers))
except:
pass
result = result.replace('\n','')
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
if len(next) == 0:
next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
next = [i[0] for i in next if 'Next' in i[1]]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
try: title = client.parseDOM(item, 'a')[1]
except: pass
try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
except: pass
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
year = re.findall('(\d{4})', year[0])[0]
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = re.findall('(tt\d*)', imdb)[0]
imdb = imdb.encode('utf-8')
#control.log('[imdb_list] Title: %s ID:%s' %(title,imdb))
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: poster = '0'
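# Rewrite the IMDb thumbnail size tokens (_SX/_SY/_UX/_CR...) so a 500px-wide
# poster is requested instead of the small list thumbnail.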
poster = re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
except: genre = '0'
genre = ' / '.join([i.strip() for i in genre.split(',')])
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
except: duration = '0'
duration = duration.encode('utf-8')
rating = '0'
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: pass
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
except: pass
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
except: votes = '0'
try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
except: director = '0'
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
except: cast = '0'
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
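# Derive a tagline from the first sentence of the plot (split on
# sentence-ending punctuation followed by a capital letter).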
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
except:
pass
return self.list
def imdb_user_list(self, url):
print("Items", url)
try:
result = client.request(url, headers=self.en_headers)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
#control.log("##################><><><><> trakt_list item %s" % item)
print("Items",items)
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_person_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a', ret='title')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = re.findall('(nm\d*)', url, re.I)[0]
url = self.person_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = client.parseDOM(item, 'img', ret='src')[0]
if not ('._SX' in image or '._SY' in image): raise Exception()
image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
image = client.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def scn_list(self, url):
def predb_items():
try:
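# Scrape the scene-release pages for the current and previous year, keep
# releases from the last ~180 days, de-duplicate, and return the 150 most
# recent (title, year) pairs.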
years = [(self.datetime).strftime('%Y'), (self.datetime - datetime.timedelta(days = 365)).strftime('%Y')]
months = (self.datetime - datetime.timedelta(days = 180)).strftime('%Y%m%d')
result = ''
for i in years:
result += client.request(self.scn_page % (str(i), '1'))
result += client.request(self.scn_page % (str(i), '2'))
items = client.parseDOM(result, 'div', attrs = {'class': 'post'})
items = [(client.parseDOM(i, 'a', attrs = {'class': 'p-title'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in items]
items = [(i[0][0], i[1][0]) for i in items if len(i[0]) > 0 and len(i[1]) > 0]
items = [(re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', i[0]), re.compile('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]').findall(i[0]), re.sub('[^0-9]', '', i[1])) for i in items]
items = [(i[0], i[1][-1], i[2]) for i in items if len(i[1]) > 0]
items = [i for i in items if int(months) <= int(i[2])]
items = sorted(items,key=lambda x: x[2])[::-1]
items = [(re.sub('(\.|\(|\[|LIMITED|UNCUT)', ' ', i[0]).strip(), i[1]) for i in items]
items = [x for y,x in enumerate(items) if x not in items[:y]]
items = items[:150]
print items
return items
except:
return
def predb_list(i):
try:
url = self.imdb_by_query % (urllib.quote_plus(i[0]), i[1])
item = client.request(url, headers=self.en_headers ,timeout='10')
item = json.loads(item)
title = item['Title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['Year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
#poster = 'http://films4u.org/poster/'+base64.b64encode(imdb)+'.png'
#poster = poster.encode('utf-8')
poster = item['Poster']
if poster == None or poster == '' or poster == 'N/A': poster = '0'
if not ('_SX' in poster or '_SY' in poster): poster = '0'
poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = poster.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
genre = item['Genre']
if genre == None or genre == '' or genre == 'N/A':
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
import torch
# This is how we include tests located in test/jit/...
# They are included here so that they are invoked when you call `test_jit.py`,
# do not run these test files directly.
from jit.test_tracer import TestTracer, TestMixTracingScripting # noqa: F401
from jit.test_recursive_script import TestRecursiveScript # noqa: F401
from jit.test_type_sharing import TestTypeSharing # noqa: F401
from jit.test_logging import TestLogging # noqa: F401
from jit.test_backends import TestBackends, TestBackendsWithCompiler # noqa: F401
from jit.test_backend_nnapi import TestNnapiBackend # noqa: F401
from jit.test_list_dict import TestList, TestDict, TestNamedTuple, TestScriptDict, TestScriptList # noqa: F401
from jit.test_async import TestAsync # noqa: F401
from jit.test_data_parallel import TestDataParallel # noqa: F401
from jit.test_models import TestModels # noqa: F401
from jit.test_modules import TestModules # noqa: F401
from jit.test_autodiff import TestAutodiffJit # noqa: F401
from jit.test_autodiff_subgraph_slicing import TestAutodiffSubgraphSlicing # noqa: F401
from jit.test_custom_operators import TestCustomOperators # noqa: F401
from jit.test_export_modes import TestExportModes # noqa: F401
from jit.test_graph_rewrite_passes import TestGraphRewritePasses # noqa: F401
from jit.test_class_type import TestClassType # noqa: F401
from jit.test_builtins import TestBuiltins, TestTensorBuiltins # noqa: F401
from jit.test_ignore_context_manager import TestIgnoreContextManager # noqa: F401
from jit.test_symbolic_shape_analysis import TestSymbolicShapeAnalysis # noqa: F401
from jit.test_op_decompositions import TestOpDecompositions # noqa: F401
from jit.test_unsupported_ops import TestUnsupportedOps # noqa: F401
from jit.test_freezing import TestFreezing, TestFrozenOptimizations, TestMKLDNNReinplacing # noqa: F401
from jit.test_peephole import TestPeephole # noqa: F401
from jit.test_alias_analysis import TestAliasAnalysis # noqa: F401
from jit.test_save_load import TestSaveLoad, TestSaveLoadFlatbuffer # noqa: F401
from jit.test_save_load_for_op_version import TestSaveLoadForOpVersion # noqa: F401
from jit.test_module_containers import TestModuleContainers # noqa: F401
from jit.test_python_bindings import TestPythonBindings # noqa: F401
from jit.test_python_ir import TestPythonIr # noqa: F401
from jit.test_functional_blocks import TestFunctionalBlocks # noqa: F401
from jit.test_remove_mutation import TestRemoveMutation # noqa: F401
from jit.test_torchbind import TestTorchbind # noqa: F401
from jit.test_module_interface import TestModuleInterface # noqa: F401
from jit.test_onnx_export import TestONNXExport # noqa: F401
from jit.test_with import TestWith # noqa: F401
from jit.test_enum import TestEnum # noqa: F401
from jit.test_string_formatting import TestStringFormatting # noqa: F401
from jit.test_profiler import TestProfiler # noqa: F401
from jit.test_slice import TestSlice # noqa: F401
from jit.test_ignorable_args import TestIgnorableArgs # noqa: F401
from jit.test_hooks import TestHooks # noqa: F401
from jit.test_warn import TestWarn # noqa: F401
from jit.test_isinstance import TestIsinstance # noqa: F401
from jit.test_cuda import TestCUDA # noqa: F401
from jit.test_python_builtins import TestPythonBuiltinOP # noqa: F401
from jit.test_typing import TestTyping # noqa: F401
from jit.test_hash import TestHash # noqa: F401
from jit.test_complex import TestComplex # noqa: F401
from jit.test_jit_utils import TestJitUtils # noqa: F401
from jit.test_scriptmod_ann import TestScriptModuleInstanceAttributeTypeAnnotation # noqa: F401
from jit.test_types import TestTypesAndAnnotation # noqa: F401
from jit.test_misc import TestMisc # noqa: F401
from jit.test_upgraders import TestUpgraders # noqa: F401
from jit.test_pdt import TestPDT # noqa: F401
from jit.test_tensor_creation_ops import TestTensorCreationOps # noqa: F401
from jit.test_module_apis import TestModuleAPIs # noqa: F401
from jit.test_script_profile import TestScriptProfile # noqa: F401
from jit.test_convert_activation import TestFunctionalToInplaceActivation, TestInplaceToFunctionalActivation # noqa: F401
from jit.test_parametrization import TestParametrization # noqa: F401
from jit.test_attr import TestGetDefaultAttr # noqa: F401
from jit.test_aten_pow import TestAtenPow # noqa: F401
from jit.test_optimize_for_mobile_preserve_debug_info import TestOptimizeForMobilePreserveDebugInfo # noqa: F401
from jit.test_union import TestUnion # noqa: F401
from jit.test_legacy_upgraders import TestLegacyUpgraders # noqa: F401
from jit.test_models import MnistNet
from jit.test_batch_mm import TestBatchMM # noqa: F401
from jit.test_dtype_analysis import TestDtypeAnalysis, TestDtypeCustomRulesCPU # noqa: F401
from jit.test_device_analysis import TestDeviceAnalysis # noqa: F401
from jit.test_dce import TestDCE # noqa: F401
from jit.test_sparse import TestSparse # noqa: F401
from jit.test_tensor_methods import TestTensorMethods # noqa: F401
# Torch
from torch import Tensor
from torch._C import TensorType, BoolType, parse_ir, _propagate_shapes
from torch.autograd import Variable
from torch.jit.annotations import BroadcastingList2, BroadcastingList3, Any # noqa: F401
from torch.nn.utils.rnn import PackedSequence
from torch.testing import FileCheck, make_tensor
import torch.autograd.profiler
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.nn as nn
import torch.nn.functional as F
# Testing utils
from torch.testing._internal import jit_utils
from torch.testing._internal.common_jit import check_against_reference
from torch.testing._internal.common_utils import run_tests, IS_WINDOWS, TEST_WITH_UBSAN, \
suppress_warnings, BUILD_WITH_CAFFE2, IS_SANDCASTLE, GRAPH_EXECUTOR, ProfilingMode, TestCase, \
freeze_rng_state, slowTest, TemporaryFileName, skipIfCompiledWithoutNumpy, \
enable_profiling_mode_for_profiling_tests, TEST_MKL, set_default_dtype, num_profiled_runs, \
skipIfCrossRef
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, disable_autodiff_subgraph_inlining, \
_trace, do_input_map, get_execution_plan, make_global, \
execWrapper, _inline_everything, _tmp_donotuse_dont_inline_everything, \
RUN_CUDA
from torch.testing._internal.jit_metaprogramming_utils import (
get_script_args,
create_input, unpack_variables,
additional_module_tests, EXCLUDE_SCRIPT_MODULES,
get_nn_module_name_from_kwargs, get_nn_mod_test_name, script_method_template)
from torch.testing._internal.common_nn import module_tests, new_module_tests, criterion_tests
# For testing truediv in python 2
from torch.testing._internal.test_module.future_div import div_int_future, div_float_future
from torch.testing._internal.test_module.no_future_div import div_int_nofuture, div_float_nofuture
# Standard library
from collections import defaultdict, namedtuple, OrderedDict
from copy import deepcopy
from itertools import product
from textwrap import dedent
from typing import List, Dict, NamedTuple, Optional, Tuple, Union
import copy
import functools
import inspect
import io
import itertools
import math
import numpy as np
import os
import pickle
import pickletools
import random
import re
import shutil
import string
import sys
import tempfile
import types
import typing
import unittest
import warnings
import zipfile
def canonical(graph):
return torch._C._jit_pass_canonicalize(graph).str(False)
def LSTMCellF(input, hx, cx, *params):
return LSTMCell(input, (hx, cx), *params)
def doAutodiffCheck(testname):
# TODO: setting false on test itself is not working
if "test_t_" in testname or testname == "test_t":
return False
if GRAPH_EXECUTOR == ProfilingMode.SIMPLE:
return False
if GRAPH_EXECUTOR == ProfilingMode.LEGACY:
return True
# these tests are disabled because BailOut nodes
# inserted by ProfilingExecutor interfere with
# subgraph slicing of Differentiable Graphs
test_exceptions = [
# functional
'test_nn_dropout',
'test_nn_log_softmax',
'test_nn_relu',
'test_nn_softmax',
'test_nn_threshold',
'test_nn_lp_pool2d',
'test_nn_lp_pool1d',
'test_nn_gumbel_softmax_hard',
'test_nn_gumbel_softmax',
'test_nn_multilabel_soft_margin_loss',
'test_nn_batch_norm',
'test_nn_max_pool2d_with_indices',
# AutogradJitGenerated
'test___rdiv___constant',
'test___rdiv___scalar_constant',
'test_split',
'test_split_dim',
'test_split_dim_neg0',
'test_split_size_list',
'test_split_size_list_dim',
'test_split_size_list_dim_neg0',
'test_split_with_sizes',
'test_split_with_sizes_dim',
'test_split_with_sizes_dim_neg0',
'test_split_with_sizes_size_0',
'test_nn_max_pool2d_with_indices',
]
if testname in test_exceptions:
return False
return True
# TODO: enable TE in PE when all tests are fixed
torch._C._jit_set_texpr_fuser_enabled(GRAPH_EXECUTOR == ProfilingMode.PROFILING)
torch._C._jit_set_profiling_executor(GRAPH_EXECUTOR != ProfilingMode.LEGACY)
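# Reference LSTM cell used by the tracing/scripting tests below: the fused
# linear layers produce the four gates (input, forget, cell, output), which are
# split and combined in the usual LSTM update.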
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
hx, cx = hidden
gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
def LSTMCellC(*args, **kwargs):
hy, cy = LSTMCellF(*args, **kwargs)
return torch.cat((hy, cy))
def LSTMCellS(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
# Code reference: https://github.com/pytorch/translate/blob/master/pytorch_translate/rnn_cell.py#L27:44
def MiLSTMCell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
Wx = x.mm(w_ih.t())
Uz = hx.mm(w_hh.t())
# Section 2.1 in https://arxiv.org/pdf/1606.06630.pdf
gates = alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias
# Same as LSTMCell after this point
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = ingate.sigmoid()
forgetgate = forgetgate.sigmoid()
cellgate = cellgate.tanh()
outgate = outgate.sigmoid()
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * cy.tanh()
return hy, cy
def get_lstm_inputs(device, training=False, seq_length=None):
input_shape = (3, 10) if seq_length is None else (seq_length, 3, 10)
input = torch.randn(*input_shape, dtype=torch.float, device=device, requires_grad=training)
hx = torch.randn(3, 20, dtype=torch.float, device=device, requires_grad=training)
cx = torch.randn(3, 20, dtype=torch.float, device=device, requires_grad=training)
module = nn.LSTMCell(10, 20).to(device, torch.float) # Just to allocate weights with correct sizes
if training:
params = tuple(module.parameters())
else:
params = tuple(p.requires_grad_(False) for p in module.parameters())
return (input, hx, cx) + params
def get_milstm_inputs(device, training=False):
minibatch = 3
input_size = 10
hidden_size = 20
x = torch.randn(minibatch, input_size, device=device, dtype=torch.float)
hx = torch.randn(minibatch, hidden_size, device=device, dtype=torch.float)
cx = torch.randn(minibatch, hidden_size, device=device, dtype=torch.float)
ih = torch.randn(4 * hidden_size, input_size, device=device, dtype=torch.float, requires_grad=training)
hh = torch.randn(4 * hidden_size, hidden_size, device=device, dtype=torch.float, requires_grad=training)
alpha = torch.randn(4 * hidden_size, dtype=torch.float, device=device, requires_grad=training)
ibeta = torch.randn(4 * hidden_size, dtype=torch.float, device=device, requires_grad=training)
hbeta = torch.randn(4 * hidden_size, dtype=torch.float, device=device, requires_grad=training)
bias = torch.randn(4 * hidden_size, dtype=torch.float, device=device, requires_grad=training)
return x, hx, cx, ih, hh, alpha, ibeta, hbeta, bias
def get_fn(file_name, script_path):
import importlib.util
spec = importlib.util.spec_from_file_location(file_name, script_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
fn = module.fn
return fn
def get_grad_executor(plan_state, diff_graph_idx=None, skip_check=False):
if diff_graph_idx is None:
nodes = list(plan_state.graph.nodes())
if not skip_check:
nodes = list(filter(lambda n : n.kind() != "prim::BailOut" and n.kind() != "prim::BailoutTemplate", nodes))
if len(nodes) == 1 or (len(nodes) == 2 and nodes[1].kind() == "prim::TupleConstruct"):
pass
elif len(nodes) == 2 and nodes[0].kind() == "prim::RequiresGradCheck" and nodes[1].kind() == "prim::If":
pass
else:
raise RuntimeError("Can't get a grad_executor for a non-differentiable graph")
grad_executors = list(plan_state.code.grad_executor_states())
return grad_executors[diff_graph_idx or 0]
def all_backward_graphs(script_module, diff_graph_idx=None):
# Note: for Python 2 the order seems to be unstable
ge_state = script_module.get_debug_state()
fwd_plan = get_execution_plan(ge_state)
grad_executor_state = get_grad_executor(fwd_plan, diff_graph_idx=diff_graph_idx)
bwd_plans = list(grad_executor_state.execution_plans.values())
return [p.graph.copy() for p in bwd_plans]
def backward_graph(script_module, diff_graph_idx=None, skip_check=False):
ge_state = script_module.get_debug_state()
fwd_plan = get_execution_plan(ge_state)
grad_executor_state = get_grad_executor(fwd_plan, diff_graph_idx=diff_graph_idx, skip_check=skip_check)
bwd_plan = get_execution_plan(grad_executor_state)
# Running JIT passes requires that we own the graph (with a shared_ptr).
# The debug state struct does not own its graph so we make
# jdma_control/backends/FTPBackend.py
"""Class for a JASMIN Data Migration App backend that targets a FTP server
using the Python ftplib
Creating a migration on a ftp server consists of the following
operations:
1. Create a directory for the group workspace and current batch id, as an
identifier
2. Upload a tarfile archive to the directory as part of the migrations
"""
import os
import ftplib
from django.db.models import Q
from jdma_control.backends.Backend import Backend
from jdma_control.scripts.config import read_backend_config
from jdma_control.backends.ConnectionPool import ConnectionPool
from jdma_control.backends import AES_tools
from jdma_control.scripts.common import get_archive_set_from_get_request
from jdma_control.scripts.common import get_verify_dir, get_staging_dir
import jdma_site.settings as settings
import multiprocessing
import signal
def get_completed_puts(backend_object):
"""Get all the completed puts for the FTP backend"""
# avoiding a circular dependency
from jdma_control.models import MigrationRequest, Migration, StorageQuota
# get the storage id
storage_id = StorageQuota.get_storage_index("ftp")
# get the decrypt key
key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)
# list of completed PUTs to return
completed_PUTs = []
# now loop over the PUT requests
put_reqs = MigrationRequest.objects.filter(
(Q(request_type=MigrationRequest.PUT)
| Q(request_type=MigrationRequest.MIGRATE))
& Q(stage=MigrationRequest.PUTTING)
& Q(migration__stage=Migration.PUTTING)
& Q(migration__storage__storage=storage_id)
)
for pr in put_reqs:
# decrypt the credentials
credentials = AES_tools.AES_decrypt_dict(key, pr.credentials)
try:
ftp = ftplib.FTP(host=backend_object.FTP_Settings["FTP_ENDPOINT"],
user=credentials['username'],
passwd=credentials['password'])
# loop over each archive in the migration
archive_set = pr.migration.migrationarchive_set.order_by('pk')
# counter for number of uploaded archives
n_up_arch = 0
for archive in archive_set:
# get the list of files for this archive
file_list = archive.get_file_names()['FILE']
n_files = 0
for file_path in file_list:
# object name is the file_path, without the gws prefix
object_name = (pr.migration.external_id +
"/" + file_path)
# switch to binary mode (TYPE I) so that SIZE returns a byte count
ftp.voidcmd('TYPE I')
try:
fsize = ftp.size(object_name)
if fsize is not None:
n_files += 1
except:
pass
# check if all files uploaded and then inc archive
if n_files == len(file_list):
n_up_arch += 1
if n_up_arch == pr.migration.migrationarchive_set.count():
completed_PUTs.append(pr.migration.external_id)
ftp.quit()
except Exception as e:
raise Exception(e)
return completed_PUTs
def get_completed_gets(backend_object):
# This is the same as ObjectStoreBackend::get_completed_gets
# That might change in the future, though
# avoiding a circular dependency
from jdma_control.models import MigrationRequest, StorageQuota
from jdma_control.models import MigrationFile, MigrationArchive
# get the storage id
storage_id = StorageQuota.get_storage_index("ftp")
# list of completed GETs to return
completed_GETs = []
# now loop over the GET requests
get_reqs = MigrationRequest.objects.filter(
(Q(stage=MigrationRequest.GETTING)
| Q(stage=MigrationRequest.VERIFY_GETTING))
& Q(migration__storage__storage=storage_id)
)
#
for gr in get_reqs:
# loop over each archive in the migration
archive_set, st_arch, n_arch = get_archive_set_from_get_request(gr)
# just need to see if the archive has been downloaded to the file system
# we know this when the file is present and the file size is equal to
# that stored in the database
n_completed_archives = 0
for archive in archive_set:
# Determine the staging directory. Three options:
# 1. The stage is VERIFY_GETTING->VERIFY DIR
# 2. The stage is GETTING and archive.packed->STAGING_DIR
# 3. The stage is GETTING and not archive.packed->target_path
# form the filepath
if gr.stage == MigrationRequest.VERIFY_GETTING:
staging_dir = get_verify_dir(backend_object, gr)
elif gr.stage == MigrationRequest.GETTING:
if archive.packed:
staging_dir = get_staging_dir(backend_object, gr)
else:
staging_dir = gr.target_path
# now loop over each file in the archive
n_completed_files = 0
file_name_list = archive.get_file_names(
filelist = gr.filelist
)
for file_name in file_name_list['FILE']:
file_path = os.path.join(staging_dir, file_name)
try:
# just rely on exception thrown if file does not exist yet
# now check for size
size = os.stat(file_path).st_size
# for packed archive check the archive size
if archive.packed:
n_completed_files += int(size == archive.size)
else:
# get the file from the db
file_obj = MigrationFile.objects.get(
path=file_name,
archive=archive
)
n_completed_files += int(size == file_obj.size)
except:
pass
# add if all files downloaded from archive
if n_completed_files == len(file_name_list):
n_completed_archives += 1
# if number completed is equal to number in archive set then the
# transfer has completed
if n_completed_archives == len(archive_set):
completed_GETs.append(gr.transfer_id)
return completed_GETs
def get_completed_deletes(backend_object):
"""Get all the completed deletes for the ObjectStore"""
# avoiding a circular dependency
from jdma_control.models import MigrationRequest, Migration, StorageQuota
# get the storage id
storage_id = StorageQuota.get_storage_index("ftp")
# get the decrypt key
key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)
# list of completed DELETEs to return
completed_DELETEs = []
# now loop over the PUT requests
del_reqs = MigrationRequest.objects.filter(
(Q(request_type=MigrationRequest.DELETE))
& Q(stage=MigrationRequest.DELETING)
& Q(migration__storage__storage=storage_id)
)
for dr in del_reqs:
# decrypt the credentials
credentials = AES_tools.AES_decrypt_dict(key, dr.credentials)
try:
# create a connection to the object store
ftp = ftplib.FTP(host=backend_object.FTP_Settings["FTP_ENDPOINT"],
user=credentials['username'],
passwd=credentials['password'])
# if the external_id directory has been deleted then the
# deletion has completed
dir_list = ftp.mlsd("/")
found = False
for d in dir_list:
# check whether the entry is a directory whose name contains the external id
if d[1]['type'] == 'dir' and dr.migration.external_id in d[0]:
found = True
break
if not found:
completed_DELETEs.append(dr.migration.external_id)
except Exception as e:
raise Exception(e)
return completed_DELETEs
class FTP_DownloadProcess(multiprocessing.Process):
"""Download thread for FTP backend."""
def setup(self,
filelist,
external_id,
req_number,
target_dir,
credentials,
backend_object,
thread_number
):
self.filelist = filelist
self.conn = backend_object.connection_pool.find_or_create_connection(
backend_object,
req_number = req_number,
credentials = credentials,
mode = "download",
thread_number = thread_number,
uid = "GET")
# need these to carry out the transfer
self.external_id = external_id
self.req_number = req_number
self.target_dir = target_dir
# need these to close the connection
self.backend_object = backend_object
self.thread_number = thread_number
def run(self):
"""Download all the files in the sub file list."""
# change the working directory to the external batch id
try:
self.conn.cwd("/" + self.external_id)
for filename in self.filelist:
# build the local download path under the target directory
download_file_path = os.path.join(self.target_dir, filename)
# check that the the sub path exists
sub_path = os.path.split(download_file_path)[0]
# The "it's better to ask forgiveness method!"
try:
os.makedirs(sub_path)
except:
pass
# open the download file
fh = open(download_file_path, 'wb')
self.conn.retrbinary("RETR " + filename, fh.write)
fh.close()
except SystemExit:
pass
def exit(self):
"""FTP Download exit handler."""
self.backend_object.connection_pool.close_connection(
self.backend_object,
req_number = self.req_number,
mode = "download",
thread_number = self.thread_number,
uid = "GET"
)
class FTP_UploadProcess(multiprocessing.Process):
"""Upload thread for FTP backend."""
def setup(self,
filelist,
external_id,
req_number,
prefix,
credentials,
backend_object,
thread_number
):
self.filelist = filelist
self.conn = backend_object.connection_pool.find_or_create_connection(
backend_object,
req_number = req_number,
credentials = credentials,
mode = "upload",
thread_number = thread_number,
uid = "PUT")
# need these to carry out the transfer
self.req_number = req_number
self.external_id = external_id
self.prefix = prefix
# need these to close the connection
self.backend_object = backend_object
self.thread_number = thread_number
def run(self):
# upload the files in the file_list one at a time
# change the working directory to the external batch id
try:
self.conn.cwd("/" + self.external_id)
for filename in self.filelist:
# compute the remote file name relative to the archive prefix
ftp_file_name = os.path.relpath(filename, self.prefix)
# open the file from the archive_path in binary mode
fh = open(filename, 'rb')
self.conn.storbinary("STOR " + ftp_file_name, fh)
fh.close()
except SystemExit:
pass
def exit(self):
"""FTP Upload exit handler."""
if settings.TESTING:
print (" Exit FTP_UploadProcess")
self.backend_object.connection_pool.close_connection(
self.backend_object,
req_number = self.req_number,
mode = "upload",
thread_number = self.thread_number,
uid = "PUT"
)
class FTP_DeleteProcess(multiprocessing.Process):
"""Delete thread for FTP backend."""
def setup(self,
filelist,
external_id,
req_number,
credentials,
backend_object,
thread_number
):
self.filelist = filelist
self.external_id = external_id
self.conn = backend_object.connection_pool.find_or_create_connection(
backend_object,
req_number = req_number,
credentials = credentials,
mode = "upload",
thread_number = thread_number,
uid = "DELETE")
# need these to carry out the transfer
self.req_number = req_number
# need these to close the connection
self.backend_object = backend_object
self.thread_number = thread_number
def run(self):
# we can delete multiple objects, up to 1000 at a time
# (the calling function passes at most 1000)
self.conn.cwd("/" + self.external_id)
try:
for filepath in self.filelist:
# remove the file
try:
self.conn.delete(filepath)
except ftplib.error_perm as e:
# a 550 reply means the file is already gone; ignore it
if not '550' in e.args[0]:
raise Exception(e)
else:
continue
except SystemExit:
pass
def exit(self):
"""FTP Delete exit handler."""
if settings.TESTING:
print (" Exit FTP_DeleteProcess")
self.backend_object.connection_pool.close_connection(
self.backend_object,
req_number = self.req_number,
mode = "upload",
thread_number = self.thread_number,
uid = "DELETE"
)
class FTPBackend(Backend):
"""Class for a JASMIN Data Migration App backend which targets a FTP server
with Python ftplib .
Inherits from Backend class and overloads inherited functions."""
def __init__(self):
"""Need to set the verification directory and | |
'tuf.roledb.py', raise an exception.
if not tuf.roledb.role_exists(rolename):
raise tuf.Error(repr(rolename) + ' not in "tuf.roledb".')
# Keep track of the keys loaded to avoid duplicates.
keyids = []
# Generate keys for the keyids listed by the role being processed.
for keyid in tuf.roledb.get_role_keyids(rolename):
key = tuf.keydb.get_key(keyid)
# If 'key' is an RSA key, it would conform to 'tuf.formats.RSAKEY_SCHEMA',
# and have the form:
# {'keytype': 'rsa',
# 'keyid': keyid,
# 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
# 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
keyid = key['keyid']
if keyid not in keydict:
# This appears to be a new keyid. Generate the key for it.
if key['keytype'] in ['rsa', 'ed25519']:
keytype = key['keytype']
keyval = key['keyval']
keydict[keyid] = \
tuf.keys.format_keyval_to_metadata(keytype, keyval, private=False)
# This is not a recognized key. Raise an exception.
else:
raise tuf.Error('Unsupported keytype: '+keyid)
# Do we have a duplicate?
if keyid in keyids:
raise tuf.Error('Same keyid listed twice: '+keyid)
# Add the loaded keyid for the role being processed.
keyids.append(keyid)
# Generate and store the role data belonging to the processed role.
role_threshold = tuf.roledb.get_role_threshold(rolename)
role_metadata = tuf.formats.make_role_metadata(keyids, role_threshold)
roledict[rolename] = role_metadata
# Generate the root metadata object.
root_metadata = tuf.formats.RootFile.make_metadata(version, expiration_date,
keydict, roledict,
consistent_snapshot)
return root_metadata
def generate_targets_metadata(targets_directory, target_files, version,
expiration_date, delegations=None,
write_consistent_targets=False):
"""
<Purpose>
Generate the targets metadata object. The targets in 'target_files' must
exist at the same path they should on the repo. 'target_files' is a list of
targets. The 'custom' field of the targets metadata is not currently
supported.
<Arguments>
targets_directory:
The directory containing the target files and directories of the
repository.
target_files:
The target files tracked by 'targets.json'. 'target_files' is a
dictionary of target paths that are relative to the targets directory and
an optional custom value (e.g., {'file1.txt': {'custom_data: 0755},
'Django/module.py': {}}).
version:
The metadata version number. Clients use the version number to
determine if the downloaded version is newer than the one currently
trusted.
expiration_date:
The expiration date of the metadata file. Conformant to
'tuf.formats.ISO8601_DATETIME_SCHEMA'.
delegations:
The delegations made by the targets role to be generated. 'delegations'
must match 'tuf.formats.DELEGATIONS_SCHEMA'.
write_consistent_targets:
Boolean that indicates whether file digests should be prepended to the
target files.
<Exceptions>
tuf.FormatError, if an error occurred trying to generate the targets
metadata object.
tuf.Error, if any of the target files cannot be read.
<Side Effects>
The target files are read and file information generated about them.
<Returns>
A targets metadata object, conformant to 'tuf.formats.TARGETS_SCHEMA'.
"""
# Do the arguments have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.PATH_SCHEMA.check_match(targets_directory)
tuf.formats.PATH_FILEINFO_SCHEMA.check_match(target_files)
tuf.formats.METADATAVERSION_SCHEMA.check_match(version)
tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
tuf.formats.BOOLEAN_SCHEMA.check_match(write_consistent_targets)
if delegations is not None:
tuf.formats.DELEGATIONS_SCHEMA.check_match(delegations)
# Store the file attributes of targets in 'target_files'. 'filedict',
# conformant to 'tuf.formats.FILEDICT_SCHEMA', is added to the targets
# metadata object returned.
filedict = {}
# Ensure the user is aware of a non-existent 'target_directory', and convert
# it to its absolute path, if it exists.
targets_directory = _check_directory(targets_directory)
# Generate the fileinfo of all the target files listed in 'target_files'.
for target, custom in six.iteritems(target_files):
# The root-most folder of the targets directory should not be included in
# target paths listed in targets metadata.
# (e.g., 'targets/more_targets/somefile.txt' -> 'more_targets/somefile.txt')
relative_targetpath = target
# Note: join() discards 'targets_directory' if 'target' contains a leading
# path separator (i.e., is treated as an absolute path).
target_path = os.path.join(targets_directory, target.lstrip(os.sep))
# Ensure all target files listed in 'target_files' exist. If just one of
# these files does not exist, raise an exception.
if not os.path.exists(target_path):
message = repr(target_path) + ' cannot be read. Unable to generate ' +\
'targets metadata.'
raise tuf.Error(message)
# Add 'custom' if it has been provided. Custom data about the target is
# optional and will only be included in metadata (i.e., a 'custom' field in
# the target's fileinfo dictionary) if specified here.
custom_data = None
if len(custom):
custom_data = custom
filedict[relative_targetpath] = \
get_metadata_fileinfo(target_path, custom_data)
# Create hard links for 'target_path' if consistent hashing is enabled.
if write_consistent_targets:
for target_digest in filedict[relative_targetpath]['hashes']:
dirname, basename = os.path.split(target_path)
digest_filename = target_digest + '.' + basename
digest_target = os.path.join(dirname, digest_filename)
if not os.path.exists(digest_target):
logger.warning('Hard linking target file to ' + repr(digest_target))
os.link(target_path, digest_target)
# Generate the targets metadata object.
targets_metadata = tuf.formats.TargetsFile.make_metadata(version,
expiration_date,
filedict,
delegations)
return targets_metadata
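# Hypothetical usage sketch of generate_targets_metadata(); the paths, version
# number and expiration date below are illustrative placeholders only:
#
#   target_files = {'file1.txt': {}, 'django/module.py': {}}
#   targets_metadata = generate_targets_metadata(
#       'repository/targets', target_files, version=2,
#       expiration_date='2030-01-01T00:00:00Z')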
def generate_snapshot_metadata(metadata_directory, version, expiration_date,
root_filename, targets_filename,
consistent_snapshot=False):
"""
<Purpose>
Create the snapshot metadata. The minimum metadata must exist
(i.e., 'root.json' and 'targets.json'). This will also look through
the 'targets/' directory in 'metadata_directory' and the resulting
snapshot file will list all the delegated roles.
<Arguments>
metadata_directory:
The directory containing the 'root.json' and 'targets.json' metadata
files.
version:
The metadata version number. Clients use the version number to
determine if the downloaded version is newer than the one currently
trusted.
expiration_date:
The expiration date of the metadata file.
Conformant to 'tuf.formats.ISO8601_DATETIME_SCHEMA'.
root_filename:
The filename of the top-level root role. The hash and file size of this
file is listed in the snapshot role.
targets_filename:
The filename of the top-level targets role. The hash and file size of
this file is listed in the snapshot role.
consistent_snapshot:
Boolean. If True, a file digest is expected to be prepended to the
filename of any target file located in the targets directory. Each digest
is stripped from the target filename and listed in the snapshot metadata.
<Exceptions>
tuf.FormatError, if the arguments are improperly formatted.
tuf.Error, if an error occurred trying to generate the snapshot metadata
object.
<Side Effects>
The 'root.json' and 'targets.json' files are read.
<Returns>
The snapshot metadata object, conformant to 'tuf.formats.SNAPSHOT_SCHEMA'.
"""
# Do the arguments have the correct format?
# This check ensures arguments have the appropriate number of objects and
# object types, and that all dict keys are properly named.
# Raise 'tuf.FormatError' if the check fails.
tuf.formats.PATH_SCHEMA.check_match(metadata_directory)
tuf.formats.METADATAVERSION_SCHEMA.check_match(version)
tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(expiration_date)
tuf.formats.PATH_SCHEMA.check_match(root_filename)
tuf.formats.PATH_SCHEMA.check_match(targets_filename)
tuf.formats.BOOLEAN_SCHEMA.check_match(consistent_snapshot)
metadata_directory = _check_directory(metadata_directory)
# Retrieve the fileinfo of 'root.json' and 'targets.json'. This file
# information includes data such as file length, hashes of the file, etc.
filedict = {}
filedict[ROOT_FILENAME] = get_metadata_fileinfo(root_filename)
filedict[TARGETS_FILENAME] = get_metadata_fileinfo(targets_filename)
# Add compressed versions of the 'targets.json' and 'root.json' metadata,
# if they exist.
for extension in SUPPORTED_COMPRESSION_EXTENSIONS:
compressed_root_filename = root_filename+extension
compressed_targets_filename = targets_filename+extension
# If the compressed versions of the root and targets metadata are found,
# add their file attributes to 'filedict'.
if os.path.exists(compressed_root_filename):
filedict[ROOT_FILENAME+extension] = \
get_metadata_fileinfo(compressed_root_filename)
if os.path.exists(compressed_targets_filename):
filedict[TARGETS_FILENAME+extension] = \
get_metadata_fileinfo(compressed_targets_filename)
# Walk the 'targets/' directory and generate the fileinfo of all the role
# files found. This information is stored in the 'meta' field of the snapshot
# metadata object.
targets_metadata = os.path.join(metadata_directory, 'targets')
if os.path.exists(targets_metadata) and os.path.isdir(targets_metadata):
for directory_path, junk_directories, files in os.walk(targets_metadata):
# 'files' here is a list of file names.
for basename in files:
metadata_path = os.path.join(directory_path, basename)
metadata_name = \
metadata_path[len(metadata_directory):].lstrip(os.path.sep)
# Strip the digest if 'consistent_snapshot' is True.
# Example: 'targets/unclaimed/13df98ab0.django.json' -->
# 'targets/unclaimed/django.json'
metadata_name, digest_junk = \
_strip_consistent_snapshot_digest(metadata_name, consistent_snapshot)
# All delegated roles are added to the snapshot file, including
# compressed versions.
for metadata_extension in METADATA_EXTENSIONS:
if metadata_name.endswith(metadata_extension):
rolename = metadata_name[:-len(metadata_extension)]
# Obsolete role files may still be found. Ensure only roles loaded
# in the roledb are included in the snapshot metadata.
if tuf.roledb.role_exists(rolename):
filedict[metadata_name] = get_metadata_fileinfo(metadata_path)
# Generate the snapshot metadata object.
snapshot_metadata = tuf.formats.SnapshotFile.make_metadata(version,
expiration_date,
filedict)
return snapshot_metadata
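# Illustrative usage of generate_snapshot_metadata (the paths and values below are
# hypothetical and only show how the arguments fit together):
#
#   snapshot_metadata = generate_snapshot_metadata(
#       '/path/to/repository/metadata',
#       version=1,
#       expiration_date='2030-01-01T00:00:00Z',
#       root_filename='/path/to/repository/metadata/root.json',
#       targets_filename='/path/to/repository/metadata/targets.json',
#       consistent_snapshot=False)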
def generate_timestamp_metadata(snapshot_filename, version,
expiration_date, compressions=()):
"""
<Purpose>
Generate the timestamp metadata object. The 'snapshot.json' file must
exist.
<Arguments>
snapshot_filename:
The required filename of the snapshot metadata file. The timestamp role
needs to calculate the file size and hash of this file.
version:
The timestamp's version number. | |
= bed_dict['all']['real'].cat(bed_dict[region]['real'], stream=True, postmerge=False).saveas()
#saves offsets so after shuffling the offsets can be readjusted
offset_dict = get_offsets_bed12(bed_dict[region]['real'])
for i in range(nrand):
random_intervals = bed_dict[region]['real'].shuffle(genome=short_species, incl=bedtracks[region].fn).sort()
random_intervals = fix_shuffled_strand(random_intervals, bedtracks[region].fn)
random_intervals = adjust_offsets(random_intervals, offset_dict)
bed_dict[region]['rand'][i] = random_intervals.saveas()
if i not in bed_dict['all']['rand']:
bed_dict['all']['rand'][i] = bed_dict[region]['rand'][i]
else:
bed_dict['all']['rand'][i] = bed_dict['all']['rand'][i].cat(bed_dict[region]['rand'][i], stream=True, postmerge=False)
#if there are no more clusters to assign stop trying
if no_overlapping_count == 0:
break
# print "After assigning %d un-categorized regions" % len(remaining_clusters)
if len(remaining_clusters) > 0:
bed_dict['uncatagorized'] = {'real': remaining_clusters.sort(stream=True).saveas()}
bed_dict = save_bedtools(bed_dict, clusters, assigned_dir)
return bed_dict
def get_offsets_bed12(bedtool):
"""
Gets offsets for each cluster in a bed12 file (CLIPper formatted bed file)
Offsets are the difference between the wide peak and the narrow (predicted binding site)
This will break if we analyze different types of data
tool - bedtool
returns offset for each cluster
"""
if bedtool.field_count() < 8:
print "Not Valid bed12 file, continuing processing, some things may be strange"
return None
try:
offset_dict = {}
for interval in bedtool:
if interval.strand == "+":
offset = int(interval[6]) - int(interval[1])
else:
offset = int(interval[2]) - int(interval[7])
offset_dict[interval.name] = offset
return offset_dict
except Exception:
print("Not a valid bed12 file, continuing processing, some things may be strange; also this will cause a file leak in pybedtools, watch out")
return None
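# Worked example of the offset calculation above (hypothetical intervals, for illustration):
# a '+' strand bed12 interval with start (field 1) = 100 and thickStart (field 6) = 130
# gives offset = 130 - 100 = 30; a '-' strand interval with stop (field 2) = 500 and
# thickEnd (field 7) = 470 gives offset = 500 - 470 = 30.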
def adjust_offsets(bedtool, offsets=None):
"""
For finding motif position relative to the center of peaks
Handles merging overlapping peaks, merge > bed12 -> bed6
picks the first offset from the merged peak and assigns that to
the new peaks
Input:
tool - a bed tool (bed12) object
offset - dict of key:peak, value:int
Adjusts the offsets for each transcript in a bedtool
"""
if offsets is None:
return bedtool
clusters = []
for interval in bedtool:
#the "," separates the names of two merged locations
if "," in interval.name and interval.name not in offsets:
offset = offsets[interval.name.split(",")[0]]
else:
offset = offsets[interval.name]
if interval.strand == "+":
thick_start = interval.start + offset
thick_stop = thick_start + 4
else:
thick_stop = interval.stop - offset
thick_start = thick_stop - 4
clusters.append("\t".join([str(x) for x in (interval.chrom,
interval.start,
interval.stop,
interval.name,
interval.score,
interval.strand,
thick_start,
thick_stop)]))
return pybedtools.BedTool("\n".join(clusters), from_string=True)
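# Illustrative example of adjust_offsets (hypothetical values): given
# offsets = {'cluster_1': 30} and a '+' strand interval named 'cluster_1' with start = 100,
# the adjusted record gets thick_start = 100 + 30 = 130 and thick_stop = 134, i.e. a 4-bp
# window at the predicted binding site inside the wider peak.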
def generate_region_dict(bedtool):
region_dict = defaultdict(list)
for interval in bedtool:
region_dict[interval.name].append(interval)
for gene in region_dict.keys():
if region_dict[gene][0].strand == "-":
region_dict[gene].reverse()
return region_dict
def convert_to_transcript(bedtool):
"""
Converts a bed file to be based only on transcripts (transcripts are defined by chromosome)
returns modified dict
"""
return bedtool.each(name_to_chrom).sort().saveas()
def convert_to_mrna(bedtool, exon_dict):
"""
converts transcript dict into mRNA locations given a dict of exons
bedtool - generated from convert_to_transcript (a bedtool in transcript coordinates)
exon_dict - dict of { genes : [exons] }
"""
return bedtool.each(convert_to_mRNA_position, exon_dict).filter(lambda x: x.chrom != "none").sort().saveas()
def invert_neg(interval):
interval[-1] = str(int(interval[-1]) * -1)
return interval
def get_feature_distances(bedtool, features, exons):
"""
:param bedtool: bedtool of peaks (only works with CLIPper-defined peaks)
:param features: dict of features to find distance from
:param exons: bedtool of exons, used to convert genomic coordinates to mrna coordinates
:return:
"""
exon_dict = generate_region_dict(exons)
bed_center = bedtool.each(small_peaks).saveas()
beds_center_transcripts = convert_to_transcript(bed_center)
beds_center_transcripts_mrna = convert_to_mrna(beds_center_transcripts, exon_dict)
features_transcript = {name: convert_to_transcript(bedtool) for name, bedtool in features.items()}
features_mrna = {name: convert_to_mrna(bedtool, exon_dict) for name, bedtool in features_transcript.items()}
#for pre-mrna
features_transcript_closest = defaultdict(lambda: None)
for name, feature in features_transcript.items():
features_transcript_closest[name] = beds_center_transcripts.closest(feature, s=True, D="b").filter(lambda x: x[-2] != ".").saveas()
#for mrna
features_mrna_closest = defaultdict(lambda: None)
for name, feature in features_mrna.items():
features_mrna_closest[name] = beds_center_transcripts_mrna.closest(feature, s=True, D="ref").filter(lambda x: x[-2] != ".").each(invert_neg).saveas()
return features_transcript_closest, features_mrna_closest
def get_region_distributions(bedtool, regions):
"""
Gets location of each peak across different regions
:param bedtool: pybedtool of peaks
:param regions: dict of region name -> bedtool of regions to compute distributions over
:return:
"""
distributions = {}
for name, region in regions.items():
region_dict = generate_region_dict(region)
distributions[name] = get_distributions(bedtool, region_dict)
return distributions
def get_distributions(bedtool, region_dict):
"""
Gets distributions from RNA_position function
bedtool - clipper bed file to generate distributions from
region_dict - generate_region_dict dict defining regions
"""
exon_distributions = []
total_distributions = []
num_errors = []
num_missed = []
bound_in_regions = []
genes = []
for interval in bedtool:
try:
#will need to redefine this to use intervals
exon, total, bound_in_region, gene = RNA_position_interval(interval, region_dict)
if total is not None:
total_distributions.append(total)
exon_distributions.append(exon)
bound_in_regions.append(bound_in_region)
genes.append(gene)
else:
num_missed.append(interval)
except Exception as e:
print(e)
num_errors.append(interval)
return {'individual': exon_distributions, 'total': total_distributions, "gene_ids": genes,
"region_numbers": bound_in_regions, 'errors': num_errors, 'missed': num_missed}
def RNA_position_interval(interval, location_dict):
"""
makes mrna and pre-mrna peak_center figure
interval - single interval
location_dict - dict of {gene_id : list of regions (intervals) ordered along the transcript}
as_structure_dict - from build AS structure dict
will return distribution across entire region + just across specific region
Might be able to use my ribo-seq stuff for genic -> transcriptomic location conversion
this is based on the AS structure, which provides sequences ordered so the first exon is the first exon of the gene,
not the first in the chromosome (as GFF does). THIS WILL NOT WORK WITH RAW GFF DATA
"""
#think about turning the location_dict into a gff file
#gets thickstart and stop
peak_center = (int(interval[6]) + int(interval[7])) / 2
try:
gene = interval.name.split("_")[0]
except:
#takes first gene if there are multiple overlapping
gene = interval.name.split(";")[0].split("_")[0]
if gene not in location_dict:
raise KeyError(gene + """ not in current region dict, ignoring cluster (not to worry, this error gets thrown if the peak isn't in the region being looked at, or you have your annotations wrong, double check you're using a supported genome or make a regions file yourself""")
if not interval.strand == location_dict[gene][0].strand:
raise ValueError("strands not the same, there is some issue with gene annotations")
total_length = float(sum(region.length for region in location_dict[gene]))
running_length = 0
for region_number, region in enumerate(location_dict[gene]):
length = float(region.length)
if int(region.start) <= peak_center <= int(region.stop):
if interval.strand == "+":
total_location = running_length + (peak_center - region.start)
total_fraction = np.round((total_location / total_length), 3)
individual_fraction = (peak_center - region.start) / length
elif interval.strand == "-":
total_location = running_length + (region.stop - peak_center)
total_fraction = total_location / total_length
individual_fraction = (region.stop - peak_center) / length
else:
raise ValueError("Strand not correct strand is %s" % interval.strand)
#probably not necessary
if total_fraction < 0 or total_fraction > 1:
raise ValueError("total_fraction is bad: %f, gene %s, total_length: %s, total_location: %s" % (total_fraction,
gene,
total_length,
total_location))
return individual_fraction, total_fraction, region_number + 1, gene
running_length += length
#clusters fall outside of regions integrated
return None, None, None, None
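# Worked example of RNA_position_interval (hypothetical gene, for illustration): a '+' strand
# gene with two regions of lengths 100 and 200 (total_length = 300) and a peak whose center
# falls 50 bases into the second region gives total_location = 100 + 50 = 150,
# total_fraction = 0.5, individual_fraction = 50 / 200 = 0.25, and region_number = 2.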
def get_closest_exon_types(bedtool, as_structure_dict):
"""
For each peak gets the type of exon nearest to the peak (AS, CE, etc.)
:param bedtool: Bedtool of peaks
:param as_structure_dict: AS Structure dict
:return:
"""
#get closest features to peaks
bedtool_list = []
for name, gene in as_structure_dict.items():
for exon, type in zip(gene['exons'].values(), gene['types'].values()):
start, stop = map(int, exon.split("-"))
bedtool_list.append([gene['chrom'], start, stop, name, 0, gene['strand'], type])
feature_tool = pybedtools.BedTool(bedtool_list).sort()
return Counter([interval[-1] for interval in bedtool.closest(feature_tool, s=True)])
#TODO Start small module for getting read densities
def get_bam_coverage(bamfile):
"""
Given a bam file returns a properly covered htseq coverage file (this is slow)
"""
bam = Robust_BAM_Reader(bamfile)
coverage = HTSeq.GenomicArray("auto", typecode="i", stranded=True)
for read in bam:
if read.aligned:
for cigop in read.cigar:
if cigop.type != "M":
continue
coverage[cigop.ref_iv] += 1
return coverage
def get_bam_counts(bamfile):
"""
Given a bam file returns a coverage file with just count of the number of reads that start at a specific location
:param bamfile:
:return HTSeq coverage :
"""
bam = Robust_BAM_Reader(bamfile)
coverage = HTSeq.GenomicArray("auto", typecode="i")
for read in bam:
read.iv.length = 1
if read.aligned:
coverage[read.iv] += 1
return coverage
def bed_to_genomic_interval(bed):
for interval in bed:
yield HTSeq.GenomicInterval(str(interval.chrom), interval.start, interval.stop + 1, str(interval.strand))
def get_densities(intervals, coverage):
for interval in bed_to_genomic_interval(intervals):
density = np.fromiter(coverage[interval], dtype="i")
if interval.strand == "-":
density = density[::-1]
yield density
def adjust_width(interval, width=250):
interval.start -= width
interval.stop += width
return interval
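# Sketch of how these helpers compose (file names are hypothetical, for illustration only):
#
#   coverage = get_bam_counts('reads.bam')               # read-start counts per position
#   peaks = pybedtools.BedTool('peaks.bed')
#   widened = peaks.each(adjust_width, width=250).saveas()
#   densities = list(get_densities(widened, coverage))   # one numpy array per peak, 5' -> 3'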
def cluster_peaks(bedtool, coverage, k=16):
"""
Given a bedtool of peaks, positive and negative bigwig files
gets read densities around peaks, normalizes them and outputs clusters and dataframe
"""
#TODO change small peaks so they can automatically fall back on | |
import os
import numpy as np
from warnings import warn
from Utility import timer
import shutil
class BaseProperties:
def __init__(self, casename, casedir='.', filename_pre='', filename_sub='', ensemblefolder_name='Ensemble', result_folder='Result', timecols='infer', time_kw='ime', force_remerge=False,
debug=False, **kwargs):
self.casename, self.casedir = casename, casedir
self.case_fullpath = casedir + '/' + casename + '/'
self.filename_pre, self.filename_sub = filename_pre, filename_sub
# Check if result folder is made
self.result_dir = self.case_fullpath + result_folder + '/'
os.makedirs(self.result_dir, exist_ok=True)
self.ensemble_folderpath = self.case_fullpath + ensemblefolder_name + '/'
self.timecols = (timecols,) if isinstance(timecols, int) else timecols
self._mergeTimeDirectories(time_kw=time_kw, force_remerge=force_remerge, **kwargs)
self.times_all = self._readTimes(**kwargs)
self.data, self.filenames = {}, []
self.debug = debug
def _ensureTupleInput(self, input):
input_tuple = (input,) if isinstance(input, (str, np.ndarray, int)) else input
# If input[0] is '*' or 'all', get all file names
# input_tuple = os.listdir(self.ensemble_folderpath) if input_tuple[0] in ('*', 'all') else input_tuple
if input_tuple[0] in ('*', 'all'):
# Ignore hidden files
input_tuple = tuple([f for f in os.listdir(self.ensemble_folderpath) if not f.startswith('.')])
return input_tuple
def _readTimes(self, norepeat=True, **kwargs):
filenames = os.listdir(self.ensemble_folderpath)
# In case of file e.g. hLevelsCell that doesn't incl. times
try:
times_all = np.genfromtxt(self.ensemble_folderpath + '/' + filenames[0])[:, self.timecols[0]]
except IndexError:
times_all = np.genfromtxt(self.ensemble_folderpath + '/' + filenames[1])[:, self.timecols[0]]
if norepeat:
times_all = np.unique(times_all)
return times_all
def _mergeTimeDirectories(self, trim_overlaptime=True, time_kw='ime', force_remerge=False, excl_files=None):
def __numberStringToFloat(number_str):
return float(number_str)
# Check whether the directory is made
try:
os.makedirs(self.ensemble_folderpath)
except OSError:
# If folder not empty, abort
if os.listdir(self.ensemble_folderpath) and not force_remerge:
print('\n{0} files already exist'.format(self.ensemble_folderpath))
return
# Available time directories (excluding Ensemble and Result) and file names
timedirs = os.listdir(self.case_fullpath)[:-2]
# Sort the time directories
timedirs.sort(key=__numberStringToFloat)
filenames = os.listdir(self.case_fullpath + timedirs[0])
# In case excl_files is provided, remove it from filenames
if excl_files is not None:
excl_files = self._ensureTupleInput(excl_files)
for i in range(len(excl_files)):
try:
filenames.remove(excl_files[i])
except ValueError:
warn('\n' + self.casename + ' does not have ' + excl_files[i] + ' to exclude!', stacklevel=2)
pass
# Initialize ensemble files
file_ensembles = {}
for i in range(len(filenames)):
file_ensembles[filenames[i]] = open(self.ensemble_folderpath + filenames[i], "w")
if self.timecols == 'infer':
self.timecols = []
for filename in filenames:
with open(self.case_fullpath + timedirs[0] + '/' + filename, 'r') as file:
header = (file.readline()).split()
self.timecols.append(header.index(list(filter(lambda kw: time_kw in kw, header))[0]))
else:
self.timecols *= len(filenames)
# Go through time folders and append files to ensemble
# Excluding Ensemble folder
for i in range(len(timedirs)):
# If trim overlapped time and not in last time directory
if trim_overlaptime and i < len(timedirs) - 1:
knowntime_cols, times, itrim = {}, {}, {}
# Go through all time columns of each file in order
for j in range(len(self.timecols)):
# Retrieve list of time and trim index information for jth file in ith time directory
# After each retrieval, add this time column to known time column dictionary as key
# and corresponding file name as value
if str(self.timecols[j]) not in knowntime_cols.keys():
try:
times[filenames[j]] = np.genfromtxt(self.case_fullpath + timedirs[i] + '/' + filenames[j])[:, self.timecols[j]]
# In case the last line wasn't written properly,
# which means the simulation was probably aborted, discard the last line
except ValueError:
times[filenames[j]] = np.genfromtxt(self.case_fullpath + timedirs[i] + '/' + filenames[j], skip_footer = 1)[:, self.timecols[j]]
# Index at which trim should start for this file
itrim[filenames[j]] = np.searchsorted(times[filenames[j]], np.float_(timedirs[i + 1]))
# Add this time column to known time column list
knowntime_cols[str(self.timecols[j])] = filenames[j]
# If current time column already exists in remembered dictionary,
# then skip it and retrieve the file name the last time it had this number of time column
else:
times[filenames[j]] = times[knowntime_cols[str(self.timecols[j])]]
itrim[filenames[j]] = itrim[knowntime_cols[str(self.timecols[j])]]
# Go through each file in this time directory
for filename in filenames:
# If trim overlapped time and not last time directory and trim is indeed needed
if trim_overlaptime and i < len(timedirs) - 1 and itrim[filename] < (len(times[filename]) - 1):
with open(self.case_fullpath + timedirs[i] + '/' + filename, 'r') as file:
# Filter out empty lines before itrim indices can be mapped
lines = list(filter(None, (line.rstrip() for line in file)))
print('\nTrimming overlapped time and adding {0} from {1} to Ensemble...'.format(filename, timedirs[i]))
# Writelines support writing a 1D list, since lines is 2D,
# join each row with "\n"
# Note: the header of 2nd file onward will still be written in ensemble,
# just that when reading file into array using numpy, the headers should automatically be ignored
# since it starts with "#"
# Write the 1st line as empty new line so that the 1st line of lines is not on the same line as last line of file_ensembles
file_ensembles[filename].writelines("\n")
file_ensembles[filename].writelines("\n".join(lines[:itrim[filename] + 1]))
# Otherwise, append this file directly to Ensemble
else:
print('\nAdding {0} from {1} to Ensemble...'.format(filename, timedirs[i]))
# Again, write the 1st line as empty new line to avoid 1st line of next file being on the same line of old file
file_ensembles[filename].writelines("\n")
file_ensembles[filename].write(open(self.case_fullpath + timedirs[i] + '/' + filename).read())
print("\nMerged time directories for " + str(self.casename) + " files are stored at:\n " + str(self.ensemble_folderpath))
def _selectTimes(self, starttime=None, stoptime=None):
starttime = self.times_all[0] if starttime is None else starttime
stoptime = self.times_all[-1] if stoptime is None else stoptime
# Bisection left to find actual starting and ending time and their indices
(istart, istop) = np.searchsorted(self.times_all, (starttime, stoptime))
# If stoptime larger than any time, istop = len(times_all)
istop = min(istop, len(self.times_all) - 1)
starttime_real, stoptime_real = self.times_all[istart], self.times_all[istop]
times_selected = self.times_all[istart:istop + 1]
return times_selected, starttime_real, stoptime_real, istart, istop
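# Illustrative behaviour of _selectTimes (hypothetical values): with
# self.times_all = [0.0, 0.5, 1.0, 1.5, 2.0], calling self._selectTimes(starttime=0.5,
# stoptime=1.5) returns ([0.5, 1.0, 1.5], 0.5, 1.5, 1, 3), i.e. the selected times, the
# actual start/stop times found, and their indices.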
def readPropertyData(self, filenames=('*',), skiprow=0, skipcol=0, skipfooter=0):
self.filenames = self._ensureTupleInput(filenames)
if isinstance(skiprow, int): skiprow = (skiprow,)*len(self.filenames)
if isinstance(skipcol, int): skipcol = (skipcol,)*len(self.filenames)
if isinstance(skipfooter, int): skipfooter = (skipfooter,)*len(self.filenames)
for i in range(len(self.filenames)):
# Data dictionary of specified property(s) of all times
self.data[self.filenames[i]] = \
np.genfromtxt(self.ensemble_folderpath + self.filename_pre + self.filenames[i] + self.filename_sub,
skip_footer=skipfooter[i])[skiprow[i]:, skipcol[i]:]
print('\n' + str(self.filenames) + ' read')
def calculatePropertyMean(self, axis=1, starttime=None, stoptime=None):
self.times_selected, _, _, istart, istop = self._selectTimes(starttime=starttime, stoptime=stoptime)
for i in range(len(self.filenames)):
self.data[self.filenames[i] + '_mean'] = np.mean(self.data[self.filenames[i]][istart:istop],
axis=axis)
print('\nTemporal average calculated for {} from {:.4f} s - {:.4f} s'.format(self.filenames, self.times_selected[0], self.times_selected[-1]))
def trimInvalidCharacters(self, filenames, invalid_chars):
filenames = self._ensureTupleInput(filenames)
if isinstance(invalid_chars, str): invalid_chars = (invalid_chars,)
for filename in filenames:
with open(self.ensemble_folderpath + filename, 'r') as f:
lst = [line.rstrip('\n \t') for line in f]
for invalid_char in invalid_chars:
lst = [string.replace(invalid_char, '') for string in lst]
with open(self.ensemble_folderpath + filename, "w") as f:
f.writelines('\n'.join(lst))
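# Example call sequence for BaseProperties (a hedged sketch; the case name, directory and
# the file name 'UAvg' are hypothetical and only illustrate how the methods chain):
#
#   case = BaseProperties('myCase', casedir='/data/runs', timecols=0)
#   case.readPropertyData(filenames=('UAvg',))
#   case.calculatePropertyMean(starttime=1000., stoptime=2000.)
#   u_mean = case.data['UAvg_mean']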
class BoundaryLayerProfiles(BaseProperties):
def __init__(self, casename, height_filename='hLevelsCell', bl_folder='ABL', **kwargs):
self.height_filename = height_filename
super(BoundaryLayerProfiles, self).__init__(casename=casename + '/' + bl_folder, timecols=0, excl_files=height_filename, **kwargs)
# Copy height_filename to Ensemble in order to use it later
time = os.listdir(self.case_fullpath)[0]
shutil.copy2(self.case_fullpath + time + '/' + height_filename, self.ensemble_folderpath)
def readPropertyData(self, filenames=('*',), **kwargs):
# Read height levels
self.hLvls = np.genfromtxt(self.ensemble_folderpath + self.height_filename)
# Override skipcol to suit inflow property files
# Columns to skip are 0: time; 1: time step
super(BoundaryLayerProfiles, self).readPropertyData(filenames=filenames, skipcol=2)
def calculatePropertyMean(self, starttime=None, stoptime=None, **kwargs):
# Override axis to suit inflow property files
super(BoundaryLayerProfiles, self).calculatePropertyMean(axis = 0, starttime = starttime, stoptime = stoptime, **kwargs)
class TurbineOutputs(BaseProperties):
def __init__(self, casename, datafolder='turbineOutput', global_quantities=('powerRotor', 'rotSpeed', 'thrust',
'torqueRotor',
'torqueGen', 'azimuth', 'nacYaw', 'pitch'), **kwargs):
self.global_quantities = global_quantities
super(TurbineOutputs, self).__init__(casename + '/' + datafolder, **kwargs)
self.nTurb, self.nBlade = 0, 0
@timer
def readPropertyData(self, filenames=('*',), skiprow=0, skipcol='infer', verbose=True, turbinfo=('infer',)):
filenames = self._ensureTupleInput(filenames)
global_quantities = (
'powerRotor', 'rotSpeed', 'thrust', 'torqueRotor', 'torqueGen', 'azimuth', 'nacYaw', 'pitch', 'powerGenerator')
if skipcol == 'infer':
skipcol = []
for file in filenames:
if file in global_quantities:
skipcol.append(3)
else:
skipcol.append(4)
super(TurbineOutputs, self).readPropertyData(filenames=filenames, skiprow=skiprow, skipcol=skipcol)
if turbinfo[0] == 'infer':
turbinfo = np.genfromtxt(self.ensemble_folderpath + self.filename_pre + 'Cl' + self.filename_sub)[skiprow:, :2]
# Number of turbines and blades
(self.nTurb, self.nBlade) = (int(np.max(turbinfo[:, 0]) + 1), int(np.max(turbinfo[:, 1]) + 1))
fileNamesOld, self.filenames = self.filenames, list(self.filenames)
for filename in fileNamesOld:
for i in range(self.nTurb):
if filename not in global_quantities:
for j in range(self.nBlade):
newFileName = filename + '_Turb' + str(i) + '_Bld' + str(j)
self.data[newFileName] = self.data[filename][(i*self.nBlade + j)::(self.nTurb*self.nBlade), :]
self.filenames.append(newFileName)
else:
newFileName = filename + '_Turb' + str(i)
self.data[newFileName] = self.data[filename][i::self.nTurb]
self.filenames.append(newFileName)
if verbose:
print('\n' + str(self.filenames) + ' | |
/ %s!',
server_id, server.pid, pid)
lock.release()
continue
if server_task:
if server.status == 'pending':
server.status = 'running'
else:
pycos.logger.warning('Invalid status %s for server %s', server.status, server.sid)
lock.release()
else:
lock.release()
# assert server_task is None
if server.status == 'running':
close_servers([server])
else:
pycos.logger.warning('Invalid status %s for server %s', server.status, server.sid)
spawn_closed = True
mp_q.close()
_dispycos_node_q.put({'msg': 'closed', 'auth': _dispycos_config['auth']})
exit(0)
def _dispycos_node():
if ((hasattr(os, 'setresuid') or hasattr(os, 'setreuid')) and os.getuid() != os.geteuid()):
_dispycos_config['suid'] = os.geteuid()
_dispycos_config['sgid'] = os.getegid()
if _dispycos_config['suid'] == 0:
print('\n WARNING: Python interpreter %s likely has suid set to 0 '
'\n (administrator privilege), which is dangerous.\n\n' %
sys.executable)
if _dispycos_config['sgid'] == 0:
print('\n WARNING: Python interpreter %s likely has sgid set to 0 '
'\n (administrator privilege), which is dangerous.\n\n' %
sys.executable)
os.setegid(os.getgid())
os.seteuid(os.getuid())
os.umask(0x007)
pycos.logger.info('Clients will run with uid %s and gid %s' %
(_dispycos_config['suid'], _dispycos_config['sgid']))
else:
_dispycos_config.pop('suid', None)
_dispycos_config.pop('sgid', None)
if not _dispycos_config['min_pulse_interval']:
_dispycos_config['min_pulse_interval'] = MinPulseInterval
if not _dispycos_config['max_pulse_interval']:
_dispycos_config['max_pulse_interval'] = MaxPulseInterval
if _dispycos_config['msg_timeout'] < 1:
raise Exception('msg_timeout must be at least 1')
if (_dispycos_config['min_pulse_interval'] and
_dispycos_config['min_pulse_interval'] < _dispycos_config['msg_timeout']):
raise Exception('min_pulse_interval must be at least msg_timeout')
if (_dispycos_config['max_pulse_interval'] and _dispycos_config['min_pulse_interval'] and
_dispycos_config['max_pulse_interval'] < _dispycos_config['min_pulse_interval']):
raise Exception('max_pulse_interval must be at least min_pulse_interval')
if _dispycos_config['zombie_period']:
if _dispycos_config['zombie_period'] < _dispycos_config['min_pulse_interval']:
raise Exception('zombie_period must be at least min_pulse_interval')
else:
_dispycos_config['zombie_period'] = 0
num_cpus = multiprocessing.cpu_count()
if _dispycos_config['cpus'] > 0:
if _dispycos_config['cpus'] > num_cpus:
raise Exception('CPU count must be <= %s' % num_cpus)
num_cpus = _dispycos_config['cpus']
elif _dispycos_config['cpus'] < 0:
if -_dispycos_config['cpus'] >= num_cpus:
raise Exception('CPU count must be > -%s' % num_cpus)
num_cpus += _dispycos_config['cpus']
del _dispycos_config['cpus']
node_ports = set()
for node_port in _dispycos_config.pop('node_ports', []):
node_port = node_port.split('-')
if len(node_port) == 1:
node_ports.add(int(node_port[0]))
elif len(node_port) == 2:
node_port = (int(node_port[0]), min(int(node_port[1]),
int(node_port[0]) + num_cpus - len(node_ports)))
node_ports = node_ports.union(range(node_port[0], node_port[1] + 1))
else:
raise Exception('Invalid TCP port range "%s"' % str(node_port))
if node_ports:
node_ports = sorted(node_ports)
node_ports = node_ports[:num_cpus + 1]
else:
node_ports = [eval(pycos.config.DispycosNodePort)]
for node_port in range(node_ports[-1] + 1, node_ports[-1] + 1 + num_cpus - len(node_ports) + 1):
if node_ports[-1]:
node_ports.append(node_port)
else:
node_ports.append(0)
del node_port
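# Illustrative expansion of the port logic above (hypothetical values): with
# 'node_ports' given as ['51351-51353'] and num_cpus = 4, the range parsing first yields
# {51351, 51352, 51353} and the loop then extends it to [51351, 51352, 51353, 51354, 51355],
# i.e. one port for the node itself plus one TCP port per server process.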
_dispycos_config['udp_port'] = node_ports[0]
peer = None
for _dispycos_id in range(len(_dispycos_config['peers'])):
peer = _dispycos_config['peers'][_dispycos_id].rsplit(':', 1)
if len(peer) != 2:
print('peer "%s" is not valid' % _dispycos_config['peers'][_dispycos_id])
exit(1)
try:
peer = pycos.Location(peer[0], peer[1])
except Exception:
print('peer "%s" is not valid' % _dispycos_config['peers'][_dispycos_id])
exit(1)
_dispycos_config['peers'][_dispycos_id] = pycos.serialize(peer)
del peer
node_name = _dispycos_config['name']
if not node_name:
node_name = socket.gethostname()
if not node_name:
node_name = 'dispycos_server'
daemon = _dispycos_config.pop('daemon', False)
if not daemon:
try:
if os.getpgrp() != os.tcgetpgrp(sys.stdin.fileno()):
daemon = True
except Exception:
pass
_dispycos_config['discover_peers'] = False
class Struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __setattr__(self, name, value):
if hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError('Invalid attribute "%s"' % name)
service_times = Struct(start=None, stop=None, end=None)
# time at start of day
_dispycos_var = time.localtime()
_dispycos_var = (int(time.time()) - (_dispycos_var.tm_hour * 3600) -
(_dispycos_var.tm_min * 60) - _dispycos_var.tm_sec)
if _dispycos_config['service_start']:
service_times.start = time.strptime(_dispycos_config.pop('service_start'), '%H:%M')
service_times.start = (_dispycos_var + (service_times.start.tm_hour * 3600) +
(service_times.start.tm_min * 60))
if _dispycos_config['service_stop']:
service_times.stop = time.strptime(_dispycos_config.pop('service_stop'), '%H:%M')
service_times.stop = (_dispycos_var + (service_times.stop.tm_hour * 3600) +
(service_times.stop.tm_min * 60))
if _dispycos_config['service_end']:
service_times.end = time.strptime(_dispycos_config.pop('service_end'), '%H:%M')
service_times.end = (_dispycos_var + (service_times.end.tm_hour * 3600) +
(service_times.end.tm_min * 60))
if (service_times.start or service_times.stop or service_times.end):
if not service_times.start:
service_times.start = int(time.time())
if service_times.stop:
if service_times.start >= service_times.stop:
raise Exception('"service_start" must be before "service_stop"')
if service_times.end:
if service_times.start >= service_times.end:
raise Exception('"service_start" must be before "service_end"')
if service_times.stop and service_times.stop >= service_times.end:
raise Exception('"service_stop" must be before "service_end"')
if not service_times.stop and not service_times.end:
raise Exception('"service_stop" or "service_end" must also be given')
if _dispycos_config['max_file_size']:
_dispycos_var = re.match(r'(\d+)([kKmMgGtT]?)', _dispycos_config['max_file_size'])
if (not _dispycos_var or
len(_dispycos_var.group(0)) != len(_dispycos_config['max_file_size'])):
raise Exception('Invalid max_file_size option')
_dispycos_config['max_file_size'] = int(_dispycos_var.group(1))
_dispycos_var = _dispycos_var.group(2)
if _dispycos_var:
_dispycos_config['max_file_size'] *= 1024**({'k': 1, 'm': 2, 'g': 3,
't': 4}[_dispycos_var.lower()])
else:
_dispycos_config['max_file_size'] = 0
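# Example of the size-suffix parsing above (hypothetical option value): max_file_size '10m'
# matches group(1) = '10' and group(2) = 'm', giving 10 * 1024**2 bytes; a value with no
# suffix is taken as bytes, and an empty option disables the limit (0).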
if _dispycos_config['certfile']:
_dispycos_config['certfile'] = os.path.abspath(_dispycos_config['certfile'])
else:
_dispycos_config['certfile'] = None
if _dispycos_config['keyfile']:
_dispycos_config['keyfile'] = os.path.abspath(_dispycos_config['keyfile'])
else:
_dispycos_config['keyfile'] = None
node_auth = hashlib.sha1(os.urandom(20)).hexdigest()
node_servers = [None] * (num_cpus + 1)
if _dispycos_config['dest_path']:
dispycos_path = _dispycos_config['dest_path']
else:
import tempfile
dispycos_path = os.path.join(os.sep, tempfile.gettempdir(), 'pycos')
dispycos_path = os.path.join(dispycos_path, 'dispycos', 'node')
_dispycos_config['dest_path'] = dispycos_path
if not os.path.isdir(dispycos_path):
try:
os.makedirs(dispycos_path)
except Exception:
print('Could not create directory "%s"' % dispycos_path)
exit(1)
if os.name == 'nt':
proc_signals = [signal.CTRL_C_EVENT, signal.CTRL_C_EVENT, signal.SIGTERM]
else:
proc_signals = [signal.SIGINT, 0, signal.SIGKILL]
def kill_proc(pid, ppid, kill):
if pid <= 0:
return 0
pycos.logger.info('Killing process with PID %s', pid)
psproc = None
if psutil:
try:
psproc = psutil.Process(pid)
assert psproc.is_running()
if psproc.ppid() not in [ppid, 1]:
pycos.logger.warning('PPID of PID %s is different from expected %s: %s',
pid, ppid, psproc.ppid())
return -1
assert any(arg.endswith('dispycosnode.py') for arg in psproc.cmdline())
psproc.send_signal(proc_signals[0])
except psutil.NoSuchProcess:
return 0
except Exception:
pycos.logger.debug(traceback.format_exc())
return -1
if not psproc:
try:
os.kill(pid, proc_signals[0])
except OSError:
return 0
except Exception:
# TODO: handle failures
pycos.logger.debug(traceback.format_exc())
for signum in range(1, len(proc_signals) if kill else (len(proc_signals) - 1)):
for i in range(20):
if psproc:
try:
psproc.wait(0.2)
except Exception:
pass
if not psproc.is_running():
return 0
if i == 15:
try:
if signum == 1:
psproc.terminate()
else:
psproc.kill()
except psutil.NoSuchProcess:
return 0
except Exception:
pycos.logger.debug(traceback.format_exc())
else:
time.sleep(0.2)
if proc_signals[signum] == 0 or i == 15:
try:
os.kill(pid, proc_signals[signum])
except OSError:
return 0
pycos.logger.debug('Could not terminate PID %s', pid)
return -1
for _dispycos_id in range(0, num_cpus + 1):
_dispycos_var = os.path.join(dispycos_path, '..', 'server-%d.pkl' % _dispycos_id)
node_servers[_dispycos_id] = Struct(
id=_dispycos_id, pid=0, task=None, name='%s_server-%s' % (node_name, _dispycos_id),
port=node_ports[_dispycos_id], restart=False, pid_file=_dispycos_var, done=pycos.Event(),
busy_time=multiprocessing.RawValue('L', 0)
)
node_servers[0].name = None
client_info = Struct(auth=None, scheduler=None, client_location=None, cpus_reserved=0,
spawn_mpproc=None, interval=_dispycos_config['max_pulse_interval'],
zombie_period=0, restart_servers=False, served=0, node_q=None, spawn_q=None)
if _dispycos_config['clean']:
if os.path.isfile(node_servers[0].pid_file):
with open(node_servers[0].pid_file, 'rb') as fd:
pid_info = pickle.load(fd)
for (pid, ppid) in [(pid_info['spid'], pid_info['pid']),
(pid_info['pid'], pid_info['ppid'])]:
_dispycos_var = kill_proc(pid, ppid, kill=False)
if _dispycos_var:
for _dispycos_id in range(20):
if not os.path.isfile(node_servers[0].pid_file):
_dispycos_var = 0
break
time.sleep(0.2)
else:
_dispycos_var = kill_proc(pid, ppid, kill=True)
if (pid == pid_info['pid'] and os.path.exists(node_servers[0].pid_file) and
_dispycos_var == 0):
try:
os.remove(node_servers[0].pid_file)
except Exception:
pycos.logger.debug(traceback.format_exc())
pass
if os.path.exists(node_servers[0].pid_file):
print('\n Another dispycosnode seems to be running;\n'
' ensure no dispycosnode and servers are running and\n'
' remove *.pkl files in "%s"\n' % (os.path.join(dispycos_path, '..')))
exit(1)
dispycos_pid = os.getpid()
if hasattr(os, 'getppid'):
dispycos_ppid = os.getppid()
else:
dispycos_ppid = 1
server_config = {}
for _dispycos_var in ['udp_port', 'tcp_port', 'host', 'ext_host', 'name',
'discover_peers', 'secret', 'certfile', 'keyfile', 'dest_path',
'max_file_size', 'ipv4_udp_multicast']:
server_config[_dispycos_var] = _dispycos_config.get(_dispycos_var, None)
server_config['name'] = '%s_dispycosnode' % node_name
server_config['tcp_port'] = node_ports[0]
dispycos_scheduler = pycos.Pycos(**server_config)
dispycos_scheduler.ignore_peers = True
for _dispycos_var in dispycos_scheduler.peers():
pycos.Task(dispycos_scheduler.close_peer, _dispycos_var)
if dispycos_path != dispycos_scheduler.dest_path:
print('\n Destination paths inconsistent: "%s" != "%s"\n' %
(dispycos_path, dispycos_scheduler.dest_path))
exit(1)
if 'suid' in _dispycos_config:
os.chown(dispycos_path, -1, _dispycos_config['sgid'])
os.chmod(dispycos_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_ISGID)
os.chmod(os.path.join(dispycos_path, '..'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IXGRP)
os.chdir(dispycos_path)
with open(node_servers[0].pid_file, 'wb') as fd:
# TODO: store and check create_time with psutil
pickle.dump({'pid': dispycos_pid, 'ppid': dispycos_ppid, 'spid': -1}, fd)
del _dispycos_id
def node_proc(task=None):
from pycos.dispycos import DispycosNodeAvailInfo, DispycosNodeInfo
task.register('dispycos_node')
ping_interval = _dispycos_config.pop('ping_interval')
msg_timeout = _dispycos_config['msg_timeout']
disk_path = dispycos_scheduler.dest_path
_dispycos_config['node_location'] = pycos.serialize(task.location)
def close_server(server, pid, terminate=False, restart=False, task=None):
if not server.task or server.pid != pid:
raise StopIteration
if (yield server.task.deliver({'req': 'terminate' if terminate else 'quit',
'pid': pid, 'auth': client_info.auth})) == 1:
if not terminate:
raise StopIteration
yield server.done.wait(timeout=10)
if server.done.is_set():
raise StopIteration
yield server.done.wait(timeout=0.5)
if not client_info.spawn_q or not server.task:
raise StopIteration
client_info.spawn_q.put({'msg': 'close_server', 'auth': client_info.auth,
'sid': server.id, 'pid': pid, 'status': 0, 'terminate': True})
if client_info.scheduler:
msg = {'status': Scheduler.ServerDisconnected, 'auth': client_info.auth,
'location': server.task.location, 'pid': pid}
client_info.scheduler.send(msg)
yield dispycos_scheduler.close_peer(server.task.location, timeout=2)
msg = {'req': 'server_task', 'auth': client_info.auth, 'server_id': server.id,
'task': None, 'pid': pid, 'restart': restart}
server_task_msg(msg)
def close_spawn_proc():
proc = client_info.spawn_mpproc
cur_auth = client_info.auth
if not proc:
return 0
if not proc.is_alive():
client_info.spawn_mpproc = None
return 0
client_info.spawn_q.put({'msg': 'quit', 'auth': client_info.auth})
for j in range(10):
try:
msg = client_info.node_q.get(True, 2)
except Exception:
pycos.logger.debug(traceback.format_exc())
continue
if client_info.auth != cur_auth:
return 0
if (isinstance(msg, dict) and msg.get('msg', None) == 'closed' and
msg.get('auth', None) == client_info.auth):
proc.join(2)
if | |
_callback=self.callback,
silent=self.silent)
elif d_type == 'release':
ioc_clean.IOCClean(silent=self.silent).clean_releases()
ioc_common.logit(
{
'level': 'INFO',
'message': 'All iocage RELEASE and jail datasets have been'
' destroyed.'
},
_callback=self.callback,
silent=self.silent)
elif d_type == 'template':
ioc_clean.IOCClean(silent=self.silent).clean_templates()
ioc_common.logit(
{
'level': 'INFO',
'message':
'All iocage template datasets have been destroyed.'
},
_callback=self.callback,
silent=self.silent)
elif d_type == 'images':
ioc_clean.IOCClean(silent=self.silent).clean_images()
ioc_common.logit(
{
'level': 'INFO',
'message': 'The iocage images dataset has been destroyed.'
},
_callback=self.callback,
silent=self.silent)
elif d_type == 'debug':
ioc_clean.IOCClean(silent=self.silent).clean_debug()
ioc_common.logit(
{
'level': 'INFO',
'message': 'All iocage debugs have been destroyed.'
},
_callback=self.callback,
silent=self.silent)
else:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": "Please specify a dataset type to clean!"
},
_callback=self.callback,
silent=self.silent)
def create(self,
release,
props,
count=0,
pkglist=None,
template=False,
short=False,
_uuid=None,
basejail=False,
thickjail=False,
empty=False,
clone=None,
skip_batch=False,
thickconfig=False,
clone_basejail=False):
"""Creates the jail dataset"""
count = 0 if count == 1 and not skip_batch else count
if short and _uuid:
_uuid = _uuid[:8]
if len(_uuid) != 8:
ioc_common.logit(
{
"level":
"EXCEPTION",
"message":
"Need a minimum of 8 characters to use --short"
" (-s) and --uuid (-u) together!"
},
_callback=self.callback,
silent=self.silent)
if not template and not release and not empty and not clone:
ioc_common.logit(
{
"level":
"EXCEPTION",
"message":
"Must supply either --template (-t) or"
" --release (-r)!"
},
_callback=self.callback,
silent=self.silent)
if release is not None:
if os.path.isdir(
f'{self.iocroot}/releases/{release.upper()}'
) and not template and not empty and not clone:
release = release.upper()
if not os.path.isdir(
f'{self.iocroot}/releases/{release}'
) and not template and not empty and not clone:
freebsd_version = ioc_common.checkoutput(["freebsd-version"])
if "HBSD" in freebsd_version:
hardened = True
else:
hardened = False
arch = os.uname()[4]
if arch == 'arm64':
files = ['MANIFEST', 'base.txz', 'src.txz']
else:
files = ['MANIFEST', 'base.txz', 'lib32.txz', 'src.txz']
try:
if int(release.rsplit('-')[0].rsplit('.')[0]) < 12:
# doc.txz has relevance here still
files.append('doc.txz')
except (AttributeError, ValueError):
# Non-standard naming scheme, assuming it's current
pass
ioc_fetch.IOCFetch(
release,
hardened=hardened,
files=files,
silent=self.silent
).fetch_release()
if clone:
clone_uuid, path = self.__check_jail_existence__()
if 'templates' in path:
template = True
status, _ = self.list("jid", uuid=clone_uuid)
if status:
ioc_common.logit(
{
"level":
"EXCEPTION",
"message":
f"Jail: {self.jail} must not be running to be"
" cloned!"
},
_callback=self.callback,
silent=self.silent)
release = clone_uuid
clone = self.jail
try:
if count > 1 and not skip_batch:
for j in range(1, count + 1):
self.create(
release,
props,
j,
pkglist=pkglist,
template=template,
short=short,
_uuid=f"{_uuid}_{j}" if _uuid else None,
basejail=basejail,
thickjail=thickjail,
empty=empty,
clone=clone,
skip_batch=True,
thickconfig=thickconfig,
clone_basejail=clone_basejail)
else:
ioc_create.IOCCreate(
release,
props,
count,
pkglist,
silent=self.silent,
template=template,
short=short,
basejail=basejail,
thickjail=thickjail,
empty=empty,
uuid=_uuid,
clone=clone,
thickconfig=thickconfig,
clone_basejail=clone_basejail
).create_jail()
except BaseException:
if clone:
su.run(
[
'zfs', 'destroy', '-r',
f'{self.pool}/iocage/jails/{clone}@{_uuid}'
]
)
raise
return False, None
def destroy_release(self, download=False):
"""Destroy supplied RELEASE and the download dataset if asked"""
path = f"{self.pool}/iocage/releases/{self.jail}"
release = Release(self.jail)
# Let's make sure the release exists before we try to destroy it
if not release:
ioc_common.logit({
'level': 'EXCEPTION',
'message': f'Release: {self.jail} not found!'
})
ioc_common.logit(
{
"level": "INFO",
"message": f"Destroying RELEASE dataset: {self.jail}"
},
_callback=self.callback,
silent=self.silent)
ioc_destroy.IOCDestroy().__destroy_parse_datasets__(path, stop=False)
if download:
path = f"{self.pool}/iocage/download/{self.jail}"
ioc_common.logit(
{
"level": "INFO",
"message":
f"Destroying RELEASE download dataset: {self.jail}"
},
_callback=self.callback,
silent=self.silent)
ioc_destroy.IOCDestroy().__destroy_parse_datasets__(path,
stop=False)
def destroy_jail(self, force=False):
"""
Destroys the supplied jail. To reduce the performance hit,
call IOCage with skip_jails=True
"""
try:
self.jails = self.list("uuid")
except (RuntimeError, SystemExit) as err:
err = str(err)
if "Configuration is missing" in err:
uuid = err.split()[5]
path = f"{self.pool}/iocage/jails/{uuid}"
if uuid == self.jail:
ioc_destroy.IOCDestroy().__destroy_parse_datasets__(
path, stop=False)
ioc_common.logit(
{
"level": "INFO",
"message": f"{uuid} destroyed"
},
_callback=self.callback,
silent=self.silent)
return
else:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": err
},
_callback=self.callback,
silent=self.silent)
except FileNotFoundError as err:
# Jail is lacking a configuration, time to nuke it from orbit.
uuid = str(err).rsplit("/")[-2]
path = f"{self.pool}/iocage/jails/{uuid}"
if uuid == self.jail:
ioc_destroy.IOCDestroy().__destroy_parse_datasets__(path)
return
else:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": err
},
_callback=self.callback,
silent=self.silent)
uuid, path = self.__check_jail_existence__()
status, _ = self.list("jid", uuid=uuid)
if status:
if not force:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": (f"Jail {uuid} is still running, "
f"please stop the jail first "
f"or destroy it with -f")
},
_callback=self.callback,
silent=self.silent)
else:
ioc_common.logit(
{
"level": "INFO",
"message": f"Stopping {uuid}"
},
_callback=self.callback,
silent=self.silent)
ioc_common.logit(
{
"level": "INFO",
"message": f"Destroying {uuid}"
},
_callback=self.callback,
silent=self.silent)
ioc_destroy.IOCDestroy().destroy_jail(path)
def df(self):
"""Returns a list containing the resource usage of all jails"""
jail_list = []
for jail, path in self.jails.items():
conf = ioc_json.IOCJson(path).json_get_value('all')
mountpoint = f"{self.pool}/iocage/jails/{jail}"
template = conf["type"]
if template == "template":
mountpoint = f"{self.pool}/iocage/templates/{jail}"
ds = self.zfs.get_dataset(mountpoint)
zconf = ds.properties
compressratio = zconf["compressratio"].value
reservation = zconf["reservation"].value
quota = zconf["quota"].value
used = zconf["used"].value
available = zconf["available"].value
jail_list.append(
[jail, compressratio, reservation, quota, used, available])
return jail_list
def exec_all(
self, command, host_user='root', jail_user=None, console=False,
start_jail=False, interactive=False, unjailed=False, msg_return=False
):
"""Runs exec for all jails"""
self._all = False
for jail in self.jails:
self.jail = jail
self.exec(
command, host_user, jail_user, console, start_jail,
interactive, unjailed, msg_return
)
def exec(
self, command, host_user='root', jail_user=None, console=False,
start_jail=False, interactive=False, unjailed=False, msg_return=False
):
"""Executes a command in the jail as the supplied users."""
if self._all:
self.exec_all(
command, host_user, jail_user, console, start_jail,
interactive, unjailed, msg_return
)
return
pkg = unjailed
if host_user and jail_user is not None:
ioc_common.logit(
{
'level': 'EXCEPTION',
'message': 'Please only specify either host_user or'
' jail_user, not both!'
},
_callback=self.callback,
silent=self.silent)
uuid, path = self.__check_jail_existence__()
exec_clean = self.get('exec_clean')
if exec_clean:
env_path = '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:' \
'/usr/local/bin:/root/bin'
env_lang = os.environ.get('LANG', 'en_US.UTF-8')
su_env = {
'PATH': env_path,
'PWD': '/',
'HOME': '/',
'TERM': 'xterm-256color',
'LANG': env_lang,
'LC_ALL': env_lang
}
else:
su_env = os.environ.copy()
status, jid = self.list("jid", uuid=uuid)
if not status and not start_jail:
if not ioc_common.INTERACTIVE:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": f'{self.jail} is not running! Please supply'
' start_jail=True or start the jail'
},
_callback=self.callback,
silent=self.silent)
else:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": f'{self.jail} is not running! Please supply'
' --force (-f) or start the jail'
},
_callback=self.callback,
silent=self.silent)
elif not status:
self.start()
status, jid = self.list("jid", uuid=uuid)
if pkg:
ip4_addr = self.get("ip4_addr")
ip6_addr = self.get("ip6_addr")
dhcp = self.get("dhcp")
nat = self.get('nat')
if (
ip4_addr == ip6_addr == "none" and not dhcp and not nat
):
ioc_common.logit(
{
"level":
"EXCEPTION",
"message":
"The jail requires an IP address before you "
"can use pkg. Set one and restart the jail."
},
_callback=self.callback,
silent=self.silent)
command = ["pkg", "-j", jid] + list(command)
if console:
login_flags = self.get('login_flags').split()
console_cmd = ['login', '-p'] + login_flags
try:
ioc_exec.InteractiveExec(console_cmd, path, uuid=uuid)
except BaseException as e:
ioc_common.logit(
{
'level': 'ERROR',
'message': 'Console failed!\nThe cause could be bad '
f'permissions for {path}/root/usr/lib.'
},
_callback=self.callback,
silent=False
)
raise e
return
if interactive:
ioc_exec.InteractiveExec(
command,
path,
uuid=uuid,
host_user=host_user,
jail_user=jail_user,
skip=True
)
return
try:
with ioc_exec.IOCExec(
command,
path,
uuid=uuid,
host_user=host_user,
jail_user=jail_user,
unjailed=pkg,
su_env=su_env
) as _exec:
output = ioc_common.consume_and_log(
_exec
)
if msg_return:
return output['stdout']
for line in output['stdout']:
ioc_common.logit(
{
"level": "INFO",
"message": line
},
_callback=self.callback,
silent=self.silent)
except ioc_exceptions.CommandFailed as e:
msgs = [_msg.decode().rstrip() for _msg in e.message]
if msgs:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": '\n'.join(msgs)
},
_callback=self.callback,
silent=self.silent)
else:
ioc_common.logit(
{
"level": "EXCEPTION",
"message": f'Command: {command} failed!'
},
_callback=self.callback,
silent=self.silent)
def export(self, compression_algo='zip'):
"""Will export a jail"""
uuid, path = self.__check_jail_existence__()
status, _ = self.list("jid", uuid=uuid)
if status:
ioc_common.logit(
{
"level":
"EXCEPTION",
"message":
f"{uuid} is running, stop the jail before"
" exporting!"
},
_callback=self.callback,
silent=self.silent)
ioc_image.IOCImage().export_jail(
uuid, path, compression_algo=compression_algo
)
def fetch(self, **kwargs):
"""Fetches a release or plugin."""
release = kwargs.pop("release", None)
name = kwargs.pop("name", None)
props = kwargs.pop("props", ())
plugins = kwargs.pop("plugins", False)
plugin_name = kwargs.pop("plugin_name", None)
count = kwargs.pop("count", 1)
accept = kwargs.pop("accept", False)
_list = kwargs.pop("list", False)
remote = kwargs.pop("remote", False)
http = kwargs.get("http", True)
hardened = kwargs.get("hardened", False)
header = kwargs.pop("header", True)
_long = kwargs.pop("_long", False)
official = kwargs.pop("official", False)
branch = kwargs.pop("branch", None)
keep_jail_on_failure = kwargs.pop("keep_jail_on_failure", False)
thick_config = kwargs.pop("thickconfig", False)
freebsd_version = ioc_common.checkoutput(["freebsd-version"])
arch = os.uname()[4]
if not _list:
if not kwargs.get('files', None):
if arch == 'arm64':
kwargs['files'] = ['MANIFEST', 'base.txz', 'src.txz']
else:
kwargs['files'] = ['MANIFEST', 'base.txz', 'lib32.txz',
'src.txz']
try:
if int(release.rsplit('-')[0].rsplit('.')[0]) < 12:
# doc.txz has relevance here still
kwargs['files'].append('doc.txz')
except (AttributeError, ValueError):
# Non-standard naming scheme, assuming it's current
pass
if "HBSD" in freebsd_version:
if kwargs["server"] == "download.freebsd.org":
kwargs["hardened"] = True
else:
kwargs["hardened"] | |
0 7 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 3 0 0 0 0 0 2 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 3 0 0 0]]
Output:
[[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 7 0 0 0 0 0 0 0 0 0 0 0]
[0 0 7 1 7 0 0 0 0 0 0 0 0 0 0]
[0 0 0 7 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 3 2 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 3 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]
Colour Encoding:
Black = 0, Dark Blue = 1, Red = 2, Green = 3, Yellow = 4, Grey = 5, Pink = 6, Orange = 7, Sky Blue = 8, Brown = 9
Algorithm:
The given matrix may contain several different colours (0-9). A colour that occurs exactly once in the matrix is a center colour.
For each center colour there may be another colour in the same row and/or column that occurs more than once; call that colour the pair colour.
For every center colour, the respective pair colours should move towards the center and be placed in the cells neighbouring the center colour,
keeping their original direction: if the pair colour was on the right side of the center colour, it is placed in the neighbouring cell to the right, and likewise for the other directions.
Implementation:
First, all unique colours in the matrix except black (0) are fetched. Iterating through each colour, its indices are found: if the length of the indices equals one, it is a center colour; if the length is more than one, it is a pair colour.
For each center colour, the colours are fetched again and filtered down to the pair colours (those whose indices have length greater than one) for comparison.
Each center colour is compared with the pair colours to find whether a pair colour exists in the same row or column; if it does, the pair colour cell is flipped to black and the side of the center colour on which it lies is determined using the source matrix (x).
If the pair colour row index is more than the center colour row index, then the position is bottom.
If the pair colour row index is less than the center colour row index, then the position is top.
If the pair colour column index is more than the center colour column index, then the position is right.
If the pair colour column index is less than the center colour column index, then the position is left.
After finding the position, the center colour's row or column index is adjusted by +1 or -1 according to the conditions above, and that neighbouring cell is flipped to the respective pair colour.
Results:
All 3 training test cases and the 1 testing test case passed
"""
assert type(x) == np.ndarray
x_copy = x.copy()
unique = np.unique(x)[1:] # all the unique colours are fetched from the matrix
for center in unique: # iterating through each colour
row, column = np.where(x == center) # fetch the indices of the colour
# focusing on the center point
if len(row) == 1 and len(column) == 1: # if the length of the indices is equal to one, then it is a center colour
for pair in unique: # parsing the colours again to compare and find its respective pair colour
p_row, p_column = np.where(x == pair) # fetch the indices of the color
# Focusing on the pair points
if center != pair and len(p_row) != 1 and len(p_column) != 1: # skip when the center colour and pair colour are the same, then keep only the colours whose indices have length more than one, i.e. pair colours
for p_r, p_c in zip(p_row,p_column): # since it is a pair colour, the np.where above returns more than one row and column index, so parse through each occurrence of the pair colour
if p_r == row: # check whether the pair colour is in same row for the respective center colour and then check for which position or side
x_copy[p_r][p_c] = 0 # flip the actual cell to black
if p_c < column: # check if pair colour column index is less than center colour column index
x_copy[p_r][column-1] = pair # if yes, then the position/side of the pair colour is left so subtracting center colour column index with -1 and flip the colour of the cell to respective iteration of the colour(pair)
elif p_c > column: # extra check to avoid unnecessary matrix alteration: is the pair colour column index greater than the center colour column index
x_copy[p_r][column+1] = pair # if yes, then the position/side of the pair colour is right so adding center colour column index with +1 and flip the colour of the cell to respective iteration of the colour(pair)
if p_c == column: # check if the pair colour is in the same column
x_copy[p_r][p_c] = 0 # flip the actual cell to black
if p_r < row: # check if pair colour row index is less than center colour row index
x_copy[row[0]-1][p_c] = pair # if yes, then the position/side of the pair colour is top so subtracting center colour row index with -1 and flip the colour of the cell to respective iteration of the colour(pair)
elif p_r > row: # extra check to avoid unnecessary matrix alteration: is the pair colour row index greater than the center colour row index
x_copy[row[0]+1][p_c] = pair # if yes, then the position/side of the pair colour is bottom so adding center colour row index with +1 and flip the colour of the cell to respective iteration of the colour(pair)
return x_copy
################################################################################################################################
def solve_a65b410d(x):
"""
Task Description:
Input:
[[0 0 0 0 0 0 0]
[0 0 0 0 0 0 0]
[0 0 0 0 0 0 0]
[2 2 0 0 0 0 | |
<gh_stars>1-10
# pylint: disable=too-many-lines
from collections.abc import Sequence
from datetime import datetime
from enum import auto, Flag
from typing import Optional, List, Tuple
from dataclasses import dataclass, field
from triple_agent.classes.action_tests import ActionTest
from triple_agent.classes.books import Books
from triple_agent.classes.characters import Characters, CHARACTERS_TO_STRING
from triple_agent.classes.missions import Missions
from triple_agent.classes.roles import Roles
from triple_agent.classes.ordered_enum import ReverseOrderedFlag
class TimelineCoherency(Flag):
Coherent = 0
NoTimeline = auto()
TimeRewind = auto()
BookMissingColor = auto()
NoGameStart = auto()
NoGameEnding = auto()
StartClockMismatch = auto()
PickedMissionsMismatch = auto()
CompletedMissionsMismatch = auto()
SelectedMissionsMismatch = auto()
GuestCountMismatch = auto()
SpyNotCastInBeginning = auto()
CharacterNotAssignedRole = auto()
RoleWithNoCharacter = auto()
ElapsedTimeMissing = auto()
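# Illustrative sketch (assumes standard enum.Flag combination semantics): coherency problems
# accumulate with |, e.g. issues = TimelineCoherency.NoGameStart | TimelineCoherency.TimeRewind;
# an individual problem is tested with &, e.g. bool(issues & TimelineCoherency.TimeRewind) is True,
# and a timeline is clean only when issues == TimelineCoherency.Coherent (value 0).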
# using ReverseOrderedFlag gives a deterministic sort to
# TimelineCategory, even if it doesn't make sense to sort these
# things.
class TimelineCategory(ReverseOrderedFlag):
NoCategory = 0
ActionTest = auto()
ActionTriggered = auto()
MissionPartial = auto()
SniperLights = auto()
MissionComplete = auto()
MissionCountdown = auto()
GameEnd = auto()
GameStart = auto()
TimeAdd = auto()
Cast = auto()
# selected means that the sniper can see it as a possible mission
MissionSelected = auto()
# enabled means that the spy can complete it; this will be smaller than
# selected in pick modes.
MissionEnabled = auto()
SniperShot = auto()
# Modifiers
BananaBread = auto()
# FP Objects
Briefcase = auto()
Statues = auto()
Books = auto()
Drinks = auto()
# Modifiers cont.
Conversation = auto()
Watch = auto()
Overtime = auto()
def serialize(self):
return [cat.name for cat in TimelineCategory if cat & self]
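# Illustrative sketch (assumes members iterate in definition order, as with enum.Flag):
# (TimelineCategory.ActionTest | TimelineCategory.Books).serialize() -> ['ActionTest', 'Books'];
# NoCategory (value 0) is never listed because 0 & flag is always falsy.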
EVENT_IMAGE_HASH_DICT = {
"b38d6b3162ecee914e68191eba678139": "got cupcake from waiter.",
"5a109148b0321fa0458133f8a1ab3ab3": "waiter stopped offering drink.",
"7817b66ae1b7e67f096667f3919a135a": "waiter stopped offering cupcake.",
"5b399a954b788c11678efb89e55587fd": "spy leaves conversation.",
"0ad1e5c65ab347f5908224802c504270": "spy enters conversation.",
"3230377406adffef4544607df49fb432": "spy joined conversation with double agent.",
"0faaca34326fffbbfde957989958fd7d": "marked book.",
"62e3e62b1c77944948dfdda56666446a": "guest list purloined.",
"1a8cdd526ca2256c8444c42aee40ece3": "action triggered: contact double agent",
"3a5271e6847abcd01e7eee30b9f232cb": "real banana bread started.",
"2d8b1b30edb6d62a27f953e0c79dded9": "action test red: contact double agent",
"649e5e3e7ad3f714d74410361f3ff50e": "banana bread uttered.",
"226a718a5e231ffa2c133531639b4439": "double agent contacted.",
"23f4c27de9985f4bd254e5aee4caba1e": "45 seconds added to match.",
"d26a40c458fca862d893e3ce71c6ec1b": "spy left conversation with double agent.",
"1570134cb95670f10dd4a1570c6aed8a": "spy picks up briefcase.",
"e5341af159fc7288343a5a937c8fccce": "picked up fingerprintable briefcase.",
"d26597036c74e800f5e4f39cff214492": "action triggered: fingerprint ambassador",
"b053b28d82e5ff6228658ee890605166": "started fingerprinting briefcase.",
"909a216af5765cb4dfd217fb04860ed2": "fingerprinted briefcase.",
"aec62ede9621d7bd65d2a5a23eabf532": "spy returns briefcase.",
"ed3cddc39e5fac4372d65d0b2f3303fd": "game started.",
"7a1416528959d1213aca828389ca462f": "marked less suspicious.",
"32a02ddef19e9a5ec01aba6eacb054b6": "spy player takes control from ai.",
"161e6be1904781e7b946643d2e310365": "action triggered: bug ambassador",
"d8d68ffabbba7320ec394917bcefa51c": "begin planting bug while walking.",
"b3dcc24ab5bb9fdfefda54cf90270600": "failed planting bug while walking.",
"c9b69e088afdbd9b359ca79996c27aa4": "marked suspicious.",
"67baba0bdce2ed478ef21b6f768c9a17": "action test green: contact double agent",
"3ee230a7c3f1590ae5515270b80a38ba": "marked neutral suspicion.",
"6faa9813756ee4a7f81cd379f84a5dad": "took last sip of drink.",
"ed06251db400b4e711b57ac6a80780e2": "picked up statue.",
"309d30bad509119670bb028f3e14463d": "action triggered: inspect statues",
"3d2afd3ab54d4c31e6205f0d17950f1d": "action test white: inspect statues",
"00e630506955a56d637b4a1de65771e5": "left statue inspected.",
"951a4c5cc056b74f7cf85cbfee3b8acc": "spy cast.",
"1f794e2c8a778a54748c4855941ad096": "ambassador cast.",
"3faf4b9c22f75b93f4261c36a5e5ca94": "double agent cast.",
"db3d6fe633fccf6f965a7de7cdb3b945": "suspected double agent cast.",
"1d0c3a6a016d31b8e5e8df0325129844": "seduction target cast.",
"fe4657cedf546b464cc46c3a21f59ab3": "left statue inspection cancelled.",
"e1c3bfe9177f6ae2441ee84d6790ec9a": "right statue inspection cancelled.",
"80d549213a0a5a64eb5fd5c1e9b03c9d": "left statue inspection interrupted.",
"be3cda5592a1a916846e7196fbaa5e95": "statue inspection interrupted.",
"5840248148cd6b0cea38d2ad58474535": "right statue inspection interrupted.",
"5626495338f84019f99d6956a4ed02a5": "civilian cast.",
"579670a2f2248b94e0e77daac2f62bf6": "bug ambassador selected.",
"7c524e5a4be7dfb182dbccd671b9d881": "contact double agent selected.",
"8e084c3d6391c6373d3c608382da1fdb": "transfer microfilm selected.",
"9c0faeee1aa19ebf7b1dcc836f57310e": "swap statue selected.",
"6c4824628cc971e5c735680d131626b5": "inspect 3 statues selected.",
"3de9dc934fded6e28638ae579e601338": "seduce target selected.",
"e512eb60cb8f40af55558f54a43865c1": "purloin guest list selected.",
"82774a310f33d24a4242b6413719223e": "fingerprint ambassador selected.",
"eb9751e75f502c69d13b8d290053cf6c": "bug ambassador enabled.",
"d93491adf3331ca5160e8a970449b6f2": "contact double agent enabled.",
"5b73571ec9cc0dec0fce8876ff772876": "transfer microfilm enabled.",
"5975a54affafd2db3fe55c881170a7a2": "swap statue enabled.",
"bb66cc6ca9c6f74a67c616a720c0a677": "inspect 3 statues enabled.",
"521503717667e4825207996a4932656c": "seduce target enabled.",
"2681afe7f005b1a374bf06beb5f5c0af": "purloin guest list enabled.",
"0b43fbf44919051ad8f98cceca3b236e": "fingerprint ambassador enabled.",
"d20d62e6307b4a0f25d503037cec0a30": "get book from bookcase.",
"5a0e1b02ebe0269b60f3ef0911aba2db": "action triggered: transfer microfilm",
"c8e97c2b128707e66b2a4a2105e04cbf": "action test white: transfer microfilm",
"a60d7f6b936ce3d95bc03346704b709c": "remove microfilm from book.",
"8069b4cc9125c5e564e33b1ad6431959": "action test green: transfer microfilm",
"0d5a71d5447c4bc2348e5927f8ddb6be": "hide microfilm in book.",
"6954ce09e91c6d69a80618db1e96fdc2": "put book in bookcase.",
"8d4f2df56952c641d700766603b9f4b5": "transferred microfilm.",
"5b59b4f3cab30a2f3e03f4056f1b38c4": "request drink from waiter.",
"323b91fcf1a9da56c28fe81b4dd1877c": "picked up fingerprintable briefcase (difficult).",
"bf24d6a70eb44615a2395a61d0019f41": "action test red: fingerprint ambassador",
"34a781cf07abac08e110cfa9539489da": "fingerprinting failed.",
"236090881b10fd68589e5b4d7a05fc09": "begin planting bug while standing.",
"536509cfac1fba12e632f7b3bf0026ca": "bugged ambassador while standing.",
"b09a162d3cb74796dfbfa152a77ccdd0": "spy puts down briefcase.",
"fd2f4abf24058cc36b4472b294f70624": "missions reset.",
"0202f0c6b885b60ffcdd9144e6225b37": "held statue inspected.",
"b50d53e2c3cf20ead52832ccf1a8595f": "all statues inspected.",
"dc8719e17cde5f7428da1799321f2e91": "put back statue.",
"c0d169a807b1704ca503e2c1293ed876": "marked spy suspicious.",
"12d041286098ee6c213abcc44d662b27": "action triggered: seduce target",
"70c9710c4dc5adbb3e2de4045c4c8040": "begin flirtation with seduction target.",
"e97828c37d813d423c65a40161b77ac0": "action test green: seduce target",
"e46964ac0ea006ade7add17e114c8f6d": "flirt with seduction target: 51%",
"39fd4381f226270075665d11f1b51360": "stopped talking.",
"c640b53fec92bc1be46447e0ef1b3c90": "waiter offered drink.",
"72759a3476e816057472567a63d1febd": "action triggered: purloin guest list",
"371891839d73f956745c1438ca4da47a": "action test white: purloin guest list",
"620e7ebe2fc3e5d805d1b6688d1eb307": "got drink from waiter.",
"5b5c8345245a87d6e050ae4794504602": "sipped drink.",
"5f0832dec6f670d263c68d65f7801bfb": "bit cupcake.",
"d0bd47210b12ac6d2b62214cad7bbd5d": "flirtation cooldown expired.",
"5784c6cdd823ff417b562567cf0c46bc": "gulped drink.",
"a8b8c5ba7649dbebd31db6dd44dd40ed": "chomped cupcake.",
"0c00f6ba136e8633041eab53ba8b218a": "bartender offered cupcake.",
"bc913c68f90054fb95f53a8a1d4f42c4": "waiter offered cupcake.",
"b2d269c794d079e0653d409d5536a6db": "waiter gave up.",
"489d536dabec41f643ea9de811ee688e": "purloin guest list aborted.",
"ce317bb385b4f8231b5888e1d1850de8": "action test green: inspect statues",
"f0ad7ef2bd2d2bcdf950bb97d60ee4a9": "action triggered: swap statue",
"8bdce8a2dd6fd66cca261892dbfa66f3": "action test white: swap statue",
"9f9eba5d7ef5d94718f4c291a7f8415f": "statue swapped.",
"fd34d424047339fc13e7b7dd5e1600f2": "missions completed. 10 second countdown.",
"d5c8686d37e1f1f7bb98fb1efd696186": "ambassador's personal space violated.",
"2ece32018f973bb00aef7c9105d1cb77": "rejected drink from waiter.",
"ccfa5907d7b6f28ef8c22fec72687403": "picked up fingerprintable statue (difficult).",
"329757ba419e524dc7198c7e5fa7bd17": "started fingerprinting statue.",
"24bda9d67dfda798ba137e3cda969893": "action test green: fingerprint ambassador",
"c4fd5b425ac587af10b56910f87389ef": "fingerprinted statue.",
"128b75c0a0a13988f4560621a5a5ac9b": "action test green: swap statue",
"f58915975f4fc169d8fc14873342d744": "statue swap pending.",
"28e077700c5ee0d5fc4792e76bcfb098": "character picked up pending statue.",
"7dccf8237bfe794727b060d8ba89dd96": "action test green: purloin guest list",
"d91f89f90080a33b1e6bd222c574a191": "guest list purloin pending.",
"ce77fd8d3d86f48174fbb6b477db81f1": "flirt with seduction target: 100%",
"a82426e62385f9a189bad9ded2fef016": "target seduced.",
"df37dbbb795a37ae7ab461fc3f864e77": "missions completed successfully.",
"8e248ff240ba72a33ed2752a8e89f546": "inspect 2 statues selected.",
"3767785942cb613f40586ec045fe7a85": "inspect 2 statues enabled.",
"0f0cbe5ce306d857323d3c7842db6659": "action test white: contact double agent",
"895902a2f65b2af298ce2fb77b192835": "flirt with seduction target: 94%",
"e904df6aa47d6684f0e50de6e21ad9c2": "right statue inspected.",
"ade41b6279ad9a98c3e6a96b9ad41025": "took shot.",
"b2b98f845cc3d02c34c9a0ff3c980a08": "sniper shot civilian.",
"7367356bfef3a56eea270a9acbd80776": "double agent left conversation with spy.",
"115e1b6c144d18ac729e0ea7225db27c": "action test white: seduce target",
"0bf2418405cd6e0733d60b6914266852": "flirt with seduction target: 68%",
"c1d2ae516933eccf018d208a768102f4": "action test ignored: seduce target",
"c89b2d7400e4702393a610042db50f59": "flirt with seduction target: 85%",
"f13a4c11f29b2a800e941fcb907f390c": "overtime!",
"426fbe1a317e86986dcba9d8e8d5d0f4": "delegated purloin timer expired.",
"1d449ca29e070eecd4481d9d79bfbd1e": "picked up fingerprintable statue.",
"a682c48dc256d52c7ed06f0f1d54db71": "action triggered: check watch",
"882d5890f602fa469ed90a4cd4310e6d": "watch checked to add time.",
"1c4a6f8bffcbcd01b524d851df179940": "action test green: check watch",
"890a04a48d8f318c9cb49f9e8515f255": "delegated purloin to ms. o.",
"008abd07ef76e22b0970394c43d95478": "flirt with seduction target: 69%",
"0130bdee457add93dd4e24249903b170": "delegated purloin to ms. l.",
"02f4960217f6b685ffcbb58ffff14a00": "flirt with seduction target: 33%",
"0ca3cb9c9a0b28522390c7f2ee380e7f": "flirt with seduction target: 75%",
"0d0cc1e5d137ad86292e3cc47c6e9c86": "flirt with seduction target: 47%",
"0eb79ab25238e4d05f2f547201f836d0": "flirt with seduction target: 27%",
"0fb9e9bcb97904da6874dc9b29f4928e": "action test ignored: inspect statues",
"11446c98c0b8f4987562e3b38bad4ffd": "action test red: transfer microfilm",
"132b29dc8813572dcb41c82a0559f8a6": "flirt with seduction target: 97%",
"14304c57b4232b2559f4b668a5a4b807": "flirt with seduction target: 22%",
"168ee1e7b870d92522aaeb07b1451a82": "flirt with seduction target: 39%",
"178d3ac1b90f3f4161df72e742d742a8": "flirt with seduction target: 65%",
"1863ed6523d63c66010d16538608116c": "flirt with seduction target: 19%",
"1be6585aed858a0246d055744143eed3": "flirt with seduction target: 38%",
"1c960a4197ed2ceaedb5e1b12f53d7cf": "flirt with seduction target: 30%",
"1e064e90ef4af023e7a6080fbd75a044": "sniper shot spy.",
"25986b7bc02f31d4fc7b273cb733feef": "request cupcake from waiter.",
"2628aaa77d4e77bc36e799d565a83e77": "flirt with seduction target: 48%",
"26442e507157bd7d9cbddd6150356ff0": "delegated purloin to ms. j.",
"29ebb827c6d08d45f1e2bc1b5f06993c": "got drink from bartender.",
"2d034815cfdb1306e9c697140bf042c0": "double agent joined conversation with spy.",
"2e89204875819b9f5448152eda389c30": "flirt with seduction target: 32%",
"2eaeedeae6f8dbc4a3149582c12222aa": "delegated purloin to ms. b.",
"2f0cdf7cc401f92c4d34f4cca7032655": "flirt with seduction target: 34%",
"2fdeb005aa5b68090c3a9130c5fe81bc": "flirt with seduction target: 70%",
"3030c027b0685a2f445cabb477843472": "action test ignored: check watch",
"32f4fa708cb96e2a9d45384e11514e6e": "flirt with seduction target: 41%",
"33366c01b3e6f8299b3cf1d14f360bb4": "aborted watch check to add time.",
"35223e69dde5ceb8fe18f8711ece2b02": "action test ignored: transfer microfilm",
"3954f07db87d365583d71987e58fda6f": "flirt with seduction target: 57%",
"39c1dd079ff9aa1274f5150d3304c7d1": "delegated purloin to mr. u.",
"3a65f078b7ad9924ae0f3bf058b51169": "flirt with seduction target: 72%",
"3f6b563150eea7b32708323cca213c54": "delegated purloin to dr. n.",
"415ff3a1e6e51aaf0611421ac6e0d6fd": "flirt with seduction target: 29%",
"441754b3111e3913e18e698ae9c207bc": "spy ran out of time.",
"448b0b18c3a09ed41d84488a034c18f3": "fingerprinted ambassador.",
"46b91310294671a31ad3a0623ea3cdb4": "flirt with seduction target: 25%",
"47bd2c9d987d0e9ef1d145d8cd7033ce": "left alone while attempting banana bread.",
"49bdce27aa60c4f34ffdafd86a8a7c16": "flirt with seduction target: 60%",
"49f9b59d18b99f13583febd808cd7917": "delegated purloin to mr. g.",
"4b24a9e9b9df01d427e2edf869e5ee9d": "bartender offered drink.",
"4bcc20d93c4b850b8985b6197031f8e7": "flirt with seduction target: 35%",
"4bcdc2d5fd310907ee499c433b776f0f": "flirt with seduction target: 54%",
"4bd26f4a9e6c1edc3e2035de7c8a8c2a": "flirt with seduction target: 55%",
"4ce8beafa62268bb71985ca4a9bead30": "flirt with seduction target: 66%",
"4e41589a466f6ac7ff08c7829f7734d9": "flirt with seduction target: 26%",
"4f32c7d2bf1e25f03b55fec2e552d10b": "bartender picked next customer.",
"503a8976d8569cbb4a8502e56b55ed44": "interrupted speaker.",
"53c9af79da8faaa274211676f6ffbda3": "rejected cupcake from bartender.",
"54806d77104c14649b38f8f2c6903e97": "flirt with seduction target: 31%",
"58af6e33a9bd712823a1abf9f2578c47": "flirt with seduction target: 56%",
"58cf3c8119175ed44d89a7955d151177": "delegated purloin to dr. m.",
"5915de854751bd0a7466146946874a65": "sniper shot too late for sync.",
"59a992b89a032dd6af2243f69e4dcbb9": "flirt with seduction target: 77%",
"5ae3800dcd8a3e50056b2209d9d6fb12": "delegated purloin to mr. s.",
"5d8d9d2b4fe851884a8306aadfcc944d": "delegated purloin to ms. f.",
"5eba266d222ef60ca6e0caa39d231bf7": "delegated purloin to mr. q.",
"5fc24bc622f8cc183deb9413a858e9ef": "flirt with seduction target: 71%",
"60fa7cfd5344d51d2d9d4d8a4d3db469": "flirt with seduction target: 86%",
"612690ee57fdff9d320284a5399bb0ab": "flirt with seduction target: 76%",
"6146b10d5097352f8072cbcef249c199": "got cupcake from bartender.",
"623c1e0675228b9a540476be10c3156c": "fingerprinted drink.",
"6336e31994ca52b268b5c6930a7c9cb4": "flirt with seduction target: 78%",
"65fe6fe94761725d2938e374d85736ff": "started talking.",
"67b9909e8f326fafeafaf50c7163e817": "flirt with seduction target: 95%",
"685fe9f450388cacf8a31ece4021062a": "cast member picked up pending statue.",
"697150cb5c35fa51bbcd4f214f30ae2e": "bugged ambassador while walking.",
"6a3691df1947c3a5203323a4875865fa": "delegated purloin to mr. i.",
"6ac3e883de358346fe230818e0fd485e": "action test | |
<filename>function_pipe.py
"""
function_pipe.py
Copyright 2012-2017 Research Affiliates
Authors: <NAME>, <NAME>, <NAME>
Common usage:
import function_pipe as fpn
"""
import functools
import inspect
import re
import sys
import types
from enum import Enum
import numpy as np
# -------------------------------------------------------------------------------
# FunctionNode and utilities
def compose(*funcs):
"""
Given a list of functions, execute them from right to left, passing
the returned value of the right f to the left f. Store the reduced function in a FunctionNode
"""
# call right first, then left of each pair; each reduction returns a function
reducer = functools.reduce(
lambda f, g: lambda *args, **kwargs: f(g(*args, **kwargs)), funcs
)
# args are reversed to show execution from right to left
return FunctionNode(reducer, doc_function=compose, doc_args=reversed(funcs))
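# Minimal usage sketch (hypothetical functions): compose executes right to left, so with
# double = lambda x: x * 2 and incr = lambda x: x + 1,
# compose(double, incr)(3) runs incr first and then double, returning 8;
# the result is itself a FunctionNode, so it can keep being composed or combined with operators.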
def _wrap_unary(func):
"""Decorator for operator overloads. Given a higher order function that takes one args, wrap it in a FunctionNode function and provide documentation labels."""
def unary(lhs):
# wrapped function will prepare correct class, even if a constant
cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode
return cls(func(lhs), doc_function=func, doc_args=(lhs,))
return unary
def _wrap_binary(func):
"""Decorator for operators. Given a higher order function that takes two args, wrap it in a FunctionNode function and provide documentation labels."""
def binary(lhs, rhs):
# wrapped function will prepare correct class, even if a constant
cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode
return cls(func(lhs, rhs), doc_function=func, doc_args=(lhs, rhs))
return binary
_BINARY_OP_MAP = {
"__add__": "+",
"__sub__": "-",
"__mul__": "*",
"__truediv__": "/",
"__pow__": "**",
"__eq__": "==",
"__lt__": "<",
"__le__": "<=",
"__gt__": ">",
"__ge__": ">=",
"__ne__": "!=",
}
_UNARY_OP_MAP = {
"__neg__": "-",
"__invert__": "~",
"__abs__": "abs",
}
def _contains_expression(repr_str: str) -> bool:
"""
Checks whether or not a `repr_str` contains an expression. (Single unary expressions are excluded)
"""
repr_str = re.sub(r"\s+", "", repr_str)
repr_str = repr_str.replace("(", "")
repr_str = repr_str.replace(")", "")
symbols = re.findall(r"[\w']+", repr_str)
non_symbols = re.findall(r"[^\w']+", repr_str)
if len(non_symbols) == 0:
return False
if len(non_symbols) == 1 and len(symbols) == 1:
return False
return True
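# Illustrative behaviour: _contains_expression("a+b") -> True (two symbols joined by an operator),
# _contains_expression("(f)") -> False (a single symbol), and
# _contains_expression("-x") -> False (a single unary expression is excluded).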
def _format_expression(f) -> str:
"""
`f` could be either a single argument, or an expression of arguments. If it is the latter, wrap it in parentheses
"""
repr_str = _repr(f)
if _contains_expression(repr_str):
return f"({repr_str})"
return repr_str
def _repr(f) -> str:
"""Provide a string representation of the FN, recursively representing defined arguments."""
def get_function_name(f) -> str:
"""Get a string representation of the callable, or its code if it is a lambda. In some cases, `f` may not be function, so just return a string."""
if not isinstance(f, types.FunctionType) or not hasattr(f, "__name__"):
return str(f)
if f.__name__ == "<lambda>":
# split on all white space, and rejoin with single space
return " ".join(inspect.getsource(f).split())
return f.__name__
# find FunctionNode; using hasattr because of testing context issues
if isinstance(f, FunctionNode):
doc_f = get_function_name(f._doc_function)
unary_op = _UNARY_OP_MAP.get(doc_f)
binary_op = _BINARY_OP_MAP.get(doc_f)
if unary_op:
assert not f._doc_kwargs, "Unary FunctionNodes must not have doc_kwargs."
assert (
len(f._doc_args) == 1
), "Unary FunctionNodes must only have one doc_arg."
if unary_op == "abs":
arg = _repr(f._doc_args[0])
return f"{unary_op}({arg})"
arg = _format_expression(f._doc_args[0])
return f"{unary_op}{arg}"
if binary_op:
assert not f._doc_kwargs, "Binary FunctionNodes must not have doc_kwargs."
assert (
len(f._doc_args) == 2
), "Binary FunctionNodes must only have two doc_args."
left = _format_expression(f._doc_args[0])
right = _format_expression(f._doc_args[1])
return f"{left}{binary_op}{right}"
if not f._doc_args and not f._doc_kwargs:
return doc_f
predecessor = ""
sig_str = "("
if f._doc_args:
sig_str += ",".join((str(_repr(v)) for v in f._doc_args))
if f._doc_kwargs:
if f._doc_args:
sig_str += ","
for k, v in f._doc_kwargs.items():
if k == PREDECESSOR_PN:
predecessor = _repr(v)
else:
sig_str += (k + "=" + str(_repr(v))) + ","
sig_str = sig_str.rstrip(",") + ")"
if sig_str == "()":
sig_str = doc_f
else:
sig_str = doc_f + sig_str
if predecessor:
sig_str = predecessor + " | " + sig_str
return sig_str
return get_function_name(f)
class FunctionNode:
"""A wrapper for a callable that can reside in an expression of numerous FunctionNodes, or be modified with unary or binary operators."""
__slots__ = (
"_function",
"_doc_function",
"_doc_args",
"_doc_kwargs",
)
# ---------------------------------------------------------------------------
def __init__(self, function, *, doc_function=None, doc_args=None, doc_kwargs=None):
"""
Args:
function: a callable
doc_function: the function to display; will be set to `function` if not provided
"""
# if a function node, re-wrap
if isinstance(function, FunctionNode):
for attr in self.__slots__:
setattr(self, attr, getattr(function, attr))
else:
if callable(function):
self._function = function
else:
# if not a callable, we upgrade a constant, non function value to be a function that returns that value
self._function = lambda *args, **kwargs: function
# if not supplied, doc_function is set to function
self._doc_function = doc_function if doc_function else self._function
self._doc_args = doc_args
self._doc_kwargs = doc_kwargs
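# Note (illustrative): because non-callables are upgraded to constant functions,
# FunctionNode(5)() returns 5, and FunctionNode(5)("ignored", key="ignored") also returns 5.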
@property
def unwrap(self):
"""The doc_function should be set to the core function being wrapped, no matter the level of wrapping."""
# if the stored function is using _pipe_kwarg_bind, need to go lower
doc_func = self
while hasattr(doc_func, "_doc_function"):
doc_func = getattr(doc_func, "_doc_function")
return doc_func
def __call__(self, *args, **kwargs):
"""Call the wrapped function."""
return self._function(*args, **kwargs)
def __str__(self):
return f"<FN: {_repr(self)}>"
def __repr__(self):
return f"<FN: {_repr(self)}>"
def partial(self, *args, **kwargs):
"""Return a new FunctionNode with a partialed function with args and kwargs'"""
fn = FunctionNode(functools.partial(self._function, *args, **kwargs))
for attr in self.__slots__:
if not getattr(fn, attr):
setattr(fn, attr, getattr(self, attr))
return fn
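# Usage sketch (hypothetical function): with add = FunctionNode(lambda a, b: a + b),
# add.partial(1) is a new FunctionNode with the first argument bound, so add.partial(1)(2) returns 3.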
# ---------------------------------------------------------------------------
# all unary operators return a function; the _wrap_unary decorator then wraps this function in a FunctionNode
@_wrap_unary
def __neg__(self):
return lambda *args, **kwargs: self(*args, **kwargs) * -1
@_wrap_unary
def __invert__(self):
"""This is generally expected to be a Boolean inversion, such as ~ (not) applied to a numpy array or pd.Series."""
return lambda *args, **kwargs: self(*args, **kwargs).__invert__()
@_wrap_unary
def __abs__(self):
"""Absolute value; most common usage us on Numpy or Pandas objects, and thus here we np.abs."""
return lambda *args, **kwargs: np.abs(self(*args, **kwargs))
# ---------------------------------------------------------------------------
# all binary operators return a function; the _wrap_binary decorator then wraps this function in a FunctionNode definition and supplies appropriate doc args. Note both left and right sides are wrapped in FNs to permit operations on constants
@_wrap_binary
def __add__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) + self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __sub__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) - self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __mul__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) * self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __truediv__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) / self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __pow__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) ** self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __radd__(self, lhs):
return lambda *args, **kwargs: self.__class__(lhs)(
*args, **kwargs
) + self.__class__(self)(*args, **kwargs)
@_wrap_binary
def __rsub__(self, lhs):
return lambda *args, **kwargs: self.__class__(lhs)(
*args, **kwargs
) - self.__class__(self)(*args, **kwargs)
@_wrap_binary
def __rmul__(self, lhs):
return lambda *args, **kwargs: self.__class__(lhs)(
*args, **kwargs
) * self.__class__(self)(*args, **kwargs)
@_wrap_binary
def __rtruediv__(self, lhs):
return lambda *args, **kwargs: self.__class__(lhs)(
*args, **kwargs
) / self.__class__(self)(*args, **kwargs)
# comparison operators, expected to return booleans
@_wrap_binary
def __eq__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) == self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __lt__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) < self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __le__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) <= self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __gt__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) > self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __ge__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) >= self.__class__(rhs)(*args, **kwargs)
@_wrap_binary
def __ne__(self, rhs):
return lambda *args, **kwargs: self.__class__(self)(
*args, **kwargs
) != self.__class__(rhs)(*args, **kwargs)
# ---------------------------------------------------------------------------
# composition operators
def __rshift__(self, rhs):
"""Composition; return a function that will call LHS first, then RHS"""
return compose(rhs, self)
def __rrshift__(self, lhs):
"""Composition; return a function that will call LHS first, then RHS"""
return compose(self, lhs)
def __lshift__(self, rhs):
"""Composition; return a function that will call RHS first, then LHS"""
return compose(self, rhs)
def __rlshift__(self, lhs):
"""Composition; return a function that will call RHS first, then LHS"""
return compose(lhs, self)
def __or__(self, rhs):
"""Only implemented for PipeNode."""
raise NotImplementedError()
def __ror__(self, lhs):
"""Only implemented for PipeNode."""
raise NotImplementedError()
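# Composition sketch (hypothetical nodes): with f = FunctionNode(lambda x: x + 1) and
# g = FunctionNode(lambda x: x * 3), (f >> g)(2) == 9 (f runs first, then g),
# while (f << g)(2) == 7 (g runs first, then f).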
# -------------------------------------------------------------------------------
# PipeNode and utilities
# PipeNode kwargs
PREDECESSOR_RETURN = "predecessor_return"
PREDECESSOR_PN = "predecessor_pn"
PN_INPUT = "pn_input"
PN_INPUT_SET = frozenset((PN_INPUT,))
PREDECESSOR_PN_SET = frozenset((PREDECESSOR_PN,))
PIPE_NODE_KWARGS = frozenset((PREDECESSOR_RETURN, PREDECESSOR_PN, | |
<reponame>deadsnakes/python2.4
# This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the manager's header file,
# using the "scantools" package (customized for this particular manager).
#
# XXXX TO DO:
# - Implement correct missing FSSpec handling for Alias methods
# - Implement FInfo
import string
# Declarations that change for each manager
#MACHEADERFILE = 'Files.h' # The Apple header file
MODNAME = '_File' # The name of the module
LONGMODNAME = 'Carbon.File' # The "normal" external name of the module
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'File' # The prefix for module-wide routines
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Various integers:
SInt64 = Type("SInt64", "L")
UInt64 = Type("UInt64", "L")
FNMessage = Type("FNMessage", "l")
FSAllocationFlags = Type("FSAllocationFlags", "H")
FSCatalogInfoBitmap = Type("FSCatalogInfoBitmap", "l")
FSIteratorFlags = Type("FSIteratorFlags", "l")
FSVolumeRefNum = Type("FSVolumeRefNum", "h")
AliasInfoType = Type("AliasInfoType", "h")
# Various types of strings:
#class UniCharCountBuffer(InputOnlyType):
# pass
class VarReverseInputBufferType(ReverseInputBufferMixin, VarInputBufferType):
pass
FullPathName = VarReverseInputBufferType()
ConstStr31Param = OpaqueArrayType("Str31", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr32Param = OpaqueArrayType("Str32", "PyMac_BuildStr255", "PyMac_GetStr255")
ConstStr63Param = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
Str63 = OpaqueArrayType("Str63", "PyMac_BuildStr255", "PyMac_GetStr255")
HFSUniStr255 = OpaqueType("HFSUniStr255", "PyMac_BuildHFSUniStr255", "PyMac_GetHFSUniStr255")
UInt8_ptr = InputOnlyType("UInt8 *", "s")
# Other types:
class OptionalFSxxxType(OpaqueByValueType):
def declare(self, name):
Output("%s %s__buf__;", self.typeName, name)
Output("%s *%s = &%s__buf__;", self.typeName, name, name)
class FSCatalogInfoAndBitmapType(InputOnlyType):
def __init__(self):
InputOnlyType.__init__(self, "BUG", "BUG")
def declare(self, name):
Output("PyObject *%s__object = NULL;", name)
Output("FSCatalogInfoBitmap %s__bitmap = 0;", name)
Output("FSCatalogInfo %s;", name)
def getargsFormat(self):
return "lO"
def getargsArgs(self, name):
return "%s__bitmap, %s__object"%(name, name)
def getargsCheck(self, name):
Output("if (!convert_FSCatalogInfo(%s__object, %s__bitmap, &%s)) return NULL;", name, name, name)
def passInput(self, name):
return "%s__bitmap, &%s"% (name, name)
def passOutput(self, name):
return "%s__bitmap, &%s"% (name, name)
def mkvalueFormat(self):
return "O"
def mkvalueArgs(self, name):
return "%s__object" % (name)
def xxxxmkvalueCheck(self, name):
Output("if ((%s__object = new_FSCatalogInfo(%s__bitmap, &%s)) == NULL) return NULL;", name, name)
class FSCatalogInfoAndBitmap_inType(FSCatalogInfoAndBitmapType, InputOnlyMixIn):
def xxxxmkvalueCheck(self, name):
pass
class FSCatalogInfoAndBitmap_outType(FSCatalogInfoAndBitmapType):
def getargsFormat(self):
return "l"
def getargsArgs(self, name):
return "%s__bitmap" % name
def getargsCheck(self, name):
pass
FInfo = OpaqueType("FInfo", "FInfo")
FInfo_ptr = OpaqueType("FInfo", "FInfo")
AliasHandle = OpaqueByValueType("AliasHandle", "Alias")
FSSpec = OpaqueType("FSSpec", "FSSpec")
FSSpec_ptr = OpaqueType("FSSpec", "FSSpec")
OptFSSpecPtr = OptionalFSxxxType("FSSpec", "BUG", "myPyMac_GetOptFSSpecPtr")
FSRef = OpaqueType("FSRef", "FSRef")
FSRef_ptr = OpaqueType("FSRef", "FSRef")
OptFSRefPtr = OptionalFSxxxType("FSRef", "BUG", "myPyMac_GetOptFSRefPtr")
FSCatalogInfo = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
FSCatalogInfo_ptr = OpaqueType("FSCatalogInfo", "FSCatalogInfo")
# To be done:
#CatPositionRec
#FSCatalogInfo
#FSForkInfo
#FSIterator
#FSVolumeInfo
#FSSpecArrayPtr
includestuff = includestuff + """
#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern int _PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int _PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *_PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *_PyMac_BuildFSRef(FSRef *spec);
#define PyMac_GetFSSpec _PyMac_GetFSSpec
#define PyMac_GetFSRef _PyMac_GetFSRef
#define PyMac_BuildFSSpec _PyMac_BuildFSSpec
#define PyMac_BuildFSRef _PyMac_BuildFSRef
#else
extern int PyMac_GetFSSpec(PyObject *v, FSSpec *spec);
extern int PyMac_GetFSRef(PyObject *v, FSRef *fsr);
extern PyObject *PyMac_BuildFSSpec(FSSpec *spec);
extern PyObject *PyMac_BuildFSRef(FSRef *spec);
#endif
/* Forward declarations */
static PyObject *FInfo_New(FInfo *itself);
static PyObject *FSRef_New(FSRef *itself);
static PyObject *FSSpec_New(FSSpec *itself);
static PyObject *Alias_New(AliasHandle itself);
static int FInfo_Convert(PyObject *v, FInfo *p_itself);
#define FSRef_Convert PyMac_GetFSRef
#define FSSpec_Convert PyMac_GetFSSpec
static int Alias_Convert(PyObject *v, AliasHandle *p_itself);
/*
** UTCDateTime records
*/
static int
UTCDateTime_Convert(PyObject *v, UTCDateTime *ptr)
{
return PyArg_Parse(v, "(HlH)", &ptr->highSeconds, &ptr->lowSeconds, &ptr->fraction);
}
static PyObject *
UTCDateTime_New(UTCDateTime *ptr)
{
return Py_BuildValue("(HlH)", ptr->highSeconds, ptr->lowSeconds, ptr->fraction);
}
/*
** Optional fsspec and fsref pointers. None will pass NULL
*/
static int
myPyMac_GetOptFSSpecPtr(PyObject *v, FSSpec **spec)
{
if (v == Py_None) {
*spec = NULL;
return 1;
}
return PyMac_GetFSSpec(v, *spec);
}
static int
myPyMac_GetOptFSRefPtr(PyObject *v, FSRef **ref)
{
if (v == Py_None) {
*ref = NULL;
return 1;
}
return PyMac_GetFSRef(v, *ref);
}
/*
** Parse/generate objects
*/
static PyObject *
PyMac_BuildHFSUniStr255(HFSUniStr255 *itself)
{
return Py_BuildValue("u#", itself->unicode, itself->length);
}
"""
finalstuff = finalstuff + """
int
PyMac_GetFSSpec(PyObject *v, FSSpec *spec)
{
Str255 path;
short refnum;
long parid;
OSErr err;
FSRef fsr;
if (FSSpec_Check(v)) {
*spec = ((FSSpecObject *)v)->ob_itself;
return 1;
}
if (PyArg_Parse(v, "(hlO&)",
&refnum, &parid, PyMac_GetStr255, &path)) {
err = FSMakeFSSpec(refnum, parid, path, spec);
if ( err && err != fnfErr ) {
PyMac_Error(err);
return 0;
}
return 1;
}
PyErr_Clear();
/* Otherwise we try to go via an FSRef. On OSX we go all the way,
** on OS9 we accept only a real FSRef object
*/
if ( PyMac_GetFSRef(v, &fsr) ) {
err = FSGetCatalogInfo(&fsr, kFSCatInfoNone, NULL, NULL, spec, NULL);
if (err != noErr) {
PyMac_Error(err);
return 0;
}
return 1;
}
return 0;
}
int
PyMac_GetFSRef(PyObject *v, FSRef *fsr)
{
OSStatus err;
FSSpec fss;
if (FSRef_Check(v)) {
*fsr = ((FSRefObject *)v)->ob_itself;
return 1;
}
/* On OSX we now try a pathname */
if ( PyString_Check(v) || PyUnicode_Check(v)) {
char *path = NULL;
if (!PyArg_Parse(v, "et", Py_FileSystemDefaultEncoding, &path))
return NULL;
if ( (err=FSPathMakeRef(path, fsr, NULL)) )
PyMac_Error(err);
PyMem_Free(path);
return !err;
}
/* XXXX Should try unicode here too */
/* Otherwise we try to go via an FSSpec */
if (FSSpec_Check(v)) {
fss = ((FSSpecObject *)v)->ob_itself;
if ((err=FSpMakeFSRef(&fss, fsr)) == 0)
return 1;
PyMac_Error(err);
return 0;
}
PyErr_SetString(PyExc_TypeError, "FSRef, FSSpec or pathname required");
return 0;
}
extern PyObject *
PyMac_BuildFSSpec(FSSpec *spec)
{
return FSSpec_New(spec);
}
extern PyObject *
PyMac_BuildFSRef(FSRef *spec)
{
return FSRef_New(spec);
}
"""
initstuff = initstuff + """
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSSpec *, PyMac_BuildFSSpec);
PyMac_INIT_TOOLBOX_OBJECT_NEW(FSRef *, PyMac_BuildFSRef);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSSpec, PyMac_GetFSSpec);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(FSRef, PyMac_GetFSRef);
"""
execfile(string.lower(MODPREFIX) + 'typetest.py')
# Our object types:
class FSCatalogInfoDefinition(PEP253Mixin, ObjectDefinition):
getsetlist = [
("nodeFlags",
"return Py_BuildValue(\"H\", self->ob_itself.nodeFlags);",
"return PyArg_Parse(v, \"H\", &self->ob_itself.nodeFlags)-1;",
None
),
("volume",
"return Py_BuildValue(\"h\", self->ob_itself.volume);",
"return PyArg_Parse(v, \"h\", &self->ob_itself.volume)-1;",
None
),
("parentDirID",
"return Py_BuildValue(\"l\", self->ob_itself.parentDirID);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.parentDirID)-1;",
None
),
("nodeID",
"return Py_BuildValue(\"l\", self->ob_itself.nodeID);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.nodeID)-1;",
None
),
("createDate",
"return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.createDate);",
"return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.createDate)-1;",
None
),
("contentModDate",
"return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.contentModDate);",
"return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.contentModDate)-1;",
None
),
("attributeModDate",
"return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.attributeModDate);",
"return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.attributeModDate)-1;",
None
),
("accessDate",
"return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.accessDate);",
"return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.accessDate)-1;",
None
),
("backupDate",
"return Py_BuildValue(\"O&\", UTCDateTime_New, &self->ob_itself.backupDate);",
"return PyArg_Parse(v, \"O&\", UTCDateTime_Convert, &self->ob_itself.backupDate)-1;",
None
),
("permissions",
"return Py_BuildValue(\"(llll)\", self->ob_itself.permissions[0], self->ob_itself.permissions[1], self->ob_itself.permissions[2], self->ob_itself.permissions[3]);",
"return PyArg_Parse(v, \"(llll)\", &self->ob_itself.permissions[0], &self->ob_itself.permissions[1], &self->ob_itself.permissions[2], &self->ob_itself.permissions[3])-1;",
None
),
# XXXX FinderInfo TBD
# XXXX FinderXInfo TBD
("valence",
"return Py_BuildValue(\"l\", self->ob_itself.valence);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.valence)-1;",
None
),
("dataLogicalSize",
"return Py_BuildValue(\"l\", self->ob_itself.dataLogicalSize);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.dataLogicalSize)-1;",
None
),
("dataPhysicalSize",
"return Py_BuildValue(\"l\", self->ob_itself.dataPhysicalSize);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.dataPhysicalSize)-1;",
None
),
("rsrcLogicalSize",
"return Py_BuildValue(\"l\", self->ob_itself.rsrcLogicalSize);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcLogicalSize)-1;",
None
),
("rsrcPhysicalSize",
"return Py_BuildValue(\"l\", self->ob_itself.rsrcPhysicalSize);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.rsrcPhysicalSize)-1;",
None
),
("sharingFlags",
"return Py_BuildValue(\"l\", self->ob_itself.sharingFlags);",
"return PyArg_Parse(v, \"l\", &self->ob_itself.sharingFlags)-1;",
None
),
("userPrivileges",
"return Py_BuildValue(\"b\", self->ob_itself.userPrivileges);",
"return PyArg_Parse(v, \"b\", &self->ob_itself.userPrivileges)-1;",
None
),
]
# The same info, but in a different form
INITFORMAT = "HhllO&O&O&O&O&llllllb"
INITARGS = """&((FSCatalogInfoObject *)self)->ob_itself.nodeFlags,
&((FSCatalogInfoObject *)self)->ob_itself.volume,
&((FSCatalogInfoObject *)self)->ob_itself.parentDirID,
&((FSCatalogInfoObject *)self)->ob_itself.nodeID,
UTCDateTime_Convert, &((FSCatalogInfoObject *)self)->ob_itself.createDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)self)->ob_itself.contentModDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)self)->ob_itself.attributeModDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)self)->ob_itself.accessDate,
UTCDateTime_Convert, &((FSCatalogInfoObject *)self)->ob_itself.backupDate,
&((FSCatalogInfoObject *)self)->ob_itself.valence,
&((FSCatalogInfoObject *)self)->ob_itself.dataLogicalSize,
&((FSCatalogInfoObject *)self)->ob_itself.dataPhysicalSize,
&((FSCatalogInfoObject *)self)->ob_itself.rsrcLogicalSize,
&((FSCatalogInfoObject *)self)->ob_itself.rsrcPhysicalSize,
&((FSCatalogInfoObject *)self)->ob_itself.sharingFlags,
&((FSCatalogInfoObject *)self)->ob_itself.userPrivileges"""
INITNAMES = """
"nodeFlags",
"volume",
"parentDirID",
"nodeID",
"createDate",
"contentModDate",
"atributeModDate",
"accessDate",
"backupDate",
"valence",
"dataLogicalSize",
"dataPhysicalSize",
"rsrcLogicalSize",
"rsrcPhysicalSize",
"sharingFlags",
"userPrivileges"
"""
def __init__(self, name, prefix, itselftype):
ObjectDefinition.__init__(self, name, prefix, itselftype)
self.argref = "*" # Store FSSpecs, but pass them by address
def outputCheckNewArg(self):
Output("if (itself == NULL) return Py_None;")
def output_tp_newBody(self):
Output("PyObject *self;");
Output()
Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
self.objecttype, self.itselftype)
Output("return self;")
def output_tp_initBody(self):
Output("static char *kw[] = {%s, 0};", self.INITNAMES)
Output()
Output("if (!PyArg_ParseTupleAndKeywords(args, kwds, \"|%s\", kw, %s))",
self.INITFORMAT, self.INITARGS)
OutLbrace()
Output("return -1;")
OutRbrace()
Output("return 0;")
class FInfoDefinition(PEP253Mixin, ObjectDefinition):
getsetlist = [
("Type",
"return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdType);",
"return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdType)-1;",
"4-char file type"
),
("Creator",
"return Py_BuildValue(\"O&\", PyMac_BuildOSType, self->ob_itself.fdCreator);",
"return PyArg_Parse(v, \"O&\", PyMac_GetOSType, &self->ob_itself.fdCreator)-1;",
"4-char file creator"
),
("Flags",
"return Py_BuildValue(\"H\", self->ob_itself.fdFlags);",
"return PyArg_Parse(v, \"H\", &self->ob_itself.fdFlags)-1;",
"Finder flag bits"
),
("Location",
"return Py_BuildValue(\"O&\", PyMac_BuildPoint, self->ob_itself.fdLocation);",
"return PyArg_Parse(v, \"O&\", PyMac_GetPoint, &self->ob_itself.fdLocation)-1;",
"(x, y) location of the file's icon in its parent finder window"
),
("Fldr",
"return Py_BuildValue(\"h\", self->ob_itself.fdFldr);",
"return PyArg_Parse(v, \"h\", &self->ob_itself.fdFldr)-1;",
"Original folder, for 'put away'"
),
]
def __init__(self, name, prefix, itselftype):
ObjectDefinition.__init__(self, name, prefix, itselftype)
self.argref = "*" # Store FSSpecs, but pass them by address
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def output_tp_newBody(self):
Output("PyObject *self;");
Output()
Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
Output("memset(&((%s *)self)->ob_itself, 0, sizeof(%s));",
self.objecttype, self.itselftype)
Output("return self;")
def output_tp_initBody(self):
Output("%s *itself = NULL;", self.itselftype)
Output("static char *kw[] = {\"itself\", 0};")
Output()
Output("if (PyArg_ParseTupleAndKeywords(args, kwds, \"|O&\", kw, FInfo_Convert, &itself))")
OutLbrace()
Output("if (itself) memcpy(&((%s *)self)->ob_itself, itself, sizeof(%s));",
self.objecttype, self.itselftype)
Output("return 0;")
OutRbrace()
Output("return -1;")
class FSSpecDefinition(PEP253Mixin, ObjectDefinition):
getsetlist = [
("data",
"return PyString_FromStringAndSize((char *)&self->ob_itself, sizeof(self->ob_itself));",
None,
"Raw data of the FSSpec object"
)
]
def __init__(self, name, prefix, itselftype):
ObjectDefinition.__init__(self, name, prefix, | |
#!/usr/bin/env python
ANSIBLE_METADATA = {
'metadata_version': '2.0',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
---
module: ovh
short_description: Manage OVH API for DNS, monitoring and Dedicated servers
description:
- Add/Delete/Modify entries in OVH DNS
- Add reverse on OVH dedicated servers
- Install new dedicated servers from a template (both OVH and personal ones)
- Create a personal OVH template from a file (with h/w and s/w raid support)
- Monitor installation status on dedicated servers
- Add/Remove OVH Monitoring on dedicated servers
- Add/Remove a dedicated server from a OVH vrack
- Restart a dedicated server on Debian rescue or disk
- List dedicated servers, personal templates
- Create a template from a yml file inside an ansible role (see README)
- Terminate a dedicated server (doesn't confirm termination, has to be done manually)
author: <NAME> and Synthesio SRE Team
notes:
- In /etc/ovh.conf (on host that executes module), you should add your
OVH API credentials like:
[default]
; general configuration: default endpoint
endpoint=ovh-eu
[ovh-eu]
; configuration specific to 'ovh-eu' endpoint
application_key=<YOUR APPLICATION KEY>
application_secret=<YOUR APPLICATIOM SECRET>
consumer_key=<YOUR CONSUMER KEY>
Or you can provide these values as module's attributes.
requirements:
- ovh > 0.3.5
options:
endpoint:
required: false
description: The API endpoint to use
application_key:
required: false
description: The application key to use to connect to the API
application_secret:
required: false
description: The application secret to use to connect to the API
consumer_key:
required: false
description: The consumer key to use to connect to the API
name:
required: true
description: The name of the service (dedicated, dns)
state:
required: false
default: present
choices: ['present', 'absent', 'modified']
description:
- Determines whether the dedicated/dns is to be created/modified
or deleted
service:
required: true
choices: ['boot', 'dns', 'vrack', 'reverse', 'monitoring', 'install', 'status', 'list', 'template', 'terminate']
description:
- Determines the service you want to use in the module
boot, change the bootid and can reboot the dedicated server
dns, manage A entries in your domain
vrack, add or remove a dedicated from a vrack
reverse, add/modify a reverse on a dedicated server
monitoring, add/removing a dedicated server from OVH monitoring
install, install from a template
status, used after install to know install status
list, get a list of personal dedicated servers, personal templates and OVH installationTemplates
template, create/delete an ovh template from a yaml file
terminate, give back a dedicated server to OVH
domain:
required: false
default: None
description:
- The domain used in dns and reverse services
ip:
required: false
default: None
description:
- The public IP used in reverse and dns services
vrack:
required: false
default: None
description:
- The vrack ID used in vrack service
boot:
required: false
default: harddisk
choices: ['harddisk','rescue']
description:
- Which way you want to boot your dedicated server
force_reboot:
required: false
default: no
choices: ['yes','no','true','false']
description:
- When you want to restart a dedicated server without changing its boot mode
template:
required: false
default: None
description:
- One of your personal template on OVH
hostname:
required: false
default: None
description:
- The hostname you want to replace in /etc/hostname when applying a template
'''
EXAMPLES = '''
# Add a host into the vrack
- name: Add server to vrack
ovh: service='vrack' vrack='VRACK ID' name='HOSTNAME'
# Add a DNS entry for `internal.bar.foo.com`
- name: Add server IP to DNS
ovh: service='dns' domain='foo.com' ip='192.168.3.11' name='internal.bar'
- name: Refresh domain
ovh: service='dns' name='refresh' domain='{{ domain }}'
# Change a server reverse
- name: Change Reverse on server
ovh: service=reverse name='internal.bar' ip='192.168.3.11' domain='foo.com'
# Install a server from a template
- name: Install the dedicated server
ovh: endpoint='ovh-eu' application_key='my_app_key' application_secret='my_application_secret' consumer_key='my_consumer_key' service='install' name='foo.ovh.eu' hostname='internal.bar.foo.com' template='SOME TEMPLATE' ssh_key_name='My Key' use_distrib_kernel=True
- name: Wait until installation is finished
local_action:
module: ovh
service: status
name: 'foo.ovh.eu'
register: result
until: result.msg.find("done") != -1
retries: 150
delay: 10
# Enable / disable OVH monitoring
- name: Remove ovh monitoring when necessary
ovh: service='monitoring' name='foo.ovh.eu' state='present / absent'
# List personal dedicated servers
- name: Get list of servers
ovh: service='list' name='dedicated'
register: servers
# List personal templates
- name: Get list of personal templates
ovh: service='list' name='templates'
register: templates
# Create a new template and install it
- name: check if template is already installed
ovh: service='list' name='templates'
register: templates
# the template must be located in the files directory inside the role
- name: Create template
ovh: service='template' name='custom' state='present'
run_once: yes
when: template not in templates.objects
- name: Install the dedicated server
ovh: service='install' name='foo.ovh.eu' hostname='internal.bar.foo.com' template='custom'
- name: Delete template
ovh: service='template' name='{{ template }}' state='absent'
run_once: yes
'''
RETURN = ''' # '''
import ast
import yaml
try:
import json
except ImportError:
import simplejson as json
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
from ansible.module_utils.basic import AnsibleModule
def getStatusInstall(ovhclient, module):
if module.params['name']:
if module.check_mode:
module.exit_json(changed=False, msg="done - (dry run mode)")
try:
tasklist = ovhclient.get('/dedicated/server/%s/task' % module.params['name'], function='reinstallServer')
result = ovhclient.get('/dedicated/server/%s/task/%s' % (module.params['name'], max(tasklist)))
module.exit_json(changed=False, msg="%i: %s" % (max(tasklist), result['status']))
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.fail_json(changed=False, msg="Please give the service's name you want to know the install status")
def launchInstall(ovhclient, module):
if module.params['name'] and module.params['hostname'] and module.params['template']:
try:
compatible_templates = ovhclient.get('/dedicated/server/%s/install/compatibleTemplates' % module.params['name'])
compatible_templates = set([template for template_type in compatible_templates.keys() for template in compatible_templates[template_type]])
if module.params['template'] not in compatible_templates:
module.fail_json(changed=False, msg="%s doesn't exist in compatibles templates" % module.params['template'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
if module.check_mode:
module.exit_json(changed=True, msg="Installation in progress on %s ! - (dry run mode)" % module.params['name'])
details = {"details":{"language":"en","customHostname":module.params['hostname']},"templateName":module.params['template']}
if module.params.get('ssh_key_name', None):
try:
result = ovhclient.get('/me/sshKey')
if module.params['ssh_key_name'] not in result:
module.fail_json(changed=False, msg="%s doesn't exist in public SSH keys" % module.params['ssh_key_name'])
else:
details['details']['sshKeyName'] = module.params['ssh_key_name']
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
if module.params.get('use_distrib_kernel', False):
details['details']['useDistribKernel'] = module.params['use_distrib_kernel']
try:
ovhclient.post('/dedicated/server/%s/install/start' % module.params['name'],
**details)
module.exit_json(changed=True, msg="Installation in progress on %s !" % module.params['name'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
if not module.params['name']:
module.fail_json(changed=False, msg="Please give the service's name you want to install")
if not module.params['template']:
module.fail_json(changed=False, msg="Please give a template to install")
if not module.params['hostname']:
module.fail_json(changed=False, msg="Please give a hostname for your installation")
def changeMonitoring(ovhclient, module):
if module.params['name'] and module.params['state']:
if module.check_mode:
module.exit_json(changed=True, msg="Monitoring %s on %s - (dry run mode)" % (module.params['state'], module.params['name']))
if module.params['state'] == 'present':
try:
ovhclient.put('/dedicated/server/%s' % module.params['name'],
monitoring=True)
module.exit_json(changed=True, msg="Monitoring activated on %s" % module.params['name'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
elif module.params['state'] == 'absent':
try:
ovhclient.put('/dedicated/server/%s' % module.params['name'],
monitoring=False)
module.exit_json(changed=True, msg="Monitoring deactivated on %s" % module.params['name'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.fail_json(changed=False, msg="State %s does not match 'present' or 'absent'" % module.params['state'])
else:
if not module.params['name']:
module.fail_json(changed=False, msg="Please give a name to change monitoring state")
if not module.params['state']:
module.fail_json(changed=False, msg="Please give a state for your monitoring")
def terminateServer(ovhclient, module):
if module.params['name']:
if module.check_mode:
module.exit_json(changed=True, msg="Terminate %s is done, please confirm via the email sent - (dry run mode)" % module.params['name'])
try:
ovhclient.post('/dedicated/server/%s/terminate' % module.params['name'])
module.exit_json(changed=True, msg="Terminate %s is done, please confirm via the email sent" % module.params['name'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.fail_json(changed=False, msg="Please give a dedicated name to terminate")
def changeReverse(ovhclient, module):
if module.params['domain'] and module.params['ip'] :
fqdn = module.params['name'] + '.' + module.params['domain'] + '.'
result = {}
try:
result = ovhclient.get('/ip/%s/reverse/%s' % (module.params['ip'], module.params['ip']))
except ovh.exceptions.ResourceNotFoundError:
result['reverse'] = ''
if result['reverse'] != fqdn:
if module.check_mode:
module.exit_json(changed=True, msg="Reverse %s to %s succesfully set ! - (dry run mode)" % (module.params['ip'], fqdn))
try:
ovhclient.post('/ip/%s/reverse' % module.params['ip'],
ipReverse=module.params['ip'],
reverse=fqdn)
module.exit_json(changed=True, msg="Reverse %s to %s succesfully set !" % (module.params['ip'], fqdn))
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.exit_json(changed=False, msg="Reverse already set")
else:
if not module.params['domain']:
module.fail_json(changed=False, msg="Please give a domain to add your target")
if not module.params['ip']:
module.fail_json(changed=False, msg="Please give an IP to add your target")
def changeDNS(ovhclient, module):
msg = ''
if module.params['name'] == 'refresh':
if module.check_mode:
module.exit_json(changed=True, msg="Domain %s succesfully refreshed ! - (dry run mode)" % module.params['domain'])
try:
ovhclient.post('/domain/zone/%s/refresh' % module.params['domain'])
module.exit_json(changed=True, | |
item.xpath(xpEngagedDate, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'engaged_date_date_collected', item.xpath(xpEngagedDateDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'engaged_date_data_collection_stage', item.xpath(xpEngagedDateDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, EngagedDate)
''' Parse sub-tables '''
def parse_employment(self, element):
''' Element paths '''
xpEmployment = 'hmis:Employment'
xpEmploymentIDIDNum = 'hmis:PriorResidenceID/hmis:IDNum'
xpEmploymentIDIDStr = 'hmis:PriorResidenceID/hmis:IDStr'
xpEmploymentIDDeleteOccurredDate = 'hmis:PriorResidenceID/@hmis:deleteOccurredDate'
xpEmploymentIDDeleteEffective = 'hmis:PriorResidenceID/@hmis:deleteEffective'
xpEmploymentIDDelete = 'hmis:PriorResidenceID/@hmis:delete'
xpCurrentlyEmployed = 'hmis:CurrentlyEmployed'
xpCurrentlyEmployedDateCollected = 'hmis:CurrentlyEmployed/@hmis:dateCollected'
xpCurrentlyEmployedDateEffective = 'hmis:CurrentlyEmployed/@hmis:dateEffective'
xpCurrentlyEmployedDataCollectionStage = 'hmis:CurrentlyEmployed/@hmis:dataCollectionStage'
xpHoursWorkedLastWeek = 'hmis:HoursWorkedLastWeek'
xpHoursWorkedLastWeekDateCollected = 'hmis:HoursWorkedLastWeek/@hmis:dateCollected'
xpHoursWorkedLastWeekDateEffective = 'hmis:HoursWorkedLastWeek/@hmis:dateEffective'
xpHoursWorkedLastWeekDataCollectionStage = 'hmis:HoursWorkedLastWeek/@hmis:dataCollectionStage'
xpEmploymentTenure = 'hmis:EmploymentTenure'
xpEmploymentTenureDateCollected = 'hmis:EmploymentTenure/@hmis:dateCollected'
xpEmploymentTenureDateEffective = 'hmis:EmploymentTenure/@hmis:dateEffective'
xpEmploymentTenureDataCollectionStage = 'hmis:EmploymentTenure/@hmis:dataCollectionStage'
xpLookingForWork = 'hmis:LookingForWork'
xpLookingForWorkDateCollected = 'hmis:LookingForWork/@hmis:dateCollected'
xpLookingForWorkDateEffective = 'hmis:LookingForWork/@hmis:dateEffective'
xpLookingForWorkDataCollectionStage = 'hmis:LookingForWork/@hmis:dataCollectionStage'
itemElements = element.xpath(xpEmployment, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'employment_id_id_id_num', item.xpath(xpEmploymentIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_id_id_id_str', item.xpath(xpEmploymentIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_id_id_delete_occurred_date', item.xpath(xpEmploymentIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_id_id_delete_effective_date', item.xpath(xpEmploymentIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_id_id_delete', item.xpath(xpEmploymentIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'currently_employed', item.xpath(xpCurrentlyEmployed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'currently_employed_date_collected', item.xpath(xpCurrentlyEmployedDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_employed_date_effective', item.xpath(xpCurrentlyEmployedDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_employed_data_collection_stage', item.xpath(xpCurrentlyEmployedDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'hours_worked_last_week', item.xpath(xpHoursWorkedLastWeek, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'hours_worked_last_week_date_collected', item.xpath(xpHoursWorkedLastWeekDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hours_worked_last_week_date_effective', item.xpath(xpHoursWorkedLastWeekDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'hours_worked_last_week_data_collection_stage', item.xpath(xpHoursWorkedLastWeekDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'employment_tenure', item.xpath(xpEmploymentTenure, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'employment_tenure_date_collected', item.xpath(xpEmploymentTenureDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_tenure_date_effective', item.xpath(xpEmploymentTenureDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'employment_tenure_data_collection_stage', item.xpath(xpEmploymentTenureDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'looking_for_work', item.xpath(xpLookingForWork, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'looking_for_work_date_collected', item.xpath(xpLookingForWorkDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'looking_for_work_date_effective', item.xpath(xpLookingForWorkDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'looking_for_work_data_collection_stage', item.xpath(xpLookingForWorkDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Employment)
''' Parse sub-tables '''
def parse_domestic_violence(self, element):
''' Element paths '''
xpDomesticViolence = 'hmis:DomesticViolence'
xpDomesticViolenceSurvivor = 'hmis:DomesticViolenceSurvivor'
xpDomesticViolenceSurvivorDateCollected = 'hmis:DomesticViolenceSurvivor/@hmis:dateCollected'
xpDomesticViolenceSurvivorDateEffective = 'hmis:DomesticViolenceSurvivor/@hmis:dateEffective'
xpDomesticViolenceSurvivorDataCollectionStage = 'hmis:DomesticViolenceSurvivor/@hmis:dataCollectionStage'
xpDVOccurred = 'hmis:DVOccurred'
xpDVOccurredDateCollected = 'hmis:DVOccurred/@hmis:dateCollected'
xpDVOccurredDateEffective = 'hmis:DVOccurred/@hmis:dateEffective'
xpDVOccurredDataCollectionStage = 'hmis:DVOccurred/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDomesticViolence, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'domestic_violence_survivor', item.xpath(xpDomesticViolenceSurvivor, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'domestic_violence_survivor_date_collected', item.xpath(xpDomesticViolenceSurvivorDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'domestic_violence_survivor_date_effective', item.xpath(xpDomesticViolenceSurvivorDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'domestic_violence_survivor_data_collection_stage', item.xpath(xpDomesticViolenceSurvivorDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'dv_occurred', item.xpath(xpDVOccurred, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'dv_occurred_date_collected', item.xpath(xpDVOccurredDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'dv_occurred_date_effective', item.xpath(xpDVOccurredDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'dv_occurred_data_collection_stage', item.xpath(xpDVOccurredDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DomesticViolence)
''' Parse sub-tables '''
def parse_disabling_condition(self, element):
''' Element paths '''
xpDisablingCondition = 'hmis:DisablingCondition'
xpDisablingConditionDateCollected = 'hmis:DisablingCondition/@hmis:dateCollected'
xpDisablingConditionDateEffective = 'hmis:DisablingCondition/@hmis:dateEffective'
xpDisablingConditionDataCollectionStage = 'hmis:DisablingCondition/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDisablingCondition, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'disabling_condition', item.xpath(xpDisablingCondition, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'disabling_condition_date_collected', item.xpath(xpDisablingConditionDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'disabling_condition_date_effective', item.xpath(xpDisablingConditionDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'disabling_condition_data_collection_stage', item.xpath(xpDisablingConditionDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DisablingCondition)
''' Parse sub-tables '''
def parse_developmental_disability(self, element):
''' Element paths '''
xpDevelopmentalDisability = 'hmis:DevelopmentalDisability'
xpHasDevelopmentalDisability = 'hmis:HasDevelopmentalDisability'
xpHasDevelopmentalDisabilityDateCollected = 'hmis:HasDevelopmentalDisability/@hmis:dateCollected'
xpHasDevelopmentalDisabilityDateEffective = 'hmis:HasDevelopmentalDisability/@hmis:dateEffective'
xpHasDevelopmentalDisabilityDataCollectionStage = 'hmis:HasDevelopmentalDisability/@hmis:dataCollectionStage'
xpReceiveDevelopmentalDisability = 'hmis:ReceiveDevelopmentalDisability'
xpReceiveDevelopmentalDisabilityDateCollected = 'hmis:ReceiveDevelopmentalDisability/@hmis:dateCollected'
xpReceiveDevelopmentalDisabilityDateEffective = 'hmis:ReceiveDevelopmentalDisability/@hmis:dateEffective'
xpReceiveDevelopmentalDisabilityDataCollectionStage = 'hmis:ReceiveDevelopmentalDisability/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDevelopmentalDisability, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'has_developmental_disability', item.xpath(xpHasDevelopmentalDisability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'has_developmental_disability_date_collected', item.xpath(xpHasDevelopmentalDisabilityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_developmental_disability_date_effective', item.xpath(xpHasDevelopmentalDisabilityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'has_developmental_disability_data_collection_stage', item.xpath(xpHasDevelopmentalDisabilityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'receive_developmental_disability', item.xpath(xpReceiveDevelopmentalDisability, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'receive_developmental_disability_date_collected', item.xpath(xpReceiveDevelopmentalDisabilityDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_developmental_disability_date_effective', item.xpath(xpReceiveDevelopmentalDisabilityDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'receive_developmental_disability_data_collection_stage', item.xpath(xpReceiveDevelopmentalDisabilityDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DevelopmentalDisability)
''' Parse sub-tables '''
def parse_destinations(self, element):
''' Element paths '''
xpDestinations = 'hmis:Destinations/hmis:Destination'
xpDestinationIDIDNum = 'hmis:DestinationID/hmis:IDNum'
xpDestinationIDIDStr = 'hmis:DestinationID/hmis:IDStr'
xpDestinationIDDeleteOccurredDate = 'hmis:DestinationID/@hmis:deleteOccurredDate'
xpDestinationIDDeleteEffective = 'hmis:DestinationID/@hmis:deleteEffective'
xpDestinationIDDelete = 'hmis:DestinationID/@hmis:delete'
xpDestinationCode = 'hmis:DestinationCode'
xpDestinationCodeDateCollected = 'hmis:DestinationCode/@hmis:dateCollected'
xpDestinationCodeDateEffective = 'hmis:DestinationCode/@hmis:dateEffective'
xpDestinationCodeDataCollectionStage = 'hmis:DestinationCode/@hmis:dataCollectionStage'
xpDestinationOther = 'hmis:DestinationOther'
xpDestinationOtherDateCollected = 'hmis:DestinationOther/@hmis:dateCollected'
xpDestinationOtherDateEffective = 'hmis:DestinationOther/@hmis:dateEffective'
xpDestinationOtherDataCollectionStage = 'hmis:DestinationOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDestinations, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'destination_id_id_num', item.xpath(xpDestinationIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_id_id_str', item.xpath(xpDestinationIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_id_delete_occurred_date', item.xpath(xpDestinationIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_id_delete_effective_date', item.xpath(xpDestinationIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_id_delete', item.xpath(xpDestinationIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'destination_code', item.xpath(xpDestinationCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_code_date_collected', item.xpath(xpDestinationCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_code_date_effective', item.xpath(xpDestinationCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_code_data_collection_stage', item.xpath(xpDestinationCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'destination_other', item.xpath(xpDestinationOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'destination_other_date_collected', item.xpath(xpDestinationOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_other_date_effective', item.xpath(xpDestinationOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'destination_other_data_collection_stage', item.xpath(xpDestinationOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Destinations)
''' Parse sub-tables '''
def parse_degree(self, element):
''' Element paths '''
xpDegree = 'hmis:Degree'
xpDegreeIDIDNum = './hmis:IDNum'
xpDegreeIDIDStr = './hmis:IDStr'
xpDegreeIDDeleteOccurredDate = './@hmis:deleteOccurredDate'
xpDegreeIDDeleteEffective = './@hmis:deleteEffective'
xpDegreeIDDelete = './@hmis:delete'
xpDegreeOther = 'hmis:DegreeOther'
xpDegreeOtherDateCollected = 'hmis:DegreeOther/@hmis:dateCollected'
xpDegreeOtherDateEffective = 'hmis:DegreeOther/@hmis:dateEffective'
xpDegreeOtherDataCollectionStage = 'hmis:DegreeOther/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDegree, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'degree_id_id_num', item.xpath(xpDegreeIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_id_id_str', item.xpath(xpDegreeIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_id_delete_occurred_date', item.xpath(xpDegreeIDDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_id_delete_effective_date', item.xpath(xpDegreeIDDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_id_delete', item.xpath(xpDegreeIDDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'degree_other', item.xpath(xpDegreeOther, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_other_date_collected', item.xpath(xpDegreeOtherDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_other_date_effective', item.xpath(xpDegreeOtherDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_other_data_collection_stage', item.xpath(xpDegreeOtherDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Degree)
''' Parse sub-tables '''
parse_degree_code(self, item)
def parse_degree_code(self, element):
''' Element paths '''
xpDegreeCode = 'hmis:DegreeCode'
xpDegreeCodeDateCollected = 'hmis:DegreeCode/@hmis:dateCollected'
xpDegreeCodeDateEffective = 'hmis:DegreeCode/@hmis:dateEffective'
xpDegreeCodeDataCollectionStage = 'hmis:DegreeCode/@hmis:dataCollectionStage'
itemElements = element.xpath(xpDegreeCode, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'degree_code', item.xpath(xpDegreeCode, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'degree_date_collected', item.xpath(xpDegreeCodeDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_date_effective', item.xpath(xpDegreeCodeDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'degree_data_collection_stage', item.xpath(xpDegreeCodeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'degree_index_id', self.degree_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, DegreeCode)
''' Parse sub-tables '''
def parse_currently_in_school(self, element):
''' Element paths '''
xpCurrentlyInSchool = 'hmis:CurrentlyInSchool'
xpCurrentlyInSchoolDateCollected = 'hmis:CurrentlyInSchool/@hmis:dateCollected'
xpCurrentlyInSchoolDateEffective = 'hmis:CurrentlyInSchool/@hmis:dateEffective'
xpCurrentlyInSchoolDataCollectionStage = 'hmis:CurrentlyInSchool/@hmis:dataCollectionStage'
itemElements = element.xpath(xpCurrentlyInSchool, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'currently_in_school', item.xpath(xpCurrentlyInSchool, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'currently_in_school_date_collected', item.xpath(xpCurrentlyInSchoolDateCollected, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_in_school_date_effective', item.xpath(xpCurrentlyInSchoolDateEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'currently_in_school_data_collection_stage', item.xpath(xpCurrentlyInSchoolDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'person_historical_index_id', self.person_historical_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, CurrentlyInSchool)
''' Parse sub-tables '''
def parse_contact_made(self, element):
''' Element paths '''
xpContactsMade = 'hmis:ContactsMade/hmis:ContactMade'
xpContactIDIDNum = 'hmis:ContactID/hmis:IDNum'
xpContactIDIDStr = 'hmis:ContactID/hmis:IDStr'
xpContactIDDeleteOccurredDate = 'hmis:ContactID/@hmis:deleteOccurredDate'
xpContactIDDeleteEffective = 'hmis:ContactID/@hmis:deleteEffective'
xpContactIDDelete = 'hmis:ContactID/@hmis:delete'
xpContactDate = 'hmis:ContactDate'
xpContactDateDataCollectionStage = 'hmis:ContactDate/@hmis:dataCollectionStage'
xpContactLocation = 'hmis:ContactLocation'
xpContactLocationDataCollectionStage = 'hmis:ContactLocation/@hmis:dataCollectionStage'
itemElements = element.xpath(xpContactsMade, namespaces = self.nsmap)
if itemElements is not None:
for | |
YOU TO PUNCH GHOSTS->(R)Ghost Punching Gloves]]"
}
],
"hooks": [],
"cleanText": "You take out the nearest robot guard and disguised yourself as one of them. You walk towards the security room. Waving and saying wassup to the other robot guards trying to blend in. This works and you make it to the front door. You enter to see one guard sitting in front of all the screens he was asleep all this time. You disable the cameras and walk back out, you see three treasures that peak your interest. Which one will you choose the take?"
},
{
"name": "(ER)Experiment Raygun",
"tags": "",
"id": "116",
"text": "You pick up the Experimental Orbital Beam Raygun. You attempt to fire it but nothing happened. Must be out of batteries. You put the raygun into your backpack and left the Science Hall\n\n[[CONTINUE->(ERS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(ERS)Main Hall",
"original": "[[CONTINUE->(ERS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up the Experimental Orbital Beam Raygun. You attempt to fire it but nothing happened. Must be out of batteries. You put the raygun into your backpack and left the Science Hall"
},
{
"name": "(ER)<NAME>",
"tags": "",
"id": "117",
"text": "You pick up <NAME>'s Skull, I guess you like old people's bones for some reason, still it will fetch a hefty price. You put the skull into your backpack and left the Science Hall.\n\n[[CONTINUE->(ERS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(ERS)Main Hall",
"original": "[[CONTINUE->(ERS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up <NAME>strong's Skull, I guess you like old people's bones for some reason, still it will fetch a hefty price. You put the skull into your backpack and left the Science Hall."
},
{
"name": "(ER)Ghost Punch<NAME>",
"tags": "",
"id": "118",
"text": "You pick up the boxing gloves that can punch ghosts. You put them on and swung around in the air. Nothing happened because you don't believe in ghost therefore no ghosts revealed themselves to you. (in reality you just punched out 4 ghosts around you.) You put the gloves into your backpack and left the Science Hall.\n\n[[CONTINUE->(ERS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(ERS)Main Hall",
"original": "[[CONTINUE->(ERS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up the boxing gloves that can punch ghosts. You put them on and swung around in the air. Nothing happened because you don't believe in ghost therefore no ghosts revealed themselves to you. (in reality you just punched out 4 ghosts around you.) You put the gloves into your backpack and left the Science Hall."
},
{
"name": "(R)Experiment Raygun",
"tags": "",
"id": "119",
"text": "You pick up the Experimental Orbital Beam Raygun. You attempt to fire it but nothing happened. Must be out of batteries. You put the raygun into your backpack and left the Science Hall\n\n[[CONTINUE->(RS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(RS)Main Hall",
"original": "[[CONTINUE->(RS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up the Experimental Orbital Beam Raygun. You attempt to fire it but nothing happened. Must be out of batteries. You put the raygun into your backpack and left the Science Hall"
},
{
"name": "(R)<NAME>",
"tags": "",
"id": "120",
"text": "You pick up Neil Armstrong's Skull, I guess you like old people's bones for some reason, still it will fetch a hefty price. You put the skull into your backpack and left the Science Hall.\n\n[[CONTINUE->(RS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(RS)Main Hall",
"original": "[[CONTINUE->(RS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up Neil Armstrong's Skull, I guess you like old people's bones for some reason, still it will fetch a hefty price. You put the skull into your backpack and left the Science Hall."
},
{
"name": "(R)Ghost Punching Gloves",
"tags": "",
"id": "121",
"text": "You pick up the boxing gloves that can punch ghosts. You put them on and swung around in the air. Nothing happened because you don't believe in ghost therefore no ghosts revealed themselves to you. (in reality you just punched out 4 ghosts around you.) You put the gloves into your backpack and left the Science Hall.\n\n[[CONTINUE->(RS)Main Hall]]",
"links": [
{
"linkText": "CONTINUE",
"passageName": "(RS)Main Hall",
"original": "[[CONTINUE->(RS)Main Hall]]"
}
],
"hooks": [],
"cleanText": "You pick up the boxing gloves that can punch ghosts. You put them on and swung around in the air. Nothing happened because you don't believe in ghost therefore no ghosts revealed themselves to you. (in reality you just punched out 4 ghosts around you.) You put the gloves into your backpack and left the Science Hall."
},
{
"name": "(RS)Main Hall",
"tags": "",
"id": "122",
"text": "There is one hall left to explore.\n\n[[EGYPTIAN HALL->(RS)Egyptian Hall]]",
"links": [
{
"linkText": "EGYPTIAN HALL",
"passageName": "(RS)Egyptian Hall",
"original": "[[EGYPTIAN HALL->(RS)Egyptian Hall]]"
}
],
"hooks": [],
"cleanText": "There is one hall left to explore."
},
{
"name": "(ERS)Main Hall",
"tags": "",
"id": "123",
"text": "You have finally taken one treasure from each hall now time to find out how much money you've made.\n\n[[TOTAL->TOTAL MONEY]]",
"links": [
{
"linkText": "TOTAL",
"passageName": "TOTAL MONEY",
"original": "[[TOTAL->TOTAL MONEY]]"
}
],
"hooks": [],
"cleanText": "You have finally taken one treasure from each hall now time to find out how much money you've made."
},
{
"name": "(RS)Egyptian Hall",
"tags": "",
"id": "124",
"text": "You walk into the Egyptian Hall and find incredible pieces of art displayed. Things from sculptures to the mummy's sarcophagus can be seen. You take a few steps in and all of a sudden sand starts to appear. There is a sandstorm starting in here. What do you do?\n\n[[FIND THE SOURCE->(RS)Pharaoh's Room]]\n[[LEAVE->(RS)Ending 3]]",
"links": [
{
"linkText": "FIND THE SOURCE",
"passageName": "(RS)Pharaoh's Room",
"original": "[[FIND THE SOURCE->(RS)Pharaoh's Room]]"
},
{
"linkText": "LEAVE",
"passageName": "(RS)Ending 3",
"original": "[[LEAVE->(RS)Ending 3]]"
}
],
"hooks": [],
"cleanText": "You walk into the Egyptian Hall and find incredible pieces of art displayed. Things from sculptures to the mummy's sarcophagus can be seen. You take a few steps in and all of a sudden sand starts to appear. There is a sandstorm starting in here. What do you do?"
},
{
"name": "(R)<NAME>",
"tags": "",
"id": "125",
"text": "You put on your mask to keep the sand out of your mouth determined to find the source of this sandstorm. You push against the winds knowing that the closer you get to the source the more winds resist you. Before you know it you made to it the source, the pharaoh's room.''WHO DARES DISTURB MY SLUMBER!!!'' What do you do?\n\n[[TELL HIM YOU'RE HIS MOTHER->(R)Mother]]\n[[BEAT HIM UP->(R)WWE SMACKDOWN]]",
"links": [
{
"linkText": "TELL HIM YOU'RE HIS MOTHER",
"passageName": "(R)Mother",
"original": "[[TELL HIM YOU'RE HIS MOTHER->(R)Mother]]"
},
{
"linkText": "BEAT HIM UP",
"passageName": "(R)WWE SMACKDOWN",
"original": "[[BEAT HIM UP->(R)WWE SMACKDOWN]]"
}
],
"hooks": [],
"cleanText": "You put on your mask to keep the sand out of your mouth determined to find the source of this sandstorm. You push against the winds knowing that the closer you get to the source the more winds resist you. Before you know it you made to it the source, the pharaoh's room.''WHO DARES DISTURB MY SLUMBER!!!'' What do you do?"
},
{
"name": "(R)Ending 3",
"tags": "",
"id": "126",
"text": "You decided to choose life so you turned around and headed for the exit. Only to have the storm get even stronger. You start to get lost but keep walking forward for what it seems minutes have passed and you haven't made any progress. Eventually your lungs fill with sand and you die. GAMEOVER!\n\n[[TRY AGAIN->(R)Egyptian Hall]]",
"links": [
{
"linkText": "TRY AGAIN",
"passageName": "(R)Egyptian Hall",
"original": "[[TRY AGAIN->(R)Egyptian Hall]]"
}
],
"hooks": [],
"cleanText": "You decided to choose life so you turned around and headed for the exit. Only to | |
the direction i.
d: float or arr
Superconducting pairing amplitude.
-If d is a float, the pairing amplitude takes the same value in
every site.
-If d is a 2D array, each element of the array is the on-site
superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 4 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
if np.isscalar(d):
d = d * np.ones(N)
#Obtain the eigenenergies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,d,Bx,By,Bz=e.flatten(),d.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
for i in range(2):
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1,step=2)], H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i)] = (-1)**(i)*(np.repeat(e,2) + Bz)
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2*Nz)] = -1*(-1)**(i)*ty+1j*aRx_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2*Nz)] = -1*(-1)**(i)*ty-1j*aRx_ky
H[diagonal(int(m/2)*(i+1),k=2*Nz-1,step=2,init=1+int(m/2)*i)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),k=-2*Nz+1,step=2,init=1+int(m/2)*i)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1-2*Nz,step=2)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),k=1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),k=-1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=3,step=2)] += -1*(-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-3,step=2)] += -1*(-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(m,k=int(m/2)+1,step=2)], H[diagonal(m,k=-int(m/2)-1,step=2)] = -np.conj(d), -d
H[diagonal(m,k=int(m/2)-1,step=2,init=1)], H[diagonal(m,k=-int(m/2)+1,step=2,init=1)] = np.conj(d), d
return (H)
#%%
def LO_2D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
2D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 2D
Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 2D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 2D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 2D array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 2 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
#Obtain the eigenenergies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,Bx,By,Bz=e.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
H[diagonal(m,k=1,step=2)], H[diagonal(m,k=-1,step=2)] = Bx-1j*By, Bx+1j*By
H[diagonal(m)] = np.repeat(e,2) + Bz
H[diagonal(m,k=2*Nz)] = -ty+1j*aRx_ky
H[diagonal(m,k=-2*Nz)] = -ty-1j*aRx_ky
H[diagonal(m,k=2*Nz-1,step=2,init=1)] += -1j*aRz_ky
H[diagonal(m,k=-2*Nz+1,step=2,init=1)] += 1j*aRz_ky
H[diagonal(m,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(m,k=-1-2*Nz,step=2)] += 1j*aRz_ky
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
H[diagonal(m,k=2)] = -tz
H[diagonal(m,k=-2)] = -tz
H[diagonal(m,k=1,step=2,init=1)] += aRx_kz+1j*aRy_kz
H[diagonal(m,k=-1,step=2,init=1)] += aRx_kz-1j*aRy_kz
H[diagonal(m,k=3,step=2)] += -aRx_kz+1j*aRy_kz
H[diagonal(m,k=-3,step=2)] += -aRx_kz-1j*aRy_kz
return (H)
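# Usage sketch (not part of the original module): building a small Lutchyn-Oreg
# Hamiltonian with LO_2D_builder_NoSC. All parameter values below are
# illustrative assumptions only, and the call relies on the same module-level
# imports and helpers used above (np, cons, scipy.sparse, diagonal).
if __name__ == "__main__":
    H_demo = LO_2D_builder_NoSC(N=(10, 8), dis=10, m_eff=0.023, mu=0.0,
                                B=[0.5, 0.0, 0.0], aR=20.0, sparse='no')
    print(H_demo.shape)  # 2 spin components per site -> (160, 160)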
#%%
def LO_3D_builder(N,dis,m_eff,mu,B,aR,d, space='position', k_vec=np.nan ,sparse='yes'):
"""
3D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 3D
Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 3D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 3D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
the rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 3D array with the on-site Rashba couplings in the direction i.
d: float or arr
Superconducting pairing amplitude.
-If d is a float, the pairing amplitude takes the same value in
every site.
-If d is a 3D array, each element of the array is the on-site
superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Nx, Ny, Nz = N[0], N[1], N[2]
if np.ndim(dis)==0:
dis_x, dis_y, dis_z = dis, dis, dis
else:
dis_x, dis_y, dis_z = dis[0], dis[1], dis[2]
m = 4 * Nx * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Nx,Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
aRz=aR*np.ones((Nx,Ny,Nz))
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones((Nx,Ny,Nz))
aRy=aR[1]*np.ones((Nx,Ny,Nz))
aRz=aR[2]*np.ones((Nx,Ny,Nz))
else:
aRx=np.zeros((Nx,Ny,Nz))
aRy=np.zeros((Nx,Ny,Nz))
| |
<reponame>eaton-lab/physeqs<filename>ipcoal/utils/utils.py
#!/usr/bin/env python
"""
Miscellaneous functions
"""
from typing import Tuple, Optional
import time
import datetime
import itertools
import toytree
import numpy as np
import pandas as pd
from numba import njit
from ipcoal.utils.jitted import count_matrix_int
try:
from IPython.display import display
from ipywidgets import IntProgress, HTML, Box
except ImportError:
pass
ABBA_IDX = [
(1, 4), (2, 8), (3, 12), (4, 1),
(6, 9), (7, 13), (8, 2), (9, 6),
(11, 14), (12, 3), (13, 7), (14, 11),
]
BABA_IDX = [
(1, 1), (2, 2), (3, 3), (4, 4),
(6, 6), (7, 7), (8, 8), (9, 9),
(11, 11), (12, 12), (13, 13), (14, 14),
]
FIXED_IDX = [
(0, 0), (5, 5), (10, 10), (15, 15),
]
class IpcoalError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class Progress:
"""
Interactive progress bar for jupyter notebooks.
"""
def __init__(self, njobs, message, children):
# data
self.njobs = njobs
self.message = message
self.start = time.time()
# the progress bar
self.prog = IntProgress(
value=0, min=0, max=self.njobs,
layout={
"width": "350px",
"height": "30px",
"margin": "5px 0px 0px 0px",
})
# the message above progress bar
self.label = HTML(
self.printstr,
layout={
"height": "25px",
"margin": "0px",
})
# the box widget container
heights = [
int(i.layout.height[:-2]) for i in
children + [self.label, self.prog]
]
self.widget = Box(
children=children + [self.label, self.prog],
layout={
"display": "flex",
"flex_flow": "column",
"height": "{}px".format(sum(heights) + 5),
"margin": "5px 0px 5px 0px",
})
@property
def printstr(self):
"""
message as html
"""
elapsed = datetime.timedelta(seconds=int(time.time() - self.start))
str1 = "<span style='font-size:14px; font-family:monospace'>"
str2 = "</span>"
inner = "{} | {:>3}% | {}".format(
self.message,
int(100 * (self.prog.value / self.njobs)),
elapsed,
)
return str1 + inner + str2
def display(self):
"""Show html"""
display(self.widget)
def increment_all(self, value=1):
"""adds value to prog"""
self.prog.value += value
if self.prog.value == self.njobs:
self.prog.bar_style = "success"
self.increment_time()
def increment_time(self):
"""sets label value to printstr"""
self.label.value = self.printstr
def get_admix_interval_as_gens(
tree: 'ToyTree',
idx0:int,
idx1:int,
heights:Optional[Tuple[int, int]]=None,
props:Optional[Tuple[float, float]]=None,
) -> Tuple[int, int]:
"""
Returns the interval, in units of generations, over which two
edges of a tree overlap, with the lower and upper bounds
optionally trimmed. If the user enters admix times as integers
they are only validated, not trimmed.
"""
if tree.idx_dict[idx0].is_root() or tree.idx_dict[idx1].is_root():
raise IpcoalError(f"no shared admix interval for idxs: {idx0} {idx1}")
# get full possible intervals for these two nodes from the tree
node0 = tree.idx_dict[idx0]
ival0 = (node0.height, node0.up.height)
node1 = tree.idx_dict[idx1]
ival1 = (node1.height, node1.up.height)
low_bin = max([ival0[0], ival1[0]])
top_bin = min([ival0[1], ival1[1]])
if top_bin < low_bin:
raise IpcoalError(f"no shared admix interval for idxs: {idx0} {idx1}")
# if user entered a time in gens then check if it works
if heights is not None:
if not ((heights[0] >= low_bin) and (heights[1] <= top_bin)):
raise IpcoalError(
f"admix interval ({heights}) not within a shared "
f"edge interval for idxs: {idx0} {idx1}")
return max(low_bin + 1e-3, heights[0]), min(top_bin - 1e-3, heights[1])
# restrict migration within bin to a smaller interval
length = top_bin - low_bin
low_limit = max(low_bin + 1e-3, low_bin + (length * props[0]))
top_limit = min(top_bin - 1e-3, low_bin + (length * props[1]))
return low_limit, top_limit
def get_all_admix_edges(ttree, lower=0.25, upper=0.75, exclude_sisters=False):
"""
Find all possible admixture edges on a tree.
Edges are unidirectional, so the source and dest intervals need to
overlap in time. To restrict migration so that it occurs away from
nodes (events near nodes can be harder to detect when validating
methods) you can set upper and lower limits. For example, to make
all source migrations occur at the midpoint of the overlapping
interval, set upper=.5, lower=.5.
"""
# bounds on edge overlaps
if lower is None:
lower = 0.0
if upper is None:
upper = 1.0
# for all nodes map the potential admixture interval
for snode in ttree.treenode.traverse():
if snode.is_root():
snode.interval = (None, None)
else:
snode.interval = (snode.height, snode.up.height)
# for all nodes find overlapping intervals
intervals = {}
for snode in ttree.treenode.traverse():
for dnode in ttree.treenode.traverse():
if not any([snode.is_root(), dnode.is_root(), dnode == snode]):
# [option] skip sisters
if (exclude_sisters) & (dnode.up == snode.up):
continue
# check for overlap
smin, smax = snode.interval
dmin, dmax = dnode.interval
# find if nodes have interval where admixture can occur
low_bin = np.max([smin, dmin])
top_bin = np.min([smax, dmax])
if top_bin > low_bin:
# restrict migration within bin to a smaller interval
length = top_bin - low_bin
low_limit = low_bin + (length * lower)
top_limit = low_bin + (length * upper)
intervals[(snode.idx, dnode.idx)] = (low_limit, top_limit)
return intervals
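# Usage sketch (illustrative only, not part of the original module): list every
# candidate admixture edge on a random 6-tip toytree, with source/dest points
# pinned to the midpoint of each shared interval. The unittree call and its
# arguments are assumptions about the toytree API already imported above.
if __name__ == "__main__":
    _tre = toytree.rtree.unittree(ntips=6, treeheight=1e6, seed=123)
    print(get_all_admix_edges(_tre, lower=0.5, upper=0.5))
    # -> {(source_idx, dest_idx): (time_low, time_high), ...}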
def get_snps_count_matrix(tree, seqs):
"""
Return a multidimensional SNP count matrix (sensu simcat and SVDquartets).
Compiles SNP data into a nquartets x 16 x 16 count matrix with the order
of quartets determined by the shape of the tree.
"""
# get all quartets for this size tree
if isinstance(tree, toytree.ToyTree):
quarts = list(itertools.combinations(range(tree.ntips), 4))
else:
# or, can be entered as tuples directly, e.g., [(0, 1, 2, 3)]
quarts = tree
# shape of the arr (count matrix)
arr = np.zeros((len(quarts), 16, 16), dtype=np.int64)
# iterator for quartets, e.g., (0, 1, 2, 3), (0, 1, 2, 4)...
quartidx = 0
for currquart in quarts:
# cols indices match tip labels b/c we named tips node.idx
quartsnps = seqs[currquart, :]
# save as stacked matrices
arr[quartidx] = count_matrix_int(quartsnps)
# save flattened to counts
quartidx += 1
return arr
def calculate_dstat(seqs, p1, p2, p3, p4):
"""
Calculate ABBA-BABA (D-statistic) from a count matrix.
"""
# order tips into ab|cd tree based on hypothesis
mat = get_snps_count_matrix([(0, 1, 2, 3)], seqs[[p1, p2, p3, p4], :])[0]
# calculate
abba = sum([mat[i] for i in ABBA_IDX])
baba = sum([mat[i] for i in BABA_IDX])
if abba + baba == 0:
dstat = 0.
else:
dstat = (abba - baba) / float(abba + baba)
return pd.DataFrame({'dstat': [dstat], 'baba': [baba], 'abba': [abba]})
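# Usage sketch (hypothetical data, not part of the original module): `seqs` is
# an (ntaxa x nsites) integer matrix of 0-3 encoded bases; p1..p4 select the
# four taxa to test. With random data D should sit near zero. Assumes
# count_matrix_int accepts the default int64 dtype.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _fake_seqs = _rng.integers(0, 4, size=(4, 2000))
    print(calculate_dstat(_fake_seqs, 0, 1, 2, 3))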
def abba_baba(model, testtuples):
"""
Calculate ABBA/BABA statistic (D) as (ABBA - BABA) / (ABBA + BABA)
Parameters:
-----------
model (ipcoal.Model Class object):
A model class object from ipcoal that has generated sequence data by
calling either .sim_loci() or .sim_snps().
testtuples (tuple, list):
A tuple or list of tuples with the ordered taxon names for each test.
The order should be (P1, P2, P3, P4). You can see the names of taxa
from the tree on which data were simulated from the model object using
model.treeorig.draw();
Returns:
---------
pandas.DataFrame
"""
# check that data was simulated
if not model.seqs:
raise IpcoalError(
"you must first simulate data with .sim_snps() or .sim_loci()")
# ensure testtuples is a list of tuples
if isinstance(testtuples, tuple):
testtuples = [testtuples]
# get tip order of tree and check that testtuple names are in tips
tips = [i for i in model.treeorig.get_tip_labels()]
for tup in testtuples:
for name in tup:
if name not in tips:
raise IpcoalError(
"name {} is not in the tree {}"
.format(name, tips))
# get counts matrix
counts = get_snps_count_matrix(model.tree, model.seqs)
# store vals
abbas = np.zeros(counts.shape[0], dtype=int)
babas = np.zeros(counts.shape[0], dtype=int)
dstats = np.zeros(counts.shape[0], dtype=float)
p1 = np.zeros(counts.shape[0], dtype="U10")
p2 = np.zeros(counts.shape[0], dtype="U10")
p3 = np.zeros(counts.shape[0], dtype="U10")
p4 = np.zeros(counts.shape[0], dtype="U10")
# quartet iterator
quarts = itertools.combinations(range(len(tips)), 4)
# iterate over each (mat, quartet)
idx = 0
for count, qrt in zip(counts, quarts):
# calculate
abba = sum([count[i] for i in ABBA_IDX])
baba = sum([count[i] for i in BABA_IDX])
# guard against division by zero when no ABBA/BABA sites are present
dstat = abs(abba - baba) / (abba + baba) if (abba + baba) else 0.
# store stats
abbas[idx] = abba
babas[idx] = baba
dstats[idx] = dstat
# store taxa
p1[idx] = tips[qrt[0]]
p2[idx] = tips[qrt[1]]
p3[idx] = tips[qrt[2]]
p4[idx] = tips[qrt[3]]
idx += 1
# convert to dataframe
data = pd.DataFrame({
"ABBA": np.array(abbas, dtype=int),
"BABA": np.array(babas, dtype=int),
"D": dstats,
"p1": p1,
"p2": p2,
"p3": p3,
"p4": p4,
},
columns=["ABBA", "BABA", "D", "p1", "p2", "p3", "p4"],
)
return data
class Params(object):
"""
A dict-like object for storing params values with a custom repr
that shortens | |
<filename>prog_ex/foobar/find-the-access-codes.py
def solution(l):
"""
Solution 4 again passes all but the last test case. Try to speed things
up some using a dynamic programming-like approach.
This solution wound up passing all of the test cases -- the key here is to
uses a memorization/dynamic programming approach. A core component of this
problem involves finding all multiples of a number after a given number in
the list. In the brute force approach, we do the following:
0: for each li:
1: for each lj such that j > i:
2: if li divides lj:
3: for each lk such that k > j:
4: if lj divides lk:
(li, lj, lk) is a valid solution
Note that steps 3 and 4 involve counting the number of valid values of lk
for a given lj. Since we are evaluating all possible values of lj for each
possible value of li, this means that we would potentially repeat steps 3
and 4 multiple times for the *same value of lj*.
Take the example:
l = [1, 1, 1, 1, 3]
In this case we would evaluate the number of valid lks for the final '1'
3 times. In the worst case, where l is of length N and consists of
all 1's, we would be finding the valid lks for the penultimate lj (N-2)
times.
To improve on this, we can cache/memoize the values as we compute them.
We'll store the smallest computation -- the number of possible values of lk
for a given lj. Then, as we traverse the list, if we have already
computed the values of lk for a given lj, we just use the value that we
previously computed. This touches on the concept of Dynamic Programming.
"""
# Make sure no numbers are less than 1 or greater than 999999
for li in l:
if li > 999999 or li < 1:
return 0
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in the list, then there
# can't be any lucky triples, so return 0
if n_l < 3 or n_l > 2000:
return 0
# Initialize counts -- d_cts[j] corresponds to the number of valid values
# of l[k] for l[j].
d_cts = [-1] * n_l
ctr = 0
# First iterate over i
for i in range(n_l-2):
for j in range(i+1, n_l-1):
if l[j] % l[i] == 0:
# Check to see if we already computed this
if d_cts[j] == -1:
# Count the number of valid divisors for l[j]
d_ctr = 0
for k in range(j+1, n_l):
if l[k] % l[j] == 0:
d_ctr += 1
d_cts[j] = d_ctr
# Add the pre-computed value
ctr += d_cts[j]
return ctr
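# Quick sanity checks for the memoized solution above (a sketch added for
# illustration; the expected counts come from enumerating the lucky triples
# by hand).
if __name__ == "__main__":
    print(solution([1, 1, 1]))           # 1 -> (1, 1, 1)
    print(solution([1, 2, 3, 4, 5, 6]))  # 3 -> (1, 2, 4), (1, 2, 6), (1, 3, 6)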
def solution_4(l):
# Solution 3 passes all but the last test case. I suspect this is a timing
# problem, so see if we can speed things up.
# Make sure no numbers are less than 1 or greater than 999999
for li in l:
if li > 999999 or li < 1:
return 0
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in the list, then there
# can't be any lucky triples, so return 0
if n_l < 3 or n_l > 2000:
return 0
ctr = 0
# First iterate over i
for i in range(n_l-2):
for j in range(i+1, n_l-1):
if l[j] % l[i] == 0:
for k in range(j+1, n_l):
if l[k] % l[j] == 0:
ctr += 1
return ctr
def solution_3(l):
# Solution 2 appears to work but fails all 3 hidden test cases. The only
# thing I can guess is that duplicates can be counted -- this seems to be
# implied in the description but isn't explicitly said, so it might be the
# case that we should count duplicates. The fact that the output fits in a
# 32-bit integer might also be a clue -- it seems unlikely that this would
# need to be stated if we were only counting unique triples
# Make sure no numbers are less than 1 or greater than 999999
for li in l:
if li > 999999 or li < 1:
return 0
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in the list, then there
# can't be any lucky triples, so return 0
if n_l < 3 or n_l > 2000:
return 0
# Create a flipped version of l -- this is easier to traverse
l_flip = l[::-1]
# Define maximum value
max_val = int(2**32 / 2) - 1
# For this case, we have to iterate through the entire list
ctr = 0
for k, l_k in enumerate(l_flip[:-2]):
# Get all possible values of l_j
l_j_all = l_flip[k+1:]
l_j_valid = [lja for lja in set(l_j_all) if l_k % lja == 0] # All valid unique numbers
# Iterate over each unique value of l_j
for l_j in l_j_valid:
# Get the number of times l_j appears
n_l_j = l_j_all.count(l_j)
# Find each instance
st = k + 1
for nlj in range(n_l_j):
# Get the index for the current instance of nlj
j = l_flip.index(l_j, st)
# Look for all valid numbers after index j
l_i_all = l_flip[j+1:]
l_i_valid = [l_i for l_i in l_i_all if l_j % l_i == 0]
ctr += len(l_i_valid)
# Perform a check on the counter -- since the output must be
# represented by a signed int, then if the output is greater
# than this we could run into problems
if ctr > max_val:
return max_val
# Update the starting point
st = j + 1
return ctr
def solution_2(l):
# Make sure no numbers are less than 1 or greater than 999999
for li in l:
if li > 999999 or li < 1:
return 0
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in the list, then there
# can't be any lucky triples, so return 0
if n_l < 3 or n_l > 2000:
return 0
# Create a flipped version of l -- this is easier to traverse
l_flip = l[::-1]
# Get unique elements
lucky_triples = []
lk_unique = set(l_flip)
for lk in lk_unique:
# Find the first instance of lk in l_flip
k = l_flip.index(lk)
# If there aren't at least two elements left, continue -- this
# means that there aren't sufficient elements left in the list
# to create a valid tuple.
if n_l - k < 3:
continue
# Get all unique elements after k and filter by those which
# are divisible by lk
lj_unique = set(l_flip[k+1:])
lj_unique = [lju for lju in lj_unique if lk % lju == 0]
# Iterate over possible ljs:
for lj in lj_unique:
# Find the first instance of lj in l_flip starting at k+1
j = l_flip.index(lj, k+1)
# Get all possible unique elements after j and filter
li_unique = set(l_flip[j+1:])
li_unique = [liu for liu in li_unique if lj % liu == 0]
# Add tuples
for liu in li_unique:
lucky_triples.append((liu, lj, lk))
# If no lucky triples were found, return 0
if not lucky_triples:
return 0
# Remove any duplicates and return the number of elements
lucky_triples = set(lucky_triples)
return len(lucky_triples)
def solution_1(l):
# NOTE -- this solution does not appear to work -- the 'verify'
# command seems to time out. I'm guessing that's because it
# takes too long (execution time appears to be limited).
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in | |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
from operator import itemgetter
from collections import OrderedDict
import sys, getopt
import json
# Get entityID
def getEntityID(EntityDescriptor, namespaces):
return EntityDescriptor.get('entityID')
# Get MDUI Descriptions
def getDescriptions(EntityDescriptor,namespaces,entType='idp'):
description_list = list()
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
descriptions = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Description" % entityType, namespaces)
if (len(descriptions) != 0):
for desc in descriptions:
descriptions_dict = dict()
descriptions_dict['value'] = desc.text
descriptions_dict['lang'] = desc.get("{http://www.w3.org/XML/1998/namespace}lang")
description_list.append(descriptions_dict)
return description_list
# Get MDUI Logo BIG
def getLogoBig(EntityDescriptor,namespaces,entType='idp'):
entityType = ""
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
logoUrl = ""
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo[@xml:lang='it']" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight != logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo[@xml:lang='en']" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight != logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight != logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
return ""
# Get MDUI Logo SMALL
def getLogoSmall(EntityDescriptor,namespaces,entType='idp'):
entityType = ""
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
logoUrl = ""
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo[@xml:lang='it']" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight == logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo[@xml:lang='en']" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight == logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
logos = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:Logo" % entityType,namespaces)
if (len(logos) != 0):
for logo in logos:
logoHeight = logo.get("height")
logoWidth = logo.get("width")
if (logoHeight == logoWidth):
# Avoid "embedded" logos
if ("data:image" in logo.text):
logoUrl = "embeddedLogo"
return logoUrl
else:
logoUrl = logo.text
return logoUrl
else:
return ""
# Get ServiceName
def getServiceName(EntityDescriptor,namespaces):
serviceName = EntityDescriptor.find("./md:SPSSODescriptor/md:AttributeConsumingService/md:ServiceName[@xml:lang='it']", namespaces)
if (serviceName != None):
return serviceName.text
else:
serviceName = EntityDescriptor.find("./md:SPSSODescriptor/md:AttributeConsumingService/md:ServiceName[@xml:lang='en']", namespaces)
if (serviceName != None):
return serviceName.text
else:
return ""
# Get Organization Name
def getOrganizationName(EntityDescriptor, namespaces,lang='it'):
orgName = EntityDescriptor.find("./md:Organization/md:OrganizationName[@xml:lang='%s']" % lang,namespaces)
if (orgName != None):
return orgName.text
else:
return ""
# Get DisplayName
def getDisplayName(EntityDescriptor, namespaces, entType='idp'):
entityType = ""
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
displayName = EntityDescriptor.find("%s/md:Extensions/mdui:DisplayName[@xml:lang='it']" % entityType,namespaces)
if (displayName != None):
return displayName.text
else:
displayName = EntityDescriptor.find("%s/md:Extensions/mdui:DisplayName[@xml:lang='en']" % entityType,namespaces)
if (displayName != None):
return displayName.text
else:
if (entType == 'sp'):
displayName = getServiceName(EntityDescriptor,namespaces)
if (displayName != None):
return displayName
else:
return ""
else:
displayName = getOrganizationName(EntityDescriptor,namespaces)
return displayName
# Get MDUI InformationURLs
def getInformationURLs(EntityDescriptor,namespaces,entType='idp'):
entityType = ""
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
info_pages = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:InformationURL" % entityType, namespaces)
info_dict = dict()
for infop in info_pages:
lang = infop.get("{http://www.w3.org/XML/1998/namespace}lang")
info_dict[lang] = infop.text
if ('it' not in info_dict):
info_dict['it'] = ""
if ('en' not in info_dict):
info_dict['en'] = ""
return info_dict
# Get MDUI PrivacyStatementURLs
def getPrivacyStatementURLs(EntityDescriptor,namespaces,entType='idp'):
entityType = ""
if (entType.lower() == 'idp'):
entityType = "./md:IDPSSODescriptor"
if (entType.lower() == 'sp'):
entityType = "./md:SPSSODescriptor"
privacy_pages = EntityDescriptor.findall("%s/md:Extensions/mdui:UIInfo/mdui:PrivacyStatementURL" % entityType, namespaces)
privacy_dict = dict()
for pp in privacy_pages:
lang = pp.get("{http://www.w3.org/XML/1998/namespace}lang")
privacy_dict[lang] = pp.text
if ('it' not in privacy_dict):
privacy_dict['it'] = ""
if ('en' not in privacy_dict):
privacy_dict['en'] = ""
return privacy_dict
# Get OrganizationURL
def getOrganizationURL(EntityDescriptor,namespaces,lang='it'):
orgUrl = EntityDescriptor.find("./md:Organization/md:OrganizationURL[@xml:lang='%s']" % lang,namespaces)
if (orgUrl != None):
return orgUrl.text
else:
return ""
# Get RequestedAttribute
def getRequestedAttribute(EntityDescriptor,namespaces):
reqAttr = EntityDescriptor.findall("./md:SPSSODescriptor/md:AttributeConsumingService/md:RequestedAttribute", namespaces)
requireList = list()
requestedList = list()
requestedAttributes = dict()
if (len(reqAttr) != 0):
for ra in reqAttr:
if (ra.get('isRequired') == "true"):
requireList.append(ra.get('FriendlyName'))
else:
requestedList.append(ra.get('FriendlyName'))
requestedAttributes['required'] = requireList
requestedAttributes['requested'] = requestedList
return requestedAttributes
# Get Contacts
def getContacts(EntityDescriptor,namespaces,contactType='technical'):
contactsList = list()
if (contactType.lower() == 'technical'):
contacts = EntityDescriptor.findall("./md:ContactPerson[@contactType='technical']/md:EmailAddress", namespaces)
elif (contactType.lower() == 'support'):
contacts = EntityDescriptor.findall("./md:ContactPerson[@contactType='support']/md:EmailAddress", namespaces)
elif (contactType.lower() == 'administrative'):
contacts = EntityDescriptor.findall("./md:ContactPerson[@contactType='administrative']/md:EmailAddress", namespaces)
if (len(contacts) != 0):
for ctc in contacts:
if ctc.text.startswith("mailto:"):
contactsList.append(ctc.text.replace("mailto:", ""))
else:
contactsList.append(ctc.text)
return '<br/>'.join(contactsList)
def main(argv):
try:
        # 'm:o:hd' means that 'm' and 'o' need an argument (indicated by the trailing ':'), while 'h' and 'd' do not
opts, args = getopt.getopt(sys.argv[1:], 'm:o:hd', ['metadata=','output=','help','debug' ])
except getopt.GetoptError as err:
print (str(err))
print ("Usage: ./extractDataFromMD.py -m <md_inputfile> -o <output_path>")
print ("The idem-sps.json and idem-idps.json files will be put in the output directory")
sys.exit(2)
inputfile = None
outputpath = None
for opt, arg in opts:
if opt in ('-h', '--help'):
print ("Usage: ./extractDataFromMD.py -m <md_inputfile> -o <output_path>")
print ("The idem-sps.json and idem-idps.json files will be put in the output directory")
sys.exit()
elif opt in ('-m', '--metadata'):
inputfile = arg
elif opt in ('-o', '--output'):
outputpath = arg
elif opt == '-d':
global _debug
_debug = 1
else:
print ("Usage: ./extractDataFromMD.py -m <md_inputfile> -o <output_path>")
print ("The idem-sps.json and idem-idps.json files will be put in the output directory")
sys.exit()
namespaces = {
'xml':'http://www.w3.org/XML/1998/namespace',
'md': 'urn:oasis:names:tc:SAML:2.0:metadata',
'mdrpi': 'urn:oasis:names:tc:SAML:metadata:rpi',
'shibmd': 'urn:mace:shibboleth:metadata:1.0',
'mdattr': 'urn:oasis:names:tc:SAML:metadata:attribute',
'saml': 'urn:oasis:names:tc:SAML:2.0:assertion',
'ds': 'http://www.w3.org/2000/09/xmldsig#',
'mdui': 'urn:oasis:names:tc:SAML:metadata:ui'
}
if inputfile == None:
print ("Metadata file is missing!\n")
print ("Usage: ./extractDataFromMD.py -m <md_inputfile> -o <output_path>")
print ("The JSON content will be put in the output directory")
sys.exit()
if outputpath == None:
print ("Output path is missing!\n")
print ("Usage: ./extractDataFromMD.py -m <md_inputfile> -o <output_path>")
print ("The JSON content will be put in the output directory")
sys.exit()
tree = ET.parse(inputfile)
root = tree.getroot()
sp = root.findall("./md:EntityDescriptor[md:SPSSODescriptor]", namespaces)
idp = root.findall("./md:EntityDescriptor[md:IDPSSODescriptor]", namespaces)
sps = dict()
idps = dict()
list_sps = list()
list_idps = list()
cont_id = 0
# JSON SP Output:
# [
# {
# "id": #_number_#,
# "resourceName": "#_resource-display-name_#",
# "resourceProvider": "#_organization-name-linked_#",
# "resourceAttributes": {
# "required": [
# "eduPersonPrincipalName",
# "email",
# "givenName",
# "surname"
# ],
# "requested": []
# },
# "entityID": "#_entityID-resource_#",
# "resourceContacts": {
# "technical": [
# "#_email-address-list_#"
# ],
# "support": [],
# "administrative": []
# },
# "info": "<a href='#_info-url-it_#'>IT</a>, <a href='#_info-url-en_#'>EN</a>",
# "privacy": "<a href='#_privacy-url-it_#'>IT</a>, <a href='#_privacy-url-en_#'>EN</a>"
# }
# ]
for EntityDescriptor in sp:
cont_id = cont_id + 1
info = ""
privacy = ""
# Get entityID
entityID = getEntityID(EntityDescriptor,namespaces)
# Get InformationURL
infoDict = getInformationURLs(EntityDescriptor, namespaces, 'sp')
# Get PrivacyStatementURL
privacyDict = getPrivacyStatementURLs(EntityDescriptor, namespaces, 'sp')
# Get ServiceName
serviceName = getDisplayName(EntityDescriptor,namespaces,'sp')
# Build Resource Info Pages
if (infoDict['it'] != "" and infoDict['en'] != ""):
info = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/it.png' alt='Info ITA' height='18' width='18' /></a> <a href='%s' target='_blank'><img src='https://idem.garr.it/images/uk.png' alt='Info ENG' height='18' width='18' /></a>" % (infoDict['it'],infoDict['en'])
elif (infoDict['it'] != "" and infoDict['en'] == ""):
info = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/it.png' alt='Info ITA' height='18' width='18' /></a>" % (infoDict['it'])
elif (infoDict['it'] == "" and infoDict['en'] != ""):
info = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/uk.png' alt='Info ENG' height='18' width='18' /></a>" % (infoDict['en'])
elif (infoDict['it'] == infoDict['en'] == ""):
info = ""
# Build Resource Privacy Pages
if (privacyDict['it'] != "" and privacyDict['en'] != ""):
privacy = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/it.png' alt='Info ITA' height='18' width='18' /></a> <a href='%s' target='_blank'><img src='https://idem.garr.it/images/uk.png' alt='Info ENG' height='18' width='18' /></a>" % (privacyDict['it'],privacyDict['en'])
elif (privacyDict['it'] != "" and privacyDict['en'] == ""):
privacy = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/it.png' alt='Info ITA' height='18' width='18' /></a>" % (privacyDict['it'])
elif (privacyDict['it'] == "" and privacyDict['en'] != ""):
privacy = "<a href='%s' target='_blank'><img src='https://idem.garr.it/images/uk.png' alt='Info ENG' height='18' width='18' /></a>" % (privacyDict['en'])
elif (privacyDict['it'] == privacyDict['en'] == ""):
privacy = ""
# Get Requested Attributes
requestedAttributes = getRequestedAttribute(EntityDescriptor,namespaces)
# Get Organization Name
orgName = getOrganizationName(EntityDescriptor,namespaces,'it')
if (orgName == ""):
orgName = getOrganizationName(EntityDescriptor,namespaces,'en')
# Get Organization Page
orgUrl = getOrganizationURL(EntityDescriptor,namespaces,'it')
if (orgUrl | |
"""
You have to write the perc_train function that trains the feature weights using the perceptron algorithm for the CoNLL 2000 chunking task.
Each element of train_data is a (labeled_list, feat_list) pair.
Inside the perceptron training loop:
- Call perc_test to get the tagging based on the current feat_vec and compare it with the true output from the labeled_list
- If the output is incorrect then we have to update feat_vec (the weight vector)
- In the notation used in the paper we have w = w_0, w_1, ..., w_n corresponding to \phi_0(x,y), \phi_1(x,y), ..., \phi_n(x,y)
- Instead of indexing each feature with an integer we index each feature using a string we called feature_id
- The feature_id is constructed using the elements of feat_list (which correspond to x above) combined with the output tag (which correspond to y above)
- The function perc_test shows how the feature_id is constructed for each word in the input, including the bigram feature "B:" which is a special case
- feat_vec[feature_id] is the weight associated with feature_id
- This dictionary lookup lets us implement a sparse vector dot product where any feature_id not used in a particular example does not participate in the dot product
- To save space and time make sure you do not store zero values in the feat_vec dictionary which can happen if \phi(x_i,y_i) - \phi(x_i,y_{perc_test}) results in a zero value
- If you are going word by word to check if the predicted tag is equal to the true tag, there is a corner case where the bigram 'T_{i-1} T_i' is incorrect even though T_i is correct.
"""
import perc
import sys, optparse, os
from collections import defaultdict
def train_tags(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[2])
i = i + 1
return output
def word_list(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[0])
i = i + 1
return output
def pos_list(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[1])
i = i + 1
return output
def add_one_feat(feat_vec, key_z, key_true):
    # feat_vec is a defaultdict(int), so a missing key simply reads as 0.
    # Penalise the feature paired with the wrongly predicted tag.
    if key_z is not None:
        feat_vec[key_z] -= 1
        #if feat_vec[key_z] <= 0:
        #    feat_vec.pop(key_z)
    # Reward the feature paired with the true tag.
    if key_true is not None:
        feat_vec[key_true] += 1
    return
def strip(feat_vec):
items_to_pop = []
for i in feat_vec:
if feat_vec[i] <= 0:
items_to_pop.append(i)
for i in range(0,len(items_to_pop)):
feat_vec.pop(items_to_pop[i])
# Each feat_XX function below implements one feature template; the templates range from feat_00 to feat_22
def feat_00(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 2) < 0):
return
if(tag_list[position-2] != z_list[position-2]):
key_z = ("U00:" + word_list[position-2], z_list[position-2])
key_true = ("U00:" + word_list[position-2], tag_list[position-2])
add_one_feat(feat_vec,key_z,key_true)
def feat_01(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if(tag_list[position-1] != z_list[position-1]):
key_z = ("U01:" + word_list[position-1], z_list[position-1])
key_true = ("U01:" + word_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
def feat_02(feat_vec,word_list,pos_list,tag_list,z_list,position):
if(tag_list[position] != z_list[position]):
key_z = ("U02:" + word_list[position], z_list[position])
key_true = ("U02:" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
return
def feat_03(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position+1] != z_list[position+1]):
key_z = ("U03:" + word_list[position+1], z_list[position+1])
key_true = ("U03:" + word_list[position+1], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
def feat_04(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 2) > len(z_list) - 1):
return
if(tag_list[position+2] != z_list[position+2]):
key_z = ("U04:" + word_list[position+2], z_list[position+2])
key_true = ("U04:" + word_list[position+2], tag_list[position+2])
add_one_feat(feat_vec,key_z,key_true)
def feat_05(feat_vec,word_list,pos_list,tag_list,z_list,position):
offset = 1
if ((position - offset) < 0):
return
if(tag_list[position -offset] == tag_list[position]):
if(z_list[position -offset] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position])
key_true = ("U05:" + word_list[position-1] + "/" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position-1])
key_z2 = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position])
key_true = ("U05:" + word_list[position-1] + "/" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z1,key_true)
add_one_feat(feat_vec,key_z2,key_true)
add_one_feat(feat_vec,None,key_true)
def feat_06(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position +1] == tag_list[position]):
if(z_list[position +1] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position])
key_true = ("U06:" + word_list[position] + "/" + word_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position])
key_z2 = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position+1])
key_true = ("U06:" + word_list[position] + "/" + word_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,key_true)
add_one_feat(feat_vec,key_z2,key_true)
add_one_feat(feat_vec,None,key_true)
def feat_10(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -2) < 0):
return
if(tag_list[position-2] != z_list[position-2]):
key_z = ("U10:" + pos_list[position-2], z_list[position-2])
key_true = ("U10:" + pos_list[position-2], tag_list[position-2])
add_one_feat(feat_vec,key_z,key_true)
def feat_11(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -1) < 0):
return
if(tag_list[position-1] != z_list[position-1]):
key_z = ("U11:" + pos_list[position-1], z_list[position-1])
key_true = ("U11:" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
def feat_12(feat_vec,word_list,pos_list,tag_list,z_list,position):
if(tag_list[position] != z_list[position]):
key_z = ("U12:" + pos_list[position] + "q", z_list[position])
key_true = ("U12:" + pos_list[position] + "q", tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
def feat_13(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +1) > len(z_list) - 1):
return
if(tag_list[position+1] != z_list[position+1]):
key_z = ("U13:" + pos_list[position+1], z_list[position+1])
key_true = ("U13:" + pos_list[position+1], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
def feat_14(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +2) > len(z_list) - 1):
return
if(tag_list[position+2] != z_list[position+2]):
key_z = ("U14:" + pos_list[position+2], z_list[position+2])
key_true = ("U14:" + pos_list[position+2], tag_list[position+2])
add_one_feat(feat_vec,key_z,key_true)
def feat_15(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 2) < 0):
return
if(tag_list[position -2] == tag_list[position-1]):
if(z_list[position -2] == z_list[position-1]):
if(z_list[position-1] != tag_list[position-1]):
key_z = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-1])
key_true = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-2])
key_z2 = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-1])
key_true = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,None,key_true)
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
def feat_16(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if(tag_list[position -1] == tag_list[position]):
if(z_list[position -1] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U16:" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position-1])
key_z2 = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U16:" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,None,key_true)
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
def feat_17(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position] == tag_list[position+1]):
if(z_list[position] == z_list[position+1]):
if(z_list[position] != tag_list[position]):
key_z = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_true = ("U17:" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_z2 = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position+1])
key_true = ("U17:" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,None,key_true)
def feat_18(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 2) > len(z_list) - 1):
return
if(tag_list[position+1] == tag_list[position+2]):
if(z_list[position+1] == z_list[position+2]):
if(z_list[position+1] != tag_list[position+1]):
key_z = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_true = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_z2 = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+2])
key_true = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position+1])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,None,key_true)
def feat_20(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -2 ) < 0):
return
if((tag_list[position-2] == tag_list[position-1]) and (tag_list[position-1] == tag_list[position])):
if((z_list[position-2] == z_list[position-1]) and (z_list[position-1] == z_list[position])):
if(z_list[position] != tag_list[position]):
key_z = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position-2])
key_z2 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position-1])
key_z3 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
def feat_21(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if ((position + 1) > len(z_list) - 1):
return
if((tag_list[position-1] == tag_list[position]) and (tag_list[position] == tag_list[position+1])):
if((z_list[position-1] == z_list[position]) and (z_list[position] == z_list[position+1])):
if(z_list[position] != tag_list[position]):
key_z = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_true = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position-1])
key_z2 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_z3 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position+1])
key_true = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
def feat_22(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +2 ) > len(z_list) - 1):
return
if((tag_list[position] == tag_list[position+1]) and (tag_list[position+2] == tag_list[position])):
if((z_list[position] == z_list[position+1]) and (z_list[position+2] == z_list[position])):
if(z_list[position] != tag_list[position]):
key_z = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position])
key_true = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position])
key_z2 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_z3 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+2])
key_true = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
# check_and_change calls all the feature functions; each one may add weight for the true tag and/or subtract weight for the wrongly predicted tag
def check_and_change(feat_vec,word_list,pos_list,tag_list,z_list,position):
feat_00(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_01(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_02(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_03(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_04(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_05(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_06(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_10(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_11(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_12(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_13(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_14(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_15(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_16(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_17(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_18(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_20(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_21(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_22(feat_vec,word_list,pos_list,tag_list,z_list,position)
strip(feat_vec)
# perc_train calls the Viterbi decoder (perc.perc_test) and then check_and_change to update the weights
def perc_train(train_data,tagset,numepochs):
feat_vec = defaultdict(int)
tags = {}
for i in range(0,numepochs):
for j in range(0,len(train_data)):
label_list = train_data[j][0]
feat_list = train_data[j][1]
z = perc.perc_test(feat_vec,label_list, feat_list,tagset,tagset[0])
for k in range(0,len(z)):
temp = train_tags(label_list)
if(z[k] != temp[k]):
check_and_change(feat_vec,word_list(label_list),pos_list(label_list),train_tags(label_list),z,k)
return feat_vec
if __name__ == '__main__':
optparser = optparse.OptionParser()
optparser.add_option("-t", "--tagsetfile", dest="tagsetfile", default=os.path.join("data", "tagset.txt"), help="tagset that contains all the labels produced in the output, i.e. the y in \phi(x,y)")
optparser.add_option("-i", "--trainfile", dest="trainfile", | |
= sig.bind(*args, **kwargs)
bound_args.apply_defaults()
keepTop = bound_args.arguments["keepTop"]
keepBottom = bound_args.arguments["keepBottom"]
if (not keepTop) and (not keepBottom):
raise ValueError("You have to keep at least one half")
solid = self.findSolid()
maxDim = solid.BoundingBox().DiagonalLength * 10.0
topCutBox = self.rect(maxDim, maxDim)._extrude(maxDim)
bottomCutBox = self.rect(maxDim, maxDim)._extrude(-maxDim)
top = solid.cut(bottomCutBox)
bottom = solid.cut(topCutBox)
if keepTop and keepBottom:
# Put both on the stack, leave original unchanged.
rv = [top, bottom]
else:
# Put the one we are keeping on the stack, and also update the
# context solid to the one we kept.
if keepTop:
rv = [top]
else:
rv = [bottom]
return self.newObject(rv)
@deprecate()
def combineSolids(
self, otherCQToCombine: Optional["Workplane"] = None
) -> "Workplane":
"""
!!!DEPRECATED!!! use union()
Combines all solids on the current stack, and any context object, together
into a single object.
After the operation, the returned solid is also the context solid.
:param otherCQToCombine: another CadQuery to combine.
:return: a cQ object with the resulting combined solid on the stack.
Most of the time, both objects will contain a single solid, which is
combined and returned on the stack of the new object.
"""
# loop through current stack objects, and combine them
toCombine = cast(List[Solid], self.solids().vals())
if otherCQToCombine:
otherSolids = cast(List[Solid], otherCQToCombine.solids().vals())
for obj in otherSolids:
toCombine.append(obj)
if len(toCombine) < 1:
raise ValueError("Cannot Combine: at least one solid required!")
# get context solid and we don't want to find our own objects
ctxSolid = self._findType(
(Solid, Compound), searchStack=False, searchParents=True
)
if ctxSolid is None:
ctxSolid = toCombine.pop(0)
# now combine them all. make sure to save a reference to the ctxSolid pointer!
s: Shape = ctxSolid
if toCombine:
s = s.fuse(*_selectShapes(toCombine))
return self.newObject([s])
def all(self: T) -> List[T]:
"""
Return a list of all CQ objects on the stack.
useful when you need to operate on the elements
individually.
Contrast with vals, which returns the underlying
objects for all of the items on the stack
"""
return [self.newObject([o]) for o in self.objects]
def size(self) -> int:
"""
Return the number of objects currently on the stack
"""
return len(self.objects)
def vals(self) -> List[CQObject]:
"""
get the values in the current list
:rtype: list of occ_impl objects
:returns: the values of the objects on the stack.
Contrast with :py:meth:`all`, which returns CQ objects for all of the items on the stack
"""
return self.objects
@overload
def add(self: T, obj: "Workplane") -> T:
...
@overload
def add(self: T, obj: CQObject) -> T:
...
@overload
def add(self: T, obj: Iterable[CQObject]) -> T:
...
def add(self, obj):
"""
Adds an object or a list of objects to the stack
:param obj: an object to add
:type obj: a Workplane, CAD primitive, or list of CAD primitives
:return: a Workplane with the requested operation performed
If an Workplane object, the values of that object's stack are added. If
a list of cad primitives, they are all added. If a single CAD primitive
then it is added.
Used in rare cases when you need to combine the results of several CQ
results into a single Workplane object. Shelling is one common example.
"""
if isinstance(obj, list):
self.objects.extend(obj)
elif isinstance(obj, Workplane):
self.objects.extend(obj.objects)
else:
self.objects.append(obj)
return self
def val(self) -> CQObject:
"""
Return the first value on the stack. If no value is present, current plane origin is returned.
:return: the first value on the stack.
:rtype: A CAD primitive
"""
return self.objects[0] if self.objects else self.plane.origin
def _getTagged(self, name: str) -> "Workplane":
"""
        Search the parent chain for an object with tag == name.
:param name: the tag to search for
:type name: string
:returns: the CQ object with tag == name
:raises: ValueError if no object tagged name
"""
rv = self.ctx.tags.get(name)
if rv is None:
raise ValueError(f"No CQ object named {name} in chain")
return rv
def toOCC(self) -> Any:
"""
Directly returns the wrapped OCCT object.
:return: The wrapped OCCT object
        :rtype: TopoDS_Shape or a subclass
"""
return self.val().wrapped
def workplane(
self: T,
offset: float = 0.0,
invert: bool = False,
centerOption: Literal[
"CenterOfMass", "ProjectedOrigin", "CenterOfBoundBox"
] = "ProjectedOrigin",
origin: Optional[VectorLike] = None,
) -> T:
"""
Creates a new 2-D workplane, located relative to the first face on the stack.
        :param offset: offset for the work plane in the Z direction. Default is 0.0.
:param invert: invert the Z direction from that of the face.
:param centerOption: how local origin of workplane is determined.
:param origin: origin for plane center, requires 'ProjectedOrigin' centerOption.
:type offset: float or None=0.0
:type invert: boolean or None=False
:type centerOption: string or None='ProjectedOrigin'
:type origin: Vector or None
:rtype: Workplane object ( which is a subclass of CQ )
The first element on the stack must be a face, a set of
co-planar faces or a vertex. If a vertex, then the parent
item on the chain immediately before the vertex must be a
face.
The result will be a 2-d working plane
with a new coordinate system set up as follows:
* The centerOption parameter sets how the center is defined.
Options are 'CenterOfMass', 'CenterOfBoundBox', or 'ProjectedOrigin'.
'CenterOfMass' and 'CenterOfBoundBox' are in relation to the selected
face(s) or vertex (vertices). 'ProjectedOrigin' uses by default the current origin
or the optional origin parameter (if specified) and projects it onto the plane
defined by the selected face(s).
        * The Z direction will be normal to the plane of the face, computed
at the center point.
* The X direction will be parallel to the x-y plane. If the workplane is parallel to
        the global x-y plane, the x direction of the workplane will coincide with the
global x direction.
Most commonly, the selected face will be planar, and the workplane lies in the same plane
        of the face (i.e., offset=0). Occasionally, it is useful to define a face offset from
an existing surface, and even more rarely to define a workplane based on a face that is
not planar.
To create a workplane without first having a face, use the Workplane() method.
Future Enhancements:
* Allow creating workplane from planar wires
* Allow creating workplane based on an arbitrary point on a face, not just the center.
For now you can work around by creating a workplane and then offsetting the center
afterwards.
"""
def _isCoPlanar(f0, f1):
"""Test if two faces are on the same plane."""
p0 = f0.Center()
p1 = f1.Center()
n0 = f0.normalAt()
n1 = f1.normalAt()
# test normals (direction of planes)
if not (
(abs(n0.x - n1.x) < self.ctx.tolerance)
or (abs(n0.y - n1.y) < self.ctx.tolerance)
or (abs(n0.z - n1.z) < self.ctx.tolerance)
):
return False
# test if p1 is on the plane of f0 (offset of planes)
            return abs(n0.dot(p0.sub(p1))) < self.ctx.tolerance
def _computeXdir(normal):
"""
Figures out the X direction based on the given normal.
            :param normal: The direction that's normal to the plane.
            :type normal: A Vector
:return A vector representing the X direction.
"""
xd = Vector(0, 0, 1).cross(normal)
if xd.Length < self.ctx.tolerance:
# this face is parallel with the x-y plane, so choose x to be in global coordinates
xd = Vector(1, 0, 0)
return xd
if centerOption not in {"CenterOfMass", "ProjectedOrigin", "CenterOfBoundBox"}:
raise ValueError("Undefined centerOption value provided.")
if len(self.objects) > 1:
objs: List[Face] = [o for o in self.objects if isinstance(o, Face)]
if not all(o.geomType() in ("PLANE", "CIRCLE") for o in objs) or len(
objs
) < len(self.objects):
raise ValueError(
"If multiple objects selected, they all must be planar faces."
)
# are all faces co-planar with each other?
if not all(_isCoPlanar(self.objects[0], f) for f in self.objects[1:]):
raise ValueError("Selected faces must be co-planar.")
if centerOption in {"CenterOfMass", "ProjectedOrigin"}:
center = Shape.CombinedCenter(_selectShapes(self.objects))
elif centerOption == "CenterOfBoundBox":
center = Shape.CombinedCenterOfBoundBox(_selectShapes(self.objects))
normal = objs[0].normalAt()
xDir = _computeXdir(normal)
else:
obj = self.val()
if isinstance(obj, Face):
if centerOption in {"CenterOfMass", "ProjectedOrigin"}:
center = obj.Center()
elif centerOption == "CenterOfBoundBox":
center = obj.CenterOfBoundBox()
| |
# %%
import subprocess
from subprocess import Popen, PIPE, STDOUT
cmd = 'ls /etc/fstab /etc/non-existent-file'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
print(output)
# %%
import sys
print('a')
print(sys.stdout)
# %%
# from pathlib import Path
#
#
# def send_email(subject, message, destination, password_path=None):
# """ Send an e-mail from with message to destination email.
#
# NOTE: if you get an error with google gmails you might need to do this:
# https://stackoverflow.com/questions/16512592/login-credentials-not-working-with-gmail-smtp
# To use an app password:
# https://stackoverflow.com/questions/60975490/how-does-one-send-an-e-mail-from-python-not-using-gmail
#
# Arguments:
# message {str} -- message string to send.
# destination {str} -- destination email (as string)
# """
# from socket import gethostname
# from email.message import EmailMessage
# import smtplib
# import json
# import sys
#
# server = smtplib.SMTP('smtp.gmail.com', 587)
# smtplib.stdout = sys.stdout
# server.starttls()
# with open(password_path) as f:
# config = json.load(f)
# server.login('<EMAIL>', config['password'])
#
# # craft message
# msg = EmailMessage()
#
# # message = f'{message}\nSend from Hostname: {gethostname()}'
# # msg.set_content(message)
# msg['Subject'] = subject
# msg['From'] = '<EMAIL>'
# msg['To'] = destination
# # send msg
# server.send_message(msg)
#
#
# ##
# print("-------> HELLOWWWWWWWW")
# p = Path('~/automl-meta-learning/automl/experiments/pw_app.config.json').expanduser()
# send_email(subject='TEST: send_email2', message='MESSAGE', destination='<EMAIL>', password_path=p)
# %%
"""
Demo of the errorbar function, including upper and lower limits
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["errorbar.capsize"] = 3
# https://stackoverflow.com/questions/61415955/why-dont-the-error-limits-in-my-plots-show-in-matplotlib
# example data
x = np.arange(0.5, 5.5, 0.5)
y = np.exp(-x)
xerr = 0.1
yerr = 0.2
ls = 'dotted'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# standard error bars
plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
# including upper limits
uplims = np.zeros(x.shape)
uplims[[1, 5, 9]] = True
plt.errorbar(x, y + 0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
color='green')
# including lower limits
lolims = np.zeros(x.shape)
lolims[[2, 4, 8]] = True
plt.errorbar(x, y + 1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
color='red')
# including upper and lower limits
plt.errorbar(x, y + 1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
lolims=lolims, uplims=uplims, ls=ls, color='magenta')
# including xlower and xupper limits
xerr = 0.2
yerr = np.zeros(x.shape) + 0.2
yerr[[3, 6]] = 0.3
xlolims = lolims
xuplims = uplims
lolims = np.zeros(x.shape)
uplims = np.zeros(x.shape)
lolims[[6]] = True
uplims[[3]] = True
plt.errorbar(x, y + 2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
xlolims=xlolims, xuplims=xuplims, uplims=uplims, lolims=lolims,
ls='none', mec='blue', capsize=0, color='cyan')
ax.set_xlim((0, 5.5))
ax.set_title('Errorbar upper and lower limits')
plt.show()
# %%
from types import SimpleNamespace
from pathlib import Path
from pprint import pprint
args = SimpleNamespace()
args.data_root = "~/automl-meta-learning/data/miniImagenet"
args.data_root = Path(args.data_root).expanduser()
print(args)
# pprint(dir(args.data_root))
print(args.data_root.name)
print('miniImagenet' in args.data_root.name)
# %%
## sampling N classes for len(meta-set)
# In sampling without replacement, each sample unit of
# the population has only one chance to be selected in the sample.
# because you are NOT replacing what you removed.
import random
N = 5
len_meta_set = 64
sample = random.sample(range(0, len_meta_set), N)
print(sample)
for i, n in enumerate(sample):
print(f'i={i}\nn={n}\n')
# %%
# iterator https://www.programiz.com/python-programming/iterator
class Counter:
def __init__(self, max=0):
self.max = max # returns up to and including that number
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n <= self.max:
current_count = self.n
self.n += 1
print(f'current_count = {current_count}')
print(f'self.n = {self.n}')
print(self.n is current_count)
return current_count
else:
raise StopIteration
## test it
counter = iter(Counter(max=0))
for count in counter:
print(f'count = {count}')
# %%
from tqdm import tqdm
print(tqdm)
lst = range(3)
print(type(lst))
with tqdm(iter(lst), total=5) as tlist:
print(f'tlist = {type(tlist)}')
for i in tlist:
print(i)
# %%
from tqdm import tqdm
class Plus2:
def __init__(self, max=0):
self.max = max # returns up to and including that number
def __iter__(self):
self.it = 0
self.tot = 0
return self
def __next__(self):
if self.it <= self.max:
self.it += 1
self.tot += 2
return self.tot
else:
raise StopIteration
def __len__(self):
return self.max
##
counter = iter(Plus2(max=int(100000)))
with tqdm(counter, total=len(counter)) as tqcounter:
for idx, pow2 in enumerate(tqcounter):
print()
print(f'idx = {idx}')
print(f'powd2 = {pow2}')
pass
# %%
from tqdm import tqdm
for i in tqdm(range(int(9e6))):
pass
# %%
from tqdm import tqdm
import time
with tqdm(range(int(5))) as trange:
for i in trange:
print(f'\ni = {i}')
print('done\n')
time.sleep(1)
pass
# %%
# zip, it aligns elements in one list to elements in the other
l1 = [0, 1, 2]
l2 = ['a', 'b', 'c']
print(list(zip(l1, l2)))
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(lst, total=total) as tlst:
i = 0
for _, element in enumerate(tlst):
print(f'\n->i = {i}\n')
time.sleep(0.2)
i += 1
if i >= total:
break
print('\n--> DONE \a')
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(lst, total=total) as tlst:
for idx, element in enumerate(tlst):
print(f'\n->idx = {idx}\n')
time.sleep(0.2)
if idx >= total:
break
print('\n--> DONE \a')
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(range(total)) as tcounter:
lst = iter(lst)
for idx, element in enumerate(tcounter):
print(f'\n->idx = {idx}\n')
time.sleep(0.2)
print('\n--> DONE \a')
# %%
# Question: Do detached() tensors track their own gradients seperately?
# Ans: Yes!
# https://discuss.pytorch.org/t/why-is-the-clone-operation-part-of-the-computation-graph-is-it-even-differentiable/67054/11
import torch
a = torch.tensor([2.0], requires_grad=True)
b = a.detach()
b.requires_grad = True
la = (5.0 - a) ** 2
la.backward()
print(f'a.grad = {a.grad}')
lb = (6.0 - b) ** 2
lb.backward()
print(f'b.grad = {b.grad}')
# %%
import torch
import torch.nn as nn
from collections import OrderedDict
params = OrderedDict([
('fc0', nn.Linear(in_features=4, out_features=4)),
('ReLU0', nn.ReLU()),
('fc1', nn.Linear(in_features=4, out_features=1))
])
mdl = nn.Sequential(params)
print(params)
print(mdl._parameters)
print(params == params)
print(mdl._parameters == params)
print(mdl._modules)
print()
for name, w in mdl.named_parameters():
print(name, w.norm(2))
print()
# mdl._modules['fc0'] = nn.Linear(10,11)
mdl._modules['fc0']
for name, w in mdl.named_parameters():
print(name, w.norm(2))
# %%
## Q: are parameters are in computation graph?
# import torch
# import torch.nn as nn
# # from torchviz import make_dot
#
# from collections import OrderedDict
#
# fc0 = nn.Linear(in_features=3, out_features=1)
# params = [('fc0', fc0)]
# mdl = nn.Sequential(OrderedDict(params))
#
# x = torch.randn(1, 3)
# y = torch.randn(1)
#
# l = (mdl(x) - y) ** 2
#
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# print(fc0.weight)
# print(fc0.bias)
# print(fc0.weight.to_tens)
# print()
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# make_dot(l, {'x': x, 'y': y})
# make_dot(l)
# %%
'''
expand
'''
import torch
x = torch.randn([2, 3, 4, 5])
# h_0 of shape (num_layers * num_directions, batch, hidden_size)
h = torch.randn([1, 4, 8])
x_mean = x.mean()
print(x_mean.size())
print(x_mean)
x = x_mean.expand_as(h)
print(x.size())
print(x)
# %%
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
type(device)
print(device == 'cpu')
device.type
# %%
# THIS WORKS
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
# log_dir (string) – Save directory location.
# Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
tb = SummaryWriter()
tb.add_scalar('loss', 111)
# %%
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
def CURRENT_DATETIME_HOSTNAME(comment=''):
# if not log_dir:
import socket
import os
from datetime import datetime
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
log_dir = os.path.join('runs', current_time + '_' + socket.gethostname() + comment)
return Path(log_dir)
# log_dir (string) – Save directory location.
# Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
# tensorboard --logdir=runs
log_dir = (Path('//') / CURRENT_DATETIME_HOSTNAME()).expanduser()
print(log_dir)
tb = SummaryWriter(log_dir=log_dir)
tb.add_scalar('loss', 15)
# %%
# download mini-imagenet automatically
# from torchvision.utils import download_and_extract_archive
import torchvision.utils as utils
print(utils)
# print(download_and_extract_archive)
# %%
# torch concat, https://pytorch.org/docs/stable/torch.html#torch.cat
# Concatenates the given sequence of seq tensors in the given dimension.
# All tensors must either have the same shape (except in the concatenating dimension) or be empty.
import torch
g1 = torch.randn(3, 2)
g2 = torch.randn(4, 2)
g3 = torch.randn(4, 2, 3)
grads = [g1, g2]
print(g1.view(-1).size())
print(g2.view(-1).size())
print(g3.view(-1).size())
# print(g3.view(-1))
grads = torch.cat(grads, dim=0)
print(grads)
print(grads.size())
print(grads.mean())
print(grads.std())
# torch stack, https://pytorch.org/docs/stable/torch.html#torch.stack
# Concatenates sequence of tensors along a new dimension.
# All tensors need to be of the same size.
# torch.stack([g1,g2], dim=0)
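# Hedged illustration: torch.stack needs tensors of identical shape, so g1 and
# g2 above (shapes (3, 2) and (4, 2)) cannot be stacked. The tensor g4 below is
# a new, illustrative name; stacking two (4, 2) tensors along a new leading
# dimension yields shape (2, 4, 2).
g4 = torch.randn(4, 2)
stacked = torch.stack([g2, g4], dim=0)
print(stacked.size())  # torch.Size([2, 4, 2])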
# %%
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
a_detached = a.detach()
print(a_detached.is_leaf)
a_detached_sum = a.sum()
print(a_detached_sum.is_leaf)
d = a_detached_sum.detach()
print(d.is_leaf)
# %%
import torch
from types import SimpleNamespace
from pathlib import Path
from pprint import pprint
x = torch.empty([1, 2, 3])
print(x.size())
args = SimpleNamespace()
args.data_root = "~/automl-meta-learning/data/miniImagenet"
# n1313361300001299.jpg
args.data_root = Path(args.data_root).expanduser()
# %%
import torch
CHW = 3, 12, 12
x = torch.randn(CHW)
y = torch.randn(CHW)
new = [x, y]
new = torch.stack(new)
print(x.size())
print(new.size())
# %%
print('a');
print('b')
# %%
# conver list to tensor
import torch
x = torch.tensor([1, 2, 3.])
print(x)
# %%
from torchvision.transforms import Compose, Resize, ToTensor
import torchmeta
from torchmeta.datasets.helpers import miniimagenet
from pathlib import Path
from types import SimpleNamespace
from tqdm import tqdm
## get args
args = SimpleNamespace(episodes=5, n_classes=5, k_shot=5, k_eval=15, meta_batch_size=1, n_workers=4)
args.data_root = Path("~/automl-meta-learning/data/miniImagenet").expanduser()
## get meta-batch loader
train_transform = Compose([Resize(84), ToTensor()])
dataset = miniimagenet(
args.data_root,
ways=args.n_classes,
shots=args.k_shot,
test_shots=args.k_eval,
meta_split='train',
download=False)
dataloader = torchmeta.utils.data.BatchMetaDataLoader(
dataset,
batch_size=args.meta_batch_size,
num_workers=args.n_workers)
with tqdm(dataset):
print(f'len(dataloader)= {len(dataloader)}')
for episode, batch in enumerate(dataloader):
print(f'episode = {episode}')
train_inputs, train_labels = batch["train"]
print(f'train_labels[0] = {train_labels[0]}')
print(f'train_inputs.size() = {train_inputs.size()}')
pass
if episode >= args.episodes:
break
# %%
# zip tensors
import torch
x = torch.tensor([1., 2., 3.])
y = torch.tensor([1, 2, 3])
print(list(zip(x, y)))
xx = torch.randn(2, 3, 84, 84)
yy = torch.randn(2, 3, 32, 32)
print(len(list(zip(xx, yy))))
# %%
x = 2
print(x)
# %%
## sinusioid function
print('Starting Sinusioid cell')
from torchmeta.toy import Sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.transforms import ClassSplitter
# from tqdm import tqdm
batch_size = 16
shots = 5
test_shots = 15
# dataset = torchmeta.toy.helpers.sinusoid(shots=shots, test_shots=tes_shots)
metaset_dataset = Sinusoid(num_samples_per_task=shots + test_shots, num_tasks=100, noise_std=None)
splitter_metset_dataset = ClassSplitter(
metaset_dataset,
num_train_per_class=shots,
num_test_per_class=test_shots,
shuffle=True)
dataloader = BatchMetaDataLoader(splitter_metset_dataset, batch_size=batch_size, num_workers=4)
print(f'batch_size = {batch_size}')
print(f'len(dataset) = {len(metaset_dataset)}')
print(f'len(dataloader) = {len(dataloader)}\n')
for batch_idx, batch in enumerate(dataloader):
print(f'batch_idx = {batch_idx}')
train_inputs, train_targets = batch['train']
test_inputs, test_targets = batch['test']
print(f'train_inputs.shape = {train_inputs.shape}')
print(f'train_targets.shape = {train_targets.shape}')
print(f'test_inputs.shape = {test_inputs.shape}')
print(f'test_targets.shape = {test_targets.shape}')
if batch_idx >= 1: # halt after 2 iterations
break
print('DONE\a')
# %%
## notes of torchmeta
from pathlib import Path
import torchmeta
# meta-set: creates collection of data-sets, D_meta = {D_1, ... Dn}
print('\n-- Sinusoid(MetaDataset)')
metaset_sinusoid = torchmeta.toy.Sinusoid(num_samples_per_task=10, num_tasks=1_000_000, noise_std=None)
print(f'type(metaset_sinusoid) = {type(metaset_sinusoid)}')
print(f'len(metaset_sinusoid) = {len(metaset_sinusoid)}')
print(f'metaset_sinusoid = {metaset_sinusoid}')
# this is still a data set but helps implement forming D_i
# | |
= self._test_volte_mt_mt_add_volte_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_wcdma_merge_drop_second_call_from_participant_no_cep(
self):
""" Test VoLTE Conference Call among three phones. No CEP.
Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
Call from PhoneA (VOLTE) to PhoneC (WCDMA), accept on PhoneC.
On PhoneA, merge to conference call (No CEP).
End call on PhoneC, verify call continues.
End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_no_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_wcdma_merge_drop_second_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneA (VoLTE) to PhoneC (WCDMA), accept on PhoneC.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_wcdma_merge_drop_second_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneA (VoLTE) to PhoneC (WCDMA), accept on PhoneC.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-C, verify call continues.
5. On PhoneA disconnect call between A-B, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_wcdma_merge_drop_first_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneA (VoLTE) to PhoneC (WCDMA), accept on PhoneC.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneB, verify call continues.
5. End call on PhoneC, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mo_add_wcdma_merge_drop_first_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneA (VoLTE) to PhoneC (WCDMA), accept on PhoneC.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-B, verify call continues.
5. On PhoneA disconnect call between A-C, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mo_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_wcdma_merge_drop_second_call_from_participant_no_cep(
self):
""" Test VoLTE Conference Call among three phones. No CEP.
Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
On PhoneA, merge to conference call (No CEP).
End call on PhoneC, verify call continues.
End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_no_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_wcdma_merge_drop_second_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_wcdma_merge_drop_second_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-C, verify call continues.
5. On PhoneA disconnect call between A-B, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_wcdma_merge_drop_first_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneB, verify call continues.
5. End call on PhoneC, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mo_mt_add_wcdma_merge_drop_first_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneA (VoLTE) to PhoneB (WCDMA), accept on PhoneB.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-B, verify call continues.
5. On PhoneA disconnect call between A-C, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mo_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_host_cep(
call_ab_id, call_ac_id)
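# Common structure of the conference-call tests in this class: a _test_volte_*_swap_x(0)
# helper sets up the two calls and returns (call_ab_id, call_ac_id), and the matching
# _test_ims_conference_merge_drop_* helper then performs the merge plus the drop/verify
# steps listed in each docstring; every test method simply chains these two helpers.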
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_wcdma_merge_drop_second_call_from_participant_no_cep(
self):
""" Test VoLTE Conference Call among three phones. No CEP.
Call from PhoneB (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
On PhoneA, merge to conference call (No CEP).
End call on PhoneC, verify call continues.
End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mt_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_no_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_wcdma_merge_drop_second_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneB (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneC, verify call continues.
5. End call on PhoneB, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mt_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_wcdma_merge_drop_second_call_from_host_cep(self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneB (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. On PhoneA disconnect call between A-C, verify call continues.
5. On PhoneA disconnect call between A-B, verify call continues.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mt_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_second_call_from_host_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_wcdma_merge_drop_first_call_from_participant_cep(
self):
""" Test VoLTE Conference Call among three phones. CEP enabled.
1. Call from PhoneB (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
2. Call from PhoneC (WCDMA) to PhoneA (VoLTE), accept on PhoneA.
3. On PhoneA, merge to conference call (VoLTE CEP conference call).
4. End call on PhoneB, verify call continues.
5. End call on PhoneC, verify call end on PhoneA.
Returns:
True if pass; False if fail.
"""
call_ab_id, call_ac_id = self._test_volte_mt_mt_add_wcdma_swap_x(0)
if call_ab_id is None or call_ac_id is None:
return False
return self._test_ims_conference_merge_drop_first_call_from_participant_cep(
call_ab_id, call_ac_id)
@TelephonyBaseTest.tel_test_wrap
def test_volte_mt_mt_add_wcdma_merge_drop_first_call_from_host_cep(self):
""" Test VoLTE | |
= [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return cand
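# Outline of the search performed by the func_* variants below (an informal reading of the
# code, not an authoritative spec): candidate values for the smallest bet level ("lowest")
# sit in a queue seeded with 1 and the neighbours of the already placed bets. For each
# candidate, needed_budget is the cost of topping every one of the 37 outcomes up to that
# level, remaining_budget is what is left, and can_replicate bounds how far the level can be
# raised uniformly, which feeds new candidates back into the queue. get_expected(placed,
# lowest, exclude), presumably defined earlier in this file, gives the expected winnings when
# "exclude" of the partially covered outcomes are left below the level; the best net value is
# tracked in ret. The variants differ only in which local variable they return.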
def func_bba94e77e5154f6790eb5c168e946eb3(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return queue
def func_d0cc8317738649fc9b5a6a6d29c3241e(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return budget
def func_ab3a93c6d790498487c1d83861a28b2b(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return larger
def func_9cf9d0b97cb44ddea84599dd104835ec(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return needed_budget
def func_b025205560c5431fa82482f088fee293(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return remaining_budget
def func_d0269692fb8443d7a4830cf0bc5ea88a(infile):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > | |
# Elasticsearch mapping definition for the Assay entity
from glados.es.ws2es.es_util import DefaultMappings
# Shards size - can be overridden from the default calculated value here
# shards = 3,
replicas = 0
analysis = DefaultMappings.COMMON_ANALYSIS
mappings = \
{
'properties':
{
'_metadata':
{
'properties':
{
'es_completion': DefaultMappings.COMPLETION_TYPE
}
},
'assay_category': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'other' , 'confirmatory' , 'confirmatory' , 'confirmatory' , 'confirmatory' , 'confirmatory' , 'confir
# matory' , 'confirmatory' , 'confirmatory' , 'confirmatory'
'assay_classifications':
{
'properties':
{
'assay_class_id': DefaultMappings.ID_REF,
# EXAMPLES:
# '115' , '259' , '322' , '280' , '91' , '91' , '91' , '91' , '91' , '91'
'class_type': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'In vivo efficacy' , 'In vivo efficacy' , 'In vivo efficacy' , 'In vivo efficacy' , 'In vivo e
# fficacy' , 'In vivo efficacy' , 'In vivo efficacy' , 'In vivo efficacy' , 'In vivo efficacy' ,
# 'In vivo efficacy'
'l1': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'ANTINEOPLASTIC AND IMMUNOMODULATING AGENTS' , 'GENITO URINARY SYSTEM AND SEX HORMONES' , 'NER
# VOUS SYSTEM' , 'GENITO URINARY SYSTEM AND SEX HORMONES' , 'ANTINEOPLASTIC AND IMMUNOMODULATING
# AGENTS' , 'ANTINEOPLASTIC AND IMMUNOMODULATING AGENTS' , 'ANTINEOPLASTIC AND IMMUNOMODULATING
# AGENTS' , 'ANTINEOPLASTIC AND IMMUNOMODULATING AGENTS' , 'ANTINEOPLASTIC AND IMMUNOMODULATING
# AGENTS' , 'ANTINEOPLASTIC AND IMMUNOMODULATING AGENTS'
'l2': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'Neoplasm Oncology Models' , 'Assessment of Renal Function' , 'Anti-Depressant Activity' , 'Te
# sticular Steroid Hormones' , 'Melanoma Oncology Models' , 'Melanoma Oncology Models' , 'Melano
# ma Oncology Models' , 'Melanoma Oncology Models' , 'Melanoma Oncology Models' , 'Melanoma Onco
# logy Models'
'l3': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'Neoplasms' , 'Fractional Excretion Methods' , 'General Hypothermia' , 'General Androgen Activ
# ity' , 'Experimental Melanoma' , 'Experimental Melanoma' , 'Experimental Melanoma' , 'Experime
# ntal Melanoma' , 'Experimental Melanoma' , 'Experimental Melanoma'
'source': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'phenotype' , 'phenotype' , 'phenotype' , 'phenotype' , 'phenotype' , 'Hock_2016' , 'phenotype
# ' , 'phenotype' , 'phenotype' , 'phenotype'
}
},
'assay_cell_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'P388' , 'P388' , 'P388' , 'CCRF-CEM' , 'P388' , 'P388' , 'P388' , 'L1210' , 'L1210' , 'L1210'
'assay_chembl_id': DefaultMappings.CHEMBL_ID,
# EXAMPLES:
# 'CHEMBL723493' , 'CHEMBL723497' , 'CHEMBL723502' , 'CHEMBL723503' , 'CHEMBL723504' , 'CHEMBL723505' ,
# 'CHEMBL723506' , 'CHEMBL723507' , 'CHEMBL723508' , 'CHEMBL723512'
'assay_organism': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'Mus musculus' , 'Mus musculus' , 'Mus musculus' , 'Mus musculus' , 'Mus musculus' , 'Mus musculus' ,
# 'Mus musculus' , 'Mus musculus' , 'Mus musculus' , 'Mus musculus'
'assay_parameters':
{
'properties':
{
'active': DefaultMappings.SHORT,
# EXAMPLES:
# '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1'
'comments': DefaultMappings.LOWER_CASE_KEYWORD + DefaultMappings.TEXT_STD,
# EXAMPLES:
# 'Is the measured interaction considered due to direct binding to target?' , 'Is the measured i
# nteraction considered due to direct binding to target?' , 'Is the measured interaction conside
# red due to direct binding to target?' , 'Is the measured interaction considered due to direct
# binding to target?' , 'Is the measured interaction considered due to direct binding to target?
# ' , 'Is the measured interaction considered due to direct binding to target?' , 'Is the measur
# ed interaction considered due to direct binding to target?' , 'Is the measured interaction con
# sidered due to direct binding to target?' , 'Is the measured interaction considered due to dir
# ect binding to target?' , 'Is the measured interaction considered due to direct binding to tar
# get?'
'relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '='
'standard_relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '='
'standard_text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Intravenous' , 'Intravenous' , 'Intravenous' , 'Intravenous' , 'Intraduodenal' , 'Intravenous
# ' , 'Intragastric' , 'Intraduodenal' , 'Oral' , 'Intravenous'
'standard_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE'
'standard_type_fixed': DefaultMappings.KEYWORD,
# EXAMPLES:
# '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0'
'standard_units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1' , 'mg.kg-1'
# , 'mg.kg-1' , 'mg.kg-1'
'standard_value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '6.3' , '25' , '2.6' , '15' , '10' , '2.27' , '5.45' , '10' , '10' , '10'
'text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Intravenous' , 'Intravenous' , 'Intravenous' , 'Intravenous' , 'Intraduodenal' , 'Intravenous
# ' , 'Intragastric' , 'Intraduodenal' , 'Oral' , 'Intravenous'
'type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE' , 'DOSE'
'units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/kg' , 'mg/
# kg'
'value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '6.3' , '25' , '2.6' , '15' , '10' , '2.27' , '5.45' , '10' , '10' , '10'
}
},
'assay_strain': DefaultMappings.KEYWORD,
# EXAMPLES:
# '3B' , 'STIB900' , 'Sprague-Dawley' , 'Wistar' , 'Albino' , 'NMRI' , 'Swiss' , 'Swiss Albino' , 'Sprag
# ue-Dawley' , 'V1/S'
'assay_subcellular_fraction': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Microsomes' , 'Microsomes' , 'Microsomes' , 'Microsomes' , 'Microsomes' , 'Microsomes' , 'Microsomes'
# , 'Microsomes' , 'Membranes' , 'Membranes'
'assay_tax_id': DefaultMappings.ID_REF,
# EXAMPLES:
# '10090' , '10090' , '10090' , '10090' , '10090' , '10090' , '10090' , '10090' , '10090' , '10090'
'assay_test_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'In vivo' , 'In vivo' , 'In vivo' , 'In vivo' , 'In vivo' , 'In vivo' , 'In vivo' , 'In vivo' , 'In vi
# vo' , 'In vivo'
'assay_tissue': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Plasma' , 'Blood' , 'Brain' , 'Uterus' , 'Spleen' , 'Spleen' , 'Stomach' , 'Stomach' , 'Lung' , 'Plas
# ma'
'assay_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'F' , 'F' , 'F' , 'F' , 'F' , 'F' , 'F' , 'F' , 'F' , 'F'
'assay_type_description': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Functional' , 'Functional' , 'Functional' , 'Functional' , 'Functional' , 'Functional' , 'Functional'
# , 'Functional' , 'Functional' , 'Functional'
'bao_format': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'BAO_0000218' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0
# 000218' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0000218'
'bao_label': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'assay format' , 'tissue-based format' , 'assay format' , 'cell membrane format' , 'assay format' , 't
# issue-based format' , 'single protein format' , 'cell membrane format' , 'assay format' , 'tissue-base
# d format'
'cell_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL3308401' , 'CHEMBL3308401' , 'CHEMBL3308401' , 'CHEMBL3307641' , 'CHEMBL3308401' , 'CHEMBL33084
# 01' , 'CHEMBL3308401' , 'CHEMBL3308391' , 'CHEMBL3308391' , 'CHEMBL3308391'
'confidence_description': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Target assigned is non-molecular' , 'Target assigned is non-molecular' , 'Target assigned is non-mole
# cular' , 'Target assigned is non-molecular' , 'Target assigned is non-molecular' , 'Target assigned is
# non-molecular' , 'Target assigned is non-molecular' , 'Target assigned is non-molecular' , 'Target as
# signed is non-molecular' , 'Target assigned is non-molecular'
'confidence_score': DefaultMappings.DOUBLE,
# EXAMPLES:
# '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1'
'description': DefaultMappings.TEXT_STD,
# EXAMPLES:
# 'Effect on prolongation of hypoxic survival time (HS) in Carworth farms male albino mice' , 'Spermatog
# enic index (percent change from vehicle control) was calculated in Adult Male Swiss Mice at 2 (umol/kg
# ) dose' , 'Spermatogenic index (percent change from vehicle control) was calculated in Adult Male Swis
# s Mice at 3 (umol/kg) dose' , 'Spermatogenic index (percent change | |
import logging
import pickle
import random
from collections import Counter
from itertools import chain, permutations
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from snorkel.analysis import Scorer
from snorkel.labeling.analysis import LFAnalysis
from snorkel.labeling.model.graph_utils import get_clique_tree
from snorkel.labeling.model.logger import Logger
from snorkel.types import Config
from snorkel.utils import probs_to_preds
from snorkel.utils.config_utils import merge_config
from snorkel.utils.lr_schedulers import LRSchedulerConfig
from snorkel.utils.optimizers import OptimizerConfig
Metrics = Dict[str, float]
class TrainConfig(Config):
"""Settings for the fit() method of LabelModel.
Parameters
----------
n_epochs
The number of epochs to train (where each epoch is a single optimization step)
lr
Base learning rate (will also be affected by lr_scheduler choice and settings)
l2
Centered L2 regularization strength
optimizer
Which optimizer to use (one of ["sgd", "adam", "adamax"])
optimizer_config
Settings for the optimizer
lr_scheduler
Which lr_scheduler to use (one of ["constant", "linear", "exponential", "step"])
lr_scheduler_config
Settings for the LRScheduler
prec_init
LF precision initializations / priors
seed
A random seed to initialize the random number generator with
log_freq
Report loss every this many epochs (steps)
mu_eps
Restrict the learned conditional probabilities to [mu_eps, 1-mu_eps]
"""
n_epochs: int = 100
lr: float = 0.01
l2: float = 0.0
optimizer: str = "sgd"
optimizer_config: OptimizerConfig = OptimizerConfig() # type: ignore
lr_scheduler: str = "constant"
lr_scheduler_config: LRSchedulerConfig = LRSchedulerConfig() # type: ignore
prec_init: float = 0.7
seed: int = np.random.randint(1e6)
log_freq: int = 10
mu_eps: Optional[float] = None
class LabelModelConfig(Config):
"""Settings for the LabelModel initialization.
Parameters
----------
verbose
Whether to include print statements
device
What device to place the model on ('cpu' or 'cuda:0', for example)
"""
verbose: bool = True
device: str = "cpu"
class _CliqueData(NamedTuple):
start_index: int
end_index: int
max_cliques: Set[int]
class LabelModel(nn.Module):
r"""A model for learning the LF accuracies and combining their output labels.
This class learns a model of the labeling functions' conditional probabilities
of outputting the true (unobserved) label `Y`, `P(\lf | Y)`, and uses this learned
model to re-weight and combine their output labels.
This class is based on the approach in [Training Complex Models with Multi-Task
Weak Supervision](https://arxiv.org/abs/1810.02840), published in AAAI'19. In this
approach, we compute the inverse generalized covariance matrix of the junction tree
of a given LF dependency graph, and perform a matrix completion-style approach with
respect to these empirical statistics. The result is an estimate of the conditional
LF probabilities, `P(\lf | Y)`, which are then set as the parameters of the label
model used to re-weight and combine the labels output by the LFs.
Currently this class uses a conditionally independent label model, in which the LFs
are assumed to be conditionally independent given `Y`.
Examples
--------
>>> label_model = LabelModel()
>>> label_model = LabelModel(cardinality=3)
>>> label_model = LabelModel(cardinality=3, device='cpu')
>>> label_model = LabelModel(cardinality=3)
Parameters
----------
cardinality
Number of classes, by default 2
**kwargs
Arguments for changing config defaults
Raises
------
ValueError
If config device set to cuda but only cpu is available
Attributes
----------
cardinality
Number of classes, by default 2
config
Training configuration
seed
Random seed
"""
def __init__(self, cardinality: int = 2, **kwargs: Any) -> None:
super().__init__()
self.config: LabelModelConfig = LabelModelConfig(**kwargs)
self.cardinality = cardinality
# Confirm that cuda is available if config is using CUDA
if self.config.device != "cpu" and not torch.cuda.is_available():
raise ValueError("device=cuda but CUDA not available.")
# By default, put model in eval mode; switch to train mode in training
self.eval()
def _create_L_ind(self, L: np.ndarray) -> np.ndarray:
"""Convert a label matrix with labels in 0...k to a one-hot format.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
Returns
-------
np.ndarray
An [n,m*k] dense np.ndarray with values in {0,1}
"""
L_ind = np.zeros((self.n, self.m * self.cardinality))
for y in range(1, self.cardinality + 1):
# A[x::y] slices A starting at x at intervals of y
# e.g., np.arange(9)[0::3] == np.array([0,3,6])
L_ind[:, (y - 1) :: self.cardinality] = np.where(L == y, 1, 0)
return L_ind
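# Tiny worked example of the conversion above (illustrative values only): with m=2 LFs,
# cardinality k=2 and a single data point L = [[1, 2]], the columns of L_ind are
# (lf0==1, lf0==2, lf1==1, lf1==2), so L_ind = [[1, 0, 0, 1]]; a 0 entry in L (abstain
# after shifting) contributes no ones at all.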
def _get_augmented_label_matrix(
self, L: np.ndarray, higher_order: bool = False
) -> np.ndarray:
"""Create augmented version of label matrix.
In augmented version, each column is an indicator
for whether a certain source or clique of sources voted in a certain
pattern.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
Returns
-------
np.ndarray
An [n,m*k] dense matrix with values in {0,1}
"""
# Create a helper data structure which maps cliques (as tuples of member
# sources) --> {start_index, end_index, maximal_cliques}, where
# the last value is a set of indices in this data structure
self.c_data: Dict[int, _CliqueData] = {}
for i in range(self.m):
self.c_data[i] = _CliqueData(
start_index=i * self.cardinality,
end_index=(i + 1) * self.cardinality,
max_cliques=set(
[
j
for j in self.c_tree.nodes()
if i in self.c_tree.node[j]["members"]
]
),
)
L_ind = self._create_L_ind(L)
# Get the higher-order clique statistics based on the clique tree
# First, iterate over the maximal cliques (nodes of c_tree) and
# separator sets (edges of c_tree)
if higher_order:
L_aug = np.copy(L_ind)
for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
if isinstance(item, int):
C = self.c_tree.node[item]
elif isinstance(item, tuple):
C = self.c_tree[item[0]][item[1]]
else:
raise ValueError(item)
members = list(C["members"])
# With unary maximal clique, just store its existing index
C["start_index"] = members[0] * self.cardinality
C["end_index"] = (members[0] + 1) * self.cardinality
return L_aug
else:
return L_ind
def _build_mask(self) -> None:
"""Build mask applied to O^{-1}, O for the matrix approx constraint."""
self.mask = torch.ones(self.d, self.d).byte()
for ci in self.c_data.values():
si = ci.start_index
ei = ci.end_index
for cj in self.c_data.values():
sj, ej = cj.start_index, cj.end_index
# Check if ci and cj are part of the same maximal clique
# If so, mask out their corresponding blocks in O^{-1}
if len(ci.max_cliques.intersection(cj.max_cliques)) > 0:
self.mask[si:ei, sj:ej] = 0
self.mask[sj:ej, si:ei] = 0
def _generate_O(self, L: np.ndarray, higher_order: bool = False) -> None:
"""Generate overlaps and conflicts matrix from label matrix.
Parameters
----------
L
An [n,m] label matrix with values in {0,1,...,k}
higher_order
Whether to include higher-order correlations (e.g. LF pairs) in matrix
"""
L_aug = self._get_augmented_label_matrix(L, higher_order=higher_order)
self.d = L_aug.shape[1]
self.O = (
torch.from_numpy(L_aug.T @ L_aug / self.n).float().to(self.config.device)
)
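# O is the [d, d] empirical second-moment matrix of the augmented indicators:
# O[p, q] = (1/n) * sum_i L_aug[i, p] * L_aug[i, q], i.e. the fraction of data points on
# which indicator columns p and q fire together. Its diagonal holds the per-value labeling
# propensities that _init_params reads back out via torch.diag(self.O).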
def _init_params(self) -> None:
r"""Initialize the learned params.
- \mu is the primary learned parameter, where each row corresponds to
the probability of a clique C emitting a specific combination of labels,
conditioned on different values of Y (for each column); that is:
self.mu[i*self.cardinality + j, y] = P(\lambda_i = j | Y = y)
and similarly for higher-order cliques.
Raises
------
ValueError
If prec_init shape does not match number of LFs
"""
# Initialize mu so as to break basic reflective symmetry
# Note that we are given either a single or per-LF initial precision
# value, prec_i = P(Y=y|\lf=y), and use:
# mu_init = P(\lf=y|Y=y) = P(\lf=y) * prec_i / P(Y=y)
# Handle single values
if isinstance(self.train_config.prec_init, (int, float)):
self._prec_init = self.train_config.prec_init * torch.ones(self.m)
if self._prec_init.shape[0] != self.m:
raise ValueError(f"prec_init must have shape {self.m}.")
# Get the per-value labeling propensities
# Note that self.O must have been computed already!
lps = torch.diag(self.O).cpu().detach().numpy()
# TODO: Update for higher-order cliques!
self.mu_init = torch.zeros(self.d, self.cardinality)
for i in range(self.m):
for y in range(self.cardinality):
idx = i * self.cardinality + y
mu_init = torch.clamp(lps[idx] * self._prec_init[i] / self.p[y], 0, 1)
self.mu_init[idx, y] += mu_init
# Initialize randomly based on self.mu_init
self.mu = nn.Parameter(self.mu_init.clone() * np.random.random()).float()
# Build the mask over O^{-1}
self._build_mask()
def _get_conditional_probs(self, mu: np.ndarray) -> np.ndarray:
r"""Return the estimated conditional probabilities table given parameters mu.
Given a parameter vector mu, return the estimated conditional probabilities
table cprobs, where cprobs is an (m, k+1, k)-dim np.ndarray with:
cprobs[i, j, k] = P(\lf_i = j-1 | Y = k)
where m is the number of LFs, k is the cardinality, and cprobs includes the
conditional abstain probabilities P(\lf_i = -1 | Y = y).
Parameters
----------
mu
An [m * k, k] np.ndarray with entries in [0, 1]
Returns
-------
np.ndarray
An [m, k + | |
( fill_colour.red(), fill_colour.green(), fill_colour.blue() )
dictionary_part[ 'colours' ][ colour_type ] = ( border_rgb, fill_rgb )
return dictionary_part
class _ServiceRatingsNumericalPanel( ClientGUICommon.StaticBox ):
def __init__( self, parent, dictionary ):
ClientGUICommon.StaticBox.__init__( self, parent, 'numerical ratings' )
self._num_stars = QP.MakeQSpinBox( self, min=1, max=20 )
self._allow_zero = QW.QCheckBox( self )
#
self._num_stars.setValue( dictionary['num_stars'] )
self._allow_zero.setChecked( dictionary[ 'allow_zero' ] )
#
rows = []
rows.append( ( 'number of \'stars\': ', self._num_stars ) )
rows.append( ( 'allow a zero rating: ', self._allow_zero ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
self.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
def GetValue( self ):
dictionary_part = {}
num_stars = self._num_stars.value()
allow_zero = self._allow_zero.isChecked()
if num_stars == 1 and not allow_zero:
allow_zero = True
dictionary_part[ 'num_stars' ] = num_stars
dictionary_part[ 'allow_zero' ] = allow_zero
return dictionary_part
class _ServiceIPFSPanel( ClientGUICommon.StaticBox ):
def __init__( self, parent, dictionary ):
ClientGUICommon.StaticBox.__init__( self, parent, 'ipfs' )
interaction_panel = ClientGUIPanels.IPFSDaemonStatusAndInteractionPanel( self, self.parentWidget().GetValue )
tts = 'This is an *experimental* IPFS filestore that will not copy files when they are pinned. IPFS will refer to files using their original location (i.e. your hydrus client\'s file folder(s)).'
tts += os.linesep * 2
tts += 'Only turn this on if you know what it is.'
self._use_nocopy = QW.QCheckBox( self )
self._use_nocopy.setToolTip( tts )
portable_initial_dict = dict( dictionary[ 'nocopy_abs_path_translations' ] )
abs_initial_dict = {}
current_file_locations = HG.client_controller.client_files_manager.GetCurrentFileLocations()
for ( portable_hydrus_path, ipfs_path ) in portable_initial_dict.items():
hydrus_path = HydrusPaths.ConvertPortablePathToAbsPath( portable_hydrus_path )
if hydrus_path in current_file_locations:
abs_initial_dict[ hydrus_path ] = ipfs_path
for hydrus_path in current_file_locations:
if hydrus_path not in abs_initial_dict:
abs_initial_dict[ hydrus_path ] = ''
help_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().help, self._ShowHelp )
help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this path remapping control -->', QG.QColor( 0, 0, 255 ) )
self._nocopy_abs_path_translations = ClientGUIControls.StringToStringDictControl( self, abs_initial_dict, key_name = 'hydrus path', value_name = 'ipfs path', allow_add_delete = False, edit_keys = False )
self._multihash_prefix = QW.QLineEdit( self )
tts = 'When you tell the client to copy a ipfs multihash to your clipboard, it will prefix it with whatever is set here.'
tts += os.linesep * 2
tts += 'Use this if you want to copy a full gateway url. For instance, you could put here:'
tts += os.linesep * 2
tts += 'http://127.0.0.1:8080/ipfs/'
tts += os.linesep
tts += '-or-'
tts += os.linesep
tts += 'http://ipfs.io/ipfs/'
self._multihash_prefix.setToolTip( tts )
#
self._use_nocopy.setChecked( dictionary[ 'use_nocopy' ] )
self._multihash_prefix.setText( dictionary[ 'multihash_prefix' ] )
#
rows = []
rows.append( ( 'clipboard multihash url prefix: ', self._multihash_prefix ) )
rows.append( ( 'use \'nocopy\' filestore for pinning: ', self._use_nocopy ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
self.Add( interaction_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self.Add( help_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self.Add( self._nocopy_abs_path_translations, CC.FLAGS_EXPAND_BOTH_WAYS )
self._UpdateButtons()
self._use_nocopy.clicked.connect( self._UpdateButtons )
def _ShowHelp( self ):
message = '\'nocopy\' is experimental and advanced!'
message += os.linesep * 2
message += 'In order to add a file through \'nocopy\', IPFS needs to be given a path that is beneath the directory in which its datastore is. Usually this is your USERDIR (default IPFS location is ~/.ipfs). Also, if your IPFS daemon runs on another computer, that path needs to be given according to that machine\'s filesystem (and, perhaps, pointing to a shared folder that stores your hydrus files).'
message += os.linesep * 2
message += 'If your hydrus client_files directory is not already in your USERDIR, you will need to make some symlinks and then put these paths in the control so hydrus knows how to translate the paths when it pins.'
message += os.linesep * 2
message += 'e.g. If you symlink E:\\hydrus\\files to C:\\users\\you\\ipfs_maps\\e_media, then put that same C:\\users\\you\\ipfs_maps\\e_media in the right column for that hydrus file location, and you _should_ be good.'
QW.QMessageBox.information( self, 'Information', message )
def _UpdateButtons( self ):
if self._use_nocopy.isChecked():
self._nocopy_abs_path_translations.setEnabled( True )
else:
self._nocopy_abs_path_translations.setEnabled( False )
def GetValue( self ):
dictionary_part = {}
dictionary_part[ 'use_nocopy' ] = self._use_nocopy.isChecked()
abs_dict = self._nocopy_abs_path_translations.GetValue()
portable_dict = {}
for ( hydrus_path, ipfs_path ) in abs_dict.items():
portable_hydrus_path = HydrusPaths.ConvertAbsPathToPortablePath( hydrus_path )
portable_dict[ portable_hydrus_path ] = ipfs_path
dictionary_part[ 'nocopy_abs_path_translations' ] = portable_dict
dictionary_part[ 'multihash_prefix' ] = self._multihash_prefix.text()
return dictionary_part
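# For reference, the dictionary_part returned above mirrors the service dictionary this
# panel was constructed from: 'use_nocopy' (bool), 'nocopy_abs_path_translations'
# (portable hydrus path -> ipfs path) and 'multihash_prefix' (str prefix used when copying
# multihashes to the clipboard).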
class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent ):
ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._listbook = ClientGUICommon.ListBook( self )
self._listbook.AddPage( 'gui', 'gui', self._GUIPanel( self._listbook ) ) # leave this at the top, to make it default page
self._listbook.AddPage( 'gui pages', 'gui pages', self._GUIPagesPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'connection', 'connection', self._ConnectionPanel( self._listbook ) )
self._listbook.AddPage( 'external programs', 'external programs', self._ExternalProgramsPanel( self._listbook ) )
self._listbook.AddPage( 'files and trash', 'files and trash', self._FilesAndTrashPanel( self._listbook ) )
self._listbook.AddPage( 'file viewing statistics', 'file viewing statistics', self._FileViewingStatisticsPanel( self._listbook ) )
self._listbook.AddPage( 'speed and memory', 'speed and memory', self._SpeedAndMemoryPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'maintenance and processing', 'maintenance and processing', self._MaintenanceAndProcessingPanel( self._listbook ) )
self._listbook.AddPage( 'media', 'media', self._MediaPanel( self._listbook ) )
self._listbook.AddPage( 'audio', 'audio', self._AudioPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'default system predicates', 'default system predicates', self._DefaultFileSystemPredicatesPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'colours', 'colours', self._ColoursPanel( self._listbook ) )
self._listbook.AddPage( 'regex favourites', 'regex favourites', self._RegexPanel( self._listbook ) )
self._listbook.AddPage( 'sort/collect', 'sort/collect', self._SortCollectPanel( self._listbook ) )
self._listbook.AddPage( 'downloading', 'downloading', self._DownloadingPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'duplicates', 'duplicates', self._DuplicatesPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'importing', 'importing', self._ImportingPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'style', 'style', self._StylePanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tag presentation', 'tag presentation', self._TagPresentationPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tag suggestions', 'tag suggestions', self._TagSuggestionsPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tags', 'tags', self._TagsPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'thumbnails', 'thumbnails', self._ThumbnailsPanel( self._listbook, self._new_options ) )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._listbook, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
class _AudioPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#self._media_viewer_uses_its_own_audio_volume = QW.QCheckBox( self )
self._preview_uses_its_own_audio_volume = QW.QCheckBox( self )
self._has_audio_label = QW.QLineEdit( self )
#
tt = 'If unchecked, this media canvas will use the \'global\' audio volume slider. If checked, this media canvas will have its own separate one.'
tt += os.linesep * 2
tt += 'Keep this on if you would like the preview viewer to be quieter than the main media viewer.'
#self._media_viewer_uses_its_own_audio_volume.setChecked( self._new_options.GetBoolean( 'media_viewer_uses_its_own_audio_volume' ) )
self._preview_uses_its_own_audio_volume.setChecked( self._new_options.GetBoolean( 'preview_uses_its_own_audio_volume' ) )
#self._media_viewer_uses_its_own_audio_volume.setToolTip( tt )
self._preview_uses_its_own_audio_volume.setToolTip( tt )
self._has_audio_label.setText( self._new_options.GetString( 'has_audio_label' ) )
#
vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'The preview window has its own volume: ', self._preview_uses_its_own_audio_volume ) )
#rows.append( ( 'The media viewer has its own volume: ', self._media_viewer_uses_its_own_audio_volume ) )
rows.append( ( 'Label for files with audio: ', self._has_audio_label ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def UpdateOptions( self ):
#self._new_options.SetBoolean( 'media_viewer_uses_its_own_audio_volume', self._media_viewer_uses_its_own_audio_volume.isChecked() )
self._new_options.SetBoolean( 'preview_uses_its_own_audio_volume', self._preview_uses_its_own_audio_volume.isChecked() )
self._new_options.SetString( 'has_audio_label', self._has_audio_label.text() )
class _ColoursPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
coloursets_panel = ClientGUICommon.StaticBox( self, 'coloursets' )
self._current_colourset = ClientGUICommon.BetterChoice( coloursets_panel )
self._current_colourset.addItem( 'default', 'default' )
self._current_colourset.addItem( 'darkmode', 'darkmode' )
self._current_colourset.SetValue( self._new_options.GetString( 'current_colourset' ) )
self._notebook = QW.QTabWidget( coloursets_panel )
self._gui_colours = {}
for colourset in ( 'default', 'darkmode' ):
self._gui_colours[ colourset ] = {}
| |
#!/usr/bin/env python3
import re
import sys
import pprint
import getopt
pp = pprint.PrettyPrinter(indent=4)
prefix = None
pmgfiles = list()
outfile = None
debug = False
genhdr = False
opts, args = getopt.getopt(sys.argv[1:], "p:o:dg")
for o, a in opts:
if o == "-p":
prefix = a
elif o == "-o":
outfile = a
elif o == "-d":
debug = True
elif o == "-g":
genhdr = True
if outfile is None:
outfile = "/dev/stdout"
for a in args:
assert a.endswith(".pmg")
if prefix is None and len(args) == 1:
prefix = a[0:-4]
prefix = prefix.split('/')[-1]
pmgfiles.append(a)
assert prefix is not None
current_pattern = None
patterns = dict()
state_types = dict()
udata_types = dict()
blocks = list()
ids = dict()
def rewrite_cpp(s):
t = list()
i = 0
while i < len(s):
if s[i] in ("'", '"') and i + 1 < len(s):
j = i + 1
while j + 1 < len(s) and s[j] != s[i]:
if s[j] == '\\' and j + 1 < len(s):
j += 1
j += 1
t.append(s[i:j+1])
i = j + 1
continue
if s[i] in ('$', '\\') and i + 1 < len(s):
j = i + 1
while True:
if j == len(s):
j -= 1
break
if ord('a') <= ord(s[j]) <= ord('z'):
j += 1
continue
if ord('A') <= ord(s[j]) <= ord('Z'):
j += 1
continue
if ord('0') <= ord(s[j]) <= ord('9'):
j += 1
continue
if s[j] == '_':
j += 1
continue
j -= 1
break
n = s[i:j+1]
i = j + 1
if n[0] == '$':
v = "id_d_" + n[1:]
else:
v = "id_b_" + n[1:]
if v not in ids:
ids[v] = n
else:
assert ids[v] == n
t.append(v)
continue
if s[i] == "\t":
t.append(" ")
else:
t.append(s[i])
i += 1
return "".join(t)
def process_pmgfile(f):
global current_pattern
while True:
line = f.readline()
if line == "": break
line = line.strip()
cmd = line.split()
if len(cmd) == 0 or cmd[0].startswith("//"): continue
cmd = cmd[0]
if cmd == "pattern":
if current_pattern is not None:
block = dict()
block["type"] = "final"
block["pattern"] = current_pattern
blocks.append(block)
line = line.split()
assert len(line) == 2
assert line[1] not in patterns
current_pattern = line[1]
patterns[current_pattern] = len(blocks)
state_types[current_pattern] = dict()
udata_types[current_pattern] = dict()
continue
assert current_pattern is not None
if cmd == "state":
m = re.match(r"^state\s+<(.*?)>\s+(([A-Za-z_][A-Za-z_0-9]*\s+)*[A-Za-z_][A-Za-z_0-9]*)\s*$", line)
assert m
type_str = m.group(1)
states_str = m.group(2)
for s in re.split(r"\s+", states_str):
assert s not in state_types[current_pattern]
state_types[current_pattern][s] = type_str
continue
if cmd == "udata":
m = re.match(r"^udata\s+<(.*?)>\s+(([A-Za-z_][A-Za-z_0-9]*\s+)*[A-Za-z_][A-Za-z_0-9]*)\s*$", line)
assert m
type_str = m.group(1)
udatas_str = m.group(2)
for s in re.split(r"\s+", udatas_str):
assert s not in udata_types[current_pattern]
udata_types[current_pattern][s] = type_str
continue
if cmd == "match":
block = dict()
block["type"] = "match"
block["pattern"] = current_pattern
line = line.split()
assert len(line) == 2
assert line[1] not in state_types[current_pattern]
block["cell"] = line[1]
state_types[current_pattern][line[1]] = "Cell*";
block["if"] = list()
block["select"] = list()
block["index"] = list()
block["filter"] = list()
block["optional"] = False
while True:
l = f.readline()
assert l != ""
a = l.split()
if len(a) == 0 or a[0].startswith("//"): continue
if a[0] == "endmatch": break
if a[0] == "if":
b = l.lstrip()[2:]
block["if"].append(rewrite_cpp(b.strip()))
continue
if a[0] == "select":
b = l.lstrip()[6:]
block["select"].append(rewrite_cpp(b.strip()))
continue
if a[0] == "index":
m = re.match(r"^\s*index\s+<(.*?)>\s+(.*?)\s*===\s*(.*?)\s*$", l)
assert m
block["index"].append((m.group(1), rewrite_cpp(m.group(2)), rewrite_cpp(m.group(3))))
continue
if a[0] == "filter":
b = l.lstrip()[6:]
block["filter"].append(rewrite_cpp(b.strip()))
continue
if a[0] == "optional":
block["optional"] = True
continue
assert False
blocks.append(block)
continue
if cmd == "code":
block = dict()
block["type"] = "code"
block["pattern"] = current_pattern
block["code"] = list()
block["states"] = set()
for s in line.split()[1:]:
assert s in state_types[current_pattern]
block["states"].add(s)
while True:
l = f.readline()
assert l != ""
a = l.split()
if len(a) == 0: continue
if a[0] == "endcode": break
block["code"].append(rewrite_cpp(l.rstrip()))
blocks.append(block)
continue
assert False
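# The .pmg grammar accepted above, as far as this parser is concerned: a file is a sequence
# of "pattern <name>" sections; within a pattern, "state <type> names..." and
# "udata <type> names..." declare typed members, "match <cell> ... endmatch" blocks hold
# if/select/index/filter lines plus an optional "optional" flag, and "code [states...] ...
# endcode" blocks hold raw C++ that is piped through rewrite_cpp. Anything else hits the
# assert False fall-through.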
for fn in pmgfiles:
with open(fn, "r") as f:
process_pmgfile(f)
if current_pattern is not None:
block = dict()
block["type"] = "final"
block["pattern"] = current_pattern
blocks.append(block)
current_pattern = None
if debug:
pp.pprint(blocks)
with open(outfile, "w") as f:
for fn in pmgfiles:
print("// Generated by pmgen.py from {}".format(fn), file=f)
print("", file=f)
if genhdr:
print("#include \"kernel/yosys.h\"", file=f)
print("#include \"kernel/sigtools.h\"", file=f)
print("", file=f)
print("YOSYS_NAMESPACE_BEGIN", file=f)
print("", file=f)
print("struct {}_pm {{".format(prefix), file=f)
print(" Module *module;", file=f)
print(" SigMap sigmap;", file=f)
print(" std::function<void()> on_accept;".format(prefix), file=f)
print("", file=f)
for index in range(len(blocks)):
block = blocks[index]
if block["type"] == "match":
index_types = list()
for entry in block["index"]:
index_types.append(entry[0])
print(" typedef std::tuple<{}> index_{}_key_type;".format(", ".join(index_types), index), file=f)
print(" dict<index_{}_key_type, vector<Cell*>> index_{};".format(index, index), file=f)
print(" dict<SigBit, pool<Cell*>> sigusers;", file=f)
print(" pool<Cell*> blacklist_cells;", file=f)
print(" pool<Cell*> autoremove_cells;", file=f)
print(" bool blacklist_dirty;", file=f)
print(" int rollback;", file=f)
print("", file=f)
for current_pattern in sorted(patterns.keys()):
print(" struct state_{}_t {{".format(current_pattern), file=f)
for s, t in sorted(state_types[current_pattern].items()):
print(" {} {};".format(t, s), file=f)
print(" }} st_{};".format(current_pattern), file=f)
print("", file=f)
print(" struct udata_{}_t {{".format(current_pattern), file=f)
for s, t in sorted(udata_types[current_pattern].items()):
print(" {} {};".format(t, s), file=f)
print(" }} ud_{};".format(current_pattern), file=f)
print("", file=f)
current_pattern = None
for v, n in sorted(ids.items()):
if n[0] == "\\":
print(" IdString {}{{\"\\{}\"}};".format(v, n), file=f)
else:
print(" IdString {}{{\"{}\"}};".format(v, n), file=f)
print("", file=f)
print(" void add_siguser(const SigSpec &sig, Cell *cell) {", file=f)
print(" for (auto bit : sigmap(sig)) {", file=f)
print(" if (bit.wire == nullptr) continue;", file=f)
print(" if (sigusers.count(bit) == 0 && bit.wire->port_id)", file=f)
print(" sigusers[bit].insert(nullptr);", file=f)
print(" sigusers[bit].insert(cell);", file=f)
print(" }", file=f)
print(" }", file=f)
print("", file=f)
print(" void blacklist(Cell *cell) {", file=f)
print(" if (cell != nullptr) {", file=f)
print(" if (blacklist_cells.insert(cell).second)", file=f)
print(" blacklist_dirty = true;", file=f)
print(" }", file=f)
print(" }", file=f)
print("", file=f)
print(" void autoremove(Cell *cell) {", file=f)
print(" if (cell != nullptr) {", file=f)
print(" if (blacklist_cells.insert(cell).second)", file=f)
print(" blacklist_dirty = true;", file=f)
print(" autoremove_cells.insert(cell);", file=f)
print(" }", file=f)
print(" }", file=f)
print("", file=f)
for current_pattern in sorted(patterns.keys()):
print(" void check_blacklist_{}() {{".format(current_pattern), file=f)
print(" if (!blacklist_dirty)", file=f)
print(" return;", file=f)
print(" blacklist_dirty = false;", file=f)
for index in range(len(blocks)):
block = blocks[index]
if block["pattern"] != current_pattern:
continue
if block["type"] == "match":
print(" if (st_{}.{} != nullptr && blacklist_cells.count(st_{}.{})) {{".format(current_pattern, block["cell"], current_pattern, block["cell"]), file=f)
print(" rollback = {};".format(index+1), file=f)
print(" return;", file=f)
print(" }", file=f)
print(" rollback = 0;", file=f)
print(" }", file=f)
print("", file=f)
current_pattern = None
print(" SigSpec port(Cell *cell, IdString portname) {", file=f)
print(" return sigmap(cell->getPort(portname));", file=f)
print(" }", file=f)
print("", file=f)
print(" Const param(Cell *cell, IdString paramname) {", file=f)
print(" return cell->getParam(paramname);", file=f)
print(" }", file=f)
print("", file=f)
print(" int nusers(const SigSpec &sig) {", file=f)
print(" pool<Cell*> users;", file=f)
print(" for (auto bit : sigmap(sig))", file=f)
print(" for (auto user : sigusers[bit])", file=f)
print(" users.insert(user);", file=f)
print(" return GetSize(users);", file=f)
print(" }", file=f)
print("", file=f)
print(" {}_pm(Module *module, const vector<Cell*> &cells) :".format(prefix), file=f)
print(" module(module), sigmap(module) {", file=f)
for current_pattern in sorted(patterns.keys()):
for s, t in sorted(udata_types[current_pattern].items()):
if t.endswith("*"):
print(" ud_{}.{} = nullptr;".format(current_pattern,s), file=f)
else:
print(" ud_{}.{} = {}();".format(current_pattern, s, t), file=f)
current_pattern = None
print(" for (auto cell : module->cells()) {", file=f)
print(" for (auto &conn : cell->connections())", file=f)
print(" add_siguser(conn.second, cell);", file=f)
print(" }", file=f)
print(" for (auto cell : cells) {", file=f)
for index in range(len(blocks)):
block = blocks[index]
if block["type"] == "match":
print(" do {", file=f)
print(" Cell *{} = cell;".format(block["cell"]), file=f)
for expr in block["select"]:
print(" if (!({})) break;".format(expr), file=f)
print(" index_{}_key_type key;".format(index), file=f)
for field, entry in enumerate(block["index"]):
print(" std::get<{}>(key) = {};".format(field, entry[1]), file=f)
print(" index_{}[key].push_back(cell);".format(index), file=f)
print(" } while (0);", file=f)
print(" }", file=f)
print(" }", file=f)
print("", file=f)
print(" ~{}_pm() {{".format(prefix), file=f)
print(" for (auto cell : autoremove_cells)", file=f)
print(" module->remove(cell);", file=f)
print(" }", file=f)
print("", file=f)
for current_pattern in sorted(patterns.keys()):
print(" void run_{}(std::function<void()> on_accept_f) {{".format(current_pattern), file=f)
print(" on_accept = on_accept_f;", file=f)
print(" rollback = 0;", file=f)
print(" blacklist_dirty = false;", file=f)
for s, t in sorted(state_types[current_pattern].items()):
if t.endswith("*"):
print(" st_{}.{} = nullptr;".format(current_pattern, s), file=f)
else:
print(" st_{}.{} = {}();".format(current_pattern, s, t), file=f)
print(" block_{}();".format(patterns[current_pattern]), file=f)
print(" }", file=f)
print("", file=f)
print(" void run_{}(std::function<void({}_pm&)> on_accept_f) {{".format(current_pattern, prefix), file=f)
print(" run_{}([&](){{on_accept_f(*this);}});".format(current_pattern), file=f)
print(" }", file=f)
print("", file=f)
print(" void run_{}(std::function<void(state_{}_t&)> on_accept_f) {{".format(current_pattern, current_pattern), file=f)
print(" run_{}([&](){{on_accept_f(st_{});}});".format(current_pattern, current_pattern), file=f)
print(" }", file=f)
print("", | |
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]]],
device='cuda:0')
ftm_y = torch.tensor([[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]],
[[4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2],
[4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2],
[5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1],
[5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1],
[5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1],
[5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1],
[5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1],
[5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1],
[5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1],
[5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1],
[5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1],
[5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1],
[5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1],
[5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1],
[5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1],
[5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1]]],
device='cuda:0')
ftm = torch.cat([ftm_x.unsqueeze(-1), ftm_y.unsqueeze(-1)], dim=-1)
test_case(T, 7, 7, ftm)
# Expected mapping of features from image to semantic map for 8x8 map
ftm_x = torch.tensor([[[2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5],
[2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5],
[2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5],
[2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5],
[2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5],
[2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5],
[2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5],
[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5],
[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5],
[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5],
[2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5],
[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6],
[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6],
[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6],
[1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6],
[1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, | |
a bit weird because the main mechanism is a file write...
"""
if logger is None: logger=self.logger
log = logger.getChild('prep_cf')
wrkr = self._get_wrkr(Preparor)
#copy the template
wrkr.tag = '%s_%s'%(self.name, self.tag)
cf_fp = wrkr.copy_cf_template() #just copy the default template
#=======================================================================
# #set some basics
#=======================================================================
#fix filepaths
#loop and pull
new_pars_d =dict()
for sect, keys in {
'parameters':['impact_units', 'rtail', 'event_rels', 'felv', 'prec'],
'dmg_fps':['curves'],
'plotting':['impactfmt_str', 'color'],
#'risk_fps':['evals'],
}.items():
d = {k:str(pars_d[k]) for k in keys if k in pars_d} #get these keys where present
if sect == 'parameters':
d['name']=self.name
if len(d)>0:
new_pars_d[sect] = tuple([d, '#set by testAll.py on %s'%wrkr.today_str])
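# Illustrative shape of new_pars_d passed to set_cf_pars() below (hypothetical
# values, not taken from the source data; only the key names come from the
# sections/keys mapping above):
#   {'parameters': ({'impact_units': 'dollars', 'felv': 'ground', 'name': 'tut1'},
#                   '#set by testAll.py on 2021-01-01'),
#    'dmg_fps':    ({'curves': 'curves.xls'}, '#set by testAll.py on 2021-01-01')}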
wrkr.set_cf_pars(new_pars_d)
#=======================================================================
# wrap
#=======================================================================
#update the session
"""subsequent workers will inherit the control file for this workflow"""
self.cf_fp = cf_fp
self.com_hndls.append('cf_fp')
log.info('control file created: %s'%cf_fp)
return cf_fp
def prep_finvConstruct(self,
pars_d,
nest_data = dict(),
miti_data = dict(),
nestID = 0,
logger=None,
dkey='finv_vlay',
):
if logger is None: logger=self.logger
log = logger.getChild('prep_finvConstruct')
wrkr = self._get_wrkr(Preparor)
#=======================================================================
# load the data
#=======================================================================
finv_vlay = self._retrieve(dkey,
f = lambda logger=None: wrkr.load_vlay(
os.path.join(self.base_dir, pars_d['finv_fp']), logger=logger)
)
#=======================================================================
# run converter
#=======================================================================
#prepare the nest data
if len(nest_data)>0:
nest_data2 = wrkr.build_nest_data(nestID=nestID, d_raw = nest_data, logger=log)
else:
nest_data2 = dict()
#build the finv
finv_vlay = wrkr.to_finv(finv_vlay,new_data={**nest_data2, **miti_data}, logger=log)
self.data_d[dkey] = finv_vlay #set for subsequent workers
"""
view(finv_vlay)
"""
def prep_finv(self, pars_d,
logger=None,
dkey='finv_vlay'):
if logger is None: logger=self.logger
log = logger.getChild('prep_finv')
wrkr = self._get_wrkr(Preparor)
#=======================================================================
# load the data
#=======================================================================
finv_vlay = self._retrieve(dkey,
f = lambda logger=None: wrkr.load_vlay(
os.path.join(self.base_dir, pars_d['finv_fp']), logger=logger)
)
#=======================================================================
# execute
#=======================================================================
df = wrkr.finv_to_csv(finv_vlay, felv=pars_d['felv'], write=self.write, logger=log)
if not self.write: wrkr.upd_cf_finv('none')
return df
def prep_curves(self, pars_d,
logger=None):
if logger is None: logger=self.logger
log = logger.getChild('prep_curves')
wrkr = self._get_wrkr(Preparor)
#=======================================================================
# load the data
#=======================================================================
fp = os.path.join(self.base_dir, pars_d['curves_fp'])
assert os.path.exists(fp), 'bad curve_fp: %s'%fp
df_d = pd.read_excel(fp, sheet_name=None, header=None, index_col=None)
log.info('loaded %i from %s'%(len(df_d), fp))
#=======================================================================
# write to control file
#=======================================================================
if not self.write:
wrkr.set_cf_pars(
{
'dmg_fps':(
{'curves':fp},
'#\'curves\' file path set at %s'%(datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')),
),
},
)
return df_d
def prep_evals(self,
pars_d,
duplicate=True, #whether to make a new copy of the evals
logger=None,):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('prep_evals')
wrkr = self._get_wrkr(Preparor)
#=======================================================================
# #get raw filepath
#=======================================================================
fp_raw = os.path.join(self.base_dir, pars_d.pop('evals_fp'))
assert os.path.exists(fp_raw), 'bad raw evals: %s'%fp_raw
#=======================================================================
# copy over file
#=======================================================================
if duplicate:
#get new filepath
fp = os.path.join(self.out_dir, os.path.basename(fp_raw))
shutil.copyfile(fp_raw, fp)
else:
fp = fp_raw
#=======================================================================
# #update control file
#=======================================================================
wrkr.set_cf_pars(
{
'risk_fps':({'evals':fp},
'#evals file path set from %s.py at %s'%(
__name__, datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')))
},
)
#return loaded data
return pd.read_csv(fp, **wrkr.dtag_d['evals'])
def rsamp_prep(self, pars_d, #hazard raster sampler
logger=None,
dkey = 'rlay_d',
rlay_d = None, #optional container of raster layers
**kwargs):
if logger is None: logger=self.logger
log = logger.getChild('rsamp_prep')
wrkr = self._get_wrkr(Rsamp)
#=======================================================================
# load the data
#=======================================================================
if rlay_d is None:
fp = os.path.join(self.base_dir, pars_d['raster_dir'])
rlay_d = self._retrieve(dkey,
f = lambda logger=None: wrkr.load_rlays(fp, logger=logger))
if 'aoi_fp' in pars_d:
fp = os.path.join(self.base_dir, pars_d['aoi_fp'])
aoi_vlay = self._retrieve('aoi_vlay',
f = lambda logger=None: wrkr.load_vlay(fp, logger=logger))
else:
aoi_vlay=None
#=======================================================================
# execute
#=======================================================================
rlay_l = wrkr.runPrep(list(rlay_d.values()), aoi_vlay = aoi_vlay,logger=log,
**kwargs)
#=======================================================================
# wrap
#=======================================================================
self.data_d['rlay_d'] = {lay.name():lay for lay in rlay_l}
def rsamp_haz(self, pars_d, #hazard raster sampler
logger=None,
dkey = 'rlay_d',
rlay_d = None, #optional container of raster layers
rkwargs=None,
):
if logger is None: logger=self.logger
log = logger.getChild('rsamp_haz')
wrkr = self._get_wrkr(Rsamp)
#=======================================================================
# load the data
#=======================================================================
if rlay_d is None:
fp = os.path.join(self.base_dir, pars_d['raster_dir'])
rlay_d = self._retrieve(dkey,
f = lambda logger=None: wrkr.load_rlays(fp, logger=logger))
#dtm layer
if 'dtm_fp' in pars_d:
fp = os.path.join(self.base_dir, pars_d['dtm_fp'])
dtm_rlay = self._retrieve('dtm_rlay',
f = lambda logger=None: wrkr.load_rlay(fp, logger=logger))
else:
dtm_rlay=None
#pull previously loaded
finv_vlay = self.data_d['finv_vlay']
#=======================================================================
# execute
#=======================================================================
#user provided run kwargs
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
#kwargs from control file
kwargs = {k:pars_d[k] for k in ['dthresh', 'as_inun'] if k in pars_d}
res_vlay = wrkr.run(list(rlay_d.values()), finv_vlay, dtm_rlay=dtm_rlay,
**{**rkwargs, **kwargs})
#=======================================================================
# #post
#=======================================================================
wrkr.check()
df = wrkr.write_res(res_vlay, write=self.write)
if not self.write: wrkr.out_fp = 'none' #placeholder
wrkr.update_cf(self.cf_fp)
#=======================================================================
# plots
#=======================================================================
if self.plot:
fig = wrkr.plot_boxes()
self.output_fig(fig)
fig = wrkr.plot_hist()
self.output_fig(fig)
return df
"""
for k in df.columns:
print(k)
"""
def rsamp_dtm(self, pars_d, #dtm raster sampler
logger=None,
rkwargs=None,
):
if logger is None: logger=self.logger
log = logger.getChild('rsamp_dtm')
wrkr = self._get_wrkr(Rsamp)
#=======================================================================
# load the data
#=======================================================================
fp = os.path.join(self.base_dir, pars_d['dtm_fp'])
dtm_rlay = self._retrieve('dtm_rlay',
f = lambda logger=None: wrkr.load_rlay(fp, logger=logger))
#pull previously loaded
finv_vlay = self.data_d['finv_vlay']
#=======================================================================
# execute
#=======================================================================
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
res_vlay = wrkr.run([dtm_rlay], finv_vlay, fname='gels',
**rkwargs)
#=======================================================================
# #post
#=======================================================================
wrkr.dtm_check(res_vlay)
df = wrkr.write_res(res_vlay, write=self.write)
if not self.write: wrkr.out_fp = 'none' #placeholder
wrkr.upd_cf_dtm()
return df
def lisamp(self, pars_d, #fail poly sampler
logger=None,
dkey = 'fpol_d',
fpol_d = None,
):
if logger is None: logger=self.logger
log = logger.getChild('lisamp')
wrkr = self._get_wrkr(LikeSampler)
#=======================================================================
# load the data
#=======================================================================
if fpol_d is None:
fp = os.path.join(self.base_dir, pars_d['fpol_dir'])
fpol_d = self._retrieve(dkey,
f = lambda logger=None: wrkr.load_lpols2(fp, logger=logger))
#pull previously loaded
finv_vlay = self.data_d['finv_vlay']
#=======================================================================
# execute
#=======================================================================
rkwargs = self._get_kwargs(wrkr.__class__.__name__)
kwargs = {k:pars_d[k] for k in [] if k in pars_d}
res_df = wrkr.run(finv_vlay, fpol_d, **{**kwargs, **rkwargs})
#=======================================================================
# #post
#=======================================================================
wrkr.check()
if self.write:
wrkr.write_res(res_df)
else:
wrkr.out_fp = 'none'
wrkr.update_cf()
#=======================================================================
# plot
#=======================================================================
if self.plot:
fig = wrkr.plot_hist()
self.output_fig(fig)
fig = wrkr.plot_boxes()
self.output_fig(fig)
return res_df
def validate(self, pars_d, #validation
logger=None,
):
"""because we're not using the control file for testing...
no point in running the validator"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('validate')
wrkr = self._get_wrkr(Vali)
#=======================================================================
# precheck
#=======================================================================
assert 'validate' in pars_d
vtag = pars_d.pop('validate')
#=======================================================================
# setup
#=======================================================================
wrkr.config_cf()
#=======================================================================
# validate by vtag
#=======================================================================
for k, modObj in {
'risk1':Risk1,
'dmg2':Dmg2,
}.items():
if not k == vtag: continue
#do the check
errors = wrkr.cf_check(modObj)
if not len(errors)==0:
raise Error('\'%s\' got some errors \n %s'%(vtag, errors))
wrkr.cf_mark() #update the control file
log.debug('finished')
#===========================================================================
# TOOLS.MODEL------------
#===========================================================================
def risk1(self,
pars_d=None,
logger=None,
rkwargs = None, #flow control keys for this run
): #run risk1
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('risk1')
if pars_d is None: pars_d = self.pars_d
#=======================================================================
# setup
#=======================================================================
wrkr = self._get_wrkr(Risk1)
#get control keys for this tool
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
wrkr.setup_fromData(self.data_d) #setup w/ the pre-loaded data
#=======================================================================
# execute
#=======================================================================
res_ttl, res_df = wrkr.run(**rkwargs)
#=======================================================================
# plots
#=======================================================================
if self.plot:
ttl_df = wrkr.set_ttl(tlRaw_df=res_ttl)
for y1lab in ['AEP', 'impacts']:
fig = wrkr.plot_riskCurve(ttl_df, y1lab=y1lab)
self.output_fig(fig)
#=======================================================================
# output
#=======================================================================
if self.write:
wrkr.output_ttl()
wrkr.output_etype()
if not res_df is None: wrkr.output_passet()
#=======================================================================
# wrap
#=======================================================================
res_d = dict()
res_d['r_ttl'] = res_ttl
res_d['eventypes'] = wrkr.eventType_df
if not res_df is None:
res_d['r_passet'] = res_df
""""
wrkr.exlikes
self.data_d.keys()
data_d['finv']
self.cf_fp
self.res_d.keys()
self.com_hndls
"""
self.data_d = {**self.data_d, **res_d}
return res_d
def dmg2(self,
pars_d=None,
logger=None,
rkwargs = None, #flow control keys for this run
#extra outputs
bdmg_smry=False,
dmgs_expnd =False,
): #run dmg2
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('dmg2')
if pars_d is None: pars_d = self.pars_d
res_d = dict()
#=======================================================================
# setup
#=======================================================================
wrkr = self._get_wrkr(Dmg2)
#get control keys for this tool
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
wrkr.setup_fromData(self.data_d) #setup w/ the pre-loaded data
#=======================================================================
# execute
#=======================================================================
cres_df = wrkr.run(**rkwargs)
| |
device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
# boxes (offset by class), scores
boxes, scores = x[:, :4] + c, x[:, 4]
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = metrics_utils.box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float(
) / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
except Exception: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
print(x, i, x.shape, i.shape)
pass
output[xi] = x[i]
if (time.time() - t) > time_limit:
break # time limit exceeded
return output
def rotation_matrix(yaw, pitch=0, roll=0):
tx = roll
ty = yaw
tz = pitch
Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
Ry = np.array([[np.cos(ty), 0, np.sin(ty)], [0, 1, 0], [-np.sin(ty), 0, np.cos(ty)]])
Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])
# Note: only the yaw rotation (Ry) is returned; Rx and Rz above are computed
# but left unused by the downstream 3D-box code.
return Ry.reshape([3, 3])
# option to rotate and shift (for label info)
def create_corners(dimension, location=None, R=None):
dx = dimension[2] / 2 # L
dy = dimension[0] / 2 # H
dz = dimension[1] / 2 # W
x_corners = []
y_corners = []
z_corners = []
for i in [1, -1]:
for j in [1, -1]:
for k in [1, -1]:
x_corners.append(dx * i)
y_corners.append(dy * j)
z_corners.append(dz * k)
corners = [x_corners, y_corners, z_corners]
# rotate if R is passed in
if R is not None:
corners = np.dot(R, corners)
# shift if location is passed in
if location is not None:
for i, loc in enumerate(location):
corners[i, :] = corners[i, :] + loc
final_corners = []
for i in range(8):
final_corners.append([corners[0][i], corners[1][i], corners[2][i]])
return final_corners
def create_birdview_corners(dimension, location=None, R=None):
dx = dimension[2] / 2 # L
dy = dimension[0] / 2 # H
dz = dimension[1] / 2 # W
x_corners = []
y_corners = []
z_corners = []
for i in [1, -1]:
for k in [1, -1]:
x_corners.append(dx * i)
y_corners.append(dy)
z_corners.append(dz * k)
corners = [x_corners, y_corners, z_corners]
# rotate if R is passed in
if R is not None:
corners = np.dot(R, corners)
# shift if location is passed in
if location is not None:
for i, loc in enumerate(location):
corners[i, :] = corners[i, :] + loc
final_corners = []
for i in range(4):
final_corners.append([corners[0][i], corners[1][i], corners[2][i]])
return final_corners
# takes in a 3d point and projects it into 2d
def project_3d_pt(pt, proj_matrix):
point = np.array(pt)
if proj_matrix.shape == (3, 4):
point = np.append(point, 1)
point = np.dot(proj_matrix, point)
# point = np.dot(np.dot(np.dot(cam_to_img, R0_rect), Tr_velo_to_cam), point)
point = point[:2] / (point[2] + 0.0001)
point = point.astype(np.int16)
return point
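# --- Hedged usage sketch (added for illustration, not part of the original
# source). Shows how rotation_matrix(), create_corners() and project_3d_pt()
# are typically combined: build the 8 corners of a 3D box from a KITTI-style
# (h, w, l) dimension, rotate by yaw, shift to the object location, and
# project each corner with a made-up 3x4 camera projection matrix. All numeric
# values below are placeholders.
def _project_box_example():
    dimension = [1.5, 1.6, 3.9]            # (h, w, l) of a car-sized box
    location = [2.0, 1.5, 10.0]            # camera-frame x, y, z (placeholder)
    R = rotation_matrix(np.deg2rad(30))    # yaw-only rotation
    proj_matrix = np.array([[700., 0., 600., 0.],
                            [0., 700., 200., 0.],
                            [0., 0., 1., 0.]])  # placeholder intrinsics
    corners_3d = create_corners(dimension, location=location, R=R)
    return [project_3d_pt(pt, proj_matrix) for pt in corners_3d]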
def calc_regressed_bbox_3d(alpha, theta_ray, dimension, bboxes, proj_matrix):
# global orientation
orient = alpha + theta_ray
R = rotation_matrix(orient)
# format 2d corners
xmin = bboxes[0]
ymin = bboxes[1]
xmax = bboxes[2]
ymax = bboxes[3]
# left top right bottom
box_corners = [xmin, ymin, xmax, ymax]
# get the point constraints
constraints = []
left_constraints = []
right_constraints = []
top_constraints = []
bottom_constraints = []
# using a different coord system
dx = dimension[2] / 2 # L
dy = dimension[0] / 2 # H
dz = dimension[1] / 2 # W
# below is very much based on trial and error
# based on the relative angle, a different configuration occurs
# negative is back of car, positive is front
left_mult = 1
right_mult = -1
# about straight on but opposite way
if alpha < np.deg2rad(92) and alpha > np.deg2rad(88):
left_mult = 1
right_mult = 1
# about straight on and same way
elif alpha < np.deg2rad(-88) and alpha > np.deg2rad(-92):
left_mult = -1
right_mult = -1
# this works but doesn't make much sense
elif alpha < np.deg2rad(90) and alpha > -np.deg2rad(90):
left_mult = -1
right_mult = 1
# if the car is facing the opposite way, switch left and right
switch_mult = -1
if alpha > 0:
switch_mult = 1
# left and right could either be the front of the car or the back of the car
# careful to use left and right based on the image, not the actual car's left and right
for i in (-1, 1):
left_constraints.append([left_mult * dx, i * dy, -switch_mult * dz])
for i in (-1, 1):
right_constraints.append([right_mult * dx, i * dy, switch_mult * dz])
# top and bottom are easy, just the top and bottom of car
for i in (-1, 1):
for j in (-1, 1):
top_constraints.append([i * dx, -dy, j * dz])
for i in (-1, 1):
for j in (-1, 1):
bottom_constraints.append([i * dx, dy, j * dz])
# now, 64 combinations
for left in left_constraints:
for top in top_constraints:
for right in right_constraints:
for bottom in bottom_constraints:
constraints.append([left, top, right, bottom])
# filter out the ones with repeats
constraints = filter(lambda x: len(x) == len(set(tuple(i) for i in x)), constraints)
# create pre M (the term with I and the R*X)
pre_M = np.zeros([4, 4])
# 1's down diagonal
for i in range(0, 4):
pre_M[i][i] = 1
best_loc = None
best_error = [1e09]
best_X = None
# loop through each possible constraint, hold on to the best guess
# constraint will be 64 sets of 4 corners
count = 0
for constraint in constraints:
# each corner
Xa = constraint[0]
Xb = constraint[1]
Xc = constraint[2]
Xd = constraint[3]
X_array = [Xa, Xb, Xc, Xd]
# M: all 1's down diagonal, and upper 3x1 is Rotation_matrix * [x, y, z]
Ma = np.copy(pre_M)
Mb = np.copy(pre_M)
Mc = np.copy(pre_M)
Md = np.copy(pre_M)
M_array = [Ma, Mb, Mc, Md]
# create A, b
A = np.zeros([4, 3], dtype=float)  # the np.float alias was removed in NumPy 1.24
b = np.zeros([4, 1])
indices = [0, 1, 0, 1]
for row, index in enumerate(indices):
X = X_array[row]
M = M_array[row]
# create M for corner Xx
RX = np.dot(R, X)
M[:3, 3] = RX.reshape(3)
M = np.dot(proj_matrix, M)
A[row, :] = M[index, :3] - box_corners[row] * M[2, :3]
b[row] = box_corners[row] * M[2, 3] - M[index, 3]
# solve here with least squares, since over fit will get some error
loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
# found a better estimation
if error < best_error:
count += 1 # for debugging
best_loc = loc
best_error = error
best_X = X_array
# return best_loc, [left_constraints, right_constraints] # for debugging
best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
return best_loc, best_X
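# --- Hedged usage sketch (added for illustration, not part of the original
# source). Shows the expected call shape of calc_regressed_bbox_3d(): the
# regressed local orientation alpha plus the viewing-ray angle theta_ray, the
# (h, w, l) dimensions, the 2D detection box [xmin, ymin, xmax, ymax], and a
# 3x4 camera projection matrix. Every value below is a made-up placeholder.
def _solve_location_example():
    proj_matrix = np.array([[700., 0., 600., 0.],
                            [0., 700., 200., 0.],
                            [0., 0., 1., 0.]])  # placeholder intrinsics
    alpha = np.deg2rad(45)
    theta_ray = np.deg2rad(5)
    dimension = [1.5, 1.6, 3.9]              # (h, w, l)
    bbox_2d = [350, 180, 520, 300]           # xmin, ymin, xmax, ymax in pixels
    location, used_corners = calc_regressed_bbox_3d(
        alpha, theta_ray, dimension, bbox_2d, proj_matrix)
    return location                          # best least-squares [x, y, z]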
def batched_3d_nms(locations, dimensions, scores, rotys, batch_ids, iou_threshold=0.25):
"""
Select the best objects by the position constraints of the 3D objects
(pairwise ground-plane distances between detections in the same batch element)
"""
loc_earths = locations[:, 0::2]
keeps = []
for id in torch.unique(batch_ids).cpu().tolist():
index = (batch_ids == id).nonzero(as_tuple=False).view(-1)
keep = torch.ones_like(index)
mask = 1 - torch.eye(len(index.cpu().tolist())).to(loc_earths.device)
mask = mask.bool()
loc = loc_earths[index]
dim = dimensions[index] # h, w, l
score = scores[index]
score = score.unsqueeze(-1) - score.unsqueeze(0)
roty = rotys[index]
det_loc = loc.view(-1, 1, 2) - loc.unsqueeze(dim=0)
det_loc = det_loc.pow_(2.)
det_loc = torch.sqrt_(det_loc[:, :, 0] + det_loc[:, :, 1])
# det_roty = roty.view(-1, 1, 1) - roty.unsqueeze(dim=0)
# r_idx = det_roty > np.pi
# det_roty[r_idx] = det_roty[r_idx] - np.pi
# r_idx = det_roty < -np.pi
# det_roty[r_idx] = det_roty[r_idx] + np.pi
dim1 = dim.view(-1, 1, 3)
dim2 = dim.unsqueeze(dim=0)
dim_cond1 = (dim1[:, :, 1] + dim2[:, :, 1])/2.
dim_cond2 = (dim1[:, :, 1] + dim2[:, :, 2])/2.
dim_cond3 = (dim1[:, :, 2] + dim2[:, :, 1])/2.
dim_cond4 = (dim1[:, :, 2] + dim2[:, :, 2])/2.
dim_cond = (dim_cond1 + dim_cond2 + dim_cond3 | |
an issue for the forked adjutant
# horizon
data = {'email': "<EMAIL>", 'username': 'test_user'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json()['notes'],
['If user with email exists, reset token will be issued.'])
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject,
'Password Reset for OpenStack')
self.assertEqual(mail.outbox[0].to[0], '<EMAIL>')
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(user.password, '<PASSWORD>')
@override_settings(USERNAME_IS_EMAIL=False)
def test_new_project_username_not_email(self):
setup_identity_cache()
url = "/v1/actions/CreateProject"
data = {'project_name': "test_project", 'email': "<EMAIL>",
'username': 'test'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = {'email': "<EMAIL>", 'username': "new",
'project_name': 'new_project'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['task created']})
new_task = Task.objects.all()[0]
url = "/v1/tasks/" + new_task.uuid
headers = {
'project_name': "test_project",
'project_id': "test_project_id",
'roles': "admin",
'username': "test",
'user_id': "test_user_id",
'email': "<EMAIL>",
'authenticated': True
}
response = self.client.post(url, {'approved': True}, format='json',
headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {'confirm': True, 'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@modify_dict_settings(
TASK_SETTINGS=[
{'key_list': ['invite_user', 'additional_actions'],
'operation': 'append',
'value': ['SendAdditionalEmailAction']},
{'key_list': ['invite_user', 'action_settings',
'SendAdditionalEmailAction', 'initial'],
'operation': 'update',
'value': {
'subject': 'email_update_additional',
'template': 'email_update_started.txt',
'email_roles': ['project_admin'],
'email_current_user': False,
}
}
])
def test_additional_emails_roles(self):
"""
Tests the sending of additional emails to a set of roles in a project
"""
# NOTE(amelia): sending this email here is probably not the intended
# case. It would be more useful in utils such as a quota update or a
# child project being created that all the project admins should be
# notified of
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(
name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
user2 = fake_clients.FakeUser(
name="<EMAIL>", password="<PASSWORD>",
email="<EMAIL>")
user3 = fake_clients.FakeUser(
name="<EMAIL>", password="<PASSWORD>",
email="<EMAIL>")
assignments = [
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="_member_",
user={'id': user.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="project_admin",
user={'id': user.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="_member_",
user={'id': user2.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="project_admin",
user={'id': user2.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="_member_",
user={'id': user3.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="project_mod",
user={'id': user3.id}
),
]
setup_identity_cache(
projects=[project], users=[user, user2, user3],
role_assignments=assignments)
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': project.id,
'roles': "project_admin,_member_,project_mod",
'username': "<EMAIL>",
'user_id': "test_user_id",
'authenticated': True
}
data = {'email': "<EMAIL>",
'roles': ['_member_'], 'project_id': project.id}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['created token']})
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(len(mail.outbox[0].to), 2)
self.assertEqual(set(mail.outbox[0].to),
set([user.email, user2.email]))
self.assertEqual(mail.outbox[0].subject, 'email_update_additional')
# Test that the token email gets sent to the other addresses
self.assertEqual(mail.outbox[1].to[0], '<EMAIL>')
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {'confirm': True, 'password': '1234'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@modify_dict_settings(
TASK_SETTINGS=[
{'key_list': ['invite_user', 'additional_actions'],
'operation': 'append',
'value': ['SendAdditionalEmailAction']},
{'key_list': ['invite_user', 'action_settings',
'SendAdditionalEmailAction', 'initial'],
'operation': 'update',
'value': {
'subject': 'email_update_additional',
'template': 'email_update_started.txt',
'email_roles': ['project_admin'],
'email_current_user': False,
}
}
])
def test_additional_emails_role_no_email(self):
"""
Tests that when the configured email roles resolve to no recipients,
the invite action doesn't fall over
"""
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(
name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
assignment = fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="_member_",
user={'id': user.id}
)
setup_identity_cache(
projects=[project], users=[user], role_assignments=[assignment])
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': project.id,
'roles': "project_admin,_member_,project_mod",
'username': "<EMAIL>",
'user_id': "test_user_id",
'authenticated': True
}
data = {'email': "<EMAIL>",
'roles': ['_member_']}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'notes': ['created token']})
self.assertEqual(len(mail.outbox), 1)
# Test that the token email gets sent to the other addresses
self.assertEqual(mail.outbox[0].to[0], '<EMAIL>')
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {'confirm': True, 'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@modify_dict_settings(
TASK_SETTINGS=[
{'key_list': ['invite_user', 'additional_actions'],
'operation': 'override',
'value': ['SendAdditionalEmailAction']},
{'key_list': ['invite_user', 'action_settings',
'SendAdditionalEmailAction', 'initial'],
'operation': 'update',
'value':{
'subject': 'invite_user_additional',
'template': 'email_update_started.txt',
'email_additional_addresses': ['<EMAIL>'],
'email_current_user': False,
}
}
])
def test_email_additional_addresses(self):
"""
Tests the sending of additional emails to an admin email address set
in the conf
"""
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(
name="<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
assignments = [
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="_member_",
user={'id': user.id}
),
fake_clients.FakeRoleAssignment(
scope={'project': {'id': project.id}},
role_name="project_admin",
user={'id': user.id}
),
]
setup_identity_cache(
projects=[project], users=[user], role_assignments=assignments)
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': project.id,
'roles': "project_admin,_member_,project_mod",
'username': "<EMAIL>",
'user_id': "test_user_id",
'authenticated': True
}
data = {'email': "<EMAIL>", 'roles': ['_member_']}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['created token']})
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(set(mail.outbox[0].to),
set(['<EMAIL>']))
self.assertEqual(mail.outbox[0].subject, 'invite_user_additional')
# Test that the token email gets sent to the other addresses
self.assertEqual(mail.outbox[1].to[0], '<EMAIL>')
new_token = Token.objects.all()[0]
url = "/v1/tokens/" + new_token.token
data = {'password': '<PASSWORD>'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
@modify_dict_settings(
TASK_SETTINGS=[
{'key_list': ['invite_user', 'additional_actions'],
'operation': 'override',
'value': ['SendAdditionalEmailAction']},
{'key_list': ['invite_user', 'action_settings',
'SendAdditionalEmailAction', 'initial'],
'operation': 'update',
'value':{
'subject': 'invite_user_additional',
'template': 'email_update_started.txt',
'email_additional_addresses': ['<EMAIL>'],
'email_current_user': False,
}
}
])
def test_email_additional_action_invalid(self):
"""
The additional email actions should not send an email if the
action is invalid.
"""
setup_identity_cache()
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': "test_project_id",
'roles': "project_admin,_member_,project_mod",
'username': "<EMAIL>",
'user_id': "test_user_id",
'authenticated': True
}
data = {'email': "<EMAIL>", 'roles': ["_member_"],
'project_id': 'test_project_id'}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), {'errors': ['actions invalid']})
self.assertEqual(len(mail.outbox), 0)
@mock.patch('adjutant.common.tests.fake_clients.FakeManager.find_project')
def test_all_actions_setup(self, mocked_find):
"""
Ensures that all actions have been setup before pre_approve is
run on any actions, even if we have a pre_approve failure.
Deals with: bug/1745053
"""
setup_identity_cache()
mocked_find.side_effect = KeyError()
url = "/v1/actions/CreateProject"
data = {'project_name': "test_project", 'email': "<EMAIL>"}
response = self.client.post(url, data, format='json')
self.assertEqual(
response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
new_task = Task.objects.all()[0]
class_conf = settings.TASK_SETTINGS.get(
CreateProject.task_type, settings.DEFAULT_TASK_SETTINGS)
expected_action_names = (
class_conf.get('default_actions', [])
or CreateProject.default_actions[:])
expected_action_names += class_conf.get('additional_actions', [])
actions = new_task.actions
observed_action_names = [a.action_name for a in actions]
self.assertEqual(observed_action_names, expected_action_names)
@mock.patch('adjutant.common.tests.fake_clients.FakeManager.find_project')
def test_task_error_handler(self, mocked_find):
"""
Ensure the _handle_task_error function works as expected.
"""
setup_identity_cache()
mocked_find.side_effect = KeyError("Forced key error.")
url = "/v1/actions/CreateProject"
data = {'project_name': "test_project", 'email': "<EMAIL>"}
response = self.client.post(url, data, format='json')
self.assertEqual(
response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
self.assertEqual(
response.json(),
{'errors': ["Error: Something went wrong on the server. "
"It will be looked into shortly."]})
new_task = Task.objects.all()[0]
new_notification = Notification.objects.all()[0]
self.assertTrue(new_notification.error)
self.assertEqual(
new_notification.notes,
{'errors': [
"Error: KeyError('Forced key error.') while setting up "
"task. See task itself for details."]})
self.assertEqual(new_notification.task, new_task)
@override_settings(KEYSTONE={'can_edit_users': False})
def test_user_invite_cant_edit_users(self):
"""
When can_edit_users is false, and a new user is invited,
the task should be marked as invalid if the user doesn't
already exist.
"""
project = fake_clients.FakeProject(name="test_project")
setup_identity_cache(projects=[project])
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': project.id,
'roles': "project_admin,_member_,project_mod",
'username': "user",
'user_id': "test_user_id",
'authenticated': True
}
data = {'username': 'new_user', 'email': "<EMAIL>",
'roles': ["_member_"], 'project_id': project.id}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.json(), {'errors': ['actions invalid']})
@override_settings(KEYSTONE={'can_edit_users': False})
def test_user_invite_cant_edit_users_existing_user(self):
"""
When can_edit_users is false, and a new user is invited,
the task should be marked as valid if the user exists.
"""
project = fake_clients.FakeProject(name="test_project")
user = fake_clients.FakeUser(name="<EMAIL>")
setup_identity_cache(projects=[project], users=[user])
url = "/v1/actions/InviteUser"
headers = {
'project_name': "test_project",
'project_id': project.id,
'roles': "project_admin,_member_,project_mod",
'username': "user",
'user_id': "test_user_id",
'authenticated': True
}
data = {'username': 'new_user', 'email': "<EMAIL>",
'roles': ["_member_"], 'project_id': project.id}
response = self.client.post(url, data, format='json', headers=headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['created token']})
@override_settings(KEYSTONE={'can_edit_users': False})
def test_project_create_cant_edit_users(self):
"""
When can_edit_users is false, and a new signup comes in,
the task should be marked as invalid if it needs to
create a new user.
Will return OK (as task doesn't auto_approve), but task will
actually be invalid.
"""
setup_identity_cache()
url = "/v1/actions/CreateProject"
data = {'project_name': "test_project", 'email': "<EMAIL>"}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['task created']})
task = Task.objects.all()[0]
action_models = task.actions
actions = [act.get_action() for act in action_models]
self.assertFalse(all([act.valid for act in actions]))
@override_settings(KEYSTONE={'can_edit_users': False})
def test_project_create_cant_edit_users_existing_user(self):
"""
When can_edit_users is false, and a new signup comes in,
the task should be marked as valid if the user already
exists.
Will return OK (as task doesn't auto_approve), but task will
actually be valid.
"""
user = fake_clients.FakeUser(name="<EMAIL>")
setup_identity_cache(users=[user])
url = "/v1/actions/CreateProject"
data = {'project_name': "test_project", 'email': "<EMAIL>"}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {'notes': ['task created']})
task = Task.objects.all()[0]
action_models = task.actions
actions = [act.get_action() for act in action_models]
self.assertTrue(all([act.valid for act | |
# File: pose_classification_kit/src/dataset_controller.py
import os
import numpy as np
from datetime import date
from pathlib import Path
import json
from .imports.qt import QtWidgets, QtCore, QtGui, pyqtSignal, pyqtSlot
from .imports.openpose import OPENPOSE_LOADED
if OPENPOSE_LOADED:
from .imports.openpose import op
from ..config import DATASETS_PATH
from ..datasets.body_models import BODY25
class ScrollLabel(QtWidgets.QScrollArea):
def __init__(self):
super().__init__()
self.setWidgetResizable(True)
content = QtWidgets.QWidget(self)
self.setWidget(content)
lay = QtWidgets.QVBoxLayout(content)
self.label = QtWidgets.QLabel(content)
self.label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
lay.addWidget(self.label)
def setText(self, text):
self.label.setText(text)
class DatasetControllerWidget(QtWidgets.QWidget):
realTimeHandDraw_Signal = pyqtSignal(bool)
stylesheet = """
#Dataset_Controller {
background-color: white;
border-radius: 3px;
font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;
}
#Dataset_Controller:disabled {
background-color: #e8e8e8;
}
QPushButton {
border: 1px solid #cbcbcb;
border-radius: 2px;
font-size: 16px;
background: white;
}
QPushButton:hover {
border-color: rgb(139, 173, 228);
}
QPushButton:pressed {
color: #cbcbcb;
}
QPushButton:disabled {
background: #e8e8e8;
}
QToolButton {
border: 1px solid #cbcbcb;
border-radius: 2px;
font-size: 16px;
background: white;
}
QToolButton:hover {
border-color: rgb(139, 173, 228);
}
QToolButton:disabled {
background: #e8e8e8;
}
#Record_Button {
border: 1px solid #cbcbcb;
border-radius: 2px;
font-size: 16px;
background: #ffb3b3;
}
#Record_Button:checked {
background: #b3ffb3;
}
#Record_Button:disabled {
background: #e8e8e8;
}
#Record_Button:hover {
border-color: rgb(139, 173, 228);
}
QComboBox {
border: 1px solid #cbcbcb;
border-radius: 2px;
font-size: 16px;
background: white;
}
QComboBox:disabled {
background: #e8e8e8;
}
QLabel {
font-size: 16px;
font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;
}
QLineEdit {
font-size: 16px;
font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;
}
QCheckBox {
font-size: 16px;
font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;
}
QCheckBox:disabled {
background: #e8e8e8;
}
"""
def __init__(self, parent):
super().__init__(parent=parent)
self.parent = parent
self.currentFilePath = ""
self.currentFileInfos = ""
self.poseName = ""
self.focusID = 1
self.sizeData = 0
self.tresholdValue = 0.0
self.datasetList = []
self.accuracyList = []
self.currentDataIndex = 0
self.datasetSaved = True
## Widget style
self.setObjectName("Dataset_Controller")
self.setEnabled(False)
self.setAttribute(QtCore.Qt.WA_StyledBackground, True)
self.setStyleSheet(self.stylesheet)
effect = QtWidgets.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(10)
effect.setOffset(0, 0)
effect.setColor(QtCore.Qt.gray)
self.setGraphicsEffect(effect)
## Widgets initialisation
self.layout = QtWidgets.QGridLayout(self)
self.setLayout(self.layout)
self.fileLabel = ScrollLabel()
self.fileLabel.setText("No file selected")
self.fileLabel.setMinimumHeight(90)
self.fileLabel.setMaximumHeight(90)
self.fileLabel.setMinimumWidth(180)
self.layout.addWidget(self.fileLabel, 0, 0, 1, 9, QtCore.Qt.AlignTop)
self.visuCheckbox = QtWidgets.QCheckBox("Visualize dataset")
self.layout.addWidget(self.visuCheckbox, 1, 0)
self.visuCheckbox.toggled.connect(self.visuCheckboxToggled)
self.visuCheckbox.setEnabled(False)
self.minusButton = QtWidgets.QToolButton(
cursor=QtCore.Qt.PointingHandCursor, toolTip="Previous sample in dataset"
)
self.minusButton.setArrowType(QtCore.Qt.LeftArrow)
self.layout.addWidget(self.minusButton, 1, 1, 1, 1)
self.minusButton.setEnabled(False)
self.minusButton.clicked.connect(
lambda: self.setCurrentDataIndex(self.currentDataIndex - 1)
)
QtWidgets.QShortcut(
QtGui.QKeySequence("left"),
self,
lambda: self.setCurrentDataIndex(self.currentDataIndex - 1),
)
self.currentIndexLine = QtWidgets.QLineEdit(str(self.currentDataIndex))
self.currentIndexLine.setValidator(QtGui.QDoubleValidator())
self.currentIndexLine.setMaximumWidth(40)
self.currentIndexLine.setEnabled(False)
self.layout.addWidget(self.currentIndexLine, 1, 2, 1, 1)
self.currentIndexLine.textChanged.connect(self.userIndexInput)
self.maxIndexLabel = QtWidgets.QLabel(r"/0")
self.maxIndexLabel.setEnabled(False)
self.layout.addWidget(self.maxIndexLabel, 1, 3, 1, 1)
self.plusButton = QtWidgets.QToolButton(
cursor=QtCore.Qt.PointingHandCursor, toolTip="Next sample in dataset"
)
self.plusButton.setArrowType(QtCore.Qt.RightArrow)
self.layout.addWidget(self.plusButton, 1, 4, 1, 1)
self.plusButton.setEnabled(False)
self.plusButton.clicked.connect(
lambda: self.setCurrentDataIndex(self.currentDataIndex + 1)
)
QtWidgets.QShortcut(
QtGui.QKeySequence("right"),
self,
lambda: self.setCurrentDataIndex(self.currentDataIndex + 1),
)
self.deleteButton = QtWidgets.QPushButton(
"Delete sample",
cursor=QtCore.Qt.PointingHandCursor,
toolTip="Remove sample from the dataset",
)
self.deleteButton.setEnabled(False)
self.layout.addWidget(self.deleteButton, 1, 5, 1, 1)
self.deleteButton.clicked.connect(
lambda: self.removeEntryDataset(self.currentDataIndex)
)
self.recordButton = QtWidgets.QPushButton(
"Record samples",
cursor=QtCore.Qt.PointingHandCursor,
toolTip="Start and stop sample recording",
)
self.recordButton.setObjectName("Record_Button")
self.recordButton.setCheckable(True)
self.recordButton.setChecked(False)
self.recordButton.setEnabled(False)
self.recordButton.clicked.connect(self.startRecording)
self.layout.addWidget(self.recordButton, 1, 7, 1, 1)
horSpacer = QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.layout.addItem(horSpacer, 1, 6)
verSpacer = QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.layout.addItem(verSpacer, 2, 0)
def createDataset(self):
dlg = CreateDatasetDialog(self)
if dlg.exec_():
self.clearDataset()
self.updateFileInfo(
dlg.getFilePath(),
dlg.getFileInfos(),
0,
dlg.getPoseName(),
dlg.getFocusID(),
dlg.getTresholdValue(),
)
self.setCurrentDataIndex(0)
def addEntryDataset(self, keypoints, accuracy: float):
"""Add keypoints and accuracy of a hand pose to the local dataset.
Args:
keypoints (np.ndarray((3,21),float)): Coordinates x, y and the accuracy score for each of the 21 keypoints.
accuracy (float): Global accuracy of detection of the pose.
"""
self.datasetList.append(keypoints)
self.accuracyList.append(accuracy)
self.maxIndexLabel.setText("/" + str(len(self.datasetList)))
self.datasetSaved = False
def removeEntryDataset(self, index: int):
"""Remove keypoints and accuracy referenced by its index from the local dataset.
Args:
index (int): Index in list of the entry removed.
"""
self.datasetList = self.datasetList[:index] + self.datasetList[index + 1 :]
self.accuracyList = self.accuracyList[:index] + self.accuracyList[index + 1 :]
maxIndex = len(self.accuracyList)
self.maxIndexLabel.setText("/" + str(maxIndex))
index = min(index, maxIndex - 1)
self.setCurrentDataIndex(index)
self.datasetSaved = False
def clearDataset(self):
self.datasetList = []
self.accuracyList = []
self.datasetSaved = True
def userIndexInput(self, indexStr: str):
if indexStr.isdigit():
self.setCurrentDataIndex(int(indexStr) - 1)
elif len(indexStr) == 0:
pass
else:
self.currentIndexLine.setText(str(self.currentDataIndex + 1))
def visuCheckboxToggled(self, state: bool):
self.realTimeHandDraw_Signal.emit(not state)
self.plusButton.setEnabled(state)
self.minusButton.setEnabled(state)
self.currentIndexLine.setEnabled(state)
self.maxIndexLabel.setEnabled(state)
self.deleteButton.setEnabled(state)
self.setCurrentDataIndex(0)
def loadFileJSON(self):
options = QtWidgets.QFileDialog.Options()
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
self,
"Open dataset",
str(DATASETS_PATH),
"Text Files (*.json)",
options=options,
)
if fileName:
self.clearDataset()
with open(fileName) as f:
data = json.load(f)
for entry in data["data"]:
self.addEntryDataset(
np.array([entry["x"], entry["y"], entry["a"]]),
float(entry["detection_accuracy"]),
)
self.updateFileInfo(
filePath=fileName,
fileInfo={"info": data["info"], "data": []},
sizeData=data["info"]["nbr_entries"],
poseName=data["info"]["label"],
focusID=data["info"]["focus_id"],
tresholdValue=data["info"]["threshold_value"],
)
self.recordButton.setEnabled(True)
self.setEnabled(True)
self.visuCheckbox.setChecked(True)
self.datasetSaved = True
return True
return False
def updateFileInfo(
self,
filePath: str = None,
fileInfo: str = None,
sizeData: int = None,
poseName: str = None,
focusID: int = None,
tresholdValue: int = None,
):
self.visuCheckbox.setEnabled(True)
if filePath:
self.currentFilePath = filePath
if fileInfo:
self.currentFileInfos = fileInfo
if sizeData:
self.sizeData = sizeData
self.maxIndexLabel.setText("/" + str(self.sizeData))
if poseName:
self.poseName = poseName
if focusID != None:
self.focusID = focusID
if tresholdValue != None:
self.tresholdValue = tresholdValue
self.fileLabel.setText(
str(self.currentFilePath)
+ "\n -> {} entries for {} ({} hand) with a minimum accuracy of {}.".format(
str(self.sizeData),
self.poseName,
["left_hand", "right_hand", "body"][self.focusID],
str(self.tresholdValue),
)
)
# self.maxIndexLabel.setText("/" + str(self.sizeData))
self.recordButton.setEnabled(True)
self.setEnabled(True)
def setCurrentDataIndex(self, index: int):
if len(self.datasetList) == 0:
self.currentDataIndex = 0
self.parent.handClassifier.leftHandAnalysis.drawHand(None, 0.0)
self.parent.handClassifier.rightHandAnalysis.drawHand(None, 0.0)
else:
if index >= len(self.datasetList):
index = 0
if index < 0:
index = len(self.datasetList) - 1
self.currentDataIndex = index
if self.focusID == 0:
self.parent.handClassifier.leftHandAnalysis.drawHand(
np.array(self.datasetList[self.currentDataIndex]),
self.accuracyList[self.currentDataIndex],
)
elif self.focusID == 1:
self.parent.handClassifier.rightHandAnalysis.drawHand(
np.array(self.datasetList[self.currentDataIndex]),
self.accuracyList[self.currentDataIndex],
)
elif self.focusID == 2:
self.parent.bodyClassifier.bodyAnalysis.drawBody(
np.array(self.datasetList[self.currentDataIndex]),
self.accuracyList[self.currentDataIndex],
)
self.currentIndexLine.setText(str(self.currentDataIndex + 1))
def writeDataToJSON(self):
""" Save the current dataset to the JSON file (URL: self.currentFilePath)."""
if os.path.isfile(self.currentFilePath):
fileData = self.currentFileInfos
fileData["info"]["nbr_entries"] = len(self.datasetList)
fileData["data"] = []
self.updateFileInfo(sizeData=len(self.datasetList))
print(len(self.datasetList))
for accuracy, data in zip(self.accuracyList, self.datasetList):
fileData["data"].append(
{
"detection_accuracy": float(accuracy),
"x": data[0].tolist(),
"y": data[1].tolist(),
"a": data[2].tolist(),
}
)
with open(self.currentFilePath, "w") as outfile:
json.dump(fileData, outfile, indent=4)
self.datasetSaved = True
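# Illustrative shape of a single entry appended to fileData["data"] above
# (hypothetical numbers, not from the source; the keypoint lists are truncated):
#   {"detection_accuracy": 0.83,
#    "x": [0.12, 0.15, ...], "y": [0.40, 0.42, ...], "a": [0.91, 0.88, ...]}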
def startRecording(self, state: bool):
self.parent.isRecording = state
def getTresholdValue(self) -> float:
return self.tresholdValue
def getFocusID(self) -> int:
return self.focusID
def getPoseName(self) -> str:
return self.poseName
def isSaved(self) -> bool:
return self.datasetSaved
class CreateDatasetDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(CreateDatasetDialog, self).__init__(parent=parent)
self.setWindowTitle("Create new dataset")
self.setWindowFlag(QtCore.Qt.WindowContextHelpButtonHint, False)
self.currentFolder = DATASETS_PATH
self.currentFilePath = None
self.currentPoseName = "Default"
self.currentTresholdValue = 0.0
## Widgets initialisation
self.folderLabel = ScrollLabel()
self.folderLabel.setText(str(self.currentFolder))
self.folderLabel.setMaximumHeight(35)
self.folderLabel.setMinimumWidth(200)
self.folderButton = QtWidgets.QPushButton("Change root folder")
self.folderButton.clicked.connect(self.changeSavingFolder)
self.handSelection = FocusSelectionWidget(self)
self.poseNameLine = QtWidgets.QLineEdit(self.currentPoseName)
self.poseNameLine.textChanged.connect(self.changePoseName)
self.tresholdValueLine = QtWidgets.QLineEdit(str(self.currentTresholdValue))
onlyDouble = QtGui.QDoubleValidator()
self.tresholdValueLine.setValidator(onlyDouble)
self.tresholdValueLine.textChanged.connect(self.changeTresholdValue)
self.createButton = QtWidgets.QPushButton("Create dataset")
self.createButton.clicked.connect(self.createDataset)
## Structure
self.layout = QtWidgets.QGridLayout(self)
self.setLayout(self.layout)
self.layout.addWidget(self.folderLabel, 0, 0, 1, 5, QtCore.Qt.AlignTop)
self.layout.addWidget(self.folderButton, 0, 5, 1, 1, QtCore.Qt.AlignTop)
self.layout.addWidget(self.handSelection, 1, 0, 1, 1)
self.layout.addWidget(self.poseNameLine, 1, 2, 1, 1)
self.layout.addWidget(QtWidgets.QLabel("Label:"), 1, 1, 1, 1)
self.layout.addWidget(QtWidgets.QLabel("Accuracy threshold:"), 1, 3, 1, 1)
self.layout.addWidget(self.tresholdValueLine, 1, 4, 1, 1)
self.layout.addWidget(self.createButton, 1, 5, 1, 1)
self.layout.setRowStretch(0, 0)
self.layout.setRowStretch(1, 0)
self.layout.setRowStretch(2, 1)
def createDataset(self):
self.isRecording = True
path = self.getSavingFolder()
focusID = self.handSelection.getCurrentFocusID()
fileName = (
self.getPoseName()
+ "_"
+ ["left_hand", "right_hand", "body"][focusID]
+ ".json"
)
path /= fileName
if path.is_file():
self.isRecording = False
self.createButton.setEnabled(False)
self.createButton.setText("Dataset allready created")
else:
self.createButton.setEnabled(True)
self.createButton.setText("Create dataset")
with open(path, "w+") as outfile:
json.dump(self.getFileInfos(), outfile, indent=4, ensure_ascii=False)
self.accept()
self.currentFilePath = path
def getFileHeadlines(self):
folder = self.getPoseName()
tresholdValue = self.getTresholdValue()
handID = self.handSelection.getCurrentFocusID()
output = ""
output += folder + "," + str(handID) + "," + str(tresholdValue) + "\n"
output += "## Data generated the " + str(date.today()) + " labelled " + folder
output += " (" + ("right hand" if handID == 1 else "left hand")
output += ") with a global accuracy higher than " + str(tresholdValue)
output += ", based on OpenPose estimation.\n"
output += "## Data format: Coordinates x, y and accuracy of estimation a\n\n"
return output
def getFileInfos(self):
info = {
"info": {
"label": self.getPoseName(),
"focus": ["left_hand", "right_hand", "body"][
self.handSelection.getCurrentFocusID()
],
"nbr_entries": 0,
"threshold_value": self.getTresholdValue(),
"focus_id": self.handSelection.getCurrentFocusID(),
},
"data": [],
}
if self.handSelection.getCurrentFocusID() == 2 and OPENPOSE_LOADED:
info["info"]["BODY25_Mapping"] = BODY25.mapping
info["info"]["BODY25_Pairs"] = BODY25.pairs
return info
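# Illustrative example of the JSON header written by createDataset() for a
# right-hand dataset (hypothetical label and threshold; actual values depend
# on user input):
#   {"info": {"label": "Chef", "focus": "right_hand", "nbr_entries": 0,
#             "threshold_value": 0.5, "focus_id": 1},
#    "data": []}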
@pyqtSlot()
def changeSavingFolder(self):
path_str = str(
QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory")
)
if len(path_str) > 0:
self.folderLabel.setText(path_str)
self.currentFolder = Path(path_str)
@pyqtSlot(str)
def changePoseName(self, name: str):
if not self.createButton.isEnabled():
self.createButton.setEnabled(True)
self.createButton.setText("Create dataset")
self.currentPoseName = name
@pyqtSlot(str)
def changeTresholdValue(self, value: str):
try:
self.currentTresholdValue = float(value.replace(",", "."))
except ValueError:
self.currentTresholdValue = 0.0
def getSavingFolder(self):
return self.currentFolder
def getPoseName(self) -> str:
return self.currentPoseName
def | |
type so the value written to the database is only
# down to the second.
value = value.replace(microsecond=0)
self.cursor.execute("create table t1(t time)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select t from t1").fetchone()[0]
self.assertEqual(type(result), time)
self.assertEqual(value, result)
def test_datetime2(self):
value = datetime(2007, 1, 15, 3, 4, 5)
self.cursor.execute("create table t1(dt datetime2)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select dt from t1").fetchone()[0]
self.assertEqual(type(result), datetime)
self.assertEqual(value, result)
#
# ints and floats
#
def test_int(self):
value = 1234
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(result, value)
def test_negative_int(self):
value = -1
self.cursor.execute("create table t1(n int)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(result, value)
def test_bigint(self):
input = 3000000000
self.cursor.execute("create table t1(d bigint)")
self.cursor.execute("insert into t1 values (?)", input)
result = self.cursor.execute("select d from t1").fetchone()[0]
self.assertEqual(result, input)
def test_overflow_int(self):
# python allows integers of any size, bigger than an 8 byte int can contain
input = 9999999999999999999999999999999999999
self.cursor.execute("create table t1(d bigint)")
self.cnxn.commit()
self.assertRaises(OverflowError, self.cursor.execute, "insert into t1 values (?)", input)
result = self.cursor.execute("select * from t1").fetchall()
self.assertEqual(result, [])
def test_float(self):
value = 1234.567
self.cursor.execute("create table t1(n float)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(result, value)
def test_denorm_float(self):
value = 0.00012345
self.cursor.execute("create table t1(n float)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(result, value)
def test_negative_float(self):
value = -200
self.cursor.execute("create table t1(n float)")
self.cursor.execute("insert into t1 values (?)", value)
result = self.cursor.execute("select n from t1").fetchone()[0]
self.assertEqual(value, result)
def test_non_numeric_float(self):
self.cursor.execute("create table t1(d float)")
self.cnxn.commit()
for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
self.assertRaises(pyodbc.ProgrammingError, self.cursor.execute, "insert into t1 values (?)", input)
result = self.cursor.execute("select * from t1").fetchall()
self.assertEqual(result, [])
#
# stored procedures
#
# def test_callproc(self):
# "callproc with a simple input-only stored procedure"
# pass
def test_sp_results(self):
self.cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
""")
rows = self.cursor.execute("exec proc1").fetchall()
self.assertEqual(type(rows), list)
self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects
self.assertEqual(type(rows[0].refdate), datetime)
def test_sp_results_from_temp(self):
# Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable.
# If you don't do this, you'd need to call nextset() once to skip it.
self.cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
""")
self.cursor.execute("exec proc1")
self.assertTrue(self.cursor.description is not None)
self.assertTrue(len(self.cursor.description) == 4)
rows = self.cursor.fetchall()
self.assertEqual(type(rows), list)
self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects
self.assertEqual(type(rows[0].refdate), datetime)
def test_sp_results_from_vartbl(self):
self.cursor.execute(
"""
Create procedure proc1
AS
set nocount on
declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
insert into @tmptbl
select top 10 name, id, xtype, refdate
from sysobjects
select * from @tmptbl
""")
self.cursor.execute("exec proc1")
rows = self.cursor.fetchall()
self.assertEqual(type(rows), list)
self.assertEqual(len(rows), 10) # there has to be at least 10 items in sysobjects
self.assertEqual(type(rows[0].refdate), datetime)
def test_sp_with_dates(self):
# Reported in the forums that passing two datetimes to a stored procedure doesn't work.
self.cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
self.cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
""")
self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
rows = self.cursor.fetchall()
self.assertTrue(rows is not None)
self.assertTrue(rows[0][0] == 0) # 0 years apart
def test_sp_with_none(self):
# Reported in the forums that passing None caused an error.
self.cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
self.cursor.execute(
"""
create procedure test_sp(@x varchar(20))
AS
declare @y varchar(20)
set @y = @x
select @y
""")
self.cursor.execute("exec test_sp ?", None)
rows = self.cursor.fetchall()
self.assertTrue(rows is not None)
self.assertTrue(rows[0][0] is None) # the None we passed in is echoed back
#
# rowcount
#
def test_rowcount_delete(self):
self.assertEqual(self.cursor.rowcount, -1)
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.cursor.execute("delete from t1")
self.assertEqual(self.cursor.rowcount, count)
def test_rowcount_nodata(self):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over
the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a
zero return value.
"""
self.cursor.execute("create table t1(i int)")
# This is a different code path internally.
self.cursor.execute("delete from t1")
self.assertEqual(self.cursor.rowcount, 0)
def test_rowcount_select(self):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a
select statement, so we'll test for that behavior. This is valid behavior according to the DB API
specification, but people don't seem to like it.
"""
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.cursor.execute("select * from t1")
self.assertEqual(self.cursor.rowcount, -1)
rows = self.cursor.fetchall()
self.assertEqual(len(rows), count)
self.assertEqual(self.cursor.rowcount, -1)
def test_rowcount_reset(self):
"Ensure rowcount is reset after DDL"
ddl_rowcount = 0 if self.driver_type_is('freetds') else -1
self.cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
self.cursor.execute("insert into t1 values (?)", i)
self.assertEqual(self.cursor.rowcount, 1)
self.cursor.execute("create table t2(i int)")
self.assertEqual(self.cursor.rowcount, ddl_rowcount)
#
# always return Cursor
#
# In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very
# confusing when things went wrong and added very little value even when things went right since users could always
# use: cursor.execute("...").rowcount
def test_retcursor_delete(self):
self.cursor.execute("create table t1(i int)")
self.cursor.execute("insert into t1 values (1)")
v = self.cursor.execute("delete from t1")
self.assertEqual(v, self.cursor)
def test_retcursor_nodata(self):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over
the code that errors out and drop down to the same SQLRowCount code.
"""
self.cursor.execute("create table t1(i int)")
# This is a different code path internally.
v = self.cursor.execute("delete from t1")
self.assertEqual(v, self.cursor)
def test_retcursor_select(self):
self.cursor.execute("create table t1(i int)")
self.cursor.execute("insert into t1 values (1)")
v = self.cursor.execute("select * from t1")
self.assertEqual(v, self.cursor)
#
# misc
#
def table_with_spaces(self):
"Ensure we can select using [x z] syntax"
try:
self.cursor.execute("create table [test one](int n)")
self.cursor.execute("insert into [test one] values(1)")
self.cursor.execute("select * from [test one]")
v = self.cursor.fetchone()[0]
self.assertEqual(v, 1)
finally:
self.cnxn.rollback()
def test_lower_case(self):
"Ensure pyodbc.lowercase forces returned column names to lowercase."
# Has to be set before creating the cursor, so we must recreate self.cursor.
pyodbc.lowercase = True
self.cursor = self.cnxn.cursor()
self.cursor.execute("create table t1(Abc int, dEf int)")
self.cursor.execute("select * from t1")
names = [ t[0] for t in self.cursor.description ]
names.sort()
self.assertEqual(names, [ "abc", "def" ])
# Put it back so other tests don't fail.
pyodbc.lowercase = False
def test_row_description(self):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
self.cursor = self.cnxn.cursor()
self.cursor.execute("create table t1(a int, b char(3))")
self.cnxn.commit()
self.cursor.execute("insert into t1 values(1, 'abc')")
row = self.cursor.execute("select * from t1").fetchone()
self.assertEqual(self.cursor.description, row.cursor_description)
def test_temp_select(self):
# A project was failing to create temporary tables via select into.
self.cursor.execute("create table t1(s char(7))")
self.cursor.execute("insert into t1 values(?)", "testing")
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), str)
self.assertEqual(v, "testing")
self.cursor.execute("select s into t2 from t1")
v = self.cursor.execute("select * from t1").fetchone()[0]
self.assertEqual(type(v), str)
self.assertEqual(v, "testing")
# Money
#
# The inputs are strings so we don't have to deal with floating point rounding.
for value in "-1234.56 -1 0 1 1234.56 123456789.21".split():
name = str(value).replace('.', '_').replace('-', 'neg_')
locals()['test_money_%s' % name] = _simpletest('money', Decimal(str(value)))
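# The loop above injects one test method per value into the class namespace, e.g.
# test_money_neg_1234_56 and test_money_0, each delegating to _simpletest with the "money" column type.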
def test_executemany(self):
self.cursor.execute("create table t1(a int, b varchar(10))")
params = [ (i, str(i)) for i in range(1, 6) ]
self.cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = self.cursor.execute("select count(*) from t1").fetchone()[0]
self.assertEqual(count, len(params))
| |
<filename>ape_tabular.py<gh_stars>0
import copy
import re
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import roc_auc_score, f1_score
from sklearn.linear_model import LinearRegression, LogisticRegression
from yellowbrick.cluster import KElbowVisualizer
from anchors import utils, anchor_tabular, anchor_base, limes
from growingspheres import counterfactuals as cf
from growingspheres.utils.gs_utils import get_distances, generate_inside_ball, generate_categoric_inside_ball
import pyfolding as pf
import scipy
from ape_tabular_experiments import compute_all_explanation_method_precision, simulate_user_experiments, compute_local_surrogate_precision_coverage, ape_illustrative_results, simulate_user_experiments_lime_ls
class ApeTabularExplainer(object):
"""
Args:
"""
def __init__(self, train_data, class_names, black_box_predict, black_box_predict_proba=None,
multiclass = False, continuous_features=None, categorical_features=None,
categorical_values = None, feature_names=None, discretizer="MDLP",
nb_min_instance_in_sphere=800, threshold_precision=0.95,
nb_min_instance_per_class_in_sphere=100, verbose=False,
categorical_names=None):
self.train_data = train_data
self.class_names = class_names
self.black_box_predict = lambda x: black_box_predict(x)
# black box predict proba is used for lime explanation with probabilistic function
if black_box_predict_proba is not None:
self.black_box_predict_proba = lambda x: black_box_predict_proba(x)
self.categorical_names = categorical_names
self.multiclass = multiclass
self.continuous_features = continuous_features
self.categorical_features = categorical_features
self.categorical_values = categorical_values
self.feature_names= feature_names
self.discretizer = discretizer
self.nb_min_instance_in_sphere = nb_min_instance_in_sphere
self.threshold_precision = threshold_precision
self.nb_min_instance_per_class_in_sphere = nb_min_instance_per_class_in_sphere
self.verbose = verbose
self.black_box_labels = black_box_predict(self.train_data)
if self.verbose: print("Setting interpretability methods")
self.anchor_explainer = anchor_tabular.AnchorTabularExplainer(class_names, feature_names, train_data,
copy.copy(categorical_names), discretizer=discretizer,
black_box_labels=self.black_box_labels, ordinal_features=continuous_features)
self.lime_explainer = limes.lime_tabular.LimeTabularExplainer(train_data, feature_names=feature_names,
categorical_features=categorical_features, categorical_names=categorical_names,
class_names=class_names, discretize_continuous=True, discretizer=discretizer,
training_labels=self.black_box_labels)
# Compute and store variance of each feature
self.feature_variance = []
for feature in range(len(train_data[0])):
self.feature_variance.append(np.var(train_data[:, feature]))
# Compute and store the probability of each value for each categorical feature
self.probability_categorical_feature = []
if self.categorical_features is not None:
for nb_feature, feature in enumerate(self.categorical_features):
set_categorical_value = categorical_values[nb_feature]
probability_instance_per_feature = []
for categorical_feature in set_categorical_value:
probability_instance_per_feature.append(sum(self.train_data[:,feature] == categorical_feature)/len(self.train_data[:,feature]))
self.probability_categorical_feature.append(probability_instance_per_feature)
def modify_instance_for_linear_model(self, lime_exp, instances_in_sphere):
"""
Restrict the instances in the sphere to the features used by Lime and predict them with Lime's linear surrogate model
Args: lime_exp: lime_explainer.explain_instance object
instances_in_sphere: Raw values for instances present in the hyper field
Return: Predictions of the linear model built by Lime for the instances present in the hyper field
"""
linear_model = lime_exp.easy_model
used_features = [x for x in lime_exp.used_features]
prediction_inside_sphere = linear_model.predict(instances_in_sphere[:,used_features])
return prediction_inside_sphere
def transform_data_into_data_frame(self, data_to_transform):
dictionary = {}
for nb_feature, name in enumerate(self.feature_names):
dictionary[name] = data_to_transform[:,nb_feature]
pandas_frame = pd.DataFrame(dictionary)
return pandas_frame
def generate_rule_and_data_for_anchors(self, anchor_exp, target_class, data_to_transform, simulated_user_experiment=False):
"""
Generate rules and data frame for applying anchors
Args: anchor_exp: anchor_explainer.explain_instance object
target_class: Class of the target instance to explain (class predicted by the rules from anchor_exp)
data_to_transform: Training data used to generate an anchor
simulated_user_experiment: Determines whether this function additionally returns the list of features used by the anchor, or only the rules and the transformed data
Return: The rules used by the anchor explanation
Training data converted to a data frame
"""
pandas_frame = self.transform_data_into_data_frame(data_to_transform)
rules = {}
features_employed_in_rule = []
for exp in anchor_exp:
for feature_number, feature_spliting in enumerate(self.feature_names):
split = re.split(feature_spliting, exp)
if len(split) > 1:
features_employed_in_rule.append(feature_number)
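# split[1] holds the tail of the rule after the feature name (e.g. " <= 37.5"):
# its first characters give the comparison operator, the remainder the threshold value.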
signe = str(split[1][:3]).replace(" ", "")
comparaison = (split[1][3:]).replace(" ", "")
try:
comparaison = float(comparaison)
# In case the rule is on categorical data and explanation rests on text instead of numerical values
except ValueError:
for feature_rule in self.categorical_names:
if comparaison in self.categorical_names[feature_rule]:
comparaison = self.categorical_names[feature_rule].index(comparaison)
break
if self.verbose: print("Caution ! You're data are not only numbers.")
rules[feature_spliting] = [(signe, comparaison, target_class)]
if simulated_user_experiment:
return rules, pandas_frame, features_employed_in_rule
else:
return rules, pandas_frame
def get_base_model_data(self, set_rules, x_data_frame: pd.DataFrame):
"""
Filters the training data for data points affected by the rules and associates them with the prediction of the rule.
Args: set_rules: A set of rules returned by the explanation model
x_data_frame: a data frame of instances that we want to test whether they validate the set of rules or not
Return: instances validating the set of rules
"""
instances_in_anchors = x_data_frame.copy()
for category, rules in set_rules.items():
for rule in rules:
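# The operator checks below are ordered so that ">=" and "<=" are matched before "=", "<" and ">",
# since "=" and "<" are substrings of the compound operators.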
if ">=" in rule[0]:
instances_in_anchors = instances_in_anchors.loc[instances_in_anchors[category] >= rule[1]]
elif "<=" in rule[0]:
instances_in_anchors = instances_in_anchors.loc[instances_in_anchors[category] <= rule[1]]
elif "=" in rule[0]:
instances_in_anchors = instances_in_anchors.loc[instances_in_anchors[category] == rule[1]]
elif "<" in rule[0]:
instances_in_anchors = instances_in_anchors.loc[instances_in_anchors[category] < rule[1]]
elif ">" in rule[0]:
instances_in_anchors = instances_in_anchors.loc[instances_in_anchors[category] > rule[1]]
else:
print("Invalid rule detected: {}".format(rule))
instances_in_anchors = instances_in_anchors.reset_index(drop=True)
return instances_in_anchors
def generate_artificial_instances_in_anchor(self, instances_in_anchor: pd.DataFrame, nb_instances_in_sphere, target_instance,
rules, farthest_distance, percentage_distribution):
"""
Generate as many artificial instances as the number of instances present in the field that validate the anchor rules
Args: instances_in_anchor: All the instances from the training dataset validating the anchor rules
nb_instances_in_sphere: Number of instances generated in the hyperfield
target_instance: The target instance to explain
rules: The set of rules generated by anchor
farthest_distance: the distance between the target instance and its farthest instance from the training dataset
percentage_distribution: Size of the hyper field for categorical data
Return: As many artificial instances as the number of instances present in the field that validate the anchor rules
"""
artificial_instances_in_anchors = instances_in_anchor.copy()
cnt = 2
while len(artificial_instances_in_anchors) < nb_instances_in_sphere:
# If there are not enough instances from the training dataset to compare with the instances in the sphere, we generate more until we find enough
# to compare precision and coverage of both methods
try:
if len(self.categorical_features) > 1:
generated_artificial_instances = generate_categoric_inside_ball(target_instance, (0, farthest_distance), 1,
int (cnt*nb_instances_in_sphere),
self.continuous_features, self.categorical_features, self.categorical_values,
feature_variance=self.feature_variance, probability_categorical_feature=self.probability_categorical_feature)
else:
generated_artificial_instances = generate_inside_ball(target_instance, (0, farthest_distance),
int (cnt*nb_instances_in_sphere), feature_variance=self.feature_variance)
except OverflowError:
print("over flow error")
artificial_instances_pandas_frame = self.transform_data_into_data_frame(generated_artificial_instances)
artificial_instances_in_anchor = self.get_base_model_data(rules,
artificial_instances_pandas_frame)
artificial_instances_in_anchors = artificial_instances_in_anchors.append(artificial_instances_in_anchor, ignore_index=True)
cnt += 1
return artificial_instances_in_anchors[:nb_instances_in_sphere].to_numpy()
def store_counterfactual_instances_in_sphere(self, instances_in_sphere, target_class, libfolding=False):
"""
Store the counterfactual instances present in the sphere (maximum max_instances counterfactual instances in the sphere)
Args: instances_in_sphere: Set of instances generated in the hyper field
target_class: Class of the target instance
libfolding: Parameter to indicate whether we return the index of the counterfactual present in the field or directly the values
Return: Depends on the libfolding value (instance values if False, their indices if True)
"""
counterfactual_instances_in_sphere = []
index_counterfactual_in_sphere = []
for index, instance_in_sphere in enumerate(instances_in_sphere):
if self.black_box_predict(instance_in_sphere.reshape(1, -1)) != target_class:
counterfactual_instances_in_sphere.append(instance_in_sphere)
index_counterfactual_in_sphere.append(index)
return counterfactual_instances_in_sphere if not libfolding else index_counterfactual_in_sphere
def check_test_unimodal_data(self, counterfactual_in_sphere, instances_in_sphere, radius, counterfactual_libfolding=None):
"""
Test over instances in the hypersphere to discover if data are uni or multimodal
Args: counterfactual_in_sphere: Counterfactual instances found in the area of the hyper field
instances_in_sphere: All the instances generated or present in the field
radius: Size of the hyper field
counterfactual_libfolding: counterfactual instances with continuous values for Libfolding
Return: Indicates whether the counterfactuals found in the hyper field are unimodal or multimodal
and compute the clusters centers in case of multimodal data
"""
try:
results = pf.FTU(counterfactual_libfolding, routine="python") if counterfactual_libfolding is not None else pf.FTU(counterfactual_in_sphere, routine="python")
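# In the folding test of unimodality, a folding statistic below 1 is interpreted as multimodal data (hence the comparison below).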
self.multimodal_results = results.folding_statistics < 1
if self.multimodal_results:
# If counterfactual instances are multimodal we compute the clusters center
visualizer = KElbowVisualizer(KMeans(), k=(1,8))
x_elbow = np.array(counterfactual_in_sphere)
visualizer.fit(x_elbow)
n_clusters = visualizer.elbow_value_
if n_clusters is not None:
if self.verbose: print("n CLUSTERS ", n_clusters)
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(counterfactual_in_sphere)
self.clusters_centers = kmeans.cluster_centers_
if self.verbose: print("Mean center of clusters from KMEANS ", self.clusters_centers)
else:
# If counterfactual instances are unimodal we test a linear separability problem
tree_closest_neighborhood = scipy.spatial.cKDTree(instances_in_sphere)
mean = 0
target_class = self.black_box_predict(counterfactual_in_sphere[0].reshape(1, -1))
for item in counterfactual_in_sphere:
the_result = tree_closest_neighborhood.query(item, k=2)
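# cKDTree.query returns (distances, indices); the_result[1][1] is the second nearest neighbour, skipping the point itself.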
try:
if self.black_box_predict(instances_in_sphere[the_result[1][1]].reshape(1, -1)) == target_class:
mean += 1
except:
print("problem in the search of the closest neighborhood", the_result)
mean /= len(counterfactual_in_sphere)
if self.verbose: print("Value of the linear separability test:", mean)
# We indicate that data are multimodal if the linear separability score is below the precision threshold
# of the interpretability methods
self.multimodal_results = mean < self.threshold_precision
if self.verbose: print("The libfolding test indicates that data are ", "multimodal." if self.multimodal_results else "unimodal.")
return True
except ValueError:
print("There is an error in the libfolding code for unimodal testing.")
return False
def instances_from_dataset_inside_sphere(self, closest_counterfactual, radius):
"""
Counts how many instances from the training data are present in the area of the hyper field
Args: closest_counterfactual: Center of the hyper field
radius: Size of the hyper field corresponding to the distance between the target instances and the farthest among the closest counterfactuals
Return: Index of the instances from the training data that | |
information. Skipped.",
2,
)
return
vgg_train_labels = np.argmax(y_train, axis=1)
gen_train_data = self.generate_new_data(
len(vgg_train_labels),
labels=vgg_train_labels,
num_classes=num_classes,
)
train_generator = SimpleConditionalDataGenerator(
gen_train_data,
y_train,
vgg_batch_size,
True,
)
test_generator = SimpleConditionalDataGenerator(
x_test, y_test, vgg_batch_size, False
)
val_generator = SimpleConditionalDataGenerator(
x_val, y_val, vgg_batch_size, True
)
vgg16 = model_helper.customVGG16Model(optimizer=Adam(vgg_learning_rate))
vgg16.create(input_shape=self._data_shape, num_classes=num_classes)
vgg16.fit(
train_generator,
epochs=vgg_epochs,
batch_size=vgg_batch_size,
val_generator=val_generator,
)
metrics = vgg16.full_evaluation(test_generator)
figure_helper.plot_values_over_keys(
data=vgg16._history.history,
savepath=os.path.join(self.get_figure_dir(), "vgg16_training_history.pdf"),
)
figure_helper.plot_confusion_matrix(
metrics["confusion_matrix"],
np.arange(num_classes),
savepath=os.path.join(self.get_figure_dir(), "vgg16_confusion_matrix.pdf"),
)
figure_helper.plot_confusion_matrix(
metrics["confusion_matrix"],
np.arange(num_classes),
True,
savepath=os.path.join(
self.get_figure_dir(), "vgg16_confusion_matrix_normalized.pdf"
),
)
if save:
self.save_evaluation_metric(
"vgg16_classifier",
metrics,
)
del vgg16
K.clear_session()
return metrics
def _set_path_to_harcnn_conf(self):
if self._test_if_keys_in_model_config("epsilon", "train", "test", "val"):
if (
self._config["epsilon"] is not None
and self._config["train"]
and self._config["test"]
and self._config["val"]
):
self.path_to_ms_conf = os.path.join(
self._base_path,
"/".join(ModelContainer._configs_dir.split("/")[:-1]),
"optimizer_configs",
f"harcnn-{self._config['dataset']}_eps_{self._config['epsilon']}.json",
)
return
self.path_to_ms_conf = os.path.join(
self._base_path,
"/".join(ModelContainer._configs_dir.split("/")[:-1]),
"optimizer_configs",
f"harcnn-{self._config['dataset']}.json",
)
def evaluate_against_harcnn(self, save: bool = True) -> dict:
if not isinstance(self.data, BaseTimeSeriesClass):
self._util.log(
"evaluate_against_harcnn expects time series data. Skipped.", 2
)
return
self._set_path_to_harcnn_conf()
if not os.path.isfile(self.path_to_ms_conf):
self._util.log(
f"Need a config for motion classifier, but {self.path_to_ms_conf} not present. Skipped.",
2,
)
return
with open(self.path_to_ms_conf, "r") as f:
ms_config = json.load(f)
ms_learning_rate, ms_batch_size, ms_epochs = (
ms_config["learning_rate"],
ms_config["batch_size"],
ms_config["epochs"],
)
num_classes = self.data.get_num_unique_labels()
_, x_test, x_val, y_train, y_test, y_val = self.data.unravel()
if y_train is None or y_test is None:
self._util.log(
"Evaluation of time series only possible with conditional information. Skipped.",
2,
)
return
harcnn_train_labels = np.argmax(y_train, axis=1)
gen_train_data = self.generate_new_data(
len(harcnn_train_labels),
labels=harcnn_train_labels,
num_classes=num_classes,
)
train_generator = SimpleConditionalDataGenerator(
gen_train_data,
y_train,
ms_batch_size,
True,
)
test_generator = SimpleConditionalDataGenerator(
x_test, y_test, ms_batch_size, False
)
val_generator = SimpleConditionalDataGenerator(
x_val, y_val, ms_batch_size, True
)
motion_classifier = model_helper.customHARCNNModel(
optimizer=Adam(ms_learning_rate)
)
motion_classifier.create(input_shape=self._data_shape, num_classes=num_classes)
motion_classifier.fit(
train_generator,
epochs=ms_epochs,
batch_size=ms_batch_size,
val_generator=val_generator,
)
metrics = motion_classifier.full_evaluation(test_generator)
figure_helper.plot_values_over_keys(
data=motion_classifier._history.history,
savepath=os.path.join(self.get_figure_dir(), "training_history_harcnn.pdf"),
)
figure_helper.plot_confusion_matrix(
metrics["confusion_matrix"],
np.arange(num_classes),
savepath=os.path.join(self.get_figure_dir(), "harcnn_confusion_matrix.pdf"),
)
figure_helper.plot_confusion_matrix(
metrics["confusion_matrix"],
np.arange(num_classes),
True,
savepath=os.path.join(
self.get_figure_dir(), "harcnn_confusion_matrix_normalized.pdf"
),
)
if save:
self.save_evaluation_metric(
"harcnn_classifier",
metrics,
)
del motion_classifier
K.clear_session()
return metrics
def generate_new_data(
self, sample_size: int = 1, labels: list or int = None, num_classes: int = None
) -> list:
"""This function generates new data.
Args:
sample_size (int, optional): Number of random new samples to generate. Defaults to 1.
labels (list or int, optional): For conditional models the target labels. Defaults to None.
num_classes (int, optional): For conditional models the number of overall labels. If labels are set but None this is inferred from passed labels (max). Defaults to None.
Raises:
RuntimeError: Model not present.
RuntimeError: Missing label information.
Returns:
list: Newly generated data.
"""
if self.vae is None:
raise RuntimeError(
f"{self.__class__} has no model present. Create one first to generate data."
)
# dim_z = self.decoder.input_shape[-1]
# mean = np.zeros(dim_z)
# cov = np.eye(dim_z)
# z = np.random.default_rng().multivariate_normal(mean, cov, sample_size)
# if self._conditional and (dim_z > self.get_latent_dim()):
# if labels is None:
# raise RuntimeError(
# f"To generate data for {self.__class__} pass label information."
# )
# labels = util.check_if_list_and_matches_length(
# labels, sample_size, "labels"
# )
# if num_classes is None:
# num_classes = np.max(labels)
# dim_z = dim_z - num_classes
# z = np.random.default_rng().normal(size=(sample_size, dim_z))
# labels = utils.to_categorical(labels, num_classes=num_classes)
# z = np.hstack((z, labels))
# else:
# z = np.random.default_rng().normal(size=(sample_size, dim_z))
# return self.decoder(z)
_, _, x, _, _, y = self.data.unravel()
y_labels = np.argmax(y, axis=1)
if self._conditional:
if labels is None:
raise RuntimeError(
f"To generate data for {self.__class__} pass label information."
)
labels = util.check_if_list_and_matches_length(
labels, sample_size, "labels"
)
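# New samples are produced by passing real samples of the requested class through the VAE (reconstruction),
# rather than by sampling the latent prior as in the commented-out variant above.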
global_data = np.zeros((sample_size, *x.shape[1:]))
list_of_labels, counts = np.unique(labels, return_counts=True)
for label, count in zip(list_of_labels, counts):
label_data = []
data_mask = y_labels == label
train_generator = SimpleDataGenerator(
x[data_mask], y[data_mask], self._config["batch_size"]
)
while True:
tmp_pred = self.vae.predict(
train_generator, steps=len(train_generator)
)
label_data += list(tmp_pred)
if len(label_data) >= count:
break
label_data = np.array(label_data)[:count]
global_data[labels == label] = label_data
else:
global_data = []
train_generator = SimpleDataGenerator(x, None, self._config["batch_size"])
while True:
tmp_pred = self.vae.predict(train_generator, steps=len(train_generator))
global_data += list(tmp_pred)
if len(global_data) >= sample_size:
break
global_data = np.array(global_data)[:sample_size]
return global_data
def perturb_own_dataset(self) -> None:
lower_bound = self.get_lower_bound()
if lower_bound is None or lower_bound == 0.0:
return
if self.data is None:
raise RuntimeError(
f"{self.__class__} has no data present. Load data first."
)
if self.vae is None:
raise RuntimeError(
f"{self.__class__} has no model present. Create and load model first."
)
data_dir, dataset, batch_size = self._util.read_config(
"data_dir", "dataset", "batch_size"
)
orig_data_path = os.path.join(data_dir, dataset, f"{dataset}.npz")
X, y = DataContainer.load_data(orig_data_path)
# TODO refactor dataset to allow better preprocessing
y_cat = utils.to_categorical(y) if y is not None else None
if isinstance(self.data, BaseImageClass):
X = util.transform_images_zero_one_range(X)
if isinstance(self.data, BaseTimeSeriesClass):
if not (X.shape[1] == 12) and not (X.shape[2] == 500):
X = X.reshape(-1, 12, 500)
data_generator = SimpleDataGenerator(X, y_cat, batch_size, self._conditional)
X_perturbed = self.vae.predict(data_generator, steps=len(data_generator))
if isinstance(self.data, BaseImageClass):
X_perturbed = util.inverse_transform_images_zero_one_range(X_perturbed)
data_path = os.path.join(
data_dir, dataset, f"{dataset}_noise_{lower_bound}.npz"
)
DataContainer.save_data(data_path, X_perturbed, y)
def compute_epsilon_ldp(self, num_samples: int) -> Tuple[float, float]:
dim_z, lower_bound = self.get_latent_dim(), self.get_lower_bound()
return model_helper.compute_epsilon_delta_for_ldp(
lower_bound, dim_z, num_samples
)
@abstractmethod
def create_model(self) -> None:
if not self.vae:
raise ValueError(
f"{self.__class__} misses vae. You have to build and set it in the subclass!"
)
if self.encoder:
self.encoder.summary(print_fn=lambda x: self._util.log(x, 2))
if self.decoder:
self.decoder.summary(print_fn=lambda x: self._util.log(x, 2))
self.set_optimizer()
self.vae.compile(optimizer=self.optimizer)
self.vae.summary(print_fn=lambda x: self._util.log(x, 2))
class Cifar10VAE(VAEContainer):
def __init__(self, model_config: str, config: dict, verbose: int = 1):
self._conditional = False
super().__init__(model_config, config, verbose)
def create_model(self, trained: bool = False) -> None:
"""Create VAE model
Args:
trained (bool, optional): Indicates if model was already trained before.
Not used in GAN subclass. Defaults to False.
Raises:
Exception: Raised when data is not present and data shape cannot be determined.
"""
dim_z = self.get_latent_dim()
# Alternative architecture for cifar10 VAEs
# Directly copied from here:
# https://github.wdf.sap.corp/D043326/membership_inf_gan_vae/blob/master/Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py
original_img_size = (32, 32, 3)
img_rows, img_cols, img_chns = original_img_size
filters = 32
kernel_size = 3
intermediate_dim = 300
latent_dim = dim_z
x = layers.Input(shape=original_img_size)
enc_conv_1 = layers.Conv2D(
img_chns, kernel_size=(2, 2), padding="same", activation="relu"
)(x)
enc_conv_2 = layers.Conv2D(
filters,
kernel_size=(2, 2),
padding="same",
activation="relu",
strides=(2, 2),
)(enc_conv_1)
enc_conv_3 = layers.Conv2D(
filters,
kernel_size=kernel_size,
padding="same",
activation="relu",
strides=1,
)(enc_conv_2)
enc_conv_4 = layers.Conv2D(
filters,
kernel_size=kernel_size,
padding="same",
activation="relu",
strides=1,
)(enc_conv_3)
enc_flat_layer = layers.Flatten()(enc_conv_4)
enc_hidden_layer = layers.Dense(intermediate_dim, activation="relu")(
enc_flat_layer
)
# mean and log-variance of the latent distribution
z_mean = layers.Dense(latent_dim)(enc_hidden_layer)
z_log_var = layers.Dense(latent_dim)(enc_hidden_layer)
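# model_helper.sampling is assumed to implement the usual reparameterization trick:
# z = z_mean + exp(0.5 * z_log_var) * eps, with eps ~ N(0, I)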
z = layers.Lambda(model_helper.sampling, output_shape=(latent_dim,))(
[z_mean, z_log_var]
)
# decoder architecture
decoder_hid = layers.Dense(int(intermediate_dim), activation="relu")
decoder_upsample = layers.Dense(
int(filters * img_rows / 2 * img_cols / 2), activation="relu"
)
if K.image_data_format() == "channels_first":
output_shape = (filters, int(img_rows / 2), int(img_cols / 2))
else:
output_shape = (int(img_rows / 2), int(img_cols / 2), filters)
decoder_reshape = layers.Reshape(output_shape)
decoder_deconv_1 = layers.Conv2DTranspose(
filters,
kernel_size=kernel_size,
padding="same",
strides=1,
activation="relu",
)
decoder_deconv_2 = layers.Conv2DTranspose(
filters,
kernel_size=kernel_size,
padding="same",
strides=1,
activation="relu",
)
decoder_deconv_3_upsamp = layers.Conv2DTranspose(
filters,
kernel_size=(3, 3),
strides=(2, 2),
padding="valid",
activation="relu",
)
decoder_mean_squash = layers.Conv2D(
img_chns, kernel_size=2, padding="valid", activation="sigmoid"
)
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(layers.Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = (
img_rows
* img_cols
* metrics.binary_crossentropy(x, x_decoded_mean_squash)
)
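# KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and the standard normal prior,
# averaged over the latent dimensions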
kl_loss = -0.5 * K.mean(
1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1
)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
# Encoder and decoder will only be constructed here when
# model is reconstructed before attacking (after the model has already been trained)
# When the VAE is first constructed the encoder and decoder models will be built in the train method after successful training
if trained:
encoder = models.Model(x, z_mean)
decoder_input = layers.Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
decoder = models.Model(decoder_input, _x_decoded_mean_squash)
self.encoder = encoder
self.decoder = decoder
else:
# Save layers, so that encoder and decoder can be built later (after training)
layers_dict = dict()
layers_dict["x"] = x
layers_dict["z_mean"] = z_mean
layers_dict["decoder_hid"] = decoder_hid
layers_dict["decoder_upsample"] = decoder_upsample
layers_dict["decoder_reshape"] = decoder_reshape
| |
len(Knovel) - 1]).
Exemplars: a list of length len(Knovel) * nExemplars of 2-element
tuples. The 1st element of each tuple is the image id that was
sampled and the 2nd element is its category label (which is in
the range [nKbase, nKbase + len(Knovel) - 1]).
"""
if len(Knovel) == 0:
return [], []
nKnovel = len(Knovel)
Tnovel = []
Exemplars = []
assert ((nTestNovel % nKnovel) == 0)
nEvalExamplesPerClass = int(nTestNovel / nKnovel)
for Knovel_idx in range(len(Knovel)):
imd_ids = self.sampleImageIdsFrom(
Knovel[Knovel_idx],
sample_size=(nEvalExamplesPerClass + nExemplars))
imds_tnovel = imd_ids[:nEvalExamplesPerClass]
imds_exemplars = imd_ids[nEvalExamplesPerClass:]
Tnovel += [(img_id, nKbase + Knovel_idx) for img_id in imds_tnovel]
Exemplars += [(img_id, nKbase + Knovel_idx) for img_id in imds_exemplars]
assert (len(Tnovel) == nTestNovel)
assert (len(Exemplars) == len(Knovel) * nExemplars)
random.shuffle(Exemplars)
return Tnovel, Exemplars
def sample_episode(self):
"""Samples a training episode."""
nKnovel = self.nKnovel
nKbase = self.nKbase
nTestNovel = self.nTestNovel
nTestBase = self.nTestBase
nExemplars = self.nExemplars
Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)
Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)
Tnovel, Exemplars = self.sample_train_and_test_examples_for_novel_categories(
Knovel, nTestNovel, nExemplars, nKbase)
# concatenate the base and novel category examples.
Test = Tbase + Tnovel
random.shuffle(Test)
Kall = Kbase + Knovel
return Exemplars, Test, Kall, nKbase
def createExamplesTensorData_orig(self, examples):
"""
Creates the examples image and label tensor data.
Args:
examples: a list of 2-element tuples, each representing a
train or test example. The 1st element of each tuple
is the image id of the example and 2nd element is the
category label of the example, which is in the range
[0, nK - 1], where nK is the total number of categories
(both novel and base).
Returns:
images: a tensor of shape [nExamples, Height, Width, 3] with the
example images, where nExamples is the number of examples
(i.e., nExamples = len(examples)).
labels: a tensor of shape [nExamples] with the category label
of each example.
"""
images = torch.stack(
[self.dataset[img_idx][0] for img_idx, _ in examples], dim=0)
labels = torch.LongTensor([label for _, label in examples])
return images, labels
def createExamplesTensorData(self, examples):
s_data = [self.dataset[img_idx] for img_idx, _ in examples]
images = torch.stack([entry[0] for entry in s_data], dim=0)
#labels = torch.LongTensor([entry[1] for entry in s_data])
labels = torch.LongTensor([label for _, label in examples])
if self.dataset.phase == 'test':
gt_boxes = torch.stack([entry[2] for entry in s_data], dim=0)
return images, labels, gt_boxes
else:
return images, labels
def get_iterator(self, epoch=0):
rand_seed = epoch
random.seed(rand_seed)
np.random.seed(rand_seed)
def load_function(iter_idx):
np.random.seed(iter_idx)
random.seed(iter_idx)
Exemplars, Test, Kall, nKbase = self.sample_episode()
if self.dataset.phase == 'test':
Xt, Yt, Bt = self.createExamplesTensorData(Test) # Images, Labels, GT_boxes
Kall = torch.LongTensor(Kall)
if len(Exemplars) > 0:
Xe, Ye, Be = self.createExamplesTensorData(Exemplars)
return Xe, Ye, Be, Xt, Yt, Bt, Kall, nKbase
else:
return Xt, Yt, Bt, Kall, nKbase
else:
Xt, Yt = self.createExamplesTensorData(Test) # Images, Labels
Kall = torch.LongTensor(Kall)
if len(Exemplars) > 0:
Xe, Ye = self.createExamplesTensorData(Exemplars)
return Xe, Ye, Xt, Yt, Kall, nKbase
else:
return Xt, Yt, Kall, nKbase
tnt_dataset = tnt.dataset.ListDataset(
elem_list=range(self.epoch_size), load=load_function)
data_loader = tnt_dataset.parallel(
batch_size=self.batch_size,
num_workers=(0 if self.is_eval_mode else self.num_workers),
shuffle=(False if self.is_eval_mode else True))
return data_loader
def __call__(self, epoch=0):
return self.get_iterator(epoch)
def __len__(self):
return int(self.epoch_size / self.batch_size)
class FewShotDataloaderRepmet(FewShotDataloader):
def __init__(self,
dataset,
nKnovel=5, # number of novel categories.
nKbase=-1, # number of base categories.
nExemplars=1, # number of training examples per novel category.
nTestNovel=15 * 5, # number of test examples for all the novel categories.
nTestBase=15 * 5, # number of test examples for all the base categories.
batch_size=1, # number of training episodes per batch.
num_workers=4,
epoch_size=2000, # number of batches per epoch.
):
super(FewShotDataloaderRepmet, self).__init__(dataset,
nKnovel, # number of novel categories.
nKbase, # number of base categories.
nExemplars, # number of training examples per novel category.
nTestNovel, # number of test examples for all the novel categories.
nTestBase, # number of test examples for all the base categories.
batch_size, # number of training episodes per batch.
num_workers,
epoch_size,)
def get_iterator(self, epoch=0):
rand_seed = epoch
random.seed(rand_seed)
np.random.seed(rand_seed)
def load_function(iter_idx):
Exemplars, Test, Kall, nKbase = self.sample_episode()
Xt, Yt = self.createExamplesTensorData(Test)
if len(Exemplars) > 0:
Xe, Ye = self.createExamplesTensorData(Exemplars)
return Xe, Ye, Xt, Yt
else:
raise NotImplementedError("shouldnt get here")
tnt_dataset = tnt.dataset.ListDataset(
elem_list=range(self.dataset.__len__()), load=load_function)
data_loader = tnt_dataset.parallel(
batch_size=self.batch_size,
num_workers=0,
shuffle=(False if self.is_eval_mode else True))
return data_loader
class ImageNetTestRepmet(ImageFolder):
def __init__(self, options, do_not_use_random_transf=False, debug=False):
super(ImageNetTestRepmet, self).__init__(os.path.join(imagenet_loc_data_path, 'train'))
idx_to_class = {}
for key in self.class_to_idx.keys():
idx_to_class[self.class_to_idx[key]] = key
self.idx_to_class = idx_to_class
self.options = options
self.test_n_support = options.test_way * options.test_shot
episodes = load_data(joseph_test_episode_path)
# Removing items while iterating over the same list skips elements; filter into a new list instead.
episodes = [epi for epi in episodes if len(epi['train_boxes']) >= 5]
self.episodes = episodes
self.image_res = options.image_res
assert options.pad_mode in ('constant', 'edge', 'reflect', 'symmetric')
self.pad_mode = options.pad_mode
self.debug = debug
class_name_to_idx = {}
for folder_name, class_name in folder_name_to_class_name.items():
if class_name == "crane":
class_name_to_idx[class_name.lower()] = 134
else:
class_name_to_idx[class_name.lower()] = self.class_to_idx[folder_name]
self.class_name_to_idx = class_name_to_idx
print('Loading ImageNet dataset - phase test')
# mean_pix = [x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]
# std_pix = [x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]
mean_pix = [0.485, 0.456, 0.406]
std_pix = [0.229, 0.224, 0.225]
normalize = transforms.Normalize(mean=mean_pix, std=std_pix)
if do_not_use_random_transf == True:
self.transform = transforms.Compose([
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
else:
self.transform = transforms.Compose([
# transforms.RandomCrop(self.image_res, padding=8),
# transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
# transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
self.images_for_bbox_gen_transform = transforms.Compose([
# transforms.RandomCrop(self.image_res, padding=8),
# transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
# transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor()
])
def __getitem__(self, index):
# resize the image so that the bigger edge is self.image_res,
# then pad the smaller edge to size self.image_res
episode = self.episodes[index]
if len(episode['train_boxes']) != self.test_n_support:
episode = self.fix_episode(episode)
# generate support batch
# torchvision.utils.save_image(img, '/dccstor/alfassy/tmp/tmp.jpeg')
# img.save("/dccstor/alfassy/tmp/tmp.jpeg", "jpeg")
label_to_label_number = {self.class_name_to_idx[class_name.lower()]: i for i, class_name in enumerate(episode['epi_cats_names'])}
labels_support = []
for i, train_box in enumerate(episode['train_boxes']):
img_path = train_box[2]
sample_label = self.class_to_idx[(img_path.split('/')[-1]).split('_')[0]]
labels_support += [label_to_label_number[sample_label]]
img = PILI.open(str(imagenet_loc_data_path) + str(img_path.split('CLS-LOC')[1])).convert('RGB')
if img.size[0] > img.size[1]:
bigger = 0
else:
bigger = 1
resize_transform = transforms.Resize(int((self.image_res * img.size[abs(bigger - 1)]) / img.size[bigger]))
img = resize_transform(img)
pad_width_left = int((self.image_res - img.size[0]) / 2)
pad_width_right = int(pad_width_left + ((self.image_res - img.size[0]) % 2))
pad_width_top = int((self.image_res - img.size[1]) / 2)
pad_width_bottom = int(pad_width_top + ((self.image_res - img.size[1]) % 2))
pad_transform = transforms.Pad((pad_width_left, pad_width_top, pad_width_right, pad_width_bottom),
padding_mode=self.pad_mode)
img = pad_transform(img)
img_for_bbox_gen = self.images_for_bbox_gen_transform(img)
if self.transform is not None:
img = self.transform(img)
if i == 0:
support_images = img_for_bbox_gen.unsqueeze(0)
data_support = img.unsqueeze(0)
else:
data_support = torch.cat((data_support, img.unsqueeze(0)), dim=0)
support_images = torch.cat((support_images, img_for_bbox_gen.unsqueeze(0)), dim=0)
labels_support_torch = torch.LongTensor([label for label in labels_support])
# generate query batch
labels_query = []
for i, query_image_path in enumerate(episode['query_images']):
sample_label = self.class_to_idx[(query_image_path.split('/')[-1]).split('_')[0]]
labels_query += [label_to_label_number[sample_label]]
img = PILI.open(str(imagenet_loc_data_path) + str(query_image_path.split('CLS-LOC')[1])).convert('RGB')
if img.size[0] > img.size[1]:
bigger = 0
else:
bigger = 1
resize_transform = transforms.Resize(
int((self.image_res * img.size[abs(bigger - 1)]) / img.size[bigger]))
img = resize_transform(img)
pad_width_left = int((self.image_res - img.size[0]) / 2)
pad_width_right = int(pad_width_left + ((self.image_res - img.size[0]) % 2))
pad_width_top = int((self.image_res - img.size[1]) / 2)
pad_width_bottom = int(pad_width_top + ((self.image_res - img.size[1]) % 2))
pad_transform = transforms.Pad((pad_width_left, pad_width_top, pad_width_right, pad_width_bottom),
padding_mode=self.pad_mode)
img = pad_transform(img)
img_for_bbox_gen = self.images_for_bbox_gen_transform(img)
if self.transform is not None:
img = self.transform(img)
if i == 0:
query_images = img_for_bbox_gen.unsqueeze(0)
data_query = img.unsqueeze(0)
else:
data_query = torch.cat((data_query, img.unsqueeze(0)), dim=0)
query_images = torch.cat((query_images, img_for_bbox_gen.unsqueeze(0)), dim=0)
labels_query_torch = torch.LongTensor([label for label in labels_query])
return data_support, labels_support_torch, data_query, labels_query_torch, support_images, query_images
def __len__(self):
return len(self.episodes)
def fix_episode(self, episode2fix):
'''
Takes episodes with redundant support examples and removes them.
:return: episode
'''
train_boxes = []
seen_categories = []
for element in episode2fix['train_boxes']:
cat = element[0]
if cat not in seen_categories:
seen_categories += [cat]
train_boxes += [element]
episode2fix['train_boxes'] = train_boxes
return episode2fix
folder_name_to_class_name = {"n01440764": "tench, Tinca tinca",
"n01443537": "goldfish, Carassius auratus",
"n01484850": "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias",
"n01491361": "tiger shark, Galeocerdo cuvieri",
"n01494475": "hammerhead, hammerhead shark",
"n01496331": "electric ray, crampfish, numbfish, torpedo",
"n01498041": "stingray",
"n01514668": "cock",
"n01514859": "hen",
"n01518878": "ostrich, Struthio camelus",
"n01530575": "brambling, Fringilla montifringilla",
"n01531178": "goldfinch, Carduelis carduelis",
"n01532829": "house finch, linnet, Carpodacus mexicanus",
"n01534433": "junco, snowbird",
"n01537544": "indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"n01558993": "robin, American robin, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# email: <EMAIL>
"""
Group astronomical images by fields and epochs.
Example of usage:
python stacking.py --path_data pathtoyourdata/ --radius 10 --deltaT 1
will stack all images in pathtoyourdata/ whose referenced RA and Dec
(CRVAL1 and CRVAL2) are separated by 10 arcmin maximum and taken
within time interval of 1 hour.
SWARP is required to perform the stacking.
On linux machines it can be installed with:
sudo apt install swarp
"""
import errno
import glob
import os
import subprocess
import shutil
import tempfile
import time as time1
from astropy.io import fits
from astropy.table import Table
import argparse
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy import time, wcs
import numpy as np
from gmadet.utils import list_files
def rm_p(src):
try:
# shutil.rmtree(src, ignore_errors=True)
os.remove(src)
except BaseException:
pass
def mv_p(src, dest):
try:
shutil.move(src, dest)
except BaseException:
pass
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def table_obs(path_data, radius, deltaT, exclude=None):
""" Create astropy table to group epochs and fields """
# List of all raw files
filenames = list_files(path_data,
exclude=exclude,
get_subdirs=False)
names = []
RA = []
Dec = []
Time = []
telescopes = []
instruments = []
filters = []
for ima in filenames:
# print("processing " + ima + " ...\x1b[2K", end='\r', flush=True),
hdr = fits.open(ima, memmap=False)[0].header
# Get time of observation in hours
try:
date = time.Time(hdr["DATE-OBS"], format="fits")
# convert the Julian date (in days) to hours
hr = date.jd * 24
except BaseException:
try:
hr = float(hdr["JD"]) * 24.0
except BaseException:
print(
"No keyword is found for the date of observation.\n"
"Expected: `DATE-OBS` or `JD`"
)
w = wcs.WCS(hdr)
names.append(ima)
RA.append(w.wcs.crval[0])
Dec.append(w.wcs.crval[1])
Time.append(hr)
telescopes.append(hdr["TELESCOP"])
instruments.append(hdr["INSTRUME"])
filters.append(hdr["FILTER"])
# Add unique index identifier per image
idx = np.arange(len(names))
# id to identify same field of view within given radius
field_id = np.zeros(len(names), dtype=int)
# id to identify epoch of same field within given time
epoch_id = np.zeros(len(names), dtype=int)
# Column to indicate the name of the stacked image
stack_name = [None] * len(names)
# RA and Dec took as reference for one field
ra_ref = [None] * len(names)
dec_ref = [None] * len(names)
obs_table = Table(
[
idx,
names,
telescopes,
instruments,
filters,
RA,
Dec,
Time,
field_id,
epoch_id,
ra_ref,
dec_ref,
stack_name,
],
names=[
"idx",
"filename",
"Telescope",
"Instrument",
"Filter",
"RA",
"Dec",
"JD",
"fieldID",
"epochID",
"RA_ref",
"Dec_ref",
"stack_filename",
],
)
# Sort by obs-time
obs_table.sort("JD")
field_id = 0
for tel, inst, filt in obs_table.group_by(
["Telescope", "Instrument", "Filter"]
).groups.keys:
mask = (
(obs_table["Telescope"] == tel)
& (obs_table["Instrument"] == inst)
& (obs_table["Filter"] == filt)
)
# Group by field of view
# initialise with first image data
ccrval_ref = SkyCoord(
obs_table[mask]["RA"][0],
obs_table[mask]["Dec"][0],
unit=(u.deg, u.deg),
frame="icrs",
)
field_id = 1
mask_idx = obs_table["idx"] == obs_table[mask]["idx"][0]
obs_table["fieldID"][mask_idx] = field_id
obs_table["RA_ref"][mask_idx] = obs_table[mask]["RA"][0]
obs_table["Dec_ref"][mask_idx] = obs_table[mask]["Dec"][0]
for data in obs_table[mask]:
if data["fieldID"] == 0:
# If image has not been associated to a field yet
# Check for the closest field
# otherwise create new field ID
ccrval = SkyCoord(
data["RA"], data["Dec"],
unit=(u.deg, u.deg),
frame="icrs"
)
mask2 = (obs_table["fieldID"] != 0) & mask
sep_min = 100 # in degrees
field_ref = -1
for j, key in enumerate(
obs_table[mask2].group_by("fieldID").groups.keys
):
# Assume that the ra and dec of one field are defined by the first
# image for that field
mask3 = (obs_table["fieldID"] == key[0]) & mask2
ra_ref = np.atleast_1d(obs_table[mask3]["RA"])[0]
dec_ref = np.atleast_1d(obs_table[mask3]["Dec"])[0]
ccrval_ref = SkyCoord(
ra_ref, dec_ref, unit=(u.deg, u.deg), frame="icrs"
)
sep = ccrval.separation(ccrval_ref).degree
if (sep < radius) & (sep < sep_min):
sep_min = sep
field_ref = key[0]
if field_ref != -1:
mask_idx = obs_table["idx"] == data["idx"]
obs_table["fieldID"][mask_idx] = field_ref
obs_table["RA_ref"][mask_idx] = ra_ref
obs_table["Dec_ref"][mask_idx] = dec_ref
else:
field_id += 1
mask_idx = obs_table["idx"] == data["idx"]
obs_table["fieldID"][mask_idx] = field_id
obs_table["RA_ref"][mask_idx] = data["RA"]
obs_table["Dec_ref"][mask_idx] = data["Dec"]
# Group fields by epochs
for tel, inst, filt in obs_table.group_by(
["Telescope", "Instrument", "Filter"]
).groups.keys:
mask = (
(obs_table["Telescope"] == tel)
& (obs_table["Instrument"] == inst)
& (obs_table["Filter"] == filt)
)
for field_id in obs_table[mask].group_by("fieldID").groups.keys:
mask_field = (obs_table["fieldID"] == field_id[0]) & mask
JD_ref = obs_table[mask_field]["JD"][0]
epoch_id = 1
for data in obs_table[mask_field]:
if data["JD"] <= JD_ref + deltaT:
mask_idx = obs_table["idx"] == data["idx"]
obs_table["epochID"][mask_idx] = epoch_id
else:
epoch_id += 1
JD_ref = data["JD"]
mask_idx = obs_table["idx"] == data["idx"]
obs_table["epochID"][mask_idx] = epoch_id
# obs_table.show_in_browser()
return obs_table
def makelists(path_data, path_lists, radius, deltaT, exclude=None):
"""
Group images by fields and epochs
Parameters
----------
path_data : path to images, string
directory path to loop through all the fits file it contains
path_lists : path to folder containing list of grouped images, string
radius : radius in arcmin, float
radius in arcmin used to group fields based on CRVAL values
deltaT : time in hours, float
maximum time interval for one epoch, i.e. from one image taken at
time t, all images of the same field taken before t + deltaT
are stacked
Returns
-------
No variable is returned. Files containing the images to stack are
created in the path_lists folder
"""
# Create folder for lists, delete existing files
if not os.path.isdir(path_lists):
# rm_p(path_lists)
mkdir_p(path_lists)
# Convert radius in degrees
radius = radius / 60
# Create observation table with images grouped by field and epoch
fields = table_obs(path_data, radius, deltaT, exclude=exclude)
# Create ascii files containing images to stack.
# These files are the input of SWARP
fields_list = open(os.path.join(path_lists, "fields.slist"), "w")
for tel, inst, filt in fields.group_by(
["Telescope", "Instrument", "Filter"]
).groups.keys:
mask = (
(fields["Telescope"] == tel)
& (fields["Instrument"] == inst)
& (fields["Filter"] == filt)
)
for field_id, epoch_id in (
fields[mask].group_by(["fieldID", "epochID"]).groups.keys
):
mask_field = (
(fields["fieldID"] == field_id) & (
fields["epochID"] == epoch_id) & mask
)
tel = str(fields["Telescope"][mask_field][0]).replace(" ", "")
band = str(fields["Filter"][mask_field][0]).replace(" ", "")
ra = str(
np.round(
fields["RA_ref"][mask_field][0],
3)).replace(
".",
"")
dec = str(
np.round(
fields["Dec_ref"][mask_field][0],
3)).replace(
".",
"")
filename = (
tel
+ "_"
+ band
+ "_"
+ ra
+ "_"
+ dec
+ "_field_%03d_%03d" % (field_id, epoch_id)
)
# filename = prefix + "_%03d_%03d" % (field_id, epoch_id)
f = open(os.path.join(path_lists, filename + ".list"), "w")
for data in fields[mask_field]:
f.write(data["filename"] + "\n")
mask_idx = fields["idx"] == data["idx"]
fields["stack_filename"][mask_idx] = filename
f.close()
fields_list.write(filename + " ")
fields_list.close()
# fields.show_in_browser()
def stacking(path_data, radius, deltaT, useweight=False,
subBack=True, path_results="gmadet_stacking", gain=1, keep=False):
"""Stack images"""
# Add '/' at the end of the paths if they are missing
if path_data[-1] != "/":
path_data = path_data + "/"
path_stacks = path_results # path_data + "gmadet_stacking/"
# Rename folder if already existing
if os.path.exists(path_stacks):
if keep:
mv_p(path_stacks,
path_stacks + '_' + time1.strftime("%Y%m%d-%H%M%S"))
else:
shutil.rmtree(path_stacks)
mkdir_p(path_stacks)
path_lists = tempfile.mkdtemp() # Temporary dir for fieldlists
useweight = bool(useweight)
# Whether to subtract the background
if subBack:
subBack = "Y"
else:
subBack = "N"
# Make list of images to stack
makelists(path_data, path_lists, radius, deltaT)
# Get all the prefixes corresponding to one field
filenames = glob.glob(os.path.join(path_lists, "*.list"))
prefixes = []
for filename in filenames:
splitfilename = os.path.splitext(
os.path.split(filename)[-1])[0].split("_")
prefi = ""
for i in range(len(splitfilename) - 1):
prefi += splitfilename[i] + "_"
prefixes.append(prefi)
# Discard duplicates
prefixes = np.unique(prefixes)
# Loop over fields
for pref in prefixes:
imalists = []
epochs = []
# Loop over epochs
for imalist in glob.glob(os.path.join(path_lists, pref + "???.list")):
# Check that there are at least 2 images to stack
# Otherwise skip it
file = np.genfromtxt(imalist, dtype=str)
if len(np.atleast_1d(file)) < 2:
continue
epochs += [os.path.join(
path_stacks,
os.path.splitext(os.path.split(imalist)[-1])[0])
]
imalists += ["@" + imalist]
point = os.path.join(path_stacks, pref)
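# The first SWarp pass with -HEADER_ONLY Y only writes a common output header (.head), so that every epoch
# of this field is later resampled onto the same pixel grid; the sed call below comments out keywords
# (MJD-OBS, EXPTIME, GAIN, SATURATE) that should not be forced onto the stacked images.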
subprocess.call(
[
"swarp",
"-HEADER_ONLY", "Y",
"-IMAGEOUT_NAME", point + ".head",
"-GAIN_DEFAULT", str(gain),
]
+ imalists
)
subprocess.call(
[
"sed",
"-i",
"s/MJD-OBS/COMMENT/; s/EXPTIME/COMMENT/; s/GAIN /COMMENT/; s/SATURATE/COMMENT /",
point + ".head",
]
)
for i, imalist in enumerate(imalists):
epoch = epochs[i]
shutil.copy(point + ".head", epoch + ".head")
if useweight:
subprocess.call(
[
"swarp",
"-IMAGEOUT_NAME", epoch + ".fits",
"-SUBTRACT_BACK", subBack,
"-BACK_SIZE", "128",
"-BACK_FILTERSIZE", "3",
"-WEIGHTOUT_NAME", epoch + ".weight.fits",
"-RESAMPLING_TYPE", "LANCZOS3",
"-OVERSAMPLING", "0",
"-COMBINE_TYPE", "MEDIAN",
"-GAIN_DEFAULT", str(gain),
"-COPY_KEYWORDS", "FILTER",
]
+ [imalist]
)
else:
subprocess.call(
[
"swarp",
"-IMAGEOUT_NAME", epoch + ".fits",
"-GAIN_DEFAULT", str(gain),
"-SUBTRACT_BACK", subBack,
"-BACK_SIZE", | |
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
# PaSST
# refer to https://arxiv.org/abs/2110.05069 Section 2
self.new_pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) # for C and D tokens
self.freq_new_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, self.patch_embed.grid_size[0], 1)) # | f
self.time_new_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, 1, self.patch_embed.grid_size[1])) # __ t
####
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size and not distilled:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(embed_dim, representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head(s)
self.head = nn.Sequential(nn.LayerNorm(self.num_features),
nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.new_pos_embed, std=.02)
trunc_normal_(self.freq_new_pos_embed, std=.02)
trunc_normal_(self.time_new_pos_embed, std=.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=.02)
if mode.startswith('jax'):
# leave cls token as zeros to match jax impl
raise RuntimeError("Not supported yet")
else:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def _init_weights(self, m):
# this fn left here for compat with downstream users
_init_vit_weights(m)
@torch.jit.ignore
def no_weight_decay(self):
return {'new_pos_embed', 'freq_new_pos_embed', 'time_new_pos_embed', 'cls_token', 'dist_token'}
def get_classifier(self):
if self.dist_token is None:
return self.head
else:
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.num_tokens == 2:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
global first_RUN # not jit friendly? use trace instead
x = self.patch_embed(x) # [b, e, f, t]
B_dim, E_dim, F_dim, T_dim = x.shape # slow
if first_RUN: print(" patch_embed : ", x.shape)
# Adding Time/Freq information
if first_RUN: print(" self.time_new_pos_embed.shape", self.time_new_pos_embed.shape)
time_new_pos_embed = self.time_new_pos_embed
if x.shape[-1] != time_new_pos_embed.shape[-1]:
time_new_pos_embed = time_new_pos_embed[:, :, :, :x.shape[-1]]
if first_RUN: print(" CUT time_new_pos_embed.shape", time_new_pos_embed.shape)
x = x + time_new_pos_embed
if first_RUN: print(" self.freq_new_pos_embed.shape", self.freq_new_pos_embed.shape)
x = x + self.freq_new_pos_embed
# Structured Patchout https://arxiv.org/abs/2110.05069 Section 2.2
if self.training and self.s_patchout_t:
if first_RUN: print(f"X Before time Patchout of {self.s_patchout_t} ", x.size())
# ([1, 768, 1, 82])
random_indices = torch.randperm(T_dim)[:T_dim - self.s_patchout_t].sort().values
x = x[:, :, :, random_indices]
if first_RUN: print("X after time Patchout", x.size())
if self.training and self.s_patchout_f:
if first_RUN: print(f"X Before Freq Patchout of {self.s_patchout_f} ", x.size())
# [1, 768, 12, 1]
random_indices = torch.randperm(F_dim)[:F_dim - self.s_patchout_f].sort().values
x = x[:, :, random_indices, :]
if first_RUN: print(" \n X after freq Patchout: ", x.size())
###
# Flatten the sequence
x = x.flatten(2).transpose(1, 2)
# Unstructured Patchout
if first_RUN: print("X flattened", x.size())
if self.training and self.u_patchout:
seq_len = x.shape[1]
random_indices = torch.randperm(seq_len)[:seq_len - self.u_patchout].sort().values
x = x[:, random_indices, :]
if first_RUN: print("X After Unstructured Patchout", x.size())
####
# Add the C/D tokens
if first_RUN: print(" self.new_pos_embed.shape", self.new_pos_embed.shape)
cls_tokens = self.cls_token.expand(B_dim, -1, -1) + self.new_pos_embed[:, :1, :]
if first_RUN: print(" self.cls_tokens.shape", cls_tokens.shape)
if self.dist_token is None:
x = torch.cat((cls_tokens, x), dim=1)
else:
dist_token = self.dist_token.expand(B_dim, -1, -1) + self.new_pos_embed[:, 1:, :]
if first_RUN: print(" self.dist_token.shape", dist_token.shape)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
if first_RUN: print(" final sequence x", x.shape)
x = self.pos_drop(x)
x = self.blocks(x)
if first_RUN: print(f" after {len(self.blocks)} atten blocks x", x.shape)
x = self.norm(x)
if self.dist_token is None:
return self.pre_logits(x[:, 0])
else:
return x[:, 0], x[:, 1]
def forward(self, x):
global first_RUN
if first_RUN: print("x", x.size())
x = self.forward_features(x)
if self.head_dist is not None:
features = (x[0] + x[1]) / 2
if first_RUN: print("forward_features", features.size())
x = self.head(features)
if first_RUN: print("head", x.size())
first_RUN = False
return x, features
else:
features = x
if first_RUN: print("forward_features", features.size())
x = self.head(x)
if first_RUN: print("head", x.size())
first_RUN = False
return x, features
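# Illustrative sketch (not part of the model): the structured patchout used in
# forward_features just drops a random subset of time (or frequency) columns from the
# [B, E, F, T] patch tensor during training, keeping the surviving indices in order.
# For a hypothetical tensor and s_patchout_t = 2:
#
#   import torch
#   x = torch.randn(1, 768, 12, 82)                   # [B, E, F, T]
#   keep = torch.randperm(82)[:82 - 2].sort().values  # drop 2 random time frames
#   x = x[:, :, :, keep]                              # -> [1, 768, 12, 80]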
def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
elif name.startswith('pre_logits'):
lecun_normal_(module.weight)
nn.init.zeros_(module.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
else:
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif jax_impl and isinstance(module, nn.Conv2d):
# NOTE conv was left to pytorch default in my original init
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
nn.init.zeros_(module.bias)
nn.init.ones_(module.weight)
def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=(), mode='bicubic'):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info('Resized position embedding: %s to %s with %s cls/dis tokens', posemb.shape, posemb_new.shape,
num_tokens)
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
_logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode=mode, align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def adapt_image_pos_embed_to_passt(posemb, num_tokens=1, gs_new=(), mode='bicubic'):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info('Resized position embedding: %s to %s with %s cls/dis tokens', posemb.shape, gs_new,
num_tokens)
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
assert len(gs_new) >= 2
_logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode=mode, align_corners=False)
freq_new_pos_embed = posemb_grid.mean(dim=3, keepdim=True)
time_new_pos_embed = posemb_grid.mean(dim=2, keepdim=True)
_logger.info('New Position cls/dstl embedding %s', posemb_tok.shape)
_logger.info('New FREQ Position embedding %s', freq_new_pos_embed.shape)
_logger.info('New TIME Position embedding %s', time_new_pos_embed.shape)
return posemb_tok, freq_new_pos_embed, time_new_pos_embed
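# Shape sketch (hedged; assumes a ViT checkpoint with a 24x24 patch grid and embed_dim=768):
# posemb [1, 1+576, 768] splits into posemb_tok [1, 1, 768] and a grid that is reshaped to
# [1, 768, 24, 24] and interpolated to the PaSST grid gs_new; averaging over the time axis
# gives freq_new_pos_embed [1, 768, gs_new[0], 1] and averaging over the frequency axis
# gives time_new_pos_embed [1, 768, 1, gs_new[1]].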
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
state_dict = {k: v for k, v in state_dict.items()}
if "time_new_pos_embed" not in state_dict:
# we are working with ImageNet model
_logger.info("Adapting pos embedding from ImageNet pretrained model to PaSST.")
v = state_dict.pop("pos_embed")
new_pos_embed, freq_new_pos_embed, time_new_pos_embed = adapt_image_pos_embed_to_passt(
v, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
state_dict["new_pos_embed"] = new_pos_embed
state_dict["freq_new_pos_embed"] = freq_new_pos_embed
state_dict["time_new_pos_embed"] = time_new_pos_embed
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
# this should never occur
v = resize_pos_embed(
v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
default_cfg = default_cfg or default_cfgs[variant]
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
# NOTE this extra code to support handling of repr size for in21k pretrained models
default_num_classes = default_cfg['num_classes']
num_classes = kwargs.get('num_classes', default_num_classes)
repr_size = kwargs.pop('representation_size', None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model = build_model_with_cfg(
PaSST, variant, pretrained,
default_cfg=default_cfg,
representation_size=repr_size,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_custom_load='npz' in default_cfg['url'],
**kwargs)
return model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
"""
model_kwargs = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs)
return model
def deit_base_distilled_patch16_384(pretrained=False,
slc.start)
def slice_up_range(n, num_slices, start=0):
"""
Divides up `range(start,start+n)` into `num_slices` slices.
Parameters
----------
n : int
The number of (consecutive) indices in the range to be divided.
num_slices : int
The number of slices to divide the range into.
start : int, optional
The starting entry of the range, so that the range to be
divided is `range(start,start+n)`.
Returns
-------
list of slices
"""
base = n // num_slices # base slice size
m1 = n - base * num_slices # num base+1 size slices
m2 = num_slices - m1 # num base size slices
assert(((base + 1) * m1 + base * m2) == n)
off = start
ret = [slice(off + (base + 1) * i, off + (base + 1) * (i + 1)) for i in range(m1)]
off += (base + 1) * m1
ret += [slice(off + base * i, off + base * (i + 1)) for i in range(m2)]
assert(len(ret) == num_slices)
return ret
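# Worked example: slice_up_range(10, 3) gives base = 3, m1 = 1, m2 = 2, i.e.
# [slice(0, 4), slice(4, 7), slice(7, 10)] -- one slice of size base+1 followed by two of
# size base, covering range(0, 10) exactly.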
def distribute_slice(s, comm, allow_split_comm=True):
"""
Partition a continuous slice evenly among `comm`'s processors.
This function is similar to :func:`distribute_indices`, but
is specific to the case when the indices being distributed
are a consecutive set of integers (specified by a slice).
Parameters
----------
s : slice
The slice to be partitioned.
comm : mpi4py.MPI.Comm or ResourceAllocation
The communicator which specifies the number of processors and
which may be split into returned sub-communicators. If a
:class:`ResourceAllocation` object, node information is also
taken into account when available (for shared memory compatibility).
allow_split_comm : bool
If True, when there are more processors than slice indices,
multiple processors will be given the *same* local slice
and `comm` will be split into sub-communicators, one for each
group of processors that are given the same local slice.
If False, then "extra" processors are simply given
nothing to do, i.e. an empty local slice.
Returns
-------
slices : list of slices
The list of *unique* slices assigned to different processors. It's
possible that a single slice (i.e. element of `slices`) is assigned
to multiple processors (when there are more processors than indices
in `s`).
loc_slice : slice
A slice specifying the indices belonging to the current processor.
owners : dict
A dictionary giving the owning rank of each slice. Values are integer
ranks and keys are integers into `slices`, specifying which slice.
loc_comm : mpi4py.MPI.Comm or ResourceAllocation or None
The local communicator/ResourceAllocation for the group of processors
which have been given the same `loc_slice` to compute, obtained by
splitting `comm`. If `loc_slice` is unique to the current processor,
or if `allow_split_comm` is False, None is returned.
"""
from ..baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
if isinstance(comm, _ResourceAllocation):
ralloc = comm
comm = ralloc.comm
else:
ralloc = None
if comm is None:
nprocs, rank = 1, 0
else:
nprocs = comm.Get_size()
rank = comm.Get_rank()
slices = slice_up_slice(s, min(nprocs, _slct.length(s)))
assert(len(slices) <= nprocs)
loc_iSlices, slcOwners, _ = \
distribute_indices_base(list(range(len(slices))), nprocs, rank,
allow_split_comm)
assert(len(loc_iSlices) <= 1) # should not assign more than one slice to
# each proc by design (there are only nprocs slices)
if len(loc_iSlices) == 1:
loc_slice = slices[loc_iSlices[0]]
#Split comm into sub-comms when there are more procs than
# indices, resulting in all procs getting only a
# single index and multiple procs getting the *same*
# (single) index.
if nprocs > _slct.length(s) and (comm is not None) and allow_split_comm:
loc_comm = comm.Split(color=loc_iSlices[0], key=rank)
else:
loc_comm = None
else: # len(loc_iSlices) == 0 (nothing for this proc to do)
loc_slice = slice(0, 0)
loc_comm = None
if ralloc is not None: # then return a ResourceAllocation instead of a comm
loc_comm = _ResourceAllocation(loc_comm, ralloc.mem_limit, ralloc.profiler,
ralloc.distribute_method, ralloc.allocated_memory)
if ralloc.host_comm is not None:
loc_comm.build_hostcomms() # signals that we want to use shared intra-host memory
return slices, loc_slice, slcOwners, loc_comm
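# Minimal sketch (hedged; assumes slice_up_slice and distribute_indices_base behave as their
# names suggest): with no communicator the whole slice stays local, e.g.
#
#   slices, loc_slice, owners, loc_comm = distribute_slice(slice(0, 10), None)
#   # slices == [slice(0, 10)], loc_slice == slice(0, 10), owners == {0: 0}, loc_comm is None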
def gather_slices(slices, slice_owners, ar_to_fill,
ar_to_fill_inds, axes, comm, max_buffer_size=None):
"""
Gathers data within a numpy array, `ar_to_fill`, according to given slices.
Upon entry it is assumed that the different processors within `comm` have
computed different parts of `ar_to_fill`, namely different slices of the
`axis`-th axis. At exit, data has been gathered such that all processors
have the results for the entire `ar_to_fill` (or at least for all the slices
given).
Parameters
----------
slices : list
A list of all the slices (computed by *any* of the processors, not
just the current one). Each element of `slices` may be either a
single slice or a tuple of slices (when gathering across multiple
dimensions).
slice_owners : dict
A dictionary mapping the index of a slice (or tuple of slices)
within `slices` to an integer rank of the processor responsible
for communicating that slice's data to the rest of the processors.
ar_to_fill : numpy.ndarray
The array which contains partial data upon entry and the gathered
data upon exit.
ar_to_fill_inds : list
A list of slice or index-arrays specifying the (fixed) sub-array of
`ar_to_fill` that should be gathered into. The elements of
`ar_to_fill_inds` are taken to be indices for the leading dimension
first, and any unspecified dimensions or `None` elements are
assumed to be unrestricted (as if `slice(None,None)`). Note that
the combination of `ar_to_fill` and `ar_to_fill_inds` is essentially like
passing `ar_to_fill[ar_to_fill_inds]` to this function, except it will
work with index arrays as well as slices.
axes : int or tuple of ints
The axis or axes of `ar_to_fill` on which the slices apply (which axis
do the slices in `slices` refer to?). Note that `len(axes)` must
be equal to the number of slices (i.e. the tuple length) of each
element of `slices`.
comm : mpi4py.MPI.Comm or ResourceAllocation or None
The communicator specifying the processors involved and used
to perform the gather operation. If a :class:`ResourceAllocation`
is provided, then inter-host communication is used when available
to facilitate use of shared intra-host memory.
max_buffer_size : int or None
The maximum buffer size in bytes that is allowed to be used
for gathering data. If None, there is no limit.
Returns
-------
None
"""
from ..baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
if isinstance(comm, _ResourceAllocation):
ralloc = comm
comm = ralloc.comm
#For use with shared intra-host (intra-node) memory:
# my_interhost_ranks = ranks of comm, 1 per host, that this processor uses to send/receive data between hosts
# broadcast_comm = the comm of my_interhost_ranks used to send/receive data.
if ralloc.interhost_ranks is not None:
my_interhost_ranks = set(ralloc.interhost_ranks)
broadcast_rank_map = {comm_rank: broadcast_comm_rank
for broadcast_comm_rank, comm_rank in enumerate(ralloc.interhost_ranks)}
broadcast_comm = ralloc.interhost_comm
else:
my_interhost_ranks = None
broadcast_rank_map = {i: i for i in range(comm.Get_size())} if (comm is not None) else {0: 0} # trivial map
broadcast_comm = comm
else:
ralloc = None
my_interhost_ranks = None
broadcast_rank_map = {i: i for i in range(comm.Get_size())} if (comm is not None) else {0: 0} # trivial map
broadcast_comm = comm
if comm is None: return # no gathering needed!
# To be safe, since use of broadcast_comm below means we don't always need to wait for all procs
# to finish what they were doing last, which could involve updating a shared ar_to_fill so that
# values accessed by the already-finished front-running processors are affected!
comm.barrier()
#Perform broadcasts for each slice in order
my_rank = comm.Get_rank()
axes = (axes,) if _compat.isint(axes) else axes
#print("DB: Rank %d (%d): BEGIN GATHER SLICES: interhost=%s, group=%s" %
# (my_rank, broadcast_comm.rank, str(my_interhost_ranks), str(broadcast_comm.Get_group())))
# # if ar_to_fill_inds only contains slices (or is empty), then we can slice ar_to_fill once up front
# # and not use generic arIndx in loop below (slower, especially with lots of procs)
# if all([isinstance(indx, slice) for indx in ar_to_fill_inds]):
# ar_to_fill = ar_to_fill[tuple(ar_to_fill_inds)] # Note: this *doesn't* reduce its .ndim
# ar_to_fill_inds = () # now ar_to_fill requires no further indexing
arIndx = [slice(None, None)] * ar_to_fill.ndim
arIndx[0:len(ar_to_fill_inds)] = ar_to_fill_inds
max_indices = [None] * len(axes)
if max_buffer_size is not None:  # a buffer-size limit was given (None means no limit)
chunkBytes =
"""LUNOS Heat Recovery Ventilation Fan Control (e2/eGO)"""
# FIXME: can we subscribe to updates from the w1/w2 entities to avoid polling?
import asyncio
import logging
import time
import voluptuous as vol
from homeassistant.components.fan import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_NAME,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from . import LUNOS_CODING_CONFIG
from .const import (
ATTR_CFM,
ATTR_CMHR,
ATTR_DB,
ATTR_MODEL_NAME,
ATTR_VENTILATION_MODE,
CFM_TO_CMH,
CONF_CONTROLLER_CODING,
CONF_DEFAULT_FAN_COUNT,
CONF_DEFAULT_SPEED,
CONF_FAN_COUNT,
CONF_RELAY_W1,
CONF_RELAY_W2,
DEFAULT_LUNOS_NAME,
DEFAULT_SPEED,
LUNOS_DOMAIN,
SERVICE_CLEAR_FILTER_REMINDER,
SERVICE_TURN_OFF_SUMMER_VENTILATION,
SERVICE_TURN_ON_SUMMER_VENTILATION,
SPEED_LIST,
SPEED_SWITCH_STATES,
SPEED_TURBO,
UNKNOWN,
)
LOG = logging.getLogger(__name__)
# delay all speed changes to > 3 seconds since the last relay switch change to avoid side effects
SPEED_CHANGE_DELAY_SECONDS = 4
DELAY_BETWEEN_FLIPS = 0.100
MINIMUM_DELAY_BETWEEN_STATE_CHANGES = 15.0
# FIXME: support enabling exhaust-only mode
VENTILATION_NORMAL = "normal"
VENTILATION_SUMMER = "summer"
VENTILATION_EXHAUST = "exhaust-only"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_NAME
): cv.string, # NOTE: we default the name later based on logic
vol.Optional(CONF_RELAY_W1): cv.string, # cv.entity_id
vol.Optional(CONF_RELAY_W2): cv.string, # cv.entity_id
vol.Optional(CONF_DEFAULT_SPEED, default=DEFAULT_SPEED): vol.In(SPEED_LIST),
# vol.Optional(CONF_CONTROLLER_CODING, default='e2-usa'): cv.string,
vol.Optional(CONF_CONTROLLER_CODING, default="e2-usa"): vol.In(
LUNOS_CODING_CONFIG.keys()
),
vol.Optional(CONF_FAN_COUNT): vol.In(
[1, 2, 3, 4]
), # default is based on how controller is coded (see below)
}
)
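# Illustrative YAML configuration sketch (entity ids and values are placeholders, assuming
# the platform is registered under the "lunos" domain):
#
#   fan:
#     - platform: lunos
#       name: Bedroom Ventilation
#       relay_w1: switch.lunos_w1
#       relay_w2: switch.lunos_w2
#       default_speed: medium        # must be one of SPEED_LIST
#       controller_coding: e2-usa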
# pylint: disable=unused-argument
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Initialize the LUNOS fans from config."""
name = config.get(CONF_NAME)
if not name:
name = DEFAULT_LUNOS_NAME
relay_w1 = config.get(CONF_RELAY_W1)
relay_w2 = config.get(CONF_RELAY_W2)
default_speed = config.get(CONF_DEFAULT_SPEED)
LOG.info(
f"LUNOS fan controller '{name}' using relays W1={relay_w1}, W2={relay_w2}'"
)
fan = LUNOSFan(hass, config, name, relay_w1, relay_w2, default_speed)
async_add_entities([fan], update_before_add=True)
# expose service call APIs
# FIXME: how are these tied to a specific LUNOS fan instance?
component = EntityComponent(LOG, LUNOS_DOMAIN, hass)
component.async_register_entity_service(
SERVICE_CLEAR_FILTER_REMINDER, {}, "async_clear_filter_reminder"
)
component.async_register_entity_service(
SERVICE_TURN_ON_SUMMER_VENTILATION, {}, "async_turn_on_summer_ventilation"
)
component.async_register_entity_service(
SERVICE_TURN_OFF_SUMMER_VENTILATION, {}, "async_turn_off_summer_ventilation"
)
return True
class LUNOSFan(FanEntity):
"""Representation of a LUNOS fan."""
def __init__(
self, hass, conf, name, relay_w1, relay_w2, default_speed: str = DEFAULT_SPEED
):
"""Init this sensor."""
self.hass = hass
self._name = name
self.entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, name, hass=hass)
super().__init__()
self._speed = None
self._default_speed = default_speed
self._last_state_change = None
# specify W1/W2 relays to use
self._relay_w1 = relay_w1
self._relay_w2 = relay_w2
coding = conf.get(CONF_CONTROLLER_CODING)
model_config = LUNOS_CODING_CONFIG[coding]
# default fan count differs depending on controller mode (e2 = 2 fans, eGO = 1 fan)
self._fan_count = conf.get(CONF_FAN_COUNT, model_config[CONF_DEFAULT_FAN_COUNT])
self._attributes = {
ATTR_MODEL_NAME: model_config["name"],
CONF_CONTROLLER_CODING: coding,
CONF_FAN_COUNT: self._fan_count,
ATTR_VENTILATION_MODE: VENTILATION_NORMAL,
ATTR_DB: UNKNOWN,
CONF_RELAY_W1: relay_w1,
CONF_RELAY_W2: relay_w2,
}
# copy select fields from the model config into the attributes
for attribute in [
"cycle_seconds",
"supports_summer_vent",
"supports_filter_reminder",
"turbo_mode",
"exhaust_only",
]:
if attribute in model_config:
self._attributes[attribute] = model_config[attribute]
# determine the current speed of the fans
self._update_speed(self._determine_current_speed())
LOG.info(
f"Created LUNOS fan controller '{self._name}' (W1={relay_w1}; W2={relay_w2}; default_speed={default_speed})"
)
async def async_added_to_hass(self) -> None:
"""Once entity has been added to HASS, subscribe to state changes."""
await super().async_added_to_hass()
# setup listeners to track changes to the W1/W2 relays
async_track_state_change_event(
self.hass, [self._relay_w1, self._relay_w2], self._relay_state_changed
)
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
@callback
def _relay_state_changed(self, event):
"""Whenever W1 or W2 relays change state, the fan speed needs to be updated"""
entity = event.data.get("entity_id")
to_state = event.data["new_state"].state
# sometimes there is no from_state
old_state = event.data.get("old_state")
from_state = old_state.state if old_state else None
if not from_state or to_state != from_state:
LOG.info(
f"{entity} changed from {from_state} to {to_state}, updating '{self._name}'"
)
self.schedule_update_ha_state()
def update_attributes(self):
"""Calculate the current CFM based on the current fan speed as well as the
number of fans configured by the user."""
if self._speed is not None:
coding = self._attributes[CONF_CONTROLLER_CODING]
controller_config = LUNOS_CODING_CONFIG[coding]
fan_multiplier = self._fan_count / controller_config[CONF_DEFAULT_FAN_COUNT]
# load the behaviors of the fan for the current speed setting
behavior_config = controller_config.get("behavior")
if not behavior_config:
LOG.error(
f"Missing behavior config for coding {coding}: {controller_config}"
)
behavior = behavior_config.get(self._speed, {})
# determine the air flow rates based on fan behavior at the current speed
cfm = cmh = None
if "cfm" in behavior:
cfm_for_mode = behavior["cfm"]
cfm = cfm_for_mode * fan_multiplier
cmh = cfm * CFM_TO_CMH
elif "chm" in behavior:
chm_for_mode = behavior["chm"]
cmh = chm_for_mode * fan_multiplier
cfm = cmh / CFM_TO_CMH
self._attributes[ATTR_CFM] = cfm
self._attributes[ATTR_CMHR] = cmh
# if sound level (dB) is defined for the speed, include it in attributes
self._attributes[ATTR_DB] = behavior.get(ATTR_DB, None)
self._attributes["watts"] = behavior.get("watts", None)
LOG.debug(
f"Updated '{self._name}': speed={self._speed}; attributes {self._attributes}; controller config {controller_config}"
)
@property
def name(self):
"""Return the name of the fan."""
return self._name
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return SPEED_LIST
@property
def speed(self) -> str:
"""Return the current speed."""
return self._speed
@property
def is_on(self) -> bool:
"""Return true if entity is on."""
if self._speed is None:
return False
# NOTE: for 4-speed fans, there is never a true "OFF" setting
return self._speed != SPEED_OFF
@property
def device_state_attributes(self):
"""Return state attributes."""
return self._attributes
def _determine_current_speed(self):
"""Probe the two relays to determine current state and find the matching speed switch state"""
w1 = self.hass.states.get(self._relay_w1)
if not w1:
LOG.warning(
f"W1 entity {self._relay_w1} not found, cannot determine LUNOS fan speed."
)
return
w2 = self.hass.states.get(self._relay_w2)
if not w2:
LOG.warning(
f"W2 entity {self._relay_w2} not found, cannot determine LUNOS fan speed."
)
return
# determine the current speed based on relay W1/W2 state
current_state = [w1.state, w2.state]
for speed, speed_state in SPEED_SWITCH_STATES.items():
if current_state == speed_state:
LOG.info(
f"LUNOS speed for '{self._name}' = {speed} (W1/W2={current_state})"
)
return speed
return None
def _update_speed(self, speed):
"""Update to the new speed and update any dependent attributes"""
self._speed = speed
self._last_state_change = time.time()
self.update_attributes()
async def _throttle_state_changes(self, required_delay):
time_passed = time.time() - self._last_state_change
if time_passed < required_delay:
delay = max(0, required_delay - time_passed)
LOG.warning(
f"To avoid LUNOS '{self._name}' controller race conditions, sleeping {delay} seconds"
)
await asyncio.sleep(delay)
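# Example of the throttle arithmetic above: if the last relay change happened 1.5 s ago and
# required_delay is 4 s, the coroutine sleeps for the remaining 2.5 s before proceeding.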
async def async_set_speed(self, speed: str) -> None:
"""Set the fan speed"""
switch_states = SPEED_SWITCH_STATES[speed]
if not switch_states:
LOG.warning(
f"LUNOS fan '{self._name}' DOES NOT support speed '{speed}'; ignoring speed change."
)
return
# flipping W1 or W2 within 3 seconds instructs the LUNOS controller to either clear the
# filter warning light (W1) or turn on the summer/night ventilation mode (W2), thus
# delay all state changes to be > 3 seconds since the last switch change
await self._throttle_state_changes(SPEED_CHANGE_DELAY_SECONDS)
LOG.info(
f"Changing LUNOS fan '{self._name}' speed from {self._speed} to {speed}"
)
await self.set_relay_switch_state(self._relay_w1, switch_states[0])
await self.set_relay_switch_state(self._relay_w2, switch_states[1])
# update to the new speed and update any dependent attributes
self._update_speed(speed)
async def async_update(self):
"""Attempt to retrieve current state of the fan by inspecting the switch state."""
# throttle to allow switch changes to converge
await self._throttle_state_changes(1.0)
current_speed = self._determine_current_speed()
if current_speed != self._speed:
self._update_speed(current_speed)
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn the fan on."""
# TODO: should this turn on to the default speed, or the last speed before turning off?
if speed is None:
speed = self._default_speed
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the fan off."""
await self.async_set_speed(SPEED_OFF)
async def call_switch_service(self, method, relay_entity_id):
LOG.info(f"Calling switch {method} for {relay_entity_id}")
await self.hass.services.async_call(
"switch", method, {"entity_id": relay_entity_id}, False
)
self._last_state_change = time.time()
async def set_relay_switch_state(self, relay_entity_id, state):
LOG.info(f"Setting relay {relay_entity_id} to {state}")
method = SERVICE_TURN_ON if state == STATE_ON else SERVICE_TURN_OFF
await self.call_switch_service(method, relay_entity_id)
async def toggle_relay_to_set_lunos_mode(self, entity_id):
saved_speed = self._speed
# LUNOS requires flipping switches on/off 3 times to set mode
toggle_methods = [
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
]
for method in toggle_methods:
await self.call_switch_service(method, entity_id)
await asyncio.sleep(DELAY_BETWEEN_FLIPS)
# restore speed state back to state before toggling relay
await self.async_set_speed(saved_speed)
# flipping W1 within 3 seconds instructs the LUNOS controller to clear the filter warning light
async def async_clear_filter_reminder(self):
LOG.info(f"Clearing the filter change reminder light for LUNOS '{self._name}'")
await self.toggle_relay_to_set_lunos_mode(self._relay_w1)
# In summer ventilation mode, the reversing time of the fan is extended to one hour, i.e. the fan will run
# for one hour in the supply air mode and the following hour in the exhaust air mode etc. for max. 8 hours
def supports_summer_ventilation(self):
coding = self._attributes[CONF_CONTROLLER_CODING]
controller_config = LUNOS_CODING_CONFIG[coding]
return controller_config["supports_summer_vent"]
colour ranges
# # extract coeffs from fit logs via:
# # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}' bhm_spiders_agn_gaiadr2_pontlike/gdr2_*mag_to_sdss_*mag_?_results.log # noqa
# coeffs = {
# "g2_p": 0.236233,
# "g1_p": 0.154277,
# "g0_p": -0.066625,
# "i2_p": 0.340616,
# "i1_p": -1.395607,
# "i0_p": 0.555709,
# "r2_p": 0.410346,
# "r1_p": -1.065556,
# "r0_p": 0.441098,
# "z2_p": 0.512729,
# "z1_p": -2.214448,
# "z0_p": 0.865291,
# }
# bp_rp_p = tic.gaiabp - tic.gaiarp
# g_p = (tic.gaiamag + coeffs['g0_p'] + coeffs['g1_p'] * bp_rp_p +
# coeffs['g2_p'] * bp_rp_p * bp_rp_p)
# r_p = (tic.gaiamag + coeffs['r0_p'] + coeffs['r1_p'] * bp_rp_p +
# coeffs['r2_p'] * bp_rp_p * bp_rp_p)
# i_p = (tic.gaiamag + coeffs['i0_p'] + coeffs['i1_p'] * bp_rp_p +
# coeffs['i2_p'] * bp_rp_p * bp_rp_p)
# z_p = (tic.gaiamag + coeffs['z0_p'] + coeffs['z1_p'] * bp_rp_p +
# coeffs['z2_p'] * bp_rp_p * bp_rp_p)
bp_rp = tic.gaiabp - tic.gaiarp
g_blue = (tic.gaiamag + coeffs['g0_blue'] + coeffs['g1_blue'] * bp_rp +
coeffs['g2_blue'] * bp_rp * bp_rp +
coeffs['g3_blue'] * bp_rp * bp_rp * bp_rp)
r_blue = (tic.gaiamag + coeffs['r0_blue'] + coeffs['r1_blue'] * bp_rp +
coeffs['r2_blue'] * bp_rp * bp_rp +
coeffs['r3_blue'] * bp_rp * bp_rp * bp_rp)
i_blue = (tic.gaiamag + coeffs['i0_blue'] + coeffs['i1_blue'] * bp_rp +
coeffs['i2_blue'] * bp_rp * bp_rp +
coeffs['i3_blue'] * bp_rp * bp_rp * bp_rp)
z_blue = (tic.gaiamag + coeffs['z0_blue'] + coeffs['z1_blue'] * bp_rp +
coeffs['z2_blue'] * bp_rp * bp_rp +
coeffs['z3_blue'] * bp_rp * bp_rp * bp_rp)
g_red = (tic.gaiamag + coeffs['g0_red'] + coeffs['g1_red'] * bp_rp +
coeffs['g2_red'] * bp_rp * bp_rp)
r_red = (tic.gaiamag + coeffs['r0_red'] + coeffs['r1_red'] * bp_rp +
coeffs['r2_red'] * bp_rp * bp_rp)
i_red = (tic.gaiamag + coeffs['i0_red'] + coeffs['i1_red'] * bp_rp +
coeffs['i2_red'] * bp_rp * bp_rp)
z_red = (tic.gaiamag + coeffs['z0_red'] + coeffs['z1_red'] * bp_rp +
coeffs['z2_red'] * bp_rp * bp_rp)
# validity checks - set limits semi-manually
bp_rp_min = 0.0
bp_rp_max = 3.0
valid = (tic.gaiamag.between(0.1, 29.9) &
tic.gaiabp.between(0.1, 29.9) &
tic.gaiarp.between(0.1, 29.9) &
bp_rp.between(bp_rp_min, bp_rp_max))
opt_prov = peewee.Case(None, ((valid, 'sdss_psfmag_from_gdr2'),), 'undefined')
magnitude_g = peewee.Case(None,
(
(valid & (bp_rp < 1.8), g_blue),
(valid & (bp_rp > 1.8), g_red),
), 'NaN')
magnitude_r = peewee.Case(None,
(
(valid & (bp_rp < 1.8), r_blue),
(valid & (bp_rp > 1.8), r_red),
), 'NaN')
magnitude_i = peewee.Case(None,
(
(valid & (bp_rp < 1.8), i_blue),
(valid & (bp_rp > 1.8), i_red),
), 'NaN')
magnitude_z = peewee.Case(None,
(
(valid & (bp_rp < 1.8), z_blue),
(valid & (bp_rp > 1.8), z_red),
), 'NaN')
query = (
c.select(
fn.min(c.catalogid).alias('catalogid'),
fn.min(tic.gaia_int).alias('gaia_source'), # extra
fn.min(x.ero_detuid).alias('ero_detuid'), # extra
fn.min(c.ra).alias('ra'), # extra
fn.min(c.dec).alias('dec'), # extra
priority.alias("priority"),
fn.min(value).alias('value'),
fn.min(cadence).alias('cadence'),
fn.min(instrument).alias('instrument'),
fn.min(opt_prov).alias('optical_prov'),
fn.min(magnitude_g).alias('g'),
fn.min(magnitude_r).alias('r'),
fn.min(magnitude_i).alias('i'),
fn.min(magnitude_z).alias('z'),
fn.min(tic.gaiamag).alias('gaia_g'),
fn.min(tic.gaiabp).alias('bp'),
fn.min(tic.gaiarp).alias('rp'),
)
.join(c2tic)
.where(
c.version_id == version_id,
c2tic.version_id == version_id,
c2tic.best >> True,
)
.join(tic)
.join(x, on=(tic.gaia_int == x.gaia_dr2_id))
.switch(c)
# start joining the spectroscopy
.switch(c)
.join(c2s16, JOIN.LEFT_OUTER)
.join(
s16, JOIN.LEFT_OUTER,
on=(
(c2s16.target_id == s16.c.specobjid) &
(c2s16.version_id == version_id)
)
)
.switch(c)
.join(c2s2020, JOIN.LEFT_OUTER)
.join(
s2020, JOIN.LEFT_OUTER,
on=(
(c2s2020.target_id == s2020.c.pk) &
(c2s2020.version_id == version_id)
)
)
.join(
sV, JOIN.LEFT_OUTER,
on=(
fn.q3c_join(sV.c.plug_ra, sV.c.plug_dec,
c.ra, c.dec,
match_radius_spectro)
)
)
.join(
sph, JOIN.LEFT_OUTER,
on=(
fn.q3c_join(sph.c.target_ra, sph.c.target_dec,
c.ra, c.dec,
match_radius_spectro)
)
)
# finished joining the spectroscopy
.where(
(x.ero_version == self.parameters['ero_version']),
(x.xmatch_method == self.parameters['xmatch_method']),
(x.xmatch_version == self.parameters['xmatch_version']),
(x.opt_cat == self.parameters['opt_cat']),
(x.xmatch_metric >= self.parameters['p_any_min']),
(tic.gaiamag > self.parameters['gaia_g_mag_limit']),
(tic.gaiarp > self.parameters['gaia_rp_mag_limit']),
(x.ero_det_like > self.parameters['det_like_min']),
)
.group_by(tic.gaia_int) # avoid duplicates - we trust the gaia ids
)
if query_region:
query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
query_region[0],
query_region[1],
query_region[2]))
return query
#
# END BhmSpidersAgnGaiadr2Carton
# ##################################################################################
class BhmSpidersAgnSepCarton(BaseCarton):
name = 'bhm_spiders_agn_sep'
category = 'science'
mapper = 'BHM'
program = 'bhm_spiders'
tile = False
instrument = 'BOSS'
def build_query(self, version_id, query_region=None):
c = Catalog.alias()
x = EROSITASupersetAGN.alias()
tic = TIC_v8.alias()
c2tic = CatalogToTIC_v8.alias()
instrument = peewee.Value(self.instrument)
gaia_g_max_for_cadence1 = self.parameters['gaia_g_max_for_cadence1']
gaia_rp_max_for_cadence1 = self.parameters['gaia_rp_max_for_cadence1']
gaia_g_max_for_cadence2 = self.parameters['gaia_g_max_for_cadence2']
gaia_rp_max_for_cadence2 = self.parameters['gaia_rp_max_for_cadence2']
value = peewee.Value(self.parameters.get('value', 1.0)).cast('float')
# priority is determined by target properties
# start with a priority floor value (per carton)
# then increment if any conditions are met:
# add +dpriority_match_flags if target is a secondary cross-match (match_flag > 1)
# add +dpriority_det_like if target has a low value of ero_det_like
# add +dpriority_has_spec if target has existing good SDSS spectroscopy
priority_1 = peewee.Case(
None,
((x.xmatch_flags > 1, 1), ),
0)
priority_2 = peewee.Case(
None,
((x.ero_det_like < self.parameters['det_like_for_priority'], 1), ),
0)
# add a step down in priority for anything selected by the secondary xmatch_version
priority_3 = peewee.Case(
None,
((x.xmatch_version == self.parameters['xmatch_version2'], 1), ),
0)
# choose the maximum priority option for all combinations of this target
priority = fn.max(
self.parameters['priority_floor'] +
priority_1 * self.parameters['dpriority_match_flags'] +
priority_2 * self.parameters['dpriority_det_like'] +
priority_3 * self.parameters['dpriority_match_method']
)
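# Worked example of the priority arithmetic (parameter values are purely illustrative):
# with priority_floor=1520, dpriority_match_flags=1, dpriority_det_like=2 and
# dpriority_match_method=4, a secondary cross-match (xmatch_flags > 1) with a low
# detection likelihood that was selected by the primary xmatch_version gets
# 1520 + 1*1 + 1*2 + 0*4 = 1523.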
# choose cadence based on magnitude in Gaia G and RP-bands
cadence1 = self.parameters['cadence1']
cadence2 = self.parameters['cadence2']
cadence3 = self.parameters['cadence3']
cadence4 = 'unknown_cadence'
cadence = peewee.Case(
None,
(
((tic.gaiamag < gaia_g_max_for_cadence1) |
(tic.gaiarp < gaia_rp_max_for_cadence1), cadence1),
((tic.gaiamag < gaia_g_max_for_cadence2) |
(tic.gaiarp < gaia_rp_max_for_cadence2), cadence2),
((tic.gaiamag >= gaia_g_max_for_cadence2) &
(tic.gaiarp >= gaia_rp_max_for_cadence2), cadence3),
),
cadence4)
# compute transformed SDSS mags
# transform the Gaia dr2 G,BP,RP into sdss psfmag griz
# direct copy of method for bhm_spiders_agn_gaiadr2
coeffs = {
"g2_red": 0.081178,
"g1_red": 0.355677,
"g0_red": 0.510306,
"i2_red": 0.048864,
"i1_red": -0.287475,
"i0_red": -0.336712,
"r2_red": 0.028080,
"r1_red": 0.542331,
"r0_red": -1.055168,
"z2_red": -0.131385,
"z1_red": 0.302555,
"z0_red": -1.381648,
"g3_blue": 0.639054,
"g2_blue": -1.739187,
"g1_blue": 1.420330,
"g0_blue": -0.194071,
"i3_blue": 0.780585,
"i2_blue": -2.549848,
"i1_blue": 1.489880,
"i0_blue": -0.241381,
"r3_blue": 0.575494,
"r2_blue": -2.077000,
"r1_blue": 1.573302,
"r0_blue": -0.295026,
"z3_blue": 1.064986,
"z2_blue": -3.162969,
"z1_blue": 1.493750,
"z0_blue": -0.199582,
}
bp_rp = tic.gaiabp - tic.gaiarp
g_blue = (tic.gaiamag + coeffs['g0_blue'] + coeffs['g1_blue'] * bp_rp +
coeffs['g2_blue'] * bp_rp * bp_rp +
coeffs['g3_blue'] * bp_rp * bp_rp * bp_rp)
r_blue = (tic.gaiamag + coeffs['r0_blue'] + coeffs['r1_blue'] * bp_rp +
coeffs['r2_blue'] * bp_rp * bp_rp +
coeffs['r3_blue'] * bp_rp * bp_rp * bp_rp)
i_blue = (tic.gaiamag + coeffs['i0_blue'] + coeffs['i1_blue'] * bp_rp +
coeffs['i2_blue'] * bp_rp * bp_rp +
coeffs['i3_blue'] * bp_rp * bp_rp * bp_rp)
z_blue = (tic.gaiamag + coeffs['z0_blue'] + coeffs['z1_blue'] * bp_rp +
coeffs['z2_blue'] * bp_rp * bp_rp +
coeffs['z3_blue'] * bp_rp * bp_rp * bp_rp)
g_red = (tic.gaiamag + coeffs['g0_red'] + coeffs['g1_red'] * bp_rp +
coeffs['g2_red'] * bp_rp * bp_rp)
r_red = (tic.gaiamag + coeffs['r0_red'] + coeffs['r1_red'] * bp_rp +
coeffs['r2_red'] * bp_rp * bp_rp)
i_red = (tic.gaiamag + coeffs['i0_red'] + coeffs['i1_red'] * bp_rp +
coeffs['i2_red'] * bp_rp * bp_rp)
z_red = (tic.gaiamag + coeffs['z0_red'] + coeffs['z1_red'] * bp_rp +
coeffs['z2_red'] * bp_rp * bp_rp)
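# Sanity-check sketch using the coeffs dict above: for a source with bp_rp = 1.0 the
# blue-branch g correction is g0_blue + g1_blue + g2_blue + g3_blue
# = -0.194071 + 1.420330 - 1.739187 + 0.639054 ~= +0.126 mag relative to Gaia G.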
# validity checks - set limits semi-manually
bp_rp_min = 0.0
bp_rp_max = 3.0
valid = (tic.gaiamag.between(0.1, 29.9) &
tic.gaiabp.between(0.1, 29.9) &
tic.gaiarp.between(0.1, 29.9) &
bp_rp.between(bp_rp_min, bp_rp_max))
opt_prov = peewee.Case(None, ((valid, 'sdss_psfmag_from_gdr2'),), 'undefined')
magnitude_g = peewee.Case(None,
(
(valid & (bp_rp < 1.8), g_blue),
(valid & (bp_rp > 1.8), g_red),
), 'NaN')
magnitude_r = peewee.Case(None,
(
(valid & (bp_rp < 1.8), r_blue),
(valid & (bp_rp > 1.8), r_red),
), 'NaN')
magnitude_i = peewee.Case(None,
(
(valid & (bp_rp < 1.8), i_blue),
(valid & (bp_rp > 1.8), i_red),
), 'NaN')
magnitude_z = peewee.Case(None,
(
(valid & (bp_rp < 1.8), z_blue),
(valid & (bp_rp > 1.8), z_red),
), 'NaN')
query = (
c.select(
fn.min(c.catalogid).alias('catalogid'),
fn.min(tic.gaia_int).alias('gaia_source'), # extra
fn.min(x.ero_detuid).alias('ero_detuid'), # extra
fn.min(c.ra).alias('ra'), # extra
fn.min(c.dec).alias('dec'), # extra
priority.alias("priority"),
fn.min(value).alias('value'),
fn.min(cadence).alias('cadence'),
fn.min(instrument).alias('instrument'),
fn.min(opt_prov).alias('optical_prov'),
fn.min(magnitude_g).alias('g'),
fn.min(magnitude_r).alias('r'),
fn.min(magnitude_i).alias('i'),
fn.min(magnitude_z).alias('z'),
fn.min(tic.gaiamag).alias('gaia_g'),
fn.min(tic.gaiabp).alias('bp'),
fn.min(tic.gaiarp).alias('rp'),
)
.join(c2tic)
.where(
c.version_id == version_id,
c2tic.version_id == version_id,
c2tic.best >> True,
)
.join(tic)
.join(x, on=(tic.gaia_int == x.gaia_dr2_id))
.switch(c)
.where(
x.ero_version == self.parameters['ero_version'],
x.xmatch_method == self.parameters['xmatch_method'],
(
(x.xmatch_version == self.parameters['xmatch_version1']) |
(x.xmatch_version == self.parameters['xmatch_version2'])
),
x.opt_cat == self.parameters['opt_cat'],
x.xmatch_metric >= self.parameters['p_any_min'],
tic.gaiamag > self.parameters['gaia_g_mag_limit'],
tic.gaiarp > self.parameters['gaia_rp_mag_limit'],
x.ero_det_like > self.parameters['det_like_min'],
)
.group_by(tic.gaia_int) # avoid duplicates - we trust the gaia ids
)
if query_region:
query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
query_region[0],
query_region[1],
query_region[2]))
return query
#
# END BhmSpidersAgnSepCarton
# ##################################################################################
class BhmSpidersAgnPs1dr2Carton(BaseCarton):
name = 'bhm_spiders_agn_ps1dr2'
category = 'science'
mapper = 'BHM'
program = 'bhm_spiders'
tile = False
instrument = 'BOSS'
def
# repository: andriyor/featuretools
import logging
import warnings
from collections import defaultdict
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools import primitives
from featuretools.entityset.entityset import LTI_COLUMN_NAME
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
GroupByTransformFeature,
IdentityFeature,
TransformFeature
)
from featuretools.feature_base.utils import is_valid_input
from featuretools.primitives.base import (
AggregationPrimitive,
PrimitiveBase,
TransformPrimitive
)
from featuretools.primitives.options_utils import (
filter_groupby_matches_by_options,
filter_matches_by_options,
generate_all_primitive_options,
ignore_dataframe_for_primitive
)
from featuretools.utils.gen_utils import Library, camel_and_title_to_snake
logger = logging.getLogger('featuretools')
class DeepFeatureSynthesis(object):
"""Automatically produce features for a target dataframe in an Entityset.
Args:
target_dataframe_name (str): Name of dataframe for which to build features.
entityset (EntitySet): Entityset for which to build features.
agg_primitives (list[str or :class:`.primitives.AggregationPrimitive`], optional):
list of Aggregation Feature types to apply.
Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]
trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to use.
Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]
where_primitives (list[str or :class:`.primitives.PrimitiveBase`], optional):
only add where clauses to these types of Primitives
Default:
["count"]
groupby_trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to make GroupByTransformFeatures with
max_depth (int, optional) : maximum allowed depth of features.
Default: 2. If -1, no limit.
max_features (int, optional) : Cap the number of generated features to
this number. If -1, no limit.
allowed_paths (list[list[str]], optional): Allowed dataframe paths to make
features for. If None, use all paths.
ignore_dataframes (list[str], optional): List of dataframes to
blacklist when creating features. If None, use all dataframes.
ignore_columns (dict[str -> list[str]], optional): List of specific
columns within each dataframe to blacklist when creating features.
If None, use all columns.
seed_features (list[:class:`.FeatureBase`], optional): List of manually
defined features to use.
drop_contains (list[str], optional): Drop features
that contains these strings in name.
drop_exact (list[str], optional): Drop features that
exactly match these strings in name.
where_stacking_limit (int, optional): Cap the depth of the where features.
Default: 1
primitive_options (dict[str or tuple[str] or PrimitiveBase -> dict or list[dict]], optional):
Specify options for a single primitive or a group of primitives.
Lists of option dicts are used to specify options per input for primitives
with multiple inputs. Each option ``dict`` can have the following keys:
``"include_dataframes"``
List of dataframes to be included when creating features for
the primitive(s). All other dataframes will be ignored
(list[str]).
``"ignore_dataframes"``
List of dataframes to be blacklisted when creating features
for the primitive(s) (list[str]).
``"include_columns"``
List of specific columns within each dataframe to include when
creating features for the primitive(s). All other columns
in a given dataframe will be ignored (dict[str -> list[str]]).
``"ignore_columns"``
List of specific columns within each dataframe to blacklist
when creating features for the primitive(s) (dict[str ->
list[str]]).
``"include_groupby_dataframes"``
List of dataframes to be included when finding groupbys. All
other dataframes will be ignored (list[str]).
``"ignore_groupby_dataframes"``
List of dataframes to blacklist when finding groupbys
(list[str]).
``"include_groupby_columns"``
List of specific columns within each dataframe to include as
groupbys, if applicable. All other columns in each
dataframe will be ignored (dict[str -> list[str]]).
``"ignore_groupby_columns"``
List of specific columns within each dataframe to blacklist
as groupbys (dict[str -> list[str]]).
"""
def __init__(self,
target_dataframe_name,
entityset,
agg_primitives=None,
trans_primitives=None,
where_primitives=None,
groupby_trans_primitives=None,
max_depth=2,
max_features=-1,
allowed_paths=None,
ignore_dataframes=None,
ignore_columns=None,
primitive_options=None,
seed_features=None,
drop_contains=None,
drop_exact=None,
where_stacking_limit=1):
if target_dataframe_name not in entityset.dataframe_dict:
es_name = entityset.id or 'entity set'
msg = 'Provided target dataframe %s does not exist in %s' % (target_dataframe_name, es_name)
raise KeyError(msg)
# need to change max_depth to None because DFS terminates when < 0
if max_depth == -1:
max_depth = None
# if just one dataframe, set max depth to 1 (transform stacking rule)
if len(entityset.dataframe_dict) == 1 and (max_depth is None or max_depth > 1):
warnings.warn("Only one dataframe in entityset, changing max_depth to "
"1 since deeper features cannot be created")
max_depth = 1
self.max_depth = max_depth
self.max_features = max_features
self.allowed_paths = allowed_paths
if self.allowed_paths:
self.allowed_paths = set()
for path in allowed_paths:
self.allowed_paths.add(tuple(path))
if ignore_dataframes is None:
self.ignore_dataframes = set()
else:
if not isinstance(ignore_dataframes, list):
raise TypeError('ignore_dataframes must be a list')
assert target_dataframe_name not in ignore_dataframes,\
"Can't ignore target_dataframe!"
self.ignore_dataframes = set(ignore_dataframes)
self.ignore_columns = defaultdict(set)
if ignore_columns is not None:
# check if ignore_columns is not {str: list}
if not all(isinstance(i, str) for i in ignore_columns.keys()) or not all(isinstance(i, list) for i in ignore_columns.values()):
raise TypeError('ignore_columns should be dict[str -> list]')
# check if list values are all of type str
elif not all(all(isinstance(v, str) for v in value) for value in ignore_columns.values()):
raise TypeError('list values should be of type str')
for df_name, cols in ignore_columns.items():
self.ignore_columns[df_name] = set(cols)
self.target_dataframe_name = target_dataframe_name
self.es = entityset
for library in Library:
if library.value == self.es.dataframe_type:
df_library = library
break
if agg_primitives is None:
agg_primitives = [p for p in primitives.get_default_aggregation_primitives() if df_library in p.compatibility]
self.agg_primitives = []
self.agg_primitives = sorted([check_primitive(p, "aggregation") for p in agg_primitives])
if trans_primitives is None:
trans_primitives = [p for p in primitives.get_default_transform_primitives() if df_library in p.compatibility]
self.trans_primitives = sorted([check_primitive(p, "transform") for p in trans_primitives])
if where_primitives is None:
where_primitives = [primitives.Count]
self.where_primitives = sorted([check_primitive(p, "where") for p in where_primitives])
if groupby_trans_primitives is None:
groupby_trans_primitives = []
self.groupby_trans_primitives = sorted([check_primitive(p, "groupby transform") for p in groupby_trans_primitives])
if primitive_options is None:
primitive_options = {}
all_primitives = self.trans_primitives + self.agg_primitives + \
self.where_primitives + self.groupby_trans_primitives
bad_primitives = [prim.name for prim in all_primitives if df_library not in prim.compatibility]
if bad_primitives:
msg = 'Selected primitives are incompatible with {} EntitySets: {}'
raise ValueError(msg.format(df_library.value, ', '.join(bad_primitives)))
self.primitive_options, self.ignore_dataframes, self.ignore_columns =\
generate_all_primitive_options(all_primitives,
primitive_options,
self.ignore_dataframes,
self.ignore_columns,
self.es)
self.seed_features = sorted(seed_features or [], key=lambda f: f.unique_name())
self.drop_exact = drop_exact or []
self.drop_contains = drop_contains or []
self.where_stacking_limit = where_stacking_limit
def build_features(self, return_types=None, verbose=False):
"""Automatically builds feature definitions for target
dataframe using Deep Feature Synthesis algorithm
Args:
return_types (list[woodwork.ColumnSchema] or str, optional):
List of ColumnSchemas defining the types of
columns to return. If None, defaults to returning all
numeric, categorical and boolean types. If given as
the string 'all', use all available return types.
verbose (bool, optional): If True, print progress.
Returns:
list[BaseFeature]: Returns a list of
features for target dataframe, sorted by feature depth
(shallow first).
"""
all_features = {}
self.where_clauses = defaultdict(set)
if return_types is None:
return_types = [ColumnSchema(semantic_tags=['numeric']),
ColumnSchema(semantic_tags=['category']),
ColumnSchema(logical_type=Boolean),
ColumnSchema(logical_type=BooleanNullable)]
elif return_types == 'all':
pass
else:
msg = "return_types must be a list, or 'all'"
assert isinstance(return_types, list), msg
self._run_dfs(self.es[self.target_dataframe_name], RelationshipPath([]),
all_features, max_depth=self.max_depth)
new_features = list(all_features[self.target_dataframe_name].values())
def filt(f):
# remove identity features of the ID field of the target dataframe
if (isinstance(f, IdentityFeature) and
f.dataframe_name == self.target_dataframe_name and
f.column_name == self.es[self.target_dataframe_name].ww.index):
return False
return True
# filter out features with undesired return types
if return_types != 'all':
new_features = [
f for f in new_features
if any(is_valid_input(f.column_schema, schema) for schema in return_types)]
new_features = list(filter(filt, new_features))
new_features.sort(key=lambda f: f.get_depth())
new_features = self._filter_features(new_features)
if self.max_features > 0:
new_features = new_features[:self.max_features]
if verbose:
print("Built {} features".format(len(new_features)))
verbose = None
return new_features
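# Usage sketch (dataframe and primitive names are illustrative; assumes `es` is an existing
# EntitySet containing a "customers" dataframe):
#
#   dfs = DeepFeatureSynthesis("customers", es,
#                              agg_primitives=["count", "mean"],
#                              trans_primitives=["month"],
#                              max_depth=2)
#   features = dfs.build_features(verbose=True)
#
# The returned feature definitions are sorted shallow-first and can be computed with
# featuretools.calculate_feature_matrix.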
def _filter_features(self, features):
assert isinstance(self.drop_exact, list), "drop_exact must be a list"
assert isinstance(self.drop_contains,
list), "drop_contains must be a list"
f_keep = []
for f in features:
keep = True
for contains in self.drop_contains:
if contains in f.get_name():
keep = False
break
if f.get_name() in self.drop_exact:
keep = False
if keep:
f_keep.append(f)
return f_keep
def _run_dfs(self, dataframe, relationship_path, all_features, max_depth):
"""
Create features for the provided dataframe
Args:
dataframe (DataFrame): Dataframe for which to create features.
relationship_path (RelationshipPath): The path to this dataframe.
all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
Dict containing a dict for each dataframe. Each nested dict
has features as values with their ids as keys.
max_depth (int) : Maximum allowed depth of features.
"""
if max_depth is not None and max_depth < 0:
return
all_features[dataframe.ww.name] = {}
"""
Step 1 - Create identity features
"""
self._add_identity_features(all_features, dataframe)
"""
Step 2 - Recursively build features for each dataframe in a backward relationship
"""
backward_dataframes = self.es.get_backward_dataframes(dataframe.ww.name)
for b_dataframe_id, sub_relationship_path in backward_dataframes:
# Skip if we've already created features for this dataframe.
if b_dataframe_id in all_features:
continue
if b_dataframe_id in self.ignore_dataframes:
continue
new_path = relationship_path + sub_relationship_path
if self.allowed_paths and | |
import tkinter as tk
from tkinter import messagebox
import re
import math
import os
from time import strftime, localtime
import codecs  # log output for Windows
# ------------------ log function ---------------------
def printLog(strLogMsg):
print(strLogMsg)
fileLog = codecs.open("./cal_v3.log", 'a', "utf-8")
fileLog.write("[%s]%s\n" % (getDateTimeFormat(), strLogMsg))
fileLog.close()
def getDateTimeFormat():
strDateTime = "%s" % (strftime("%Y/%m/%d %H:%M:%S", localtime()))
return strDateTime
class Calculator():
def __init__(self):
printLog("[I][__init__] Iniiating the Calculator")
self.window = tk.Tk()
self.window.title("Calculator")
self.window.geometry("800x400") # set window size
self.window.resizable(0, 0) # set window fixed
        # allow grid columns and rows to expand with the window; grid size: 5x6
for i in range(5):
self.window.columnconfigure(i, weight=1)
for i in range(6):
self.window.rowconfigure(i, weight=1)
        # after binding a StringVar to a Tkinter widget, modifying the StringVar makes Tkinter update that widget automatically
self.strEqua = tk.StringVar()
        # store the expression string, then set it into strEqua
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
        # use an Entry widget to display the calculated value
        self.entResult = tk.Entry(self.window, textvariable=self.strEqua, state=tk.DISABLED, justify="right")  # state=tk.DISABLED blocks user input; justify="right" aligns the text to the right
self.entResult.config(disabledbackground=self.window["bg"], font=12) # set disabledbackground colour
self.entResult.grid(row = 0, column = 0, columnspan=5, ipadx=70, sticky=tk.NW+tk.SE)
# -------- setup buttons of number ---------
self.btnZero = tk.Button(self.window, width=20, text="0", font=12, command=lambda:self.pressNum("0"))
self.btnZero.grid(row=5, column=0, columnspan=2, sticky=tk.NW+tk.SE)
self.btnOne = tk.Button(self.window, width=20, text="1", font=12, command=lambda:self.pressNum("1"))
self.btnOne.grid(row=4, column=0, sticky=tk.NW+tk.SE)
self.btnTwo = tk.Button(self.window, width=20, text="2", font=12, command=lambda:self.pressNum("2"))
self.btnTwo.grid(row=4, column=1, sticky=tk.NW+tk.SE)
self.btnThree = tk.Button(self.window, width=20, text="3", font=12, command=lambda:self.pressNum("3"))
self.btnThree.grid(row=4, column=2, sticky=tk.NW+tk.SE)
self.btnFour = tk.Button(self.window, width=20, text="4", font=12, command=lambda:self.pressNum("4"))
self.btnFour.grid(row=3, column=0, sticky=tk.NW+tk.SE)
self.btnFive = tk.Button(self.window, width=20, text="5", font=12, command=lambda:self.pressNum("5"))
self.btnFive.grid(row=3, column=1, sticky=tk.NW+tk.SE)
self.btnSix = tk.Button(self.window, width=20, text="6", font=12, command=lambda:self.pressNum("6"))
self.btnSix.grid(row=3, column=2, sticky=tk.NW+tk.SE)
self.btnSeven = tk.Button(self.window, width=20, text="7", font=12, command=lambda:self.pressNum("7"))
self.btnSeven.grid(row=2, column=0, sticky=tk.NW+tk.SE)
self.btnEight = tk.Button(self.window, width=20, text="8", font=12, command=lambda:self.pressNum("8"))
self.btnEight.grid(row=2, column=1, sticky=tk.NW+tk.SE)
self.btnNine = tk.Button(self.window, width=20, text="9", font=12, command=lambda:self.pressNum("9"))
self.btnNine.grid(row=2, column=2, sticky=tk.NW+tk.SE)
        # -------- setup arithmetic operator buttons ---------
self.btnAdd = tk.Button(self.window, width=20, text="+", font=12, command=lambda:self.pressArithm("+"))
self.btnAdd.grid(row=5, column=3, sticky=tk.NW+tk.SE)
self.btnSub = tk.Button(self.window, width=20, text="-", font=12, command=lambda:self.pressArithm("-"))
self.btnSub.grid(row=4, column=3, sticky=tk.NW+tk.SE)
self.btnMult = tk.Button(self.window, width=20, text="*", font=12, command=lambda:self.pressArithm("*"))
self.btnMult.grid(row=3, column=3, sticky=tk.NW+tk.SE)
self.btnDiv = tk.Button(self.window, width=20, text="/", font=12, command=lambda:self.pressArithm("/"))
self.btnDiv.grid(row=2, column=3, sticky=tk.NW+tk.SE)
self.btnMod = tk.Button(self.window, width=20, text="%", font=12, command=lambda:self.pressArithm(
"%"))
self.btnMod.grid(row=1, column=3, sticky=tk.NW+tk.SE)
# ------- setup special operation buttons ---------
self.btnRoot = tk.Button(self.window, width=20, text="\u221A", font=12, command=lambda:self.pressRoot())
self.btnRoot.grid(row=1, column=4, sticky=tk.NW+tk.SE)
self.btnSquare = tk.Button(self.window, width=20, text="x\u00B2", font=12, command=lambda:self.pressSquare())
self.btnSquare.grid(row=2, column=4, sticky=tk.NW+tk.SE)
self.btnCube = tk.Button(self.window, width=20, text="x\u00B3", font=12, command=lambda:self.pressCube())
self.btnCube.grid(row=3, column=4, sticky=tk.NW+tk.SE)
self.btnFact = tk.Button(self.window, width=20, text="n!", font=12, command=lambda:self.pressFact())
self.btnFact.grid(row=4, column=4, sticky=tk.NW+tk.SE)
# -------- setup buttons of other operations ---------
self.btnEqu = tk.Button(self.window, width=20, text="=", font=12, command=lambda:self.pressEqu(""))
self.btnEqu.grid(row=5, column=4, sticky=tk.NW+tk.SE)
self.btnDec = tk.Button(self.window, width=20, text=".", font=12, command=lambda:self.pressDec())
self.btnDec.grid(row=5, column=2, sticky=tk.NW+tk.SE)
self.btnClear = tk.Button(self.window, width=20, text="AC", font=12, command=lambda:self.pressClear())
self.btnClear.grid(row=1, column=0, sticky=tk.NW+tk.SE)
self.btnMinus = tk.Button(self.window, width=20, text="+/-", font=12, command=lambda:self.pressMinus())
self.btnMinus.grid(row=1, column=2, sticky=tk.NW+tk.SE)
self.btnErase = tk.Button(self.window, width=20, text="\u232B", font=12, command=lambda:self.pressErase())
self.btnErase.grid(row=1, column=1, sticky=tk.NW+tk.SE)
# ------------------ method of button events -------------------------
# handling the button events of numbers
def pressNum(self, strNum):
printLog("[I][pressNum] The button %s has been pressed" % strNum)
        # if the expression has been evaluated, reset the expression to strNum
if self.bEvaluated:
self.strExpr = strNum
self.strEqua.set(self.strExpr)
self.bEvaluated = False
return
# if the expression is single digit
if len(self.strExpr) < 2:
# if the expression is 0, simply change it to strNum
if self.strExpr == "0":
self.strExpr = strNum
            # otherwise, concatenate the expression and strNum
else:
self.strExpr += strNum
# if the length of expression >= 2
else:
            # make sure there is no expression like 3+02; it should become 3+2
if self.hasOp(self.strExpr[-2]) and self.strExpr[-1] == "0":
self.strExpr = self.strExpr[:-1] + strNum
            # otherwise, concatenate the expression and the pressed button value
else:
self.strExpr += strNum
self.strEqua.set(self.strExpr)
    # handling the arithmetic operator buttons
def pressArithm(self, strOp):
printLog("[I][pressArithm] The button %s has been pressed" % strOp)
# if the last char is op or ".", repalace with strOp
if self.hasOp(self.strExpr[-1]) or self.strExpr[-1] == ".":
self.strExpr = self.strExpr[:-1] + strOp
# if the op is in the expression and not in the last pos, do calculation
elif self.hasOp(self.strExpr):
self.pressEqu("pressArithm")
self.strExpr += strOp
        # otherwise, concatenate the expression and the arithmetic operator
else:
self.strExpr += strOp
self.strEqua.set(self.strExpr)
        # after this action the expression ends with an operator,
        # so an evaluation will still be needed afterwards
self.bEvaluated = False
def pressRoot(self):
printLog("[I][pressRoot] The button \u221A has been pressed")
try:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
            # split the expression by operators, e.g. 123+4 becomes [123, 4], then
            # take the last number (4) and calculate
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(math.sqrt(eval(strLast)))
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except OverflowError as e:
printLog("[W][pressRoot] The \u221A operation will go overflow")
messagebox.showinfo("Error", e)
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressRoot] Unexpected Error: " + e)
def pressSquare(self):
printLog("[I][pressSquare] The button x\u00B2 has been pressed")
if self.isTooLarge():
printLog("[W][pressSquare] The number is out of limit")
messagebox.showinfo("Warning", "Inf")
self.strExpr = "0"
else:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(eval(strLast)**2)
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
def pressCube(self):
printLog("[I][pressCube] The button x\u00B3 has been pressed")
if self.isTooLarge():
printLog("[W][pressCube] The number is out of limit")
messagebox.showinfo("Warning", "Inf")
self.strExpr = "0"
else:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(eval(strLast)**3)
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
def pressFact(self):
printLog("[I][pressFact] The button n! has been pressed")
try:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
# if the value > 100,000, return to default value
if eval(strLast) > 1E5:
printLog("[W][pressFact] The factorial number is out of limit")
messagebox.showinfo("Error", "The factorial number is out of limit")
self.strExpr = "0"
else:
strVal = str(math.factorial(eval(strLast)))
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except ValueError as e:
printLog("[W][pressFact] The factorial number is out of limit")
messagebox.showinfo("Error", e)
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressFact] Unexpected Error: " + e)
#print(e)
def pressEqu(self, strCaller):
# check caller, "" for user press; ohterwise for called by function
if strCaller == "":
printLog("[I][pressEqu] The button = has been pressed")
else:
printLog("[I][pressEqu] PressEqu has been called by %s" % strCaller)
try:
# evaluate the expression
self.strExpr = str(eval(self.strExpr))
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except ZeroDivisionError:
printLog("[W][pressEqu] Action involves zero division")
messagebox.showinfo("Error", "Can not divide by zero") # tkinter.messagebox
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
# deal with invalid expression such as 8*(*(*(, then return default value
except SyntaxError:
printLog("[W][pressEqu] The expression is incomplete")
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressEqu] Unexpected Error: " + e)
#print("Unexpected Error: " + e)
def pressDec(self):
printLog("[I][pressDec] The button . has been pressed")
        # if the expression has been evaluated, reset the expression to 0
if self.bEvaluated:
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
return
# if the last char is operator
if self.hasOp(self.strExpr[-1]):
# if there is already "." in expression, replace op with nothing
if "." in self.strExpr:
self.strExpr = self.strExpr[:-1]
            # otherwise, replace the operator with "."
else:
self.strExpr = self.strExpr[:-1] + "."
        # allow two floating point numbers in the expression, e.g. 3.2 + 6.4
        # if there is already a "." in the last operand after splitting by operators, do nothing
elif "." in re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]:
return
        # otherwise, add a decimal point to the expression
else:
self.strExpr = self.strExpr + "."
self.strEqua.set(self.strExpr)
def pressClear(self):
printLog("[I][pressClaer] The button AC has been pressed")
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
def pressMinus(self):
printLog("[I][pressMinus] The button +/- has been pressed")
if self.strExpr[0] == "-":
self.strExpr = self.strExpr[1:]
else:
self.strExpr = "-" + self.strExpr[0:]
self.strEqua.set(self.strExpr)
def pressErase(self):
printLog("[I][pressErase] The button \u232B has been pressed")
        # if the expression is a single character, reset it to the default value 0
if len(self.strExpr) < 2:
self.strExpr = "0"
else:
self.strExpr = self.strExpr[:-1]
self.strEqua.set(self.strExpr)
# ------------------ end of method of button events -----------------------
def isTooLarge(self):
return True if len(self.strExpr) > 200 else | |
= value
value_columns = reference_curve.options["value_column_names"]
uncertainty_columns = reference_curve.options["uncertainty_column_names"]
number_plots = len(value_columns)
number_columns = int(plot_options["plots_per_column"])
number_rows = int(round(float(number_plots) / float(number_columns)))
fig, reference_axes = plt.subplots(nrows=number_rows, ncols=number_columns,
sharex=plot_options["share_x"],
figsize=plot_options["plot_size"],
dpi=plot_options["dpi"])
x_data = reference_curve[plot_options["independent_axis_column_name"]]
for axes_index, ax in enumerate(reference_axes.flat):
y_data = np.array(reference_curve[value_columns[axes_index]])
error = np.array(reference_curve[uncertainty_columns[axes_index]])
ax.plot(x_data, y_data, plot_options["plot_format"])
ax.fill_between(x_data, y_data - error, y_data + error,
color=plot_options["fill_color"],
alpha=plot_options["fill_opacity"],
edgecolor=plot_options["fill_edge_color"])
ax.set_title(value_columns[axes_index])
plt.tight_layout()
# Dealing with the save option
if plot_options["file_name"] is None:
file_name = auto_name(specific_descriptor=plot_options["specific_descriptor"],
general_descriptor=plot_options["general_descriptor"],
directory=plot_options["directory"], extension='png', padding=3)
else:
file_name = plot_options["file_name"]
if plot_options["save_plot"]:
# print file_name
plt.savefig(os.path.join(plot_options["directory"], file_name))
else:
plt.show()
return fig
def plot_reference_curve_comparison(reference_curve_list, **options):
"""Plots a list of frequency based reference curves
by using the options value_column_names and uncertainty_column_names.
Options """
defaults = {"display_legend": False,
"save_plot": False,
"directory": os.getcwd(),
"specific_descriptor": "Reference_Curve",
"general_descriptor": "Plot",
"file_name": None,
"plots_per_column": 2,
"plot_format": '-',
"fill_color": 'k',
"fill_opacity": .25,
"fill_edge_color": 'k',
"plot_size": (8, 10),
"dpi": 80,
"independent_axis_column_name": "Frequency",
"share_x": "col",
"labels":None}
plot_options = {}
for key, value in defaults.items():
plot_options[key] = value
for key, value in options.items():
plot_options[key] = value
if plot_options["labels"]:
labels=plot_options["labels"]
else:
labels=[x.path for x in reference_curve_list]
value_columns = reference_curve_list[0].options["value_column_names"]
uncertainty_columns = reference_curve_list[0].options["uncertainty_column_names"]
number_plots = len(value_columns)
number_columns = int(plot_options["plots_per_column"])
number_rows = int(round(float(number_plots) / float(number_columns)))
fig, reference_axes = plt.subplots(nrows=number_rows, ncols=number_columns,
sharex=plot_options["share_x"],
figsize=plot_options["plot_size"],
dpi=plot_options["dpi"])
for index,reference_curve in enumerate(reference_curve_list[:]):
value_columns = reference_curve.options["value_column_names"]
uncertainty_columns = reference_curve.options["uncertainty_column_names"]
x_data = reference_curve[plot_options["independent_axis_column_name"]]
for axes_index, ax in enumerate(reference_axes.flat):
y_data = np.array(reference_curve[value_columns[axes_index]])
error = np.array(reference_curve[uncertainty_columns[axes_index]])
ax.plot(x_data, y_data, plot_options["plot_format"],label=labels[index])
ax.fill_between(x_data, y_data - error, y_data + error,
color=plot_options["fill_color"],
alpha=plot_options["fill_opacity"],
edgecolor=plot_options["fill_edge_color"])
ax.set_title(value_columns[axes_index])
plt.tight_layout()
if plot_options["display_legend"]:
plt.legend()
# Dealing with the save option
if plot_options["file_name"] is None:
file_name = auto_name(specific_descriptor=plot_options["specific_descriptor"],
general_descriptor=plot_options["general_descriptor"],
directory=plot_options["directory"], extension='png', padding=3)
else:
file_name = plot_options["file_name"]
if plot_options["save_plot"]:
# print file_name
plt.savefig(os.path.join(plot_options["directory"], file_name))
else:
plt.show()
return fig
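# A hedged usage sketch: `curve_a` and `curve_b` are placeholders for reference-curve
# objects that expose .options["value_column_names"], .options["uncertainty_column_names"]
# and item access by column name, as the function above assumes.
# >>> fig = plot_reference_curve_comparison([curve_a, curve_b],
# ...                                        labels=["run 1", "run 2"],
# ...                                        display_legend=True,
# ...                                        save_plot=False)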
def calrep(raw_model,**options):
""" Performs the calrep analysis routine on a raw data format (such as OnePortRawModel, TwoPortRawModel,PowerRawModel)
Differs from the HP BASIC program in that it keeps the metadata Needs to be checked, returns 4 error terms for power
Also does not calculate all the same rows for power, expansion factor is set to 2, requires that the raw model
has the attribute raw_model.metadata["Connector_Type_Measurement"] defined. If the columns passed in raw_model
do not have repeat values or contain text the result will set connect uncertainty to zero"""
try:
mean_file=frequency_model_collapse_multiple_measurements(raw_model)
except:
mean_file=raw_model
try:
standard_deviation_file=frequency_model_collapse_multiple_measurements(raw_model,method="std")
except:
std_data=[]
for row in mean_file.data:
new_row=[]
for column in mean_file.data[0]:
new_row.append(0)
std_data.append(new_row)
standard_deviation_file=AsciiDataTable(None,column_names=raw_model.column_names,
data=std_data,column_types=raw_model.options["column_types"])
if "Direction" in mean_file.column_names and "Connect" in mean_file.column_names:
mean_file.remove_column("Direction")
mean_file.remove_column("Connect")
if "Direction" in standard_deviation_file.column_names and "Connect" in standard_deviation_file.column_names:
standard_deviation_file.remove_column("Direction")
standard_deviation_file.remove_column("Connect")
new_data=[]
new_column_names=[]
expansion_factor=2
frequency_index=mean_file.column_names.index("Frequency")
for row_index,row in enumerate(mean_file.data[:]):
new_data_row=[]
for column_index,column_name in enumerate(mean_file.column_names[:]):
if re.search("frequency",column_name,re.IGNORECASE):
if row_index==0:
new_column_names.append("Frequency")
new_data_row.append(row[column_index])
else:
if re.search("mag",column_name,re.IGNORECASE):
error_selector=0
error_letter="M"
error_parameter=column_name.replace("mag","")
elif re.search("arg|phase",column_name,re.IGNORECASE):
error_selector=1
error_letter="A"
error_parameter=column_name.replace("arg","")
elif re.search("Eff",column_name,re.IGNORECASE):
error_selector=0
error_letter="E"
error_parameter=""
else:
error_selector=0
if row_index==0:
# If this is the first row build the column names list
new_column_names.append(column_name)
new_column_names.append("u"+error_letter+"b"+error_parameter)
new_column_names.append("u"+error_letter+"a"+error_parameter)
new_column_names.append("u"+error_letter+"d"+error_parameter)
new_column_names.append("u"+error_letter+"g"+error_parameter)
# Mean Value
new_data_row.append(row[column_index])
# Type B
ub=type_b(wr_connector_type=mean_file.metadata["Connector_Type_Measurement"],
frequency=row[frequency_index],parameter=column_name,magnitude=row[column_index],format="mag")
#print("{0} is {1}".format("ub",ub))
new_data_row.append(ub[error_selector])
# Type A or SNIST
ua=S_NIST(wr_connector_type=mean_file.metadata["Connector_Type_Measurement"],
frequency=row[frequency_index],parameter=column_name,magnitude=row[column_index],format="mag")
new_data_row.append(ua[error_selector])
# Standard Deviation
ud=standard_deviation_file.data[row_index][column_index]
new_data_row.append(ud)
# Total Uncertainty
#print(" ua is {0}, ub is {1} and ud is {2}".format(ua,ub,ud))
total_uncertainty=expansion_factor*math.sqrt(ua[error_selector]**2+ub[error_selector]**2+ud**2)
new_data_row.append(total_uncertainty)
new_data.append(new_data_row)
sorted_keys=sorted(mean_file.metadata.keys())
header=["{0} = {1}".format(key,mean_file.metadata[key]) for key in sorted_keys]
column_types=["float" for column in new_column_names]
#todo: Add value_column_names and uncertainty_column_names to conform to reference curve
calrep=AsciiDataTable(None,data=new_data,column_types=column_types,
column_names=new_column_names,header=header,
metadata=mean_file.metadata)
return calrep
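# A hedged usage sketch: `raw` stands for any raw model instance (e.g. a TwoPortRawModel)
# whose metadata defines "Connector_Type_Measurement"; the file name is hypothetical.
# >>> raw = TwoPortRawModel("example_two_port_raw.txt")
# >>> result = calrep(raw)
# >>> result.column_names   # Frequency, the value columns, and uMbS11-style uncertainty columns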
def one_port_robin_comparison_plot(input_asc_file,input_res_file,**options):
"""one_port_robin_comparison_plot plots a one port.asc file against a given .res file,
use device_history=True in options to show device history"""
defaults={"device_history":False,"mag_res":False}
plot_options={}
for key,value in defaults.items():
plot_options[key]=value
for key,value in options.items():
plot_options[key]=value
history=np.loadtxt(input_res_file,skiprows=1)
column_names=["Frequency",'magS11','argS11','magS11N','argS11N','UmagS11N','UargS11N']
options={"data":history.tolist(),"column_names":column_names,"column_types":['float' for column in column_names]}
history_table=AsciiDataTable(None,**options)
table=OnePortCalrepModel(input_asc_file)
if plot_options["device_history"]:
history_frame=pandas.read_csv(ONE_PORT_DUT)
device_history=history_frame[history_frame["Device_Id"]==table.header[0].rstrip().lstrip()]
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.errorbar(history_table.get_column('Frequency'),history_table.get_column('magS11N'),fmt='k--',
yerr=history_table.get_column('UmagS11N'),label="History")
ax0.errorbar(table.get_column('Frequency'),table.get_column('magS11'),
yerr=table.get_column('uMg'),fmt='ro',label="Current Measurement",alpha=.3)
if plot_options["device_history"]:
ax0.errorbar(device_history['Frequency'].tolist(),device_history['magS11'].tolist(),fmt='bs',
yerr=device_history['uMg'].tolist(),label="From .asc", alpha=.5)
if plot_options["mag_res"]:
ax0.errorbar(history_table.get_column('Frequency'),history_table.get_column('mag'),fmt='gx',
yerr=history_table.get_column('UmagS11N'),label="From mag in res")
ax0.set_title('Magnitude S11')
ax1.errorbar(history_table.get_column('Frequency'),history_table.get_column('arg'),fmt='k--',
yerr=history_table.get_column('UargS11N'),label="history")
ax1.errorbar(table.get_column('Frequency'),table.get_column('arg'),
yerr=table.get_column('uAg'),fmt='ro',label="Current Measurement",alpha=.3)
if plot_options["device_history"]:
ax1.errorbar(device_history['Frequency'].tolist(),device_history['arg'].tolist(),fmt='bs',
yerr=device_history['uAg'].tolist(),label="From .asc", alpha=.5)
ax1.set_title('Phase S11')
ax0.legend(loc='lower left', shadow=True)
plt.show()
return fig
def two_port_swap_ports(complex_data):
"""Accepts data in [[frequency, S11, S21, S12, S22]..] format and returns
[[frequency, S22, S12, S21, S11]..]"""
out_data=[]
for row in complex_data:
[frequency, S11, S21, S12, S22]=row
new_row=[frequency, S22, S12, S21, S11]
out_data.append(new_row)
return out_data
def two_port_complex_to_matrix_form(complex_data):
"""two_port_complex_to_matrix_form takes a list of [[frequency,S11,S21,S12,S22],..] and
returns a list in the
form [[frequency,np.matrix([[S11,S12],[S21,S22]])]..], it is meant to prepare data for correction"""
out_list=[]
for row in complex_data:
frequency=row[0]
[S11,S21,S12,S22]=row[1:]
m=np.matrix([[S11,S12],[S21,S22]])
out_list.append([frequency,m])
#print out_list
return out_list
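# A small worked example of the conversion (values illustrative); note the input row
# order is [frequency, S11, S21, S12, S22] while the matrix is [[S11, S12], [S21, S22]]:
# >>> data = [[1e9, 0.1+0.2j, 0.9+0.0j, 0.8+0.0j, 0.05-0.1j]]
# >>> freq, m = two_port_complex_to_matrix_form(data)[0]
# >>> m[0, 1], m[1, 0]   # S12 = 0.8, S21 = 0.9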
def two_port_matrix_to_complex_form(matrix_form_data):
"""two_port_matrix_to_complex_form takes a list of [[frequency,np.matrix([[S11,S12],[S21,S22]])]..]
and returns a list in the
form [[frequency,S11,S21,S12,S22],..] , it is meant to undo two_port_complex_to_matrix_form"""
out_list=[]
for row in matrix_form_data:
frequency=row[0]
m=row[1]
[S11,S21,S12,S22]=[m[0,0],m[1,0],m[0,1],m[1,1]]
out_list.append([frequency,S11,S12,S21,S22])
return out_list
def invert_two_port_matrix_list(two_port_matrix_form):
"""invert_two_port_matrix_list inverts all elements in the list two_port_matrix_form,
which is in the format [[frequency,np.matrix([[S11,S12],[S21,S22]])]..] and returns a list
in [[frequency,inv(np.matrix([[S11,S12],[S21,S22]]))]..] format works on any list in the form [value, matrix]
"""
out_list=[]
for row in two_port_matrix_form:
frequency=row[0]
m=row[1]
m_inv=np.linalg.inv(m)
out_list.append([frequency,m_inv])
return out_list
def polar_average(complex_number_1,complex_number_2):
"""Averages 2 complex numbers in polar coordinates and returns a single complex number"""
polar_number_1=cmath.polar(complex_number_1)
polar_number_2=cmath.polar(complex_number_2)
average_length=(polar_number_1[0]+polar_number_2[0])/2.
average_phase=(polar_number_1[1]+polar_number_2[1])/2.
out_value=cmath.rect(average_length,average_phase)
return out_value
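# Worked example: averaging 1+1j and 1-1j in polar form keeps the magnitude sqrt(2)
# (both inputs have magnitude sqrt(2)) and averages the phases to 0, giving roughly
# 1.414+0j, whereas a rectangular average would give 1+0j.
# >>> polar_average(1+1j, 1-1j)
# (1.4142135623730951+0j)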
def polar_geometric_average(complex_number_1,complex_number_2):
"""Averages 2 complex numbers in polar coordinates and returns a single complex number"""
polar_number_1=cmath.polar(complex_number_1)
polar_number_2=cmath.polar(complex_number_2)
average_length=(polar_number_1[0]*polar_number_2[0])**.5
average_phase=(polar_number_1[1]+polar_number_2[1])/2
out_value=cmath.rect(average_length,average_phase-math.pi)
return out_value
def S_to_T(S_list):
"""Converts S-parameters into a T Matrix. Input form should be in frequency, np.matrix([[S11,S12],[S21,S22]])
format. Returns a list in [frequency, np.matrix] format """
t_complex_list=[]
t_matrix=[]
for row in S_list:
frequency=row[0]
m=row[1]
T11=-np.linalg.det(m)/m[1,0]
T12=m[0,0]/m[1,0]
T21=-m[1,1]/m[1,0]
T22=1/m[1,0]
t_matrix.append([frequency,np.matrix([[T11,T12],[T21,T22]])])
t_complex_list.append([frequency,T11,T12,T21,T22])
return t_matrix
def T_to_S(T_list):
"""Converts T Matrix into S parameters. Input form should be in frequency, np.matrix([[T11,T12],[T21,T22]])
format. Returns a list in [frequency, np.matrix] format."""
S_list=[]
for row in T_list:
frequency=row[0]
m=row[1]
S11=m[0,1]/m[1,1]
S12=np.linalg.det(m)/m[1,1]
S21=1/m[1,1]
S22=-m[1,0]/m[1,1]
S_list.append([frequency,np.matrix([[S11,S12],[S21,S22]])])
return S_list
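# Hedged consistency check: converting S-parameters to T-parameters and back should
# reproduce the original matrix (values illustrative, S21 must be nonzero).
# >>> s = [[1e9, np.matrix([[0.1+0.2j, 0.8+0.0j], [0.9+0.0j, 0.05-0.1j]])]]
# >>> round_trip = T_to_S(S_to_T(s))
# >>> np.allclose(round_trip[0][1], s[0][1])
# True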
def unwrap_phase(phase_list):
"""unwrap_phase returns an unwraped phase list given a wraped phase list,
assumed units are degrees """
unwrapped_phase_list=[]
phase_list_copy=phase_list[:]
i=1
n=0
while(i+1<len(phase_list)):
if abs(phase_list[i]-phase_list[i-1])>90:
if phase_list[i]-phase_list[i-1]>0:
n+=1
else:
n-=1
phase_list_copy[i]=phase_list_copy[i+1]-n*360
phase_list_copy[i+1]=phase_list_copy[i+1]-n*360
i+=1
return phase_list_copy
def correct_sparameters_eight_term(sparameters_complex,eight_term_correction,reciprocal=True):
"""Applies the eight term correction to sparameters_complex and returns
a correct complex list in the form of [[frequency,S11,S21,S12,S22],..]. The eight term
correction should be in the form [[frequency,S1_11,S1_21,S1_12,S1_22,S2_11,S2_21,S2_12,S2_22]..]
Use s2p.sparameter_complex as input."""
# first transform both lists to matrices
s2p_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
s1_list=[[row[0],row[1],row[2],row[3],row[4]] for row in eight_term_correction]
s2_list=[[row[0],row[5],row[6],row[7],row[8]] for row in eight_term_correction]
s1_matrix_list=two_port_complex_to_matrix_form(s1_list)
s2_matrix_list=two_port_complex_to_matrix_form(s2_list)
# now transform to T matrices
t_matrix_list=S_to_T(s2p_matrix_list)
x_matrix_list=S_to_T(s1_matrix_list)
y_matrix_list=S_to_T(s2_matrix_list)
# now invert x
x_inverse_matrix_list=invert_two_port_matrix_list(x_matrix_list)
y_inverse_matrix_list=invert_two_port_matrix_list(y_matrix_list)
# now apply the correction
t_corrected_list=[]
for index,row in enumerate(t_matrix_list):
frequency=row[0]
t_corrected=x_inverse_matrix_list[index][1]*row[1]*y_inverse_matrix_list[index][1]
t_corrected_list.append([frequency,t_corrected])
# now transform back to S
s_corrected_matrix_list =T_to_S(t_corrected_list)
# now put back into single row form
s_corrected_list=two_port_matrix_to_complex_form(s_corrected_matrix_list)
# now we take the geometric average and replace S12 and S21 with it
if reciprocal:
s_averaged_corrected=[]
phase_last=0
for row in s_corrected_list:
[frequency,S11,S21,S12,S22]=row
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
            # if the phase jumps by more than 90 but less than 270 degrees, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
s_averaged_corrected.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
phase_last=cmath.phase(mean_S12_S21)
s_corrected_list=s_averaged_corrected
else:
pass
return s_corrected_list
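# A hedged usage sketch: `s2p.sparameter_complex` is the measured data in
# [[frequency, S11, S21, S12, S22], ...] form and `error_terms` is the eight term
# correction in [[frequency, S1_11, S1_21, S1_12, S1_22, S2_11, S2_21, S2_12, S2_22], ...]
# form; both names are placeholders.
# >>> corrected = correct_sparameters_eight_term(s2p.sparameter_complex, error_terms)
# >>> frequency, S11, S21, S12, S22 = corrected[0]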
def uncorrect_sparameters_eight_term(sparameters_complex,eight_term_correction,reciprocal=True):
"""Removes the eight term correction to sparameters_complex and returns
a uncorrected (reference plane is measurement)
complex list in the form of [[frequency,S11,S21,S12,S22],..]. The eight term
correction should be in the form [[frequency,S1_11,S1_21,S1_12,S1_22,S2_11,S2_21,S2_12,S2_22]..]
Use s2p.sparameter_complex as input."""
# first transform both lists to matrices
s2p_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
s1_list=[[row[0],row[1],row[2],row[3],row[4]] for row in eight_term_correction]
s2_list=[[row[0],row[5],row[6],row[7],row[8]] for row in eight_term_correction]
s1_matrix_list=two_port_complex_to_matrix_form(s1_list)
s2_matrix_list=two_port_complex_to_matrix_form(s2_list)
# now transform to T matrices
t_matrix_list=S_to_T(s2p_matrix_list)
x_matrix_list=S_to_T(s1_matrix_list)
y_matrix_list=S_to_T(s2_matrix_list)
# now apply the correction
t_uncorrected_list=[]
for index,row in enumerate(t_matrix_list):
frequency=row[0]
t_corrected=x_matrix_list[index][1]*row[1]*y_matrix_list[index][1]
t_uncorrected_list.append([frequency,t_corrected])
# now transform back to S
s_uncorrected_matrix_list =T_to_S(t_uncorrected_list)
# now put back into single row form
s_uncorrected_list=two_port_matrix_to_complex_form(s_uncorrected_matrix_list)
# now we take the geometric average and replace S12 and S21 with it
if reciprocal:
s_averaged_corrected=[]
phase_last=0
for row in s_uncorrected_list:
[frequency,S11,S21,S12,S22]=row
# S12 and S21 are averaged together in a weird way that makes phase continuous
geometric_mean=cmath.sqrt(S21*S12)
root_select=1
phase_new=cmath.phase(geometric_mean)
            # if the phase jumps by more than 90 but less than 270 degrees, pick the other root
if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
root_select=-1
mean_S12_S21=root_select*cmath.sqrt(S21*S12)
s_averaged_corrected.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
phase_last=cmath.phase(mean_S12_S21)
s_uncorrected_list=s_averaged_corrected
else:
pass
return s_uncorrected_list
def correct_sparameters_sixteen_term(sparameters_complex,sixteen_term_correction):
"""Applies the sixteen term correction to sparameters and returns a new sparameter list.
The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
The sixteen term correction | |
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
import numpy as np
import json
import os
import sys
import cv2
import copy
import paddlex.utils.logging as logging
# fix np.linspace problem for pycocotools when numpy > 1.17.2
backup_linspace = np.linspace
def fixed_linspace(start,
stop,
num=50,
endpoint=True,
retstep=False,
dtype=None,
axis=0):
num = int(num)
return backup_linspace(start, stop, num, endpoint, retstep, dtype, axis)
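# Hedged note: pycocotools (via COCOeval) may call np.linspace with a float `num`,
# which newer numpy versions reject, so evaluation temporarily swaps in fixed_linspace
# and restores the original afterwards. A minimal illustration of the pattern:
# >>> np.linspace = fixed_linspace
# >>> try:
# ...     pts = np.linspace(0.0, 1.0, num=5.0)   # float num is coerced to int
# ... finally:
# ...     np.linspace = backup_linspace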
def eval_results(results,
metric,
coco_gt,
with_background=True,
resolution=None,
is_bbox_normalized=False,
map_type='11point'):
"""Evaluation for evaluation program results"""
box_ap_stats = []
coco_gt_data = copy.deepcopy(coco_gt)
eval_details = {'gt': copy.deepcopy(coco_gt.dataset)}
if metric == 'COCO':
np.linspace = fixed_linspace
if 'proposal' in results[0]:
proposal_eval(results, coco_gt_data)
if 'bbox' in results[0]:
box_ap_stats, xywh_results = coco_bbox_eval(
results,
coco_gt_data,
with_background,
is_bbox_normalized=is_bbox_normalized)
if 'mask' in results[0]:
mask_ap_stats, segm_results = mask_eval(results, coco_gt_data,
resolution)
ap_stats = [box_ap_stats, mask_ap_stats]
eval_details['bbox'] = xywh_results
eval_details['mask'] = segm_results
            np.linspace = backup_linspace
            return ap_stats, eval_details
        np.linspace = backup_linspace
else:
if 'accum_map' in results[-1]:
res = np.mean(results[-1]['accum_map'][0])
logging.debug('mAP: {:.2f}'.format(res * 100.))
box_ap_stats.append(res * 100.)
elif 'bbox' in results[0]:
box_ap, xywh_results = voc_bbox_eval(
results,
coco_gt_data,
with_background,
is_bbox_normalized=is_bbox_normalized,
map_type=map_type)
box_ap_stats.append(box_ap)
eval_details['bbox'] = xywh_results
return box_ap_stats, eval_details
def proposal_eval(results, coco_gt, outfile, max_dets=(100, 300, 1000)):
assert 'proposal' in results[0]
assert outfile.endswith('.json')
xywh_results = proposal2out(results)
assert len(
xywh_results) > 0, "The number of valid proposal detected is zero.\n \
Please use reasonable model and check input data."
with open(outfile, 'w') as f:
json.dump(xywh_results, f)
cocoapi_eval(xywh_results, 'proposal', coco_gt=coco_gt, max_dets=max_dets)
# flush coco evaluation result
sys.stdout.flush()
def coco_bbox_eval(results,
coco_gt,
with_background=True,
is_bbox_normalized=False):
assert 'bbox' in results[0]
from pycocotools.coco import COCO
cat_ids = coco_gt.getCatIds()
# when with_background = True, mapping category to classid, like:
# background:0, first_class:1, second_class:2, ...
clsid2catid = dict(
{i + int(with_background): catid
for i, catid in enumerate(cat_ids)})
xywh_results = bbox2out(
results, clsid2catid, is_bbox_normalized=is_bbox_normalized)
results = copy.deepcopy(xywh_results)
if len(xywh_results) == 0:
logging.warning(
"The number of valid bbox detected is zero.\n Please use reasonable model and check input data.\n stop eval!"
)
return [0.0], results
map_stats = cocoapi_eval(xywh_results, 'bbox', coco_gt=coco_gt)
# flush coco evaluation result
sys.stdout.flush()
return map_stats, results
def loadRes(coco_obj, anns):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
from pycocotools.coco import COCO
import pycocotools.mask as maskUtils
import time
res = COCO()
res.dataset['images'] = [img for img in coco_obj.dataset['images']]
tic = time.time()
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set(
[ann['image_id'] for ann in anns])
res.dataset['images'] = [
img for img in res.dataset['images'] if img['id'] in imgIds
]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1 - x0) * (y1 - y0)
ann['id'] = id + 1
ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
res.dataset['annotations'] = anns
res.createIndex()
return res
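# A hedged usage sketch: `coco_gt` is a pycocotools COCO object for the ground truth
# and `detections` is a list of result dicts like the ones produced by bbox2out below
# (image_id, category_id, bbox in xywh form, score); both names are placeholders.
# >>> coco_dt = loadRes(coco_gt, detections)
# >>> len(coco_dt.dataset['annotations']) == len(detections)
# True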
def mask_eval(results, coco_gt, resolution, thresh_binarize=0.5):
assert 'mask' in results[0]
from pycocotools.coco import COCO
clsid2catid = {i + 1: v for i, v in enumerate(coco_gt.getCatIds())}
segm_results = mask2out(results, clsid2catid, resolution, thresh_binarize)
results = copy.deepcopy(segm_results)
if len(segm_results) == 0:
logging.warning(
"The number of valid mask detected is zero.\n Please use reasonable model and check input data."
)
return None, results
map_stats = cocoapi_eval(segm_results, 'segm', coco_gt=coco_gt)
return map_stats, results
def cocoapi_eval(anns,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000)):
"""
Args:
anns: Evaluation result.
style: COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt: Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file: COCO annotations file.
max_dets: COCO evaluation maxDets.
"""
assert coco_gt != None or anno_file != None
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
if coco_gt == None:
coco_gt = COCO(anno_file)
logging.debug("Start evaluate...")
coco_dt = loadRes(coco_gt, anns)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
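# A hedged usage sketch: `annotation_file` is a COCO-format annotation json and
# `detections` a list of result dicts; both names are placeholders.
# >>> stats = cocoapi_eval(detections, 'bbox', anno_file=annotation_file)
# >>> print('mAP@[0.5:0.95] = {:.3f}'.format(stats[0]))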
def proposal2out(results, is_bbox_normalized=False):
xywh_res = []
for t in results:
bboxes = t['proposal'][0]
lengths = t['proposal'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
assert len(lengths) == im_ids.size
        if bboxes is None or bboxes.shape == (1, 1):
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
xmin, ymin, xmax, ymax = dt.tolist()
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
else:
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': 1,
'bbox': bbox,
'score': 1.0
}
xywh_res.append(coco_res)
k += 1
return xywh_res
def bbox2out(results, clsid2catid, is_bbox_normalized=False):
"""
Args:
results: request a dict, should include: `bbox`, `im_id`,
if is_bbox_normalized=True, also need `im_shape`.
clsid2catid: class id to category id map of COCO2017 dataset.
is_bbox_normalized: whether or not bbox is normalized.
"""
xywh_res = []
for t in results:
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
        if bboxes is None or bboxes.shape == (1, 1):
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
clsid, score, xmin, ymin, xmax, ymax = dt.tolist()
catid = (clsid2catid[int(clsid)])
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
im_shape = t['im_shape'][0][i].tolist()
im_height, im_width = int(im_shape[0]), int(im_shape[1])
xmin *= im_width
ymin *= im_height
w *= im_width
h *= im_height
else:
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': catid,
'bbox': bbox,
'score': score
}
xywh_res.append(coco_res)
k += 1
return xywh_res
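# Worked example of the normalized-bbox branch above: with a 480x640 image
# (im_shape is [height, width]) a clipped, normalized box (0.1, 0.2, 0.5, 0.8) gives
# xmin = 0.1*640 = 64, ymin = 0.2*480 = 96, w = 0.4*640 = 256, h = 0.6*480 = 288,
# so the reported COCO bbox is [64, 96, 256, 288].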
def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
import pycocotools.mask as mask_util
scale = (resolution + 2.0) / resolution
segm_res = []
# for each batch
for t in results:
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0])
        if bboxes is None or bboxes.shape == (1, 1):
continue
if len(bboxes.tolist()) == 0:
continue
masks = t['mask'][0]
s = 0
# for each sample
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i][0])
im_shape = t['im_shape'][0][i]
bbox = bboxes[s:s + num][:, 2:]
clsid_scores = bboxes[s:s + num][:, 0:2]
mask = masks[s:s + num]
s += num
im_h = int(im_shape[0])
im_w = int(im_shape[1])
expand_bbox = expand_boxes(bbox, scale)
expand_bbox = expand_bbox.astype(np.int32)
padded_mask = np.zeros((resolution + 2, resolution + 2),
dtype=np.float32)
for j in range(num):
xmin, ymin, xmax, ymax = expand_bbox[j].tolist()
clsid, score = clsid_scores[j].tolist()
clsid = int(clsid)
padded_mask[1:-1, 1:-1] = mask[j, clsid, :, :]
catid = clsid2catid[clsid]
w = xmax - xmin + 1
h = ymax - ymin + 1
w = np.maximum(w, | |
'exclude_expired' in params:
query_params.append(('exclude_expired', params['exclude_expired'])) # noqa: E501
if 'limit' in params:
query_params.append(('_limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('_offset', params['offset'])) # noqa: E501
if 'sort' in params:
query_params.append(('_sort', params['sort'])) # noqa: E501
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/coupon/coupons', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_coupons_by_query(self, coupon_query, **kwargs): # noqa: E501
"""Retrieve coupons by query # noqa: E501
Retrieves coupons from the account. If no parameters are specified, all coupons will be returned. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupons_by_query(coupon_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CouponQuery coupon_query: Coupon query (required)
:param int limit: The maximum number of records to return on this one API call. (Max 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the coupons. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_coupons_by_query_with_http_info(coupon_query, **kwargs) # noqa: E501
else:
(data) = self.get_coupons_by_query_with_http_info(coupon_query, **kwargs) # noqa: E501
return data
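    # A hedged pagination sketch (names are illustrative; it assumes CouponsResponse
    # exposes the returned page as `.coupons`): because this endpoint pages its result
    # set via _limit/_offset, callers typically loop until a page comes back smaller
    # than the requested limit.
    # >>> offset, limit, coupons = 0, 200, []
    # >>> while True:
    # ...     response = api.get_coupons_by_query(coupon_query, limit=limit, offset=offset)
    # ...     coupons.extend(response.coupons or [])
    # ...     if not response.coupons or len(response.coupons) < limit:
    # ...         break
    # ...     offset += limit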
def get_coupons_by_query_with_http_info(self, coupon_query, **kwargs): # noqa: E501
"""Retrieve coupons by query # noqa: E501
Retrieves coupons from the account. If no parameters are specified, all coupons will be returned. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_coupons_by_query_with_http_info(coupon_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param CouponQuery coupon_query: Coupon query (required)
:param int limit: The maximum number of records to return on this one API call. (Max 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the coupons. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon_query', 'limit', 'offset', 'sort', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coupons_by_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon_query' is set
if ('coupon_query' not in params or
params['coupon_query'] is None):
raise ValueError("Missing the required parameter `coupon_query` when calling `get_coupons_by_query`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('_limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('_offset', params['offset'])) # noqa: E501
if 'sort' in params:
query_params.append(('_sort', params['sort'])) # noqa: E501
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'coupon_query' in params:
body_params = params['coupon_query']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/coupon/coupons/query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_editor_values(self, **kwargs): # noqa: E501
"""Retrieve values needed for a coupon editor # noqa: E501
Retrieve values needed for a coupon editor # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_editor_values(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: CouponEditorValues
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_editor_values_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_editor_values_with_http_info(**kwargs) # noqa: E501
return data
def get_editor_values_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve values needed for a coupon editor # noqa: E501
Retrieve values needed for a coupon editor # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_editor_values_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: CouponEditorValues
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_editor_values" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/coupon/editor_values', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CouponEditorValues', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def insert_coupon(self, coupon, **kwargs): # noqa: E501
"""Insert a coupon # noqa: E501
Insert a coupon on the UltraCart account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_coupon(coupon, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Coupon coupon: Coupon to insert (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_coupon_with_http_info(coupon, **kwargs) # noqa: E501
else:
(data) = self.insert_coupon_with_http_info(coupon, **kwargs) # noqa: E501
return data
def insert_coupon_with_http_info(self, coupon, **kwargs): # noqa: E501
"""Insert a coupon # noqa: E501
Insert a coupon on the UltraCart account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_coupon_with_http_info(coupon, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Coupon coupon: Coupon to insert (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: CouponResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['coupon', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_coupon" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'coupon' is set
if ('coupon' not in params or
params['coupon'] is None):
raise ValueError("Missing the required parameter `coupon` when calling `insert_coupon`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'coupon' in params:
body_params = params['coupon']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
| |
revision_history_limit: Optional[int]
paused: Optional[bool]
progress_deadline_seconds: Optional[int]
restart_at: Optional[datetime.datetime]
strategy: Any
class RolloutBlueGreenStatus(RolloutBaseModel):
active_selector: Optional[str]
post_promotion_analysis_run: Optional[str]
post_promotion_analysis_run_status: Any
pre_promotion_analysis_run: Optional[str]
pre_promotion_analysis_run_status: Any
preview_selector: Optional[str]
previous_active_selector: Optional[str]
scale_down_delay_start_time: Optional[datetime.datetime]
scale_up_preview_check_point: Optional[bool]
class RolloutStatusCondition(RolloutBaseModel):
last_transition_time: datetime.datetime
last_update_time: datetime.datetime
message: str
reason: str
status: str
type: str
class RolloutStatus(RolloutBaseModel):
hpa_replicas: Optional[int] = pydantic.Field(..., alias="HPAReplicas")
abort: Optional[bool]
aborted_at: Optional[datetime.datetime]
available_replicas: Optional[int]
blue_green: RolloutBlueGreenStatus
canary: Any # TODO type this out if connector needs to interact with it
collision_count: Optional[int]
conditions: List[RolloutStatusCondition]
controller_pause: Optional[bool]
current_pod_hash: str
current_step_hash: Optional[str]
current_step_index: Optional[int]
observed_generation: str
pause_conditions: Any
ready_replicas: Optional[int]
replicas: Optional[int]
restarted_at: Optional[datetime.datetime]
selector: str
stable_RS: Optional[str]
updated_replicas: Optional[int]
class RolloutObj(RolloutBaseModel): # TODO is this the right base to inherit from?
api_version: str
kind: str
metadata: RolloutV1ObjectMeta
spec: RolloutSpec
status: Optional[RolloutStatus]
# TODO expose to config if needed
ROLLOUT_GROUP = "argoproj.io"
ROLLOUT_VERSION = "v1alpha1"
ROLLOUT_PURAL = "rollouts"
class Rollout(KubernetesModel):
"""Wrapper around an ArgoCD Kubernetes `Rollout` Object.
The actual instance that this
wraps can be accessed via the ``obj`` instance member.
This wrapper provides some convenient functionality around the
API Object and provides some state management for the `Rollout`.
.. Rollout:
https://argoproj.github.io/argo-rollouts/features/specification/
"""
obj: RolloutObj
workload_ref_controller: Optional[Deployment] = None
_rollout_const_args: Dict[str, str] = dict(
group=ROLLOUT_GROUP,
version=ROLLOUT_VERSION,
plural=ROLLOUT_PURAL,
)
api_clients: ClassVar[Dict[str, Type]] = {
"preferred":kubernetes_asyncio.client.CustomObjectsApi,
f"{ROLLOUT_GROUP}/{ROLLOUT_VERSION}":kubernetes_asyncio.client.CustomObjectsApi,
}
async def create(self, namespace: str = None) -> None:
"""Create the Rollout under the given namespace.
Args:
namespace: The namespace to create the Rollout under.
"""
if namespace is None:
namespace = self.namespace
self.logger.info(
f'creating rollout "{self.name}" in namespace "{namespace}"'
)
self.logger.debug(f"rollout: {self.obj}")
async with self.api_client() as api_client:
self.obj = RolloutObj.parse_obj(await api_client.create_namespaced_custom_object(
namespace=namespace,
body=self.obj.dict(by_alias=True, exclude_none=True),
**self._rollout_const_args,
))
@classmethod
async def read(cls, name: str, namespace: str) -> "Rollout":
"""Read a Rollout by name under the given namespace.
Args:
name: The name of the Rollout to read.
namespace: The namespace to read the Rollout from.
"""
async with cls.preferred_client() as api_client:
obj = await api_client.get_namespaced_custom_object(
namespace=namespace,
name=name,
**cls._rollout_const_args,
)
rollout = Rollout(RolloutObj.parse_obj(obj))
if rollout.obj.spec.workload_ref:
await rollout.read_workfload_ref(namespace=namespace)
return rollout
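    # A hedged usage sketch (assumes an asyncio event loop and an accessible cluster;
    # the rollout name and namespace are illustrative only):
    # >>> rollout = await Rollout.read(name="my-rollout", namespace="default")
    # >>> status = await rollout.get_status()
    # >>> ready = await rollout.is_ready()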
async def read_workfload_ref(self, namespace: str) -> None:
if self.obj.spec.workload_ref.kind != "Deployment":
raise RuntimeError(f"Rollout integration does not currently support workloadRef kind of {self.obj.spec.workload_ref.kind}")
self.workload_ref_controller = await Deployment.read(
name=self.obj.spec.workload_ref.name,
namespace=namespace
)
if not self.workload_ref_controller:
raise ValueError(
f'cannot read Rollout: workloadRef Deployment "{self.obj.spec.workload_ref.name}"'
f' does not exist in Namespace "{namespace}"'
)
async def patch(self) -> None:
"""Update the changed attributes of the Rollout."""
async with self.api_client() as api_client:
self.obj = RolloutObj.parse_obj(await api_client.patch_namespaced_custom_object(
namespace=self.namespace,
name=self.name,
body=self.obj.dict(by_alias=True, exclude_none=True),
**self._rollout_const_args,
))
async def delete(self, options:kubernetes_asyncio.client.V1DeleteOptions = None) ->kubernetes_asyncio.client.V1Status:
"""Delete the Rollout.
This method expects the Rollout to have been loaded or otherwise
assigned a namespace already. If it has not, the namespace will need
to be set manually.
Args:
options: Unsupported, options for Rollout deletion.
Returns:
The status of the delete operation.
"""
if options is not None:
raise RuntimeError("Rollout deletion does not support V1DeleteOptions")
self.logger.info(f'deleting rollout "{self.name}"')
self.logger.trace(f"rollout: {self.obj}")
async with self.api_client() as api_client:
return await api_client.delete_namespaced_custom_object(
namespace=self.namespace,
name=self.name,
**self._rollout_const_args,
)
async def refresh(self) -> None:
"""Refresh the underlying Kubernetes Rollout resource."""
async with self.api_client() as api_client:
self.obj = RolloutObj.parse_obj(await api_client.get_namespaced_custom_object_status(
namespace=self.namespace,
name=self.name,
**self._rollout_const_args
))
if self.workload_ref_controller:
await self.workload_ref_controller.refresh()
async def rollback(self) -> None:
# TODO rollbacks are automated in Argo Rollouts, not sure if making this No Op will cause issues
# but I was unable to locate a means of triggering a rollout rollback manually
raise TypeError(
(
"rollback is not supported under the optimization of rollouts because rollbacks are applied to "
"Kubernetes Deployment objects whereas this is automated by argocd"
)
)
async def get_status(self) -> RolloutStatus:
"""Get the status of the Rollout.
Returns:
The status of the Rollout.
"""
self.logger.info(f'checking status of rollout "{self.name}"')
# first, refresh the rollout state to ensure the latest status
await self.refresh()
# return the status from the rollout
return self.obj.status
async def get_pods(self) -> List[Pod]:
"""Get the pods for the Rollout.
Returns:
A list of pods that belong to the rollout.
"""
self.logger.debug(f'getting pods for rollout "{self.name}"')
async with Pod.preferred_client() as api_client:
label_selector = self.match_labels
pod_list:kubernetes_asyncio.client.V1PodList = await api_client.list_namespaced_pod(
namespace=self.namespace, label_selector=selector_string(label_selector)
)
pods = [Pod(p) for p in pod_list.items]
return pods
@property
def status(self) -> RolloutStatus:
"""Return the status of the Rollout.
Returns:
The status of the Rollout.
"""
return self.obj.status
@property
def observed_generation(self) -> str:
"""
Returns the observed generation of the Rollout status, or of the workloadRef
Deployment status when one is set. The generation is observed by the controller.
"""
if self.workload_ref_controller:
return self.workload_ref_controller.observed_generation
return self.obj.status.observed_generation
async def is_ready(self) -> bool:
"""Check if the Rollout is in the ready state.
Returns:
True if in the ready state; False otherwise.
"""
await self.refresh()
# if there is no status, the rollout is definitely not ready
status = self.obj.status
if status is None:
return False
# check for the rollout completed status condition
completed_condition = next(filter(lambda con: con.type == "Completed", status.conditions), None)
if completed_condition is None or completed_condition.status != "True":
return False
# check the status for the number of total replicas and compare
# it to the number of ready replicas. if the numbers are
# equal, the deployment is ready; otherwise it is not ready.
total = status.replicas
ready = status.ready_replicas
if total is None:
return False
return total == ready
@property
def containers(self) -> List[Container]:
"""
Return a list of Container objects from the underlying pod template spec.
"""
if self.workload_ref_controller:
return self.workload_ref_controller.containers
return list(
map(lambda c: Container(c, None), self.obj.spec.template.spec.containers)
)
def find_container(self, name: str) -> Optional[Container]:
"""
Return the container with the given name.
"""
return next(filter(lambda c: c.name == name, self.containers), None)
async def get_target_container(self, config: ContainerConfiguration) -> Optional[Container]:
"""Return the container targeted by the supplied configuration"""
target_container = self.find_container(config.name)
if target_container is not None and isinstance(target_container.obj, RolloutV1Container):
async with kubernetes_asyncio.client.ApiClient() as api_client:
target_container.obj = api_client.deserialize(
response=FakeKubeResponse(target_container.obj.dict(by_alias=True, exclude_none=True)),
response_type=kubernetes_asyncio.client.models.V1Container
)
return target_container
@property
def replicas(self) -> int:
"""
Return the number of desired pods.
"""
return self.obj.spec.replicas
@replicas.setter
def replicas(self, replicas: int) -> None:
"""
Set the number of desired pods.
"""
self.obj.spec.replicas = replicas
@property
def match_labels(self) -> Dict[str, str]:
"""Return the matchLabels dict of the selector field (from the workloadRef if applicable"""
if self.workload_ref_controller:
return self.workload_ref_controller.match_labels
return self.obj.spec.selector.match_labels
@property
def pod_template_spec(self) -> RolloutV1PodTemplateSpec:
"""Return the pod template spec for instances of the Rollout."""
if self.workload_ref_controller:
return self.workload_ref_controller.pod_template_spec
return self.obj.spec.template
async def get_pod_template_spec_copy(self) -> kubernetes_asyncio.client.models.V1PodTemplateSpec:
"""Return a deep copy of the pod template spec. Eg. for creation of a tuning pod"""
if self.workload_ref_controller:
return await self.workload_ref_controller.get_pod_template_spec_copy()
async with kubernetes_asyncio.client.ApiClient() as api_client:
return api_client.deserialize(
response=FakeKubeResponse(self.pod_template_spec.dict(by_alias=True, exclude_none=True)),
response_type=kubernetes_asyncio.client.models.V1PodTemplateSpec
)
def update_pod(self, pod: kubernetes_asyncio.client.models.V1Pod) -> kubernetes_asyncio.client.models.V1Pod:
"""Update the pod with the latest state of the controller if needed. In the case of argo rollouts, the
pod labels are updated with the latest template hash so that it will be routed to by the appropriate service"""
# Apply the latest template hash so the active service registers the tuning pod as an endpoint
pod.metadata.labels["rollouts-pod-template-hash"] = self.obj.status.current_pod_hash
return pod
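# Illustrative sketch (not part of the original module; `tuning_pod` is a
# hypothetical V1Pod built from the Rollout's pod template):
#
#   tuning_pod = rollout.update_pod(tuning_pod)
#   # tuning_pod.metadata.labels["rollouts-pod-template-hash"] now matches
#   # rollout.obj.status.current_pod_hash, so the active Service selects it.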
@backoff.on_exception(backoff.expo, kubernetes_asyncio.client.exceptions.ApiException, max_tries=3)
async def inject_sidecar(
self,
name: str,
image: str,
*args,
service: Optional[str] = None,
port: Optional[int] = None,
index: Optional[int] = None,
service_port: int = 9980
) -> None:
"""
Injects an Envoy sidecar into a target Deployment that proxies a service
or literal TCP port, generating scrapeable metrics usable for optimization.
The service or port argument must be provided to define how traffic is proxied
between the Envoy sidecar and the container responsible for fulfilling the request.
Args:
name: The name of the sidecar to inject.
image: The container image for the sidecar container.
service: Name of the service to proxy. Envoy will accept ingress traffic
on the service port and reverse proxy requests back to the original
target container.
port: The name or number of a port within the Deployment to wrap the proxy around.
index: The index at which to insert the sidecar container. When `None`, the sidecar is appended.
service_port: The port to receive ingress traffic from an upstream service.
"""
if self.workload_ref_controller:
await self.workload_ref_controller.inject_sidecar(
name=name, | |
<filename>DeepAnalogs/utils.py
# "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
# Author: <NAME> <<EMAIL>>
# Geoinformatics and Earth Observation Laboratory (http://geolab.psu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
# This file contains utility functions.
#
import os
import gc
import math
import yaml
import time
import numpy as np
import bottleneck as bn
from tqdm import tqdm
from functools import partial
from bisect import bisect_left
from sklearn import preprocessing
from prettytable import PrettyTable
from datetime import datetime, timezone
from tqdm.contrib.concurrent import process_map
def read_yaml(file):
with open(file, 'r') as file:
args = yaml.load(file, Loader=yaml.FullLoader)
return args
def add_default_values(args):
if 'save_as_pure_python_module' not in args['io']:
args['io']['save_as_pure_python_module'] = False
if 'fcst_variables' not in args['data']:
args['data']['fcst_variables'] = None
if 'obs_weights' not in args['data']:
args['data']['obs_weights'] = None
if 'positive_index' not in args['data']:
args['data']['positive_index'] = None
if 'triplet_sample_prob' not in args['data']:
args['data']['triplet_sample_prob'] = 1.0
if 'dataset_margin' not in args['data']:
args['data']['dataset_margin'] = np.nan
if 'obs_stations_index' not in args['data']:
args['data']['obs_stations_index'] = None
if 'fcst_stations_index' not in args['data']:
args['data']['fcst_stations_index'] = None
if 'preprocess_workers' not in args['data']:
args['data']['preprocess_workers'] = os.cpu_count()
if 'intermediate_file' not in args['data']:
args['data']['intermediate_file'] = ''
if 'julian_weight' not in args['data']:
args['data']['julian_weight'] = 0.0
if 'dataset_class' not in args['data']:
args['data']['dataset_class'] = 'AnEnDatasetWithTimeWindow'
if 'test_complete_sequence' not in args['data']:
args['data']['test_complete_sequence'] = False
if 'use_conv_lstm' not in args['model']:
args['model']['use_conv_lstm'] = False
if 'conv_kernel' not in args['model']:
args['model']['conv_kernel'] = 3
if 'conv_padding' not in args['model']:
args['model']['conv_padding'] = 1
if 'conv_stride' not in args['model']:
args['model']['conv_stride'] = 1
if 'pool_kernel' not in args['model']:
args['model']['pool_kernel'] = 2
if 'pool_padding' not in args['model']:
args['model']['pool_padding'] = 0
if 'pool_stride' not in args['model']:
args['model']['pool_stride'] = args['model']['pool_kernel']
if 'forecast_grid_file' not in args['model']:
args['model']['forecast_grid_file'] = 'Not specified'
if 'spatial_mask_width' not in args['model']:
args['model']['spatial_mask_width'] = 5
if 'spatial_mask_height' not in args['model']:
args['model']['spatial_mask_height'] = 5
if 'hidden_layer_types' not in args['model']:
args['model']['hidden_layer_types'] = 'conv_lstm'
if 'use_naive' not in args['model']:
args['model']['use_naive'] = False
if 'range_step' not in args['model']:
args['model']['range_step'] = 1
if 'optimizer' not in args['train']:
args['train']['optimizer'] = 'Adam'
if 'lr_decay' not in args['train']:
args['train']['lr_decay'] = 0
if 'scaler_type' not in args['train']:
args['train']['scaler_type'] = 'MinMaxScaler'
if 'train_loaders' not in args['train']:
args['train']['train_loaders'] = os.cpu_count()
if 'test_loaders' not in args['train']:
args['train']['test_loaders'] = os.cpu_count()
if 'use_cpu' not in args['train']:
args['train']['use_cpu'] = False
if 'train_margin' not in args['train']:
args['train']['train_margin'] = 0.9
if 'use_amsgrad' not in args['train']:
args['train']['use_amsgrad'] = False
if 'wdecay' not in args['train']:
args['train']['wdecay'] = 0
if 'momentum' not in args['train']:
args['train']['momentum'] = 0
return args
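# Illustrative sketch (not part of the original code; 'config.yml' is a hypothetical
# file providing only the required keys). Missing optional keys are padded with the
# defaults above:
#
#   args = add_default_values(read_yaml('config.yml'))
#   args['data']['triplet_sample_prob']   # -> 1.0
#   args['train']['optimizer']            # -> 'Adam'
#   args['model']['pool_stride']          # -> args['model']['pool_kernel'], i.e. 2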
def validate_args(args):
# Check groups
expected_groups = ['io', 'data', 'model', 'train']
assert len(args.keys()) == len(expected_groups) and \
all([k in expected_groups for k in args.keys()]), \
'Allowed argument groups: {}'.format(expected_groups)
args = add_default_values(args)
# General check
err_msg = []
for group in expected_groups:
for k, v in args[group].items():
if v == '__REQUIRED__':
err_msg.append('Please provide argument [{}] in the group [{}]!'.format(k, group))
if v == 'np.nan':
args[group][k] = np.nan
if isinstance(v, str) and len(v) > 0 and v[0] == '~':
args[group][k] = os.path.expanduser(args[group][k])
# Specific check
if args['model']['use_conv_lstm']:
grid_file = args['model']['forecast_grid_file']
if not os.path.exists(grid_file):
err_msg.append('Forecast grid not found: {}'.format(grid_file))
if args['data']['triplet_sample_method'] == 'fitness':
num_negative = args['data']['fitness_num_negative']
if not isinstance(num_negative, int):
err_msg.append('Invalid fitness_num_negative: {}'.format(num_negative))
if args['data']['dataset_class'] == 'AnEnDatasetOneToMany':
matching_station = args['data']['matching_forecast_station']
if isinstance(matching_station, int) and matching_station > 0:
pass
else:
err_msg.append('Invalid matching_forecast_station: {}'.format(matching_station))
if len(err_msg) != 0:
err_msg = '\n'.join(err_msg)
raise Exception('Failed during argument validation:\n' + err_msg)
# Change from str to datetime
for k in ['split', 'anchor_start', 'anchor_end', 'search_start', 'search_end']:
dt = time.strptime(args['io'][k], '%Y/%m/%d %H:%M:%S')
args['io'][k] = datetime(*(dt[0:6]), tzinfo=timezone.utc)
return args
def legend_without_duplicate_labels(ax):
handles, labels = ax.get_legend_handles_labels()
unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) if l not in labels[:i]]
ax.legend(*zip(*unique))
def summary(d):
"""
Generate a summary message of the dictionary.
:param d: A dictionary to summarize.
:return: A string as summary message.
"""
msg_list = []
for key, value in d.items():
msg = '- {}: '.format(key)
if isinstance(value, np.ndarray):
msg += 'shape {}'.format(value.shape)
elif hasattr(value, '__len__') and len(value) == 1:
msg += 'value {}'.format(value)
elif isinstance(value, list):
msg += 'length {}'.format(len(value))
else:
msg += '** no preview **'
msg_list.append(msg)
return '\n'.join(msg_list)
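# Illustrative sketch (not part of the original code):
#
#   summary({'obs': np.zeros((4, 10, 100)), 'stations': list(range(10))})
#   # -> "- obs: shape (4, 10, 100)\n- stations: length 10"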
def binary_search(seq, v):
"""
Carries out a binary search on a list to find the index of the first occurrence.
Code references from https://www.geeksforgeeks.org/binary-search-bisect-in-python/.
:param seq: A sorted list.
:param v: A value to search for the index.
:return: The index of the value if found or -1 if not found.
"""
i = bisect_left(seq, v)
if i != len(seq) and seq[i] == v:
return i
else:
return -1
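# Illustrative sketch (not part of the original code):
#
#   binary_search([1, 3, 5, 7], 5)   # -> 2 (index of the first occurrence)
#   binary_search([1, 3, 5, 7], 4)   # -> -1 (value not present)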
def sort_distance(anchor_times, search_times, arr, scaler_type, parameter_weights=None, julian_weight=0,
forecast_times=None, disable_pbar=False, tqdm=tqdm, return_values=False, verbose=True):
"""
Calculates the dissimilarity (distance) between each of the anchor times and the remaining times at each location.
This function is capable of finding multi-variate distances by setting the parameter weights. The sorted indices
and distances are saved in a dictionary.
:param arr: A four dimensional numpy array with the dimensions [parameters, stations, times, lead times]
:param anchor_times: A list of time indices from the observation array as anchor points. For each of the
anchor time index, it is compared to the time indices in search_times and the search entries will be sorted based
on the similarity from highest to lowest (or the distance from lowest to highest).
:param search_times: A list of time indices from the observation array as search points.
:param scaler_type: The normalization method to use. It can be either `MinMaxScaler` or `StandardScaler`.
:param parameter_weights: A list of weights corresponding to each parameters in the input array.
:param disable_pbar: Whether to disable the progress bar.
:param tqdm: A tqdm progress bar object used to wrap around enumerate function call in the loop.
:param return_values: Whether to return the values of the sorted array. It drastically increases the memory usage.
:param verbose: Whether to print messages
:param julian_weight: The weight for the Julian day as a variable
:return: A dictionary with the sorted members, 'index' and 'distance'. The members are both four dimensional
numpy arrays with the dimensions [stations, anchor times, lead times, remaining times]. They record the order and
the values of the similarity between the remaining times and each of the anchor times.
"""
# Sanity check
assert isinstance(arr, np.ndarray), 'Argument arr should be a Numpy array!'
assert isinstance(search_times, list), 'Argument search_times should be a list'
assert len(arr.shape) == 4, 'Argument arr should have 4 dimensions [parameters, stations, times, lead times]!'
assert len(parameter_weights) == arr.shape[0] if parameter_weights else True, 'Too many or too few weights!'
if isinstance(anchor_times, list):
pass
elif isinstance(anchor_times, int):
anchor_times = [anchor_times]
else:
raise Exception('Argument anchor_times should either be a list of integers or a single integer!')
assert len(anchor_times) == len(set(anchor_times)), 'Anchor times must not have duplicates!'
assert max(anchor_times) < arr.shape[2], 'Anchor time index out of bound!'
assert julian_weight >= 0
if julian_weight > 0:
anchor_julians = [int(forecast_times[i].strftime('%j')) for i in anchor_times]
search_julians = [int(forecast_times[i].strftime('%j')) for i in search_times]
# Extract dimensions
num_parameters, num_stations, num_times, num_lead_times = arr.shape
num_samples = num_stations * num_times * num_lead_times
num_anchor_times = len(anchor_times)
num_search_times = len(search_times)
# Default parameter weights to one if not set
if parameter_weights:
parameter_weights = np.array(parameter_weights)
else:
parameter_weights = np.ones(num_parameters)
# Determine the indices with non-zero weights so that NA values for zero-weighted
# variables have no effect when calculating the average.
#
obs_vars_index = np.where(parameter_weights != 0)[0]
parameter_weights = parameter_weights[parameter_weights != 0]
if verbose:
print('Observation variable index {} with weights {}'.format(obs_vars_index, parameter_weights))
# Initialize a dictionary for indices and distances.
# For each anchor time, the remaining times will be used as search times.
#
new_dimensions = (num_stations, num_anchor_times, num_lead_times, num_search_times)
sorted_members = {
'index': np.empty(new_dimensions, int),
'distance': np.full(new_dimensions, np.nan),
'anchor_times_index': anchor_times,
'search_times_index': search_times,
'aligned_obs': arr
}
if return_values:
value_dimensions = (num_parameters, num_stations, num_anchor_times, num_lead_times, num_search_times)
sorted_members['value'] = np.full(value_dimensions, np.nan)
# Normalization
if scaler_type == 'MinMaxScaler':
scaler = preprocessing.MinMaxScaler()
if julian_weight > 0:
julian_max = np.max((np.max(anchor_julians), np.max(search_julians)))
anchor_julians = np.asarray(anchor_julians) / julian_max
search_julians = np.asarray(search_julians) / julian_max
elif scaler_type == 'StandardScaler':
scaler = preprocessing.StandardScaler()
if julian_weight | |
<reponame>theunissenlab/sounsig<gh_stars>10-100
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.model_selection import StratifiedKFold
from scipy.stats import binom
def discriminatePlot(X, y, cVal, titleStr='', figdir='.', Xcolname = None, plotFig = False, removeTickLabels = False, testInd = None):
# Frederic's Robust Wrapper for discriminant analysis function. Performs LDA, QDA and RF after error checking,
# Generates nice plots and returns cross-validated
# performance, stderr and base line.
# X np array n rows x p parameters
# y group labels n rows
# cVal rgb color code for each data point - should be the same for each data point belonging to the same group
# titleStr title for plots
# figdir is a directory name (folder name) for figures
# Xcolname is a np.array or list of strings with column names for printout display
# returns: ldaScore, ldaScoreSE, qdaScore, qdaScoreSE, rfScore, rfScoreSE, nClasses
# Global Parameters
CVFOLDS = 10
MINCOUNT = 10
MINCOUNTTRAINING = 5
# figdir = '/Users/frederictheunissen/Documents/Data/Julie/Acoustical Analysis/Figures Voice'
# Initialize Variables and clean up data
classes, classesCount = np.unique(y, return_counts = True) # Classes to be discriminated should be same as ldaMod.classes_
goodIndClasses = np.array([n >= MINCOUNT for n in classesCount])
goodInd = np.array([b in classes[goodIndClasses] for b in y])
if testInd is not None:
# Check for goodInd - should be an np.array of dtype=bool
# Transform testInd into an index inside xGood and yGood
testIndx = testInd.nonzero()[0]
goodIndx = goodInd.nonzero()[0]
testInd = np.hstack([ np.where(goodIndx == testval)[0] for testval in testIndx])
trainInd = np.asarray([i for i in range(len(goodIndx)) if i not in testInd])
yGood = y[goodInd]
XGood = X[goodInd]
cValGood = cVal[goodInd]
classes, classesCount = np.unique(yGood, return_counts = True)
nClasses = classes.size # Number of classes or groups
# Do we have enough data?
if (nClasses < 2):
print ('Error in ldaPlot: Insufficient classes with minimum data (%d) for discrimination analysis' % (MINCOUNT))
return -1, -1, -1, -1 , -1, -1, -1, -1, -1
if testInd is None:
cvFolds = min(min(classesCount), CVFOLDS)
if (cvFolds < CVFOLDS):
print ('Warning in ldaPlot: Cross-validation performed with %d folds (instead of %d)' % (cvFolds, CVFOLDS))
else:
cvFolds = 1
# Data size and color values
nD = XGood.shape[1] # number of features in X
nX = XGood.shape[0] # number of data points in X
cClasses = [] # Color code for each class
for cl in classes:
icl = (yGood == cl).nonzero()[0][0]
cClasses.append(np.append(cValGood[icl],1.0))
cClasses = np.asarray(cClasses)
# Use a uniform prior
myPrior = np.ones(nClasses)*(1.0/nClasses)
# Perform a PCA for dimensionality reduction so that the covariance matrix can be fitted.
nDmax = int(np.fix(np.sqrt(nX//5)))
if nDmax < nD:
print ('Warning: Insufficient data for', nD, 'parameters. PCA projection to', nDmax, 'dimensions.' )
nDmax = min(nD, nDmax)
pca = PCA(n_components=nDmax)
Xr = pca.fit_transform(XGood)
print ('Variance explained is %.2f%%' % (sum(pca.explained_variance_ratio_)*100.0))
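# Worked example of the rule of thumb above (illustrative, not from the original
# code): with nX = 500 data points, nDmax = int(np.fix(np.sqrt(500 // 5))) = 10,
# so at most 10 principal components are kept, which leaves roughly
# nX ~= 5 * nDmax**2 samples available for fitting the class covariance matrices.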
# Initialise Classifiers
ldaMod = LDA(n_components = min(nDmax,nClasses-1), priors = myPrior, shrinkage = None, solver = 'svd')
qdaMod = QDA(priors = myPrior)
rfMod = RF() # by default assumes equal weights
# Perform CVFOLDS fold cross-validation to get performance of classifiers.
ldaYes = 0
qdaYes = 0
rfYes = 0
cvCount = 0
if testInd is None:
skf = StratifiedKFold(n_splits = cvFolds)
skfList = skf.split(Xr, yGood)
else:
skfList = [(trainInd,testInd)]
for train, test in skfList:
# Enforce the MINCOUNT in each class for Training
trainClasses, trainCount = np.unique(yGood[train], return_counts=True)
goodIndClasses = np.array([n >= MINCOUNTTRAINING for n in trainCount])
goodIndTrain = np.array([b in trainClasses[goodIndClasses] for b in yGood[train]])
# Specify the training data set, the number of groups and priors
yTrain = yGood[train[goodIndTrain]]
XrTrain = Xr[train[goodIndTrain]]
trainClasses, trainCount = np.unique(yTrain, return_counts=True)
ntrainClasses = trainClasses.size
# Skip this cross-validation fold because of insufficient data
if ntrainClasses < 2:
continue
goodInd = np.array([b in trainClasses for b in yGood[test]])
if (goodInd.size == 0):
continue
# Fit the data
trainPriors = np.ones(ntrainClasses)*(1.0/ntrainClasses)
ldaMod.priors = trainPriors
qdaMod.priors = trainPriors
ldaMod.fit(XrTrain, yTrain)
qdaMod.fit(XrTrain, yTrain)
rfMod.fit(XrTrain, yTrain)
ldaYes += np.around((ldaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
qdaYes += np.around((qdaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
rfYes += np.around((rfMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
cvCount += goodInd.size
# Refit with all the data for the plots
ldaMod.priors = myPrior
qdaMod.priors = myPrior
Xrr = ldaMod.fit_transform(Xr, yGood)
# Check labels
for a, b in zip(classes, ldaMod.classes_):
if a != b:
print ('Error in ldaPlot: labels do not match')
# Check the within-group covariance in the rotated space
# covs = []
# for group in classes:
# Xg = Xrr[yGood == group, :]
# covs.append(np.atleast_2d(np.cov(Xg,rowvar=False)))
# withinCov = np.average(covs, axis=0, weights=myPrior)
# Print the five largest coefficients of first 3 DFA
MAXCOMP = 3 # Maximum number of DFA components
MAXWEIGHT = 5 # Maximum number of weights printed for each component
ncomp = min(MAXCOMP, nClasses-1)
nweight = min(MAXWEIGHT, nD)
# The scalings_ attribute has the eigenvectors of the LDA in columns and pca.components_ has the eigenvectors of the PCA in columns
weights = np.dot(ldaMod.scalings_[:,0:ncomp].T, pca.components_)
print('LDA Weights:')
for ic in range(ncomp):
idmax = np.argsort(np.abs(weights[ic,:]))[::-1]
print('DFA %d: '%ic, end = '')
for iw in range(nweight):
if Xcolname is None:
colstr = 'C%d' % idmax[iw]
else:
colstr = Xcolname[idmax[iw]]
print('%s %.3f; ' % (colstr, float(weights[ic, idmax[iw]]) ), end='')
print()
if plotFig:
dimVal = 0.8 # Overall dimming of background so that points can be seen
# Obtain fits in this rotated space for display purposes
ldaMod.fit(Xrr, yGood)
qdaMod.fit(Xrr, yGood)
rfMod.fit(Xrr, yGood)
XrrMean = Xrr.mean(0)
# Make a mesh for plotting
x1, x2 = np.meshgrid(np.arange(-6.0, 6.0, 0.1), np.arange(-6.0, 6.0, 0.1))
xm1 = np.reshape(x1, -1)
xm2 = np.reshape(x2, -1)
nxm = np.size(xm1)
Xm = np.zeros((nxm, Xrr.shape[1]))
Xm[:,0] = xm1
if Xrr.shape[1] > 1 :
Xm[:,1] = xm2
for ix in range(2,Xrr.shape[1]):
Xm[:,ix] = np.squeeze(np.ones((nxm,1)))*XrrMean[ix]
XmcLDA = np.zeros((nxm, 4)) # RGBA values for color for LDA
XmcQDA = np.zeros((nxm, 4)) # RGBA values for color for QDA
XmcRF = np.zeros((nxm, 4)) # RGBA values for color for RF
# Predict values on mesh for plotting based on the first two DFs
yPredLDA = ldaMod.predict_proba(Xm)
yPredQDA = qdaMod.predict_proba(Xm)
yPredRF = rfMod.predict_proba(Xm)
# Transform the predictions in color codes
maxLDA = yPredLDA.max()
for ix in range(nxm) :
cWeight = yPredLDA[ix,:] # Prob for all classes
cWinner = ((cWeight == cWeight.max()).astype('float')) # Winner takes all
# XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
XmcLDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
XmcLDA[ix,3] = (cWeight.max()/maxLDA)*dimVal
# Plot the surface of probability
plt.figure(facecolor='white', figsize=(10,4))
plt.subplot(131)
Zplot = XmcLDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
if nClasses > 2:
plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
else:
plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
plt.title('%s: LDA %d/%d' % (titleStr, ldaYes, cvCount))
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')
if removeTickLabels:
ax = plt.gca()
labels = [item.get_text() for item in ax.get_xticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
# Transform the predictions in color codes
maxQDA = yPredQDA.max()
for ix in range(nxm) :
cWeight = yPredQDA[ix,:] # Prob for all classes
cWinner = ((cWeight == cWeight.max()).astype('float')) # Winner takes all
# XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
XmcQDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
XmcQDA[ix,3] = (cWeight.max()/maxQDA)*dimVal
# Plot the surface of probability
plt.subplot(132)
Zplot = XmcQDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
if nClasses > 2:
plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
else:
plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
plt.title('%s: QDA %d/%d' % (titleStr, qdaYes, cvCount))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
if removeTickLabels:
ax = plt.gca()
labels = [item.get_text() for item in ax.get_xticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
# | |
val)
pass #print("00000000")
pass #print("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",full)
pass #print("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
if self._showLoading == True :#or True:
print(f" ::: Loading History ::: {id}", end="\r\r\r\r")
print()
try:
while(manager._fin is False and time.time()-t0 < timeOut):
####
#### while(abs(manager._checkCounter- cycle[0])<2):
pass #print("waiting for ",id,"to fill",manager._checkCounter, cycle,manager._fin)
#### time.sleep(1)
pass #print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",manager._checkCounter,cycle)
except:
print(f" ::: Exception while looking for xoKey {id} value {val}")
traceback.print_exc()
# print("waiting full TIME =========",time.time()-t)
return full
def __init__(self, max = 10000000, wait=0.01, **entries):
#### global hasManager, Dir, ExtKey, ExtData, channels
# print("IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII")
global manager
# self._max = 10000000,
# self._wait=wait
self._Threads_ = []
# self._showLoading = True
# self._shared._Threads_ = []
# self._Threads_ = []
# print("_______________iiiiiiiiiiiii______________")
if ok._shared is not None:
print(" ::::::::::::::::::::::::::::::::::::::::")
print(" ::: :::")
print(" ::: xObject is ready :::")
print(" ::: :::")
print(" ::: now you can use xo. anywhere :::")
print(" ::: :) :::")
print(" ::: :::")
print(" ::::::::::::::::::::::::::::::::::::::::")
print()
self._manager_ = manager
ok._shared = self
#### if hasManager:
#### # print("FFFFFFFFFFFFFFFFFF")
#### return None
#### # print("New Manager!!")
#### #### super(objManager, self).__init__()
#### dbexist = PATH.exists(Dir)
#### if not dbexist:
#### os.makedirs(Dir)
self.__dict__.update(entries)
#### self.dir = Dir
#### self.ExtKey = ExtKey
#### self.ExtData = ExtData
#### self.channels = channels
#### print("VVVVVVVVVVVVVVVVVVV")
#### self.__channelKeys = {}
#### self.__channelValues = {}
#### self.refreshChannels()
def GetXO(self, get = "", allow_creation = False, getValue = False, follow = None):
# print("XXXXXXXXXXXXXXXXXXXX", get, getValue)
if "str" not in str(type(get)):
print(self._id,"Please provide a string as a key",get)
return None
c = 0
# print(c,c,c,c,c,c,c,c,c,c);c+=1
sp = get.split(".")
first = get.split(".")[0]
if follow is None:
if len(first) > 0:
if allow_creation or xoManager.loadKey(first) is not None:
# print("FIRST",first)
return self.GetXO(get = ".".join(sp[1:]), allow_creation=allow_creation,follow=xo[first], getValue=getValue)
return None
# print(c,c,c,c,c,c,c,c,c,c);c+=1
if get == "":
# print(c,c,c,c,c,c,c,c,c,c);c+=10
if follow is not None and follow._val is not None:
# print(c,c,c,c,c,c,c,c,c,c);c+=100
if "list" in str(type(follow._val)):
if len(follow._val) > 0:
if getValue:
return follow._val[0]
return follow
if getValue:
return follow._val
return follow
# print(c,c,c,c,c,c,c,c,c,c);c+=1000
return follow
else:
if allow_creation or first in follow.children():
return self.GetXO(get = ".".join(sp[1:]), allow_creation=allow_creation,follow=follow[first], getValue=getValue)
return None
def ValueOf(self, get, allow_creation = False):
return self.GetXO(get = get, allow_creation=allow_creation, getValue = True)
def _getValue(self, get, allow_creation = False):
return self.GetXO(get = get, allow_creation=allow_creation, getValue = True)
'''
## key = xoKey i.e xo.a.b.c.d xoKey is "a.b.c.d"
## value to set in xo[key]
## allow_creation = creates XO if not already there
## retBool returns if the value was set correctly, True by default
## retVal returns xo[key].value()
## retXO returns the xo so you can chain commands ie:
## xo.wow.SetValue(1000,retXO=True).runFunc1(True, retXO=True).runFunc2("awesome") '''
def SetValue(self, key, value, allow_creation = True, retBool = True, retValue = False, retXO = False):
if "xo.obj" in str(type(key)):
if key.value() is not None:
key = str(key.value())
else:
key = key._id
if key is None:
" ::: SetValue Key is None :::"
return False
res = self.GetXO(key, allow_creation=allow_creation)
if res is not None and "xo.obj" in str(type(res)):
# res.show()
# print("..............",res, res._id)
xoManager.save(res._id, value)
# res.show()
else:
print(type(res),res)
print("!!!!!!!!!!!!!!!!!!!")
pass
if retXO:
return self.GetXO(key)
res = self.GetXO(key,getValue=True)
if retValue:
return res
# print(res,"@@@@@@",value)
return res == value
def _setDB(self,dirname):
print("xxxxxxxxxxxxxxxxxx to be implemented SETTING DB TO:",dirname)
# xoManager = manager = newObjManager()
def _defFunc(self, *a, **aa):
data = [a, aa]
print()
print(":::::::: incoming data ::::::::::::::::::::::::::::: "+time.ctime())
print(f":::::::: {a}, {aa} ::::::::::::::::::::::::::::: ")
# print(data)
# print(":::::::: "+channel+ "- incoming data :::::::::::::::::::::::::::::")
print()
return data
# #
# def working(data):
# print("!!!!!!!! ALRIGHT !!!!!",data)
# return "!!!!!!!! ALRIGHT !!!!!"+str(data)
# # #
# # #
#
# xo.subscribe("n.run.0", working, autoPub = ["n.x.y","n.kux"] )
# xo.subscribe("n.run.0", working )
# :D
# def subscribe(self, channel=None, func=None, autoPub = None, block = False, once= False, echo=True, debug = False, withID = False, path = None,url = None):
def subscribe(self, channel=None, func=None, autoPub = None, block = False, once= False, echo=True, debug = False, withID = False, path = None, url = None):
if url is None and path is not None and ("http" in path or ".com" in path):
url = path
if path is None:
path = url
if func is None:
func = self._defFunc
if channel is not None and PATH.exists(channel):
path = channel
# print("CHANNEL",channel)
# print("FUNC",func)
# print("autoPub",autoPub)
if path is not None:
if url is not None:
if ".com/" in url:
if "github" in url:
pass
#TODO subscribe to github, check every 1 min, randomize user agent
else:
print(" ::: Could not find match for url",url)
if PATH.exists(path):
# EXAMPLE:
# p = "/home/magic/AlphaENG/xo-gd/";
# xo.subscribe(p, lambda a : print(" ::: File/Folder Changed :",a), "res")
if True:
# fW.watchF(path = path, callback = func, xoKey = autoPub, xo = self)
if autoPub is None or autoPub == "":
autoPub = "watch."+os.path.basename(path)
channel = autoPub
watchF(path = path, callback = func, xoKey = autoPub+".changes", xo = self, res = autoPub)
return True
pass
else:
print(" ::: File or folder does not exist",path, "doesn't exist")
return False
if channel is not None:
if channel.startswith("xo."):
channel = channel[3:]
if self._Threads_ is None:
self._Threads_ = ["xxx"]
# print("[[[[[[[[[[[[]]]]]]]]]]]]")
# if self.lowerCase:
# channel = channel.lower()
#if channel+self.extChannel not in self.__Channels:
#self.printF("########")
# self.addChannel(channel)
# print("########",channel)
self.GetXO(channel,allow_creation=True)
# xo.GetXO(channel,allow_creation=True)u
# print("########",channel)
# xo[channel].show()
if echo:
# print("Subscribing to", channel, "- currently equals:", str(xo.ValueOf(channel)) )
print(" ::: Subscribing to", channel)
if block:
self.__waitAndExe(channel, func, once, autoPub = autoPub, debug = debug, withID=withID)
else:
uT = Thread(target = self.__waitAndExe, args = [channel, func, once, autoPub, debug, withID])
self._Threads_.value().append(uT)
uT.start()
return True
print(); print(" ::: Please provide a channel/path/url to xo.subscribe() "); print()
return False
def _publish(self, channel, data):
# print("ppppppppppp",channel,data)
return self.SetValue(channel, data, allow_creation = True)
def __waitAndExe(self, channel, func, once= False, autoPub = None, debug = False, withID = False):
# if self.lowerCase:
# channel = channel.lower()
# print("__________-")
# print("AAAAAAAAAAAAAAA",autoPub)
run = True
newdata = None
while run:
# for ccc in range(4):
# time.sleep(1)
run = not once
# print("CCCCCCCCCCCC",channel)
data = self.__awaitChannelUpdate(channel)
# print("CCCCCCCCCCCC2222")
# print("@@@@@@@@@@@@")
if not debug:
try:
if withID:
newdata = func([data,channel])
else:
newdata = func(data)
except:
traceback.print_exc()
else:
if withID:
newdata = func([data,channel])
else:
newdata = func(data)
# print("############",newdata,"@@@@@@@",autoPub)
if autoPub is not None:
if type(autoPub) is list:
for pb in autoPub:
print(f" ::: Processed new data in {channel.replace('/','.')} ::: Results saved in {pb} ::: \n ::: {pb} Data: :::\n{newdata}\n\n")
# self._publish(pb,newdata)
self.GetXO(pb).set(newdata)
else:
print(f" ::: data from {channel} was procced in {func} and the results autoPubish to",autoPub, ":::")
# self._publish(autoPub,newdata)
self.GetXO(autoPub).set(newdata)
else:
pass
#print("autoPub is None")
def __awaitChannelUpdate(self, channel,wait = 0.00001):
# if self.lowerCase:
# channel = channel.lower()
# wait=self.wait
# print("DDDDDDDDD11",d)
# d = self.manager.loadKey(channel, pr = True)
d = xoManager.loadKey(channel)
# print("DDDDDDDDD12",d)
# d = self.load(self.dir+channel+self.extChannel)
#print("ddddddddddddddddd",d)
if d is None:
# d = self.load(self.dir+channel+self.extChannel)
# print("DDDDDDDDD1",d)
d = xoManager.loadKey(channel)
# print("DDDDDDDDD2",d)
if d is None:
return None
# print("DDDDDDDDD0",d)
tempD = d+0
#print(d,tempD,str(d) == str(tempD))
while(str(d) == str(tempD)):
# print(".",d)
# d = self.load(self.dir+channel+self.extChannel)
d = xoManager.loadKey(channel)
# print("CCCCCCCCCCCCC",channel,d)
time.sleep(wait)
# print("XXXXXXXXXXX")
# self.printF("!!!!!!!!!!!!!!!!!")
# return self.load(self.dir+channel+self.extData)
return xoManager.loadData(channel)
def __set__(self, key, val):
pass
# print("eeeeeeeeeeeeeeeeeeeessssssssse")
self.__dict__[key] = val
return True
def __get__(self,key, done = False, *args, **kwargs):
# print("TTTTTT",type(key))
# print("###########",key,"#")
if done:
return self
for a in args:
# print (a, str(type(a)), str(a))
if "xo.ok" in str(a):
# print("SSSSSSSSSSSSSS")
return ok._shared.__get__("k",done = True)
# print("$$$$$$$$$$$")
# for a in kwargs:
# print(a, kwargs[a])
# pass
# print("eeeeeeeeeeeeeeeeeeeeeagggggggggaa")
return [""]
return self.__dict__[key]
def __setattr__(self, name, value):
pass ## print("__setattr____setattr____setattr____setattr____setattr____setattr__1",name,value)
if "str" not in str(type(name)):
name = str(name)
# self.__dir__().append("bbb")
res = self._returnFull_(id=name, val=value)
self.__dict__[name] = res
# self.set(name, res)
return res
if not name.startswith("_") and "_val" in self.__dict__ and name not in SelfNamed._languageDefaults:#### and "__skip" in self.__dict__ and name not in self.skip:
if "xo.obj" not in str(type(value)):
pass ## print("_____________________",str(type(value)))
if name not in self.__dict__:
pass ## print("2222222222")
# print("pp22222",self)
self[name] = obj(id = self._id+"/"+name, val= value, parent = None)
else:
pass ## print("33333333")
#### self.__set__(name,value)
manager.save(channel = self._id+"/"+name, data=value)
#### self.save(id = self._id+"/"+name, val= value)
else:
pass ## print("44444")
self.__dict__[name] = value
else:
#### # print("555555555")
self.__dict__[name] = value
def __getitem__(self, name):
# print("__getitem____getitem____getitem____getitem____getitem____getitem____getitem__2",name)
if "str" not in str(type(name)):
name = str(name)
return self._returnFull_(id=name)
# if not name.startswith("_") and "_val" in self.__dict__ and name not in SelfNamed._languageDefaults and name not in self.__dict__:
# self.__dict__[name] = obj(id = self._id+"/"+name, parent = self)
#
# if name in self.__dict__:
# #### print("FUUCKKKKKKKKKKKKKKKKKKKKKk")
#
# item = self.__dict__[name]
# return item
#
# atr = object.__getattribute__(self, name)
# return atr
def __assign__(self, v):
# print('called with %s' % v)
pass
def __setitem__(self, name, value):
if "str" not in str(type(name)):
name = str(name)
pass #print("__setitem____setitem____setitem____setitem____setitem____setitem__3",name,value)
res = self._returnFull_(id=name, val=value)
self.__dict__[name] = res
return res
# return self._returnFull_(id=name, val=value)
#### if "str" not in str(type(name)):
#### name = str(name)
#### if not name.startswith("_") and "_val" in self.__dict__ and name is not "_val":#### and "__skip" in self.__dict__ and name not in self.skip:
#### print("VVVVVVVV",str(type(value)))
#### if "xo.obj" not in str(type(value)):
#### print("_____________________",str(type(value)))
#### if name not in self.__dict__:
#### print("1",name)
#### self.__dict__[name] = obj(id = self._id+"/"+name, val = value)
#### else:
#### print("2",name)
#### self[name].set(value)
#### else:
#### print("22222222222222222222222")
#### self.__dict__[name] = value
#### else:
#### print("3",name)
#### self.__dict__[name] = value
#### print("FINISHED SETTING ITEM", self.__dict__)
def __getattribute__(self, name, loop = True):
# print("__getattribute____getattribute____getattribute____getattribute____getattribute__4",name)
if "str" not in str(type(name)):
name = str(name)
elif name == "value":
# print("VVVVVVVVVVVVVVVVVVVVVVVVVVVVVV")
# name = "_val"
atr = object.__getattribute__(self, name)
# return atr[0]
# self.__dict__[name] = atr
return atr
atr = object.__getattribute__(self, name)
# self.__dict__[name] = atr
return atr
def __getattr__(self, name, loop = True):
# print("__getattr____getattr____getattr____getattr____getattr____getattr__5", name)
if "str" not in str(type(name)):
name = str(name)
# print("__getattr____getattr____getattr____getattr____getattr____getattr__5", name)
#### return ox(id=name)
#### print("getttt")
#### if "str" not in str(type(name)):
#### name = str(name)
#### #### return name
if not name.startswith("_") and name not in self.__dict__:
####print("OOOOO_ooooooooooooooooooooo",name)####,self.__dict__)
####print("aaaaaaaaaaaaaa")
#### self[name] = obj(id = self._id+"/"+name)
# return self._returnFull_(id=name)
res = self._returnFull_(id=name)
self.__dict__[name] = res
return res
####
#### if name in self.__dict__:
#### print("bbbbbbbbbbbbbbb")
#### atr = object.__getattribute__(self, name)
####
#### return atr
#### #### return 13
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$xxxxxxxxxxxx")
def cleanInput():
lines = []
waiting = True
startT = None
timeout = 1
try:
while waiting:
if len(lines) > 0:
startT = time.time()
if startT is not None:
print("!!!!!!!!!!!")
if time.time() - startT > timeout:
waiting = False
lines.append(input())
except EOFError:
pass
lines = "\n".join(lines)
return lines
def cleantxt(a):
clean = ""
for k in a.split("\n"):
clean += re.sub(r"[^a-zA-Z0-9]+", ' ', k) + " "
return clean[:-1]
# print("XXXXXXXXXXXXXx")
def changeHome(newHome):
pass
# print(" ::: NEED TO IMPLEMENT HOME CHANGE ::: ", newHome)
dbDirName = "xo-gd"
defaultDB = "main"
xoHome = "/".join(os.getcwd().split("/")[:3]) + "/" + | |
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['StreamArgs', 'Stream']
@pulumi.input_type
class StreamArgs:
def __init__(__self__, *,
shard_count: pulumi.Input[int],
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Stream resource.
:param pulumi.Input[int] shard_count: The number of shards that the stream will use.
Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
:param pulumi.Input[str] encryption_type: The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
:param pulumi.Input[bool] enforce_consumer_deletion: A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
:param pulumi.Input[str] kms_key_id: The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
:param pulumi.Input[int] retention_period: Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_level_metrics: A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
pulumi.set(__self__, "shard_count", shard_count)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if encryption_type is not None:
pulumi.set(__self__, "encryption_type", encryption_type)
if enforce_consumer_deletion is not None:
pulumi.set(__self__, "enforce_consumer_deletion", enforce_consumer_deletion)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if retention_period is not None:
pulumi.set(__self__, "retention_period", retention_period)
if shard_level_metrics is not None:
pulumi.set(__self__, "shard_level_metrics", shard_level_metrics)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="shardCount")
def shard_count(self) -> pulumi.Input[int]:
"""
The number of shards that the stream will use.
Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
"""
return pulumi.get(self, "shard_count")
@shard_count.setter
def shard_count(self, value: pulumi.Input[int]):
pulumi.set(self, "shard_count", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="encryptionType")
def encryption_type(self) -> Optional[pulumi.Input[str]]:
"""
The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
"""
return pulumi.get(self, "encryption_type")
@encryption_type.setter
def encryption_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_type", value)
@property
@pulumi.getter(name="enforceConsumerDeletion")
def enforce_consumer_deletion(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
"""
return pulumi.get(self, "enforce_consumer_deletion")
@enforce_consumer_deletion.setter
def enforce_consumer_deletion(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enforce_consumer_deletion", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="retentionPeriod")
def retention_period(self) -> Optional[pulumi.Input[int]]:
"""
Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
"""
return pulumi.get(self, "retention_period")
@retention_period.setter
def retention_period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retention_period", value)
@property
@pulumi.getter(name="shardLevelMetrics")
def shard_level_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
"""
return pulumi.get(self, "shard_level_metrics")
@shard_level_metrics.setter
def shard_level_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "shard_level_metrics", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Stream(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
scales elastically for real-time processing of streaming big data.
For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.Stream("testStream",
retention_period=48,
shard_count=1,
shard_level_metrics=[
"IncomingBytes",
"OutgoingBytes",
],
tags={
"Environment": "test",
})
```
## Import
Kinesis Streams can be imported using the `name`, e.g.
```sh
$ pulumi import aws:kinesis/stream:Stream test_stream kinesis-test
```
[1]https://aws.amazon.com/documentation/kinesis/ [2]https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html [3]https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
:param pulumi.Input[str] encryption_type: The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
:param pulumi.Input[bool] enforce_consumer_deletion: A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
:param pulumi.Input[str] kms_key_id: The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
:param pulumi.Input[int] retention_period: Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
:param pulumi.Input[int] shard_count: The number of shards that the stream will use.
Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_level_metrics: A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StreamArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
scales elastically for real-time processing of streaming big data.
For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.Stream("testStream",
retention_period=48,
shard_count=1,
shard_level_metrics=[
"IncomingBytes",
"OutgoingBytes",
],
tags={
"Environment": "test",
})
```
## Import
Kinesis Streams can be imported using the `name`, e.g.
```sh
$ pulumi import aws:kinesis/stream:Stream test_stream kinesis-test
```
[1]https://aws.amazon.com/documentation/kinesis/ [2]https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html [3]https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html
:param str resource_name: The name of the resource.
:param StreamArgs args: The | |
of items, '+' is faster than ''.join(...)
value = (self.protocol + '://' +
self.get_header('host') +
self.app +
self.path)
if self.query_string:
value = value + '?' + self.query_string
return value
url = uri
"""Alias for uri"""
@property
def relative_uri(self):
"""The path + query string portion of the full URI."""
if self.query_string:
return self.app + self.path + '?' + self.query_string
return self.app + self.path
@property
def user_agent(self):
"""Value of the User-Agent string, or None if missing."""
return self._get_header_by_wsgi_name('USER_AGENT')
@property
def headers(self):
"""Get HTTP headers
Build a temporary dictionary of dash-separated HTTP headers,
which can be used as a whole, like, to perform an HTTP request.
If you want to lookup a header, please use `get_header` instead.
Returns:
A dictionary of HTTP headers.
"""
return dict((k.lower().replace('_', '-'), v)
for k, v in self._headers.items())
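# Illustrative sketch (hypothetical values, not part of the original module):
# WSGI-style header names come back lowercased and dash-separated, e.g.
#
#   req._headers == {'CONTENT_TYPE': 'text/plain', 'X_AUTH_TOKEN': 'abc'}
#   req.headers   # -> {'content-type': 'text/plain', 'x-auth-token': 'abc'}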
def get_header(self, name, required=False):
"""Return a header value as a string
Args:
name: Header name, case-insensitive (e.g., 'Content-Type')
required: Set to True to raise HTTPBadRequest instead
of returning gracefully when the header is not found
(default False)
Returns:
The value of the specified header if it exists, or None if the
header is not found and is not required.
Raises:
HTTPBadRequest: The header was not found in the request, but
it was required.
"""
# Use try..except to optimize for the header existing in most cases
try:
# Don't take the time to cache beforehand, using HTTP naming.
# This will be faster, assuming that most headers are looked
# up only once, and not all headers will be requested.
return self._headers[name.upper().replace('-', '_')]
except KeyError:
if not required:
return None
description = 'The "' + name + '" header is required.'
raise HTTPBadRequest('Missing header', description)
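    # Illustrative sketch (not part of the original module): `req` is a hypothetical
    # request instance. Lookups are case-insensitive because the name is normalized
    # to its WSGI-style key before the dict access:
    #
    #     req.get_header('Content-Type')                  # reads self._headers['CONTENT_TYPE']
    #     req.get_header('X-Auth-Token', required=True)   # raises HTTPBadRequest if absent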
def get_param(self, name, required=False, store=None):
"""Return the value of a query string parameter as a string
Args:
name: Parameter name, case-sensitive (e.g., 'sort')
required: Set to True to raise HTTPBadRequest instead of returning
gracefully when the parameter is not found (default False)
store: A dict-like object in which to place the value of the
param, but only if the param is found.
Returns:
The value of the param as a string, or None if param is not found
and is not required.
Raises:
HTTPBadRequest: The param was not found in the request, but was
required.
"""
# PERF: Use if..in since it is a good all-around performer; we don't
# know how likely params are to be specified by clients.
if name in self._params:
if store is not None:
store[name] = self._params[name]
return self._params[name]
if not required:
return None
description = 'The "' + name + '" query parameter is required.'
raise HTTPBadRequest('Missing query parameter', description)
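    # Illustrative sketch (not part of the original module): with a query string of
    # '?sort=asc' and a hypothetical request instance `req`:
    #
    #     filters = {}
    #     req.get_param('sort', store=filters)     # returns 'asc', sets filters['sort']
    #     req.get_param('missing')                 # returns None
    #     req.get_param('missing', required=True)  # raises HTTPBadRequest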
def get_param_as_int(self, name,
required=False, min=None, max=None, store=None):
"""Return the value of a query string parameter as an int
Args:
name: Parameter name, case-sensitive (e.g., 'limit')
required: Set to True to raise HTTPBadRequest instead of returning
gracefully when the parameter is not found or is not an
integer (default False)
min: Set to the minimum value allowed for this param. If the param
is found and it is less than min, an HTTPError is raised.
max: Set to the maximum value allowed for this param. If the param
is found and its value is greater than max, an HTTPError is
raised.
store: A dict-like object in which to place the value of the
param, but only if the param is found (default None)
Returns:
The value of the param if it is found and can be converted to an
integer. If the param is not found, returns None, unless required
is True.
        Raises:
HTTPBadRequest: The param was not found in the request, even though
it was required to be there. Also raised if the param's value
falls outside the given interval, i.e., the value must be in
the interval: min <= value <= max to avoid triggering an error.
"""
# PERF: Use if..in since it is a good all-around performer; we don't
# know how likely params are to be specified by clients.
if name in self._params:
val = self._params[name]
try:
val = int(val)
except ValueError:
description = ('The value of the "' + name + '" query '
'parameter must be an integer.')
raise InvalidParamValueError(description)
if min is not None and val < min:
description = ('The value of the "' + name + '" query '
'parameter must be at least %d') % min
raise InvalidHeaderValueError(description)
if max is not None and max < val:
description = ('The value of the "' + name + '" query '
'parameter may not exceed %d') % max
raise InvalidHeaderValueError(description)
if store is not None:
store[name] = val
return val
if not required:
return None
description = 'The "' + name + '" query parameter is required.'
raise HTTPBadRequest('Missing query parameter', description)
def get_param_as_bool(self, name, required=False, store=None):
"""Return the value of a query string parameter as a boolean
The following bool-ish strings are supported:
True: ('true', 'True', 'yes')
False: ('false', 'False', 'no')
Args:
name: Parameter name, case-sensitive (e.g., 'limit')
required: Set to True to raise HTTPBadRequest instead of returning
gracefully when the parameter is not found or is not a
recognized bool-ish string (default False).
store: A dict-like object in which to place the value of the
param, but only if the param is found (default None)
Returns:
The value of the param if it is found and can be converted to a
boolean. If the param is not found,
returns None unless required is True
        Raises:
HTTPBadRequest: The param was not found in the request, even though
it was required to be there.
"""
# PERF: Use if..in since it is a good all-around performer; we don't
# know how likely params are to be specified by clients.
if name in self._params:
val = self._params[name]
if val in TRUE_STRINGS:
val = True
elif val in FALSE_STRINGS:
val = False
else:
description = ('The value of the "' + name + '" query '
'parameter must be "true" or "false".')
raise InvalidParamValueError(description)
if store is not None:
store[name] = val
return val
if not required:
return None
description = 'The "' + name + '" query parameter is required.'
raise HTTPBadRequest('Missing query parameter', description)
def get_param_as_list(self, name,
transform=None, required=False, store=None):
"""Return the value of a query string parameter as a list
Note that list items must be comma-separated.
Args:
name: Parameter name, case-sensitive (e.g., 'limit')
transform: An optional transform function that takes as input
each element in the list as a string and outputs a transformed
element for inclusion in the list that will be returned. For
example, passing the int function will transform list items
into numbers.
required: Set to True to raise HTTPBadRequest instead of returning
gracefully when the parameter is not found or is not an
integer (default False)
store: A dict-like object in which to place the value of the
param, but only if the param is found (default None)
Returns:
The value of the param if it is found. Otherwise, returns None
            unless required is True. For partial lists, None will be returned
as a placeholder. For example:
things=1,,3
would be returned as:
['1', None, '3']
while this:
things=,,,
            would just be returned as:
[None, None, None, None]
        Raises:
HTTPBadRequest: The param was not found in the request, but was
required.
"""
# PERF: Use if..in since it is a good all-around performer; we don't
# know how likely params are to be specified by clients.
if name in self._params:
items = self._params[name].split(',')
# PERF(kgriffs): Use if-else rather than a DRY approach
# that sets transform to a passthrough function; avoids
# function calling overhead.
if transform is None:
items = [i if i != '' else None
for i in items]
else:
try:
items = [transform(i) if i != '' else None
                             for i in items]
o'er with white;",
"When lofty trees I see barren of leaves,",
"Which erst from heat did canopy the herd,",
"And summer's green all girded up in sheaves,",
"Borne on the bier with white and bristly beard,",
"Then of thy beauty do I question make,",
"That thou among the wastes of time must go,",
"Since sweets and beauties do themselves forsake",
"And die as fast as they see others grow;",
" And nothing 'gainst Time's scythe can make defence",
" Save breed, to brave him when he takes thee hence."),
("<NAME>",
"O! that you were your self; but, love, you are",
"No longer yours, than you your self here live:",
"Against this coming end you should prepare,",
"And your sweet semblance to some other give:",
"So should that beauty which you hold in lease",
"Find no determination; then you were",
"Yourself again, after yourself's decease,",
"When your sweet issue your sweet form should bear.",
"Who lets so fair a house fall to decay,",
"Which husbandry in honour might uphold,",
"Against the stormy gusts of winter's day",
"And barren rage of death's eternal cold?",
"O! none but unthrifts. Dear my love, you know,",
"You had a father: let your son say so."),
("<NAME>",
"Not from the stars do I my judgement pluck;",
"And yet methinks I have Astronomy,",
"But not to tell of good or evil luck,",
"Of plagues, of dearths, or seasons' quality;",
"Nor can I fortune to brief minutes tell,",
"Pointing to each his thunder, rain and wind,",
"Or say with princes if it shall go well",
"By oft predict that I in heaven find:",
"But from thine eyes my knowledge I derive,",
"And, constant stars, in them I read such art",
"As truth and beauty shall together thrive,",
"If from thyself, to store thou wouldst convert;",
"Or else of thee this I prognosticate:",
"Thy end is truth's and beauty's doom and date."),
("Sonnet 15",
"When I consider every thing that grows",
"Holds in perfection but a little moment,",
"That this huge stage presenteth nought but shows",
"Whereon the stars in secret influence comment;",
"When I perceive that men as plants increase,",
"Cheered and checked even by the self-same sky,",
"Vaunt in their youthful sap, at height decrease,",
"And wear their brave state out of memory;",
"Then the conceit of this inconstant stay",
"Sets you most rich in youth before my sight,",
"Where wasteful Time debateth with decay",
"To change your day of youth to sullied night,",
"And all in war with Time for love of you,",
"As he takes from you, I engraft you new."),
("Sonnet 16",
"But wherefore do not you a mightier way",
"Make war upon this bloody tyrant, Time?",
"And fortify your self in your decay",
"With means more blessed than my barren rhyme?",
"Now stand you on the top of happy hours,",
"And many maiden gardens, yet unset,",
"With virtuous wish would bear you living flowers,",
"Much liker than your painted counterfeit:",
"So should the lines of life that life repair,",
"Which this, Time's pencil, or my pupil pen,",
"Neither in inward worth nor outward fair,",
"Can make you live your self in eyes of men.",
"To give away yourself, keeps yourself still,",
"And you must live, drawn by your own sweet skill."),
("Sonnet 17",
"Who will believe my verse in time to come,",
"If it were filled with your most high deserts?",
"Though yet heaven knows it is but as a tomb",
"Which hides your life, and shows not half your parts.",
"If I could write the beauty of your eyes,",
"And in fresh numbers number all your graces,",
"The age to come would say 'This poet lies;",
"Such heavenly touches ne'er touched earthly faces.'",
"So should my papers, yellowed with their age,",
"Be scorned, like old men of less truth than tongue,",
"And your true rights be termed a poet's rage",
"And stretched metre of an antique song:",
"But were some child of yours alive that time,",
"You should live twice, in it, and in my rhyme."),
("<NAME>",
"Shall I compare thee to a summer's day?",
"Thou art more lovely and more temperate:",
"Rough winds do shake the darling buds of May,",
"And summer's lease hath all too short a date:",
"Sometime too hot the eye of heaven shines,",
"And often is his gold complexion dimmed,",
"And every fair from fair sometime declines,",
"By chance, or nature's changing course untrimmed:",
"But thy eternal summer shall not fade,",
"Nor lose possession of that fair thou ow'st,",
"Nor shall death brag thou wander'st in his shade,",
"When in eternal lines to time thou grow'st,",
"So long as men can breathe, or eyes can see,",
"So long lives this, and this gives life to thee."),
("<NAME>",
"Devouring Time, blunt thou the lion's paws,",
"And make the earth devour her own sweet brood;",
"Pluck the keen teeth from the fierce tiger's jaws,",
"And burn the long-lived phoenix in her blood;",
"Make glad and sorry seasons as thou fleet'st,",
"And do whate'er thou wilt, swift-footed Time,",
"To the wide world and all her fading sweets;",
"But I forbid thee one most heinous crime:",
"O! carve not with thy hours my love's fair brow,",
"Nor draw no lines there with thine antique pen;",
"Him in thy course untainted do allow",
"For beauty's pattern to succeeding men.",
"Yet, do thy worst old Time: despite thy wrong,",
"My love shall in my verse ever live young."),
("Sonnet 20",
"A woman's face with nature's own hand painted,",
"Hast thou, the master mistress of my passion;",
"A woman's gentle heart, but not acquainted",
"With shifting change, as is false women's fashion:",
"An eye more bright than theirs, less false in rolling,",
"Gilding the object whereupon it gazeth;",
"A man in hue all hues in his controlling,",
"Which steals men's eyes and women's souls amazeth.",
"And for a woman wert thou first created;",
"Till Nature, as she wrought thee, fell a-doting,",
"And by addition me of thee defeated,",
"By adding one thing to my purpose nothing.",
"But since she prick'd thee out for women's pleasure,",
"Mine be thy love and thy love's use their treasure."),
("Sonnet 21",
"So is it not with me as with that Muse,",
"Stirred by a painted beauty to his verse,",
"Who heaven itself for ornament doth use",
"And every fair with his fair doth rehearse,",
"Making a couplement of proud compare",
"With sun and moon, with earth and sea's rich gems,",
"With April's first-born flowers, and all things rare,",
"That heaven's air in this huge rondure hems.",
"O! let me, true in love, but truly write,",
"And then believe me, my love is as fair",
"As any mother's child, though not so bright",
"As those gold candles fixed in heaven's air:",
"Let them say more that like of hearsay well;",
"I will not praise that purpose not to sell."),
("Sonnet 22",
"My glass shall not persuade me I am old,",
"So long as youth and thou are of one date;",
"But when in thee time's furrows I behold,",
"Then look I death my days should expiate.",
"For all that beauty that doth cover thee,",
"Is but the seemly raiment of my heart,",
"Which in thy breast doth live, as thine in me:",
"How can I then be elder than thou art?",
"O! therefore, love, be of thyself so wary",
"As I, not for myself, but for thee will;",
"Bearing thy heart, which I will keep so chary",
"As tender nurse her babe from faring ill.",
"Presume not on thy heart when mine is slain,",
"Thou gav'st me thine not to give back again."),
("<NAME>",
"As an unperfect actor on the stage,",
"Who with his fear is put beside his part,",
"Or some fierce thing replete with too much rage,",
<gh_stars>1-10
#!/usr/bin/env python
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
model_optimizer_node.py
This module creates the model_optimizer_node which is responsible for running the Intel
OpenVino model optimizer script for the DeepRacer reinforcement learning models to obtain
the intermediate representation xml files and other optimizer artifacts required to run the
inference with the model.
More details:
(https://docs.openvinotoolkit.org/2021.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
"The optimizer performs static model analysis, and adjusts deep
learning models for optimal execution on end-point target devices."
The node defines:
model_optimizer_service: A service to call the Intel OpenVino model optimizer script
for the specific model with appropriate model and platform
specific parameters set.
"""
import os
import subprocess
import shlex
import re
import rclpy
from rclpy.node import Node
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from deepracer_interfaces_pkg.srv import (ModelOptimizeSrv)
from model_optimizer_pkg import constants
class ModelOptimizerNode(Node):
"""Node responsible for running the Intel OpenVino model optimizer for the DeepRacer models.
"""
def __init__(self):
"""Create a ModelOptimizerNode.
"""
super().__init__("model_optimizer_node")
self.get_logger().info("model_optimizer_node started")
# Service to call the Intel OpenVino model optimizer script
# for the specific model.
self.model_optimizer_service_cb_group = ReentrantCallbackGroup()
self.model_optimizer_service = \
self.create_service(ModelOptimizeSrv,
constants.MODEL_OPTIMIZER_SERVER_SERVICE_NAME,
self.model_optimizer,
callback_group=self.model_optimizer_service_cb_group)
# Heartbeat timer.
self.timer_count = 0
self.timer = self.create_timer(5.0, self.timer_callback)
def timer_callback(self):
"""Heartbeat function to keep the node alive.
"""
self.get_logger().debug(f"Timer heartbeat {self.timer_count}")
self.timer_count += 1
def model_optimizer(self, req, res):
"""Callback for the model_optimizer_server service. Handles calling the Intel OpenVino
model optimizer script with appropriate parameters set for the specific model details
passed in the request data.
Args:
req (ModelOptimizeSrv.Request): Request object with the model details required to
run the optimizer set.
res (ModelOptimizeSrv.Response): Response object with error(int) flag to indicate
successful execution of the optimizer script and
artifact_path(str) with the path where the
                                             intermediate representation xml files are created
for the model.
Returns:
ModelOptimizeSrv.Response: Response object with error(int) flag to indicate
successful execution of the optimizer script and
artifact_path(str) with the path where the intermediate
                       representation xml files are created for the model.
"""
self.get_logger().info("model_optimizer")
try:
aux_param = {"--fuse": "OFF", "--img-format": req.img_format}
error_code, artifact_path = self.optimize_tf_model(req.model_name,
req.model_metadata_sensors,
req.training_algorithm,
req.width,
req.height,
req.lidar_channels,
aux_param)
res.error = error_code
res.artifact_path = artifact_path
except Exception as ex:
res.error = 1
self.get_logger().error(f"Error while optimizing model: {ex}")
return res
def convert_to_mo_cli(self,
model_name,
model_metadata_sensors,
training_algorithm,
input_width,
input_height,
lidar_channels,
aux_inputs):
"""Helper method that converts the information in model optimizer API into
the appropriate cli commands.
Args:
model_name (str): Model prefix, should be the same in the weight and symbol file.
model_metadata_sensors (list): List of sensor input types(int) for all the sensors
with which the model was trained.
training_algorithm (int): Training algorithm key(int) for the algorithm with which
the model was trained.
input_width (int): Width of the input image to the inference engine.
input_height (int): Height of the input image to the inference engine.
lidar_channels (int): Number of LiDAR values that with which the LiDAR head of
the model was trained.
aux_inputs (dict): Dictionary of auxiliary options for the model optimizer.
Raises:
Exception: Custom exception if the API flags and default values are not
aligned.
Exception: Custom exception if the lidar_channel value is less than 1.
Returns:
dict: Map of parameters to be passed to model optimizer command based on the model.
"""
if len(constants.APIFlags.get_list()) != len(constants.APIDefaults.get_list()):
raise Exception("Inconsistent API flags")
        # Set the flags to the default values.
default_param = {}
for flag, value in zip(constants.APIFlags.get_list(), constants.APIDefaults.get_list()):
default_param[flag] = value
        # Set param values to the user-entered values in aux_inputs.
for flag, value in aux_inputs.items():
if flag in default_param:
default_param[flag] = value
# Dictionary that will house the cli commands.
common_params = {}
# Convert API information into appropriate cli commands.
for flag, value in default_param.items():
if flag is constants.APIFlags.MODELS_DIR:
common_params[constants.MOKeys.MODEL_PATH] = os.path.join(value, model_name)
            # Input shape is in the form [n,h,w,c] to support tensorflow models only
elif flag is constants.APIFlags.IMG_CHANNEL:
common_params[constants.MOKeys.INPUT_SHAPE] = (constants.MOKeys.INPUT_SHAPE_FMT
.format(1,
input_height,
input_width,
value))
elif flag is constants.APIFlags.PRECISION:
common_params[constants.MOKeys.DATA_TYPE] = value
elif flag is constants.APIFlags.FUSE:
if value is not constants.APIDefaults.FUSE:
common_params[constants.MOKeys.DISABLE_FUSE] = ""
common_params[constants.MOKeys.DISABLE_GFUSE] = ""
elif flag is constants.APIFlags.IMG_FORMAT:
if value is constants.APIDefaults.IMG_FORMAT:
common_params[constants.MOKeys.REV_CHANNELS] = ""
elif flag is constants.APIFlags.OUT_DIR:
common_params[constants.MOKeys.OUT_DIR] = value
# Only keep entries with non-empty string values.
elif value:
common_params[flag] = value
# Override the input shape and the input flags to handle multi head inputs in tensorflow
input_shapes = []
input_names = []
training_algorithm_key = constants.TrainingAlgorithms(training_algorithm)
for input_type in model_metadata_sensors:
input_key = constants.SensorInputTypes(input_type)
if input_key == constants.SensorInputTypes.LIDAR \
or input_key == constants.SensorInputTypes.SECTOR_LIDAR:
if lidar_channels < 1:
raise Exception("Lidar channels less than 1")
input_shapes.append(constants.INPUT_SHAPE_FORMAT_MAPPING[input_key]
.format(1, lidar_channels))
else:
                # Input shape is in the form [n,h,w,c] to support tensorflow models only
input_shapes.append(
constants.INPUT_SHAPE_FORMAT_MAPPING[input_key]
.format(1,
input_height,
input_width,
constants.INPUT_CHANNEL_SIZE_MAPPING[input_key]))
input_name_format = constants.NETWORK_INPUT_FORMAT_MAPPING[input_key]
input_names.append(
input_name_format.format(
constants.INPUT_HEAD_NAME_MAPPING[training_algorithm_key]))
if len(input_names) > 0 and len(input_shapes) == len(input_names):
common_params[constants.MOKeys.INPUT_SHAPE] = \
constants.MOKeys.INPUT_SHAPE_DELIM.join(input_shapes)
common_params[constants.APIFlags.INPUT] = \
constants.MOKeys.INPUT_SHAPE_DELIM.join(input_names)
common_params[constants.MOKeys.MODEL_NAME] = model_name
return common_params
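    # Illustrative sketch (not part of the original node): for a single-camera model
    # named 'my_model' trained on 160x120 images, the returned map would look roughly
    # like the dict below. The literal key strings are assumptions, since the actual
    # values live in constants.MOKeys / constants.APIFlags, which are not shown here.
    #
    #     {
    #         '--input_model': '<models_dir>/my_model',   # MOKeys.MODEL_PATH
    #         '--input_shape': '[1,120,160,3]',           # n,h,w,c for TensorFlow
    #         '--input': 'main_level/.../observation',    # per NETWORK_INPUT_FORMAT_MAPPING
    #         '--data_type': 'FP32',                      # MOKeys.DATA_TYPE
    #         '--output_dir': '<artifact_dir>',           # MOKeys.OUT_DIR
    #         'model_name': 'my_model',                   # MOKeys.MODEL_NAME
    #     }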
def run_optimizer(self, mo_path, common_params, platform_parms):
"""Helper method that combines the common commands with the platform specific
commands.
Args:
            mo_path (str): Path to Intel's model optimizer for a given platform
(mxnet, caffe, or tensor flow).
common_params (dict): Dictionary containing the cli flags common to all
model optimizer.
platform_parms (dict): Dictionary containing the cli flags for the specific
platform.
Raises:
Exception: Custom exception if the model file is not present.
Returns:
tuple: Tuple whose first value is the error code and second value
is a string to the location of the converted model if any.
"""
if not os.path.isfile(common_params[constants.MOKeys.MODEL_PATH]):
raise Exception(f"Model file {common_params[constants.MOKeys.MODEL_PATH]} not found")
cmd = f"{constants.PYTHON_BIN} {constants.INTEL_PATH}{mo_path}"
# Construct the cli command
for flag, value in dict(common_params, **platform_parms).items():
cmd += f" {flag} {value}"
self.get_logger().info(f"Model optimizer command: {cmd}")
tokenized_cmd = shlex.split(cmd)
retry_count = 0
# Retry running the optimizer if it fails due to any error
# The optimizer command is run for MAX_OPTIMIZER_RETRY_COUNT + 1 times
while retry_count <= constants.MAX_OPTIMIZER_RETRY_COUNT:
self.get_logger().info(f"Optimizing model: {retry_count} of "
f"{constants.MAX_OPTIMIZER_RETRY_COUNT} trials")
proc = subprocess.Popen(tokenized_cmd, stderr=subprocess.PIPE)
_, std_err = proc.communicate()
if not proc.returncode:
return 0, os.path.join(common_params[constants.MOKeys.OUT_DIR],
f"{common_params[constants.MOKeys.MODEL_NAME]}.xml")
std_err = re.sub(r", question #\d+", "", std_err.decode("utf-8"))
self.get_logger().error(f"Model optimizer error info: {std_err}")
retry_count += 1
# Return error code 1, which means that the model optimizer failed even after retries.
return 1, ""
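    # Illustrative sketch (not part of the original node): the assembled command is a
    # plain string along the lines of
    #
    #     python3 <INTEL_PATH><mo_path> --input_model <models_dir>/my_model --data_type FP32 ...
    #
    # (the path and flag names above are placeholders, not the real constants). It is
    # tokenized with shlex.split() and retried up to MAX_OPTIMIZER_RETRY_COUNT + 1 times;
    # on success, error code 0 and the path of the generated <model_name>.xml are returned.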
def set_platform_param(self, platform_param, aux_inputs):
"""Helper method that creates a dictionary with the platform specific
Intel model optimizer cli commands.
Args:
platform_param (dict): Dictionary of available platform cli commands.
aux_inputs (dict): Dictionary of auxiliary options for the model optimizer.
Returns:
dict: Dictionary with platform specific params set if present in aux_inputs.
"""
self.get_logger().info(f"aux_inputs: {aux_inputs} ")
        set_platform_params = {}
        for flag in platform_param:
            if flag in aux_inputs:
                set_platform_params[flag] = aux_inputs[flag]
        return set_platform_params
def optimize_tf_model(self,
model_name,
model_metadata_sensors,
training_algorithm,
input_width,
input_height,
lidar_channels,
aux_inputs={}):
"""Helper function to run Intel"s model optimizer for DeepRacer tensorflow model.
Args:
model_name (str): Model prefix, should be the same in the weight and symbol file.
model_metadata_sensors (list): List of sensor input types(int) for all the sensors
with which the model was trained.
training_algorithm (int): Training algorithm key(int) for the algorithm with which
the model was trained.
input_width (int): Width of the input image to the inference engine.
input_height (int): Height of the input image to the inference engine.
lidar_channels (int): Number of LiDAR values that with which the LiDAR head of
the model was trained.
aux_inputs (dict, optional): Dictionary of auxiliary options for the model optimizer.
Defaults to {}.
Raises:
Exception: Custom exception if the input height or width is less than 1.
Returns:
            tuple: Tuple whose first value is the error code and second value
                   is a string to the location of the converted model if any.
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import graphviz
from . import labeled_strings as labeled_strings
indent_size = 2
class IR(object):
last_parsed_lineno = 0
last_parse_col_offset = 0
def to_stan(self, acc, indent=0):
pass
def set_map(self, ast):
if hasattr(ast, 'lineno'):
self.lineno = ast.lineno
IR.last_parsed_lineno = ast.lineno
else:
self.lineno = IR.last_parsed_lineno
if hasattr(ast, 'col_offset'):
self.col_offset = ast.col_offset
IR.last_parsed_col_offset = ast.col_offset
else:
self.col_offset = IR.last_parsed_col_offset
return self
def mkString(self, str, indent=0):
return labeled_strings.LabeledString(self, (" "*(indent*indent_size)+str))
def start_block(self, acc, name, indent=0):
acc += self.mkString(name + " {", indent)
acc.newline()
def end_block(self, acc, indent=0):
acc += self.mkString("}", indent)
acc.newline()
def to_stan_stmt_list(self, l, acc, indent=0):
if isinstance(l, list):
if len(l) == 1:
acc.newline()
l[0].to_stan(acc, indent+1)
else:
acc += self.mkString(" {")
acc.newline()
for b in l:
b.to_stan(acc, indent+1)
acc.newline()
acc += self.mkString("}", indent)
else:
acc.newline()
l.to_stan(acc, indent + 1)
def to_stan_arg_list(self, l, acc, indent=0):
acc += self.mkString("(")
first = True
for b in l:
if first:
first = False
else:
acc += self.mkString(", ")
b.to_stan(acc)
acc += self.mkString(")")
class Program(IR):
    # Returns an object that can be converted to a string
# or can be indexed as result[line][col] to get the IR object
# responsible for creating that string
def to_mapped_string(self):
ret = labeled_strings.LabeledRope()
self.to_stan(ret, 0)
return ret.result()
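    # Illustrative sketch (not part of the original module): given a hypothetical
    # `program` instance, the mapped string can be rendered as Stan source or indexed
    # back to the IR node that produced a given character:
    #
    #     mapped = program.to_mapped_string()
    #     print(str(mapped))      # the generated Stan program text
    #     node = mapped[3][10]    # IR object responsible for line 3, column 10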
def __init__(self, blocks):
self.blocks = blocks
self.dot = graphviz.Digraph()
self.dot.attr('graph', rankdir='LR')
def viz(self):
def block_helper(name):
if (name in self.blocks):
self.blocks[name].viz(self.dot)
names = [
"data",
"parameters",
"model"]
for n in names:
block_helper(n)
return self.dot
def to_stan(self, acc, indent=0):
def block_helper(name):
if(name in self.blocks):
self.blocks[name].to_stan(acc, indent)
names = [
"functions",
"data",
"transformed_data",
"parameters",
"transformed_parameters",
"model",
"generated_quantities"]
for n in names:
block_helper(n)
# Program Blocks (Section 6)
class ProgramBlock(IR):
def __init__(self, body=[]):
self.body = []
class FunctionsBlock(ProgramBlock):
def __init__(self, fdecls=[]):
self.fdecls = fdecls
def to_stan(self, acc, indent=0):
if self.fdecls:
self.start_block(acc, "functions", indent)
for b in self.fdecls:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class DataBlock(ProgramBlock):
def __init__(self, vdecls=[]):
self.vdecls = vdecls
def viz(self, dot):
for v in self.vdecls:
dot.attr('node', shape='circle',
style='filled', fillcolor='lightgray')
dot.node(v.id)
def to_stan(self, acc, indent=0):
if self.vdecls:
self.start_block(acc, "data", indent)
for b in self.vdecls:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class TransformedDataBlock(ProgramBlock):
def __init__(self, stmts=[]):
self.stmts = stmts
def to_stan(self, acc, indent=0):
if self.stmts:
self.start_block(acc, "transformed data", indent)
for b in self.stmts:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class ParametersBlock(ProgramBlock):
def __init__(self, vdecls=[]):
self.vdecls = vdecls
def viz(self, dot):
for v in self.vdecls:
dot.attr('node', shape='circle', style='filled', fillcolor='white')
dot.node(v.id)
def to_stan(self, acc, indent=0):
if self.vdecls:
self.start_block(acc, "parameters", indent)
for b in self.vdecls:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class TransformedParametersBlock(ProgramBlock):
def __init__(self, stmts=[]):
self.stmts = stmts
def to_stan(self, acc, indent=0):
if self.stmts:
self.start_block(acc, "transformed parameters", indent)
for b in self.stmts:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class ModelBlock(ProgramBlock):
def __init__(self, stmts=[]):
self.stmts = stmts
def viz(self, dot):
for stmt in self.stmts:
stmt.viz(dot)
def to_stan(self, acc, indent=0):
self.start_block(acc, "model", indent)
for b in self.stmts:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
class GeneratedQuantities(ProgramBlock):
def __init__(self, stmts=[]):
self.stmts = stmts
def to_stan(self, acc, indent=0):
if self.stmts:
self.start_block(acc, "generated quantities", indent)
for b in self.stmts:
b.to_stan(acc, indent+1)
acc.newline()
self.end_block(acc, indent)
# stmts (Section 5)
class Statement(IR):
def viz(self, dot):
pass
class FunctionDef(Statement):
def __init__(self, id, args, ty, body):
self.id = id
self.args = args
self.ty = ty
self.body = body
def to_stan(self, acc, indent=0):
self.ty.to_stan(acc, indent)
acc += self.mkString(self.id, indent)
self.to_stan_arg_list(self.args, acc, indent)
if len(self.body) == 1 and isinstance(self.body[0], PassStmt):
acc += self.mkString(";")
else:
acc += self.mkString("{")
acc.newline()
for s in self.body:
s.to_stan(acc, indent + 1)
acc.newline()
acc += self.mkString("}", indent)
class AssignStmt(Statement):
def __init__(self, lhs, op, rhs):
self.lhs = lhs
self.op = op
self.rhs = rhs
def to_stan(self, acc, indent=0):
self.lhs.to_stan(acc, indent)
assert not self.op, "TODO: handle this"
acc += self.mkString(" = ")
self.rhs.to_stan(acc)
acc += self.mkString(";")
class AugAssignStmt(Statement):
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def to_stan(self, acc, indent=0):
self.lhs.to_stan(acc, indent)
acc += self.mkString(" += ")
self.rhs.to_stan(acc)
acc += self.mkString(";")
class SamplingStmt(Statement):
def __init__(self, lhs, dist, trunc=None):
self.lhs = lhs
self.dist = dist
self.trunc = trunc
def viz(self, dot):
lv = self.lhs.get_vars()
dist = self.dist.get_vars()
for a in lv:
for b in dist:
dot.edge(b, a)
def to_stan(self, acc, indent=0):
self.lhs.to_stan(acc, indent)
acc += self.mkString(" ~ ")
self.dist.to_stan(acc)
if self.trunc is not None:
acc += self.mkString(" T")
acc += self.mkString("[")
self.trunc.to_stan(acc)
acc += self.mkString("]")
acc += self.mkString(";")
class ForStmt(Statement):
def __init__(self, var, iter, body):
self.var = var
self.iter = iter
self.body = body
def viz(self, dot):
for stmt in self.body:
stmt.viz(dot)
def iter_to_stan(self, acc):
acc += self.mkString(" in ")
if(isinstance(self.iter, Call) and self.iter.id == "range"):
args = self.iter.args
if len(args) == 1:
acc += self.iter.mkString("0:")
upper = Binop(SUB(), args[0], Constant(1)) # Exclude upper bound
upper.to_stan(acc)
elif len(args) == 2:
args[0].to_stan(acc)
acc += self.iter.mkString(":")
upper = Binop(SUB(), args[1], Constant(1)) # Exclude upper bound
upper.to_stan(acc)
elif len(args) == 3:
raise ValueError(
"For loop specified using the three argument version of range. Step values are not currently supported.")
else:
raise ValueError(
"For loop specified using an invalid invocation of range. range does not accept " + len(args) + " arguments")
else:
self.iter.to_stan(acc)
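    # Illustrative sketch (not part of the original module): Python range() bounds are
    # rewritten into inclusive Stan loop bounds, with the exclusive upper bound emitted
    # as an explicit subtraction, e.g.
    #
    #     for i in range(10):     ->  for (i in 0:10 - 1)
    #     for i in range(1, n):   ->  for (i in 1:n - 1)
    #
    # A three-argument range(start, stop, step) is rejected with a ValueError.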
def to_stan(self, acc, indent=0):
acc += self.mkString("for (", indent)
self.var.to_stan(acc)
self.iter_to_stan(acc)
acc += self.mkString(")")
self.to_stan_stmt_list(self.body, acc, indent)
class ConditionalStmt(Statement):
def __init__(self, cond, exp, alt):
self.cond = cond
self.exp = exp
self.alt = alt
def viz(self, dot):
for s in self.exp:
s.viz(dot)
for a in self.alt:
a.viz(dot)
def to_stan(self, acc, indent=0):
acc += self.mkString("if (", indent)
self.cond.to_stan(acc)
acc += self.mkString(")")
self.to_stan_stmt_list(self.exp, acc, indent)
if self.alt:
acc.newline()
acc += self.mkString("else", indent)
self.to_stan_stmt_list(self.alt, acc, indent)
class WhileStmt(Statement):
def __init__(self, cond, body):
self.cond = cond
self.body = body
def viz(self, dot):
for stmt in self.body:
stmt.viz(dot)
def to_stan(self, acc, indent=0):
acc += self.mkString("while (", indent)
self.cond.to_stan(acc)
acc += self.mkString(")")
acc.newline()
self.to_stan_stmt_list(self.body, acc, indent)
class Block(Statement):
def __init__(self, body=[]):
self.body = body
def viz(self, dot):
for stmt in self.body:
stmt.viz(dot)
def to_stan(self, acc, indent=0):
acc += self.mkString(" {", indent)
acc.newline()
for b in self.body:
b.to_stan(acc, indent+1)
acc.newline()
acc += self.mkString("}", indent)
class ExprStmt(Statement):
def __init__(self, expr):
self.body = expr
def to_stan(self, acc, indent=0):
self.body.to_stan(acc, indent)
acc += self.mkString(";")
class BreakStmt(Statement):
def to_stan(self, acc, indent=0):
acc += self.mkString("break;", indent)
class ContinueStmt(Statement):
def to_stan(self, acc, indent=0):
acc += self.mkString("continue;", indent)
class PassStmt(Statement):
pass
class ReturnStmt(Statement):
def __init__(self, val=None):
self.val = val
def to_stan(self, acc, indent=0):
acc += self.mkString("return ", indent)
if self.val:
self.val.to_stan(acc)
acc += self.mkString(";")
# expressions (Section 4)
class Expression(IR):
def to_stan_prec(self, sub, acc, indent=0):
needsParens = sub.precedence > self.precedence
if needsParens:
acc += sub.mkString("(", indent)
sub.to_stan(acc)
acc += sub.mkString(")")
else:
sub.to_stan(acc, indent)
@property
def precedence(self):
return 0
class Atom(Expression):
def get_vars(self):
return []
class Constant(Atom):
def __init__(self, value):
self.value = value
def get_vars(self):
return []
def to_stan(self, acc, indent=0):
acc += self.mkString(str(self.value), indent)
class Variable(Atom):
def __init__(self, id):
self.id = id
def get_vars(self):
return [self.id]
def to_stan(self, acc, indent=0):
acc += self.mkString(self.id, indent)
class VectorExpr(Atom):
pass
class ArrayExpr(Atom):
pass
# def __init__(self, val, elts):
# self.elts = elts
# def get_vars(self):
# return self.val.get_vars()
# def to_stan(self, acc, indent=0):
# acc += self.mkString("{ ", indent)
# first = True
# for elt in self.elts:
# if first:
# first = False
# else:
# acc += self.mkString(", ")
# elt.to_stan(acc)
# acc += self.mkString(" }", indent)
class Subscript(Atom):
def __init__(self, val, slice):
self.val = val
self.slice = slice
def get_vars(self):
return self.val.get_vars()
def to_stan(self, acc, indent=0):
self.to_stan_prec(self.val, acc, indent)
acc += self.mkString("[")
self.slice.to_stan(acc)
acc += self.mkString("]")
class Transpose(Expression):
def __init__(self, val):
self.val = val
def get_vars(self):
return self.val.get_vars()
def to_stan(self, acc, indent=0):
self.to_stan_prec(self.val, acc, indent)
acc += self.mkString("'")
class Slice(Expression):
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def to_stan(self, acc, indent=0):
        # Do we sometimes need parens?
# is this an operator precedence issue?
        if
half_w
da = 0.5 * abs(a1 - a0)
d = (
self.bend_radius[kk]
* numpy.tan(da)
/ (v0[0] ** 2 + v0[1] ** 2) ** 0.5
)
np = max(
2,
1
+ int(
da / numpy.arccos(1 - self.tolerance / r)
+ 0.5
),
)
angles = numpy.linspace(a0, a1, np)
points = (
r
* numpy.vstack(
(numpy.cos(angles), numpy.sin(angles))
).T
)
arms[ii].extend(points - points[0] + p0 - d * v0)
elif callable(corner):
arms[ii].extend(
corner(p0, v0, p1, v1, pts[jj], self.widths[jj, kk])
)
else:
den = v1[1] * v0[0] - v1[0] * v0[1]
lim = (
1e-12
* (v0[0] ** 2 + v0[1] ** 2)
* (v1[0] ** 2 + v1[1] ** 2)
)
if den ** 2 < lim:
u0 = u1 = 0
p = 0.5 * (p0 + p1)
else:
dx = p1[0] - p0[0]
dy = p1[1] - p0[1]
u0 = (v1[1] * dx - v1[0] * dy) / den
u1 = (v0[1] * dx - v0[0] * dy) / den
p = 0.5 * (p0 + v0 * u0 + p1 + v1 * u1)
if corner == "miter":
arms[ii].append(p)
elif u0 <= 0 and u1 >= 0:
arms[ii].append(p)
elif corner == "bevel":
arms[ii].append(p0)
arms[ii].append(p1)
elif corner == "round":
if v0[1] * v1[0] - v0[0] * v1[1] < 0:
a0 = numpy.arctan2(-v0[0], v0[1])
a1 = numpy.arctan2(-v1[0], v1[1])
else:
a0 = numpy.arctan2(v0[0], -v0[1])
a1 = numpy.arctan2(v1[0], -v1[1])
if abs(a1 - a0) > numpy.pi:
if a0 < a1:
a0 += 2 * numpy.pi
else:
a1 += 2 * numpy.pi
np = max(
4,
1
+ int(
0.5
* abs(a1 - a0)
/ numpy.arccos(1 - self.tolerance / half_w)
+ 0.5
),
)
angles = numpy.linspace(a0, a1, np)
arms[ii].extend(
pts[jj]
+ half_w
* numpy.vstack(
(numpy.cos(angles), numpy.sin(angles))
).T
)
elif corner == "smooth":
angles = [
numpy.arctan2(v0[1], v0[0]),
numpy.arctan2(v1[1], v1[0]),
]
bezpts = numpy.vstack((p0, p1))
cta, ctb = _hobby(bezpts, angles)
f = _func_bezier(
numpy.array(
[bezpts[0], cta[0], ctb[0], bezpts[1]]
)
)
tol = self.tolerance ** 2
uu = [0, 1]
fu = [f(0), f(1)]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
arms[ii].extend(fu)
if end != "flush":
for ii in (0, 1):
if callable(end):
vecs = [
caps[ii][0] - arms[0][-ii],
arms[1][-ii] - caps[ii][1],
]
caps[ii] = end(
caps[ii][0], vecs[0], caps[ii][1], vecs[1]
)
elif end == "smooth":
points = numpy.array(caps[ii])
vecs = [
caps[ii][0] - arms[0][-ii],
arms[1][-ii] - caps[ii][1],
]
angles = [
numpy.arctan2(vecs[0][1], vecs[0][0]),
numpy.arctan2(vecs[1][1], vecs[1][0]),
]
cta, ctb = _hobby(points, angles)
f = _func_bezier(
numpy.array([points[0], cta[0], ctb[0], points[1]])
)
tol = self.tolerance ** 2
uu = [0, 1]
fu = [f(0), f(1)]
iu = 1
while iu < len(fu):
test_u = 0.5 * (uu[iu - 1] + uu[iu])
test_pt = f(test_u)
test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
if test_err[0] ** 2 + test_err[1] ** 2 > tol:
uu.insert(iu, test_u)
fu.insert(iu, test_pt)
else:
iu += 1
caps[ii] = fu
elif end == "round":
v = pts[0] - pts[1] if ii == 0 else pts[-1] - pts[-2]
r = 0.5 * self.widths[-ii, kk]
np = max(
5,
1
+ int(
_halfpi / numpy.arccos(1 - self.tolerance / r)
+ 0.5
),
)
ang = (2 * ii - 1) * numpy.linspace(
-_halfpi, _halfpi, np
) + numpy.arctan2(v[1], v[0])
caps[ii] = list(
pts[-ii]
+ r
* numpy.vstack((numpy.cos(ang), numpy.sin(ang))).T
)
else: # 'extended'/list
v = pts[0] - pts[1] if ii == 0 else pts[-1] - pts[-2]
v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
w = (2 * ii - 1) * v[::-1] * _pmone
r = 0.5 * self.widths[-ii, kk]
d = r if end == "extended" else end[ii]
caps[ii] = [
pts[-ii] + r * w,
pts[-ii] + r * w + d * v,
pts[-ii] - r * w + d * v,
pts[-ii] - r * w,
]
poly = caps[0][::-1]
poly.extend(arms[0])
poly.extend(caps[1])
poly.extend(arms[1][::-1])
polygons = [numpy.array(poly)]
if self.max_points > 4 and polygons[0].shape[0] > self.max_points:
ii = 0
while ii < len(polygons):
if len(polygons[ii]) > self.max_points:
pts0 = sorted(polygons[ii][:, 0])
pts1 = sorted(polygons[ii][:, 1])
ncuts = len(pts0) // self.max_points
if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:
# Vertical cuts
cuts = [
pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
polygons[ii], cuts, 0, 1 / self.precision
)
else:
# Horizontal cuts
cuts = [
pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
polygons[ii], cuts, 1, 1 / self.precision
)
polygons.pop(ii)
polygons.extend(
numpy.array(x)
for x in itertools.chain.from_iterable(chopped)
)
else:
ii += 1
key = (self.layers[kk], self.datatypes[kk])
if key in self._polygon_dict:
self._polygon_dict[key].extend(polygons)
else:
self._polygon_dict[key] = polygons
if by_spec:
return libcopy.deepcopy(self._polygon_dict)
else:
return list(itertools.chain.from_iterable(self._polygon_dict.values()))
def to_polygonset(self):
"""
Create a `PolygonSet` representation of this object.
The resulting object will be fractured according to the
parameter `max_points` used when instantiating this object.
Returns
-------
out : `PolygonSet` or None
A `PolygonSet` that contains all boundaries for this path.
If the path is empty, returns None.
"""
if self.points.shape[0] < 2:
return None
polygons = self.get_polygons(True)
pol = PolygonSet([])
for k, v in polygons.items():
pol.layers.extend([k[0]] * len(v))
pol.datatypes.extend([k[1]] * len(v))
pol.polygons.extend(v)
return pol.fracture(self.max_points, self.precision)
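    # Illustrative usage sketch (not part of the original module): `path` is a
    # hypothetical FlexPath instance and `cell` an assumed gdspy.Cell.
    #
    #     pol = path.to_polygonset()
    #     if pol is not None:   # None means the path has fewer than 2 points
    #         cell.add(pol)
    #
    # The returned PolygonSet is already fractured according to max_points and precision.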
def to_gds(self, multiplier):
"""
Convert this object to a series of GDSII elements.
If `FlexPath.gdsii_path` is True, GDSII path elements are
created instead of boundaries. Such paths do not support
        variable widths, but their memory footprint is smaller than
full polygonal boundaries.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
if len(self.points) == 0:
return b""
if self.gdsii_path:
sign = 1 if self.width_transform else -1
else:
return self.to_polygonset().to_gds(multiplier)
data = []
un = self.points[1:] - self.points[:-1]
un = (
un[:, ::-1]
* _mpone
/ ((un[:, 0] ** 2 + un[:, 1] ** 2) ** 0.5).reshape((un.shape[0], 1))
)
for ii in range(self.n):
pathtype = (
0
if callable(self.ends[ii])
else FlexPath._pathtype_dict.get(self.ends[ii], 4)
)
data.append(
struct.pack(
">4Hh2Hh2Hh2Hl",
4,
0x0900,
6,
0x0D02,
self.layers[ii],
6,
0x0E02,
self.datatypes[ii],
6,
0x2102,
pathtype,
8,
0x0F03,
sign * int(round(self.widths[0, ii] * multiplier)),
)
)
if pathtype == 4:
data.append(
struct.pack(
">2Hl2Hl",
8,
0x3003,
int(round(self.ends[ii][0] * multiplier)),
8,
0x3103,
int(round(self.ends[ii][1] * multiplier)),
)
)
if any(self.offsets[:, ii] != 0):
points = numpy.zeros(self.points.shape)
sa = self.points[:-1] + un * self.offsets[:-1, ii : ii + 1]
sb = self.points[1:] + un * self.offsets[1:, ii : ii + 1]
vn = sb - sa
den = vn[1:, 0] * vn[:-1, 1] - vn[1:, 1] * vn[:-1, 0]
idx = numpy.nonzero(
den ** 2
< 1e-12
* (vn[1:, 0] ** 2 + vn[1:, 1] ** 2)
* (vn[:-1, 0] ** 2 + vn[:-1, 1] ** 2)
)[0]
if len(idx) > 0:
den[idx] = 1
u0 = (
vn[1:, 1] * (sb[:-1, 0] - sa[1:, 0])
- vn[1:, 0] * (sb[:-1, 1] - sa[1:, 1])
) / den
points[1:-1] = sb[:-1] + u0.reshape((u0.shape[0], 1)) * vn[:-1]
if len(idx) > 0:
points[idx + 1] = 0.5 * (sa[idx + 1] + sb[idx])
points[0] = sa[0]
points[-1] = sb[-1]
else:
points = self.points
if self.corners[ii] == "circular bend":
r = self.bend_radius[ii]
p0 = points[0]
p1 = points[1]
v0 = p1 - p0
bends = [p0]
for jj in range(1, points.shape[0] - 1):
p2 = points[jj + 1]
v1 = p2 - p1
direction = v0[0] * v1[1] - v0[1] * v1[0]
if direction == 0:
bends.append(p1)
else:
if direction > 0:
a0 = numpy.arctan2(-v0[0], v0[1])
a1 = numpy.arctan2(-v1[0], v1[1])
elif direction < 0:
a0 = numpy.arctan2(v0[0], -v0[1])
                            a1 = numpy.arctan2(v1[0], -v1[1])
<gh_stars>0
import os
from psaw import PushshiftAPI
import datetime as dt
import praw
from praw.models import MoreComments
import time
import numpy as np
import random
import csv
import pandas as pd
import codecs
import re
import sys
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import json
from ibm_watson import ToneAnalyzerV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import ApiException
import flair
from segtok.segmenter import split_single
import matplotlib.pyplot as plt
from matplotlib.dates import (YEARLY, DateFormatter, rrulewrapper, RRuleLocator, drange)
import matplotlib.dates as mdate
import plotly.express as px
import plotly.graph_objects as go
flair_sentiment = flair.models.TextClassifier.load('en-sentiment')
# nltk.download('vader_lexicon')
TEST_START_DATE = int(dt.datetime(2020, 11, 1, 0, 0).timestamp())
TEST_END_DATE = int(dt.datetime(2020, 11, 2, 0, 0).timestamp())
# print(TEST_END_DATE - TEST_START_DATE)
TEST_MAX = 100
MIN_COMMENTS = 500
TEST_SUBREDDIT = 'politics'
SAVE_DIR = "D:/subredditscrape/"
# PRAW OAuth stuff
# CLIENT_ID =
# CLIENT_SECRET =
# PASSWORD =
# USERAGENT =
# USERNAME =
random.seed(hash('setting random seedsy') % 2 ** 32 - 1)
np.random.seed(hash('improves reproducibility') % 2 ** 32 - 1)
REDDIT = praw.Reddit(client_id=CLIENT_ID, client_secret=CLIENT_SECRET,
password=PASSWORD, user_agent=USERAGENT,
username=USERNAME)
API = PushshiftAPI()
# lite version 2500 max calls
# IBM_API =
# IBM_URL =
# ADD IN SPECIFIC IBM API KEY AND INSTANCE URL
# IBM_API = ""
# IBM_URL = ""
authenticator = IAMAuthenticator(IBM_API)
TONE_ANALYZER = ToneAnalyzerV3(
version='2017-09-21',
authenticator=authenticator
)
TONE_ANALYZER.set_service_url(IBM_URL)
def get_historical_submissions(subreddit, limit):
"""returns a list of submission dictionaries from the past 30 months,
querying a random 4 hour chunk in a random day of each month"""
past_30_months = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
12, 11, 10, 9, 8, 7, 6, 5, 4, 3,
2, 1, 12, 11, 10, 9, 8, 7, 6, 5]
all_submissions = []
day = 0
year = 2020
hacky_year_flag = 0
for month in past_30_months:
# derive year
if hacky_year_flag < 9:
year = 2020
if 9 < hacky_year_flag <= 21:
year = 2019
if hacky_year_flag > 22:
year = 2018
hacky_year_flag += 1
# generate random day
if month in [1, 3, 5, 7, 8, 10, 12]:
day = random.randint(1, 31)
if month in [4, 6, 9, 11]:
day = random.randint(1, 30)
if month in [2]:
day = random.randint(1, 28)
# generate random 4 hour time chunk
start_hour = random.randint(0, 19)
end_hour = start_hour + 4
start_time = int(dt.datetime(year, month, day, start_hour, 0).timestamp())
end_time = int(dt.datetime(year, month, day, end_hour, 0).timestamp())
# gets submissions and adds submission dictionary to master list
threads = list(get_submissions(subreddit, start_time, end_time, limit))
for item in threads:
all_submissions.append(item.d_)
print('querying month:', hacky_year_flag)
print('total submissions:', len(all_submissions))
return all_submissions
def get_november_historical_comments(subreddit, limit):
"""given a subreddit and limit gets the first 100 submissions in two consecutive 4 hour chunks """
all_submissions = []
days = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
month = 11
year = 2020
for day in days:
# generate random 4 hour time chunk
start_hour = random.randint(0, 14)
end_hour = start_hour + 4
start_time = int(dt.datetime(year, month, day, start_hour, 0).timestamp())
end_time = int(dt.datetime(year, month, day, end_hour, 0).timestamp())
# gets submissions and adds submission dictionary to master list
threads = list(get_submissions(subreddit, start_time, end_time, limit))
for item in threads:
all_submissions.append(item.d_)
# gets submissions and adds submission dictionary to master list
threads = list(get_submissions(subreddit, start_time + 5, end_time + 5, limit))
for item in threads:
all_submissions.append(item.d_)
print('querying day:', day)
print('total submissions:', len(all_submissions))
return all_submissions
def test_get_historical_submissions():
submission_dictionary = get_historical_submissions(TEST_SUBREDDIT, TEST_MAX)
num_comments = 0
for submission in submission_dictionary:
num_comments += submission['num_comments']
print('total submissions:', len(submission_dictionary))
print("total comments:", num_comments)
def save_historical_submission_comments(list_of_dictionary_submissions, file_name):
"""saves all of the comments from a list of dictionary submissions into a single column csv"""
all_comments_list = []
submission_count = 1
for submission_dict in list_of_dictionary_submissions:
print('saving comments from submission', submission_count, '/', len(list_of_dictionary_submissions))
submission_count += 1
submission = (REDDIT.submission(id=submission_dict['id']))
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
temp_dict = {'body': comment.body, 'comment_id': comment, 'author': comment.author,
'created_utc': comment.created_utc, 'permalink': comment.permalink,
'link_id': comment.link_id, 'score': comment.score}
all_comments_list.append(temp_dict)
print('total comments: ', len(all_comments_list))
comments_df = pd.DataFrame(all_comments_list, columns=['body', 'comment_id', 'author', 'created_utc',
'permalink', 'link_id', 'score'])
print(comments_df)
print('saving comments to file:', file_name, '...')
comments_df.to_csv(file_name)
print('done.')
def test_save_historical_submission_comments():
"""tests the save function with 3 submission comments"""
data = []
threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))
for item in threads:
data.append(item.d_)
save_historical_submission_comments(data, TEST_SUBREDDIT + '_TEST.csv')
def run_save_historical_data():
"""runs the full 30 month submission/comment scrape and saves it to a csv"""
data = get_historical_submissions(TEST_SUBREDDIT, TEST_MAX)
save_historical_submission_comments(data, TEST_SUBREDDIT + '_past_30_months_comments.csv')
def get_all_submissions_in_24_hours(subreddit, start_date, end_date, limit):
"""returns list of all submissions within 6, 4 hour increments in a twenty four hour period over MIN_COMMENTS."""
list_of_all_submissions = []
    for i in range(0, 86400, 14400):
        inc_start_date = start_date + i
        inc_end_date = end_date + 600 + i
threads = list(get_submissions(subreddit, inc_start_date, inc_end_date, limit))
for item in threads:
if item.d_['num_comments'] > MIN_COMMENTS:
list_of_all_submissions.append(item.d_)
print(len(list_of_all_submissions))
return list_of_all_submissions
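# Illustrative sketch (not part of the original script): the loop above steps through
# the 24-hour window in 14400-second (4-hour) offsets, i.e. i = 0, 14400, ..., 72000,
# so a start_date at midnight yields six query windows beginning at 00:00, 04:00,
# 08:00, 12:00, 16:00 and 20:00, keeping only submissions with more than MIN_COMMENTS
# comments.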
def test_get_all_submissions_in_24_hours():
"""tests get_all_submissions_in_24_hours by printing out the length of submissions"""
all_submissions = get_all_submissions_in_24_hours(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX)
print(len(all_submissions))
for x in all_submissions[0:3]:
print(x)
def get_comments(subreddit, start_date, end_date, limit):
"""returns a generator object of comments from a subreddit between a certain period of time"""
api = PushshiftAPI()
return api.search_comments(after=start_date, before=end_date,
subreddit=subreddit, limit=limit
# , filter=['author', 'body', 'created_utc', 'nest_level']
)
def test_get_comments():
"""tests the get_comments function to return 1 comment between TEST_START_DATE and TEST_END_DATE"""
comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))
# prints the dictionary of variables for each comment
for x in comments:
print(x.d_)
def get_number_of_submissions():
"""prints out number of submissions in a subreddit in a given time period"""
start = time.time()
print("counting submissions in", TEST_SUBREDDIT, 'between', TEST_START_DATE, 'and', TEST_END_DATE)
threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))
end = time.time()
print('time elapsed: ', end - start)
print('total submissions:', len(threads))
print(TEST_MAX)
def get_submissions(subreddit, start_date, end_date, limit):
"""returns a generator object of threads from a subreddit between a certain period of time"""
return API.search_submissions(after=start_date, before=end_date,
subreddit=subreddit, limit=limit)
def test_get_submissions():
"""tests get_submissions function to return 1 thread between TEST_START_DATE and TEST_END_DATE"""
threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))
# prints the dictionary of variables for each submission
for x in threads:
print(x.d_)
def get_comments_from_submission(submission_id):
"""returns a submission from a given submission id"""
submission = (REDDIT.submission(id=submission_id))
return submission
def test_get_comments_from_submission():
"""tests get_comments_from_submission by printing out the comments of a submission"""
# gets a test submission
threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))
submission_id = threads[0].d_['id']
# prints link to thread
thread_full_link = threads[0].d_['full_link']
print(thread_full_link)
# prints submission title
thread_title = threads[0].d_['title']
print(thread_title)
submission = get_comments_from_submission(submission_id)
for top_level_comment in submission.comments:
print(top_level_comment.body)
def get_list_of_submission_dictionaries(subreddit, start_date, end_date, limit):
"""returns a list of dictionaries of each submission in a given subreddit between a period of time"""
list_of__submission_dictionaries = []
threads = list(API.search_submissions(after=start_date, before=end_date,
subreddit=subreddit, limit=limit))
# appends thread submission dictionary to a list
for thread_submission in threads:
list_of__submission_dictionaries.append(thread_submission.d_)
return list_of__submission_dictionaries
def test_get_list_of_submission_dictionaries():
submission_list = get_list_of_submission_dictionaries(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX)
print(submission_list[0])
print(len(submission_list))
def filter_list_of_dictionary_submission(submission_list, min_comments):
"""filters the list of submission dictionaries to only include submissions with more than min_comments comments"""
filtered_submission_list = []
# filter submission_list for submissions with > min_comments # comments
for submission_dictionary in submission_list:
if submission_dictionary['num_comments'] >= min_comments:
filtered_submission_list.append(submission_dictionary)
return filtered_submission_list
def test_filter_list_of_dictionary_submission():
"""prints length of the submission list before and after filtering by min_comments number"""
submission_list = get_list_of_submission_dictionaries(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX)
print(len(submission_list))
filtered_list = filter_list_of_dictionary_submission(submission_list, MIN_COMMENTS)
print(len(filtered_list))
def get_comments_from_submission_id(submission_id):
"""returns a list of all comment ids in a submission by submission id"""
flat_comments = []
tree_comments = []
submission = (REDDIT.submission(id=submission_id))
print(submission.num_comments)
print(submission.shortlink)
# sort comments by best and get the flattened list
submission.comment_sort = 'confidence'
# tree comments traversal
submission.comments.replace_more(limit=1)
for comm in submission.comments.list():
tree_comments.append(comm)
flat_comments = list(submission.comments)
return flat_comments, tree_comments
def test_print_comments():
"""prints first 5 comments returned by get_comments_from_submission_id"""
flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')
print(len(flat_comments))
print(len(tree_comments))
print('flat comments')
for c in flat_comments[0:5]:
comment_instance = REDDIT.comment(c)
print(comment_instance.body)
print()
print('tree comments')
for c in tree_comments[0:5]:
comment_instance = REDDIT.comment(c)
print(comment_instance.body)
def get_comments_by_percentage(submission_id, percent_of_comments):
"""returns a list of comment id that is a percentage of the total number of comments in a submission """
comments_list = []
submission = (REDDIT.submission(id=submission_id))
max_comments = int(submission.num_comments * percent_of_comments)
print(submission.num_comments)
print(max_comments)
comment_count = 0
# sort comments by best and get list of id's
submission.comment_sort = 'confidence'
submission.comments.replace_more(limit=40)
for comment_id in submission.comments.list():
if comment_count >= max_comments:
break
comments_list.append(comment_id)
comment_count += 1
return comments_list
def test_get_comments_by_percentage():
"""tests get_comments_by_percentage"""
comment_ids = get_comments_by_percentage('jrjn70', .10)
print(len(comment_ids))
for c in comment_ids[0:5]:
comment_instance = REDDIT.comment(c)
print(comment_instance.body)
def read_csv_to_dataframe(file_name):
"""reads a csv file into a dataframe and drops the redundant index column"""
df = pd.read_csv(file_name)
df = df.drop(['Unnamed: 0'], axis=1)
return df
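# Illustrative sketch: pandas' to_csv writes the dataframe index as an unnamed
# column by default, which is why read_csv_to_dataframe drops 'Unnamed: 0'.
# The file name below is a placeholder.
def example_save_and_reload_submissions():
    """saves filtered submissions to csv and reloads them as a dataframe"""
    submissions = get_list_of_submission_dictionaries(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX)
    filtered = filter_list_of_dictionary_submission(submissions, MIN_COMMENTS)
    # writing without index=False produces the 'Unnamed: 0' column on re-read
    pd.DataFrame(filtered).to_csv('submissions.csv')
    return read_csv_to_dataframe('submissions.csv')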
def test_read_csv_to_dataframe(fname):
<filename>src/textacy/extract/basics.py<gh_stars>1000+
"""
Basics
------
:mod:`textacy.extract.basics`: Extract basic components from a document or sentence
via spaCy, with bells and whistles for filtering the results.
"""
from __future__ import annotations
from functools import partial
from typing import Collection, Iterable, List, Optional, Set, Union
from cytoolz import itertoolz
from spacy.parts_of_speech import DET
from spacy.tokens import Span, Token
from .. import constants, errors, types, utils
def words(
doclike: types.DocLike,
*,
filter_stops: bool = True,
filter_punct: bool = True,
filter_nums: bool = False,
include_pos: Optional[str | Collection[str]] = None,
exclude_pos: Optional[str | Collection[str]] = None,
min_freq: int = 1,
) -> Iterable[Token]:
"""
Extract an ordered sequence of words from a document processed by spaCy,
optionally filtering words by part-of-speech tag and frequency.
Args:
doclike
filter_stops: If True, remove stop words from word list.
filter_punct: If True, remove punctuation from word list.
filter_nums: If True, remove number-like words (e.g. 10, "ten")
from word list.
include_pos: Remove words whose part-of-speech tag IS NOT in the specified tags.
exclude_pos: Remove words whose part-of-speech tag IS in the specified tags.
min_freq: Remove words that occur in ``doclike`` fewer than ``min_freq`` times.
Yields:
Next token from ``doclike`` passing specified filters in order of appearance
in the document.
Raises:
TypeError: if ``include_pos`` or ``exclude_pos`` is not a str, a set of str,
or a falsy value
Note:
Filtering by part-of-speech tag uses the universal POS tag set; for details,
check spaCy's docs: https://spacy.io/api/annotation#pos-tagging
"""
words_: Iterable[Token] = (w for w in doclike if not w.is_space)
if filter_stops is True:
words_ = (w for w in words_ if not w.is_stop)
if filter_punct is True:
words_ = (w for w in words_ if not w.is_punct)
if filter_nums is True:
words_ = (w for w in words_ if not w.like_num)
if include_pos:
include_pos = utils.to_collection(include_pos, str, set)
include_pos = {pos.upper() for pos in include_pos}
words_ = (w for w in words_ if w.pos_ in include_pos)
if exclude_pos:
exclude_pos = utils.to_collection(exclude_pos, str, set)
exclude_pos = {pos.upper() for pos in exclude_pos}
words_ = (w for w in words_ if w.pos_ not in exclude_pos)
if min_freq > 1:
words_ = list(words_)
freqs = itertoolz.frequencies(w.lower_ for w in words_)
words_ = (w for w in words_ if freqs[w.lower_] >= min_freq)
for word in words_:
yield word
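# Minimal usage sketch, assuming the spaCy model "en_core_web_sm" is installed;
# the sample text and filters are arbitrary placeholders.
if __name__ == "__main__":
    import spacy

    _nlp = spacy.load("en_core_web_sm")
    _doc = _nlp("The quick brown foxes jumped over the lazy dog 10 times.")
    # keep nouns and verbs only, dropping stop words, punctuation and numbers
    print([t.text for t in words(_doc, filter_nums=True, include_pos={"NOUN", "VERB"})])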
def ngrams(
doclike: types.DocLike,
n: int | Collection[int],
*,
filter_stops: bool = True,
filter_punct: bool = True,
filter_nums: bool = False,
include_pos: Optional[str | Collection[str]] = None,
exclude_pos: Optional[str | Collection[str]] = None,
min_freq: int = 1,
) -> Iterable[Span]:
"""
Extract an ordered sequence of n-grams (``n`` consecutive tokens) from a spaCy
``Doc`` or ``Span``, for one or multiple ``n`` values, optionally filtering n-grams
by the types and parts-of-speech of the constituent tokens.
Args:
doclike
n: Number of tokens included per n-gram; for example, ``2`` yields bigrams
and ``3`` yields trigrams. If multiple values are specified, then the
collections of n-grams are concatenated together; for example, ``(2, 3)``
yields bigrams and then trigrams.
filter_stops: If True, remove ngrams that start or end with a stop word.
filter_punct: If True, remove ngrams that contain any punctuation-only tokens.
filter_nums: If True, remove ngrams that contain any numbers
or number-like tokens (e.g. 10, 'ten').
include_pos: Remove ngrams if any constituent tokens' part-of-speech tags
ARE NOT included in this param.
exclude_pos: Remove ngrams if any constituent tokens' part-of-speech tags
ARE included in this param.
min_freq: Remove ngrams that occur in ``doclike`` fewer than ``min_freq`` times
Yields:
Next ngram from ``doclike`` passing all specified filters, in order of appearance
in the document.
Raises:
ValueError: if any ``n`` < 1
TypeError: if ``include_pos`` or ``exclude_pos`` is not a str, a set of str,
or a falsy value
Note:
Filtering by part-of-speech tag uses the universal POS tag set; for details,
check spaCy's docs: https://spacy.io/api/annotation#pos-tagging
"""
ns = utils.to_collection(n, int, tuple)
if any(n_ < 1 for n_ in ns):
raise ValueError("n must be greater than or equal to 1")
if include_pos:
include_pos = {
pos.upper() for pos in utils.to_collection(include_pos, str, set)
}
if exclude_pos:
exclude_pos = {
pos.upper() for pos in utils.to_collection(exclude_pos, str, set)
}
for n_ in ns:
ngrams_ = (doclike[i : i + n_] for i in range(len(doclike) - n_ + 1))
ngrams_ = (ng for ng in ngrams_ if not any(w.is_space for w in ng))
if filter_stops is True:
ngrams_ = (ng for ng in ngrams_ if not ng[0].is_stop and not ng[-1].is_stop)
if filter_punct is True:
ngrams_ = (ng for ng in ngrams_ if not any(w.is_punct for w in ng))
if filter_nums is True:
ngrams_ = (ng for ng in ngrams_ if not any(w.like_num for w in ng))
if include_pos:
ngrams_ = (ng for ng in ngrams_ if all(w.pos_ in include_pos for w in ng))
if exclude_pos:
ngrams_ = (ng for ng in ngrams_ if not any(w.pos_ in exclude_pos for w in ng))
if min_freq > 1:
ngrams_ = list(ngrams_)
freqs = itertoolz.frequencies(ng.text.lower() for ng in ngrams_)
ngrams_ = (ng for ng in ngrams_ if freqs[ng.text.lower()] >= min_freq)
for ngram in ngrams_:
yield ngram
def entities(
doclike: types.DocLike,
*,
include_types: Optional[str | Collection[str]] = None,
exclude_types: Optional[str | Collection[str]] = None,
drop_determiners: bool = True,
min_freq: int = 1,
) -> Iterable[Span]:
"""
Extract an ordered sequence of named entities (PERSON, ORG, LOC, etc.) from
a ``Doc``, optionally filtering by entity types and frequencies.
Args:
doclike
include_types: Remove entities whose type IS NOT
in this param; if "NUMERIC", all numeric entity types ("DATE",
"MONEY", "ORDINAL", etc.) are included
exclude_types: Remove entities whose type IS
in this param; if "NUMERIC", all numeric entity types ("DATE",
"MONEY", "ORDINAL", etc.) are excluded
drop_determiners: Remove leading determiners (e.g. "the")
from entities (e.g. "the United States" => "United States").
.. note:: Entities from which a leading determiner has been removed
are, effectively, *new* entities, and not saved to the ``Doc``
from which they came. This is irritating but unavoidable, since
this function is not meant to have side-effects on document state.
If you're only using the text of the returned spans, this is no
big deal, but watch out if you're counting on determiner-less
entities associated with the doc downstream.
min_freq: Remove entities that occur in ``doclike`` fewer
than ``min_freq`` times
Yields:
Next entity from ``doclike`` passing all specified filters in order of appearance
in the document
Raises:
TypeError: if ``include_types`` or ``exclude_types`` is not a str, a set of
str, or a falsy value
"""
ents = doclike.ents
# HACK: spacy's models have been erroneously tagging whitespace as entities
# https://github.com/explosion/spaCy/commit/1e6725e9b734862e61081a916baf440697b9971e
ents = (ent for ent in ents if not ent.text.isspace())
include_types = _parse_ent_types(include_types, "include")
exclude_types = _parse_ent_types(exclude_types, "exclude")
if include_types:
if isinstance(include_types, str):
ents = (ent for ent in ents if ent.label_ == include_types)
elif isinstance(include_types, (set, frozenset, list, tuple)):
ents = (ent for ent in ents if ent.label_ in include_types)
if exclude_types:
if isinstance(exclude_types, str):
ents = (ent for ent in ents if ent.label_ != exclude_types)
elif isinstance(exclude_types, (set, frozenset, list, tuple)):
ents = (ent for ent in ents if ent.label_ not in exclude_types)
if drop_determiners is True:
ents = (
ent
if ent[0].pos != DET
else Span(
ent.doc, ent.start + 1, ent.end, label=ent.label, vector=ent.vector
)
for ent in ents
)
if min_freq > 1:
ents = list(ents)
freqs = itertoolz.frequencies(ent.text.lower() for ent in ents)
ents = (ent for ent in ents if freqs[ent.text.lower()] >= min_freq)
for ent in ents:
yield ent
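# Minimal sketch: extract non-numeric named entities from an already-processed
# spaCy doc, assuming its pipeline includes an NER component.
#
#     ents = list(entities(doc, exclude_types="NUMERIC", drop_determiners=True))
#     print([(ent.text, ent.label_) for ent in ents])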
def _parse_ent_types(
ent_types: Optional[str | Collection[str]], which: str,
) -> Optional[str | Set[str]]:
if not ent_types:
return None
elif isinstance(ent_types, str):
ent_types = ent_types.upper()
# replace the shorthand numeric case by its corresponding constant
if ent_types == "NUMERIC":
return constants.NUMERIC_ENT_TYPES
else:
return ent_types
elif isinstance(ent_types, (set, frozenset, list, tuple)):
ent_types = {ent_type.upper() for ent_type in ent_types}
# again, replace the shorthand numeric case by its corresponding constant
# and include it in the set in case other types are specified
if any(ent_type == "NUMERIC" for ent_type in ent_types):
return ent_types.union(constants.NUMERIC_ENT_TYPES)
else:
return ent_types
else:
raise TypeError(
    "ent_types must be a str, a collection of str, or a falsy value, "
    "not {}".format(type(ent_types))
)
bin_centers[0] - fwhm / 2.) and (x < bin_centers[-1] + fwhm / 2.):
i = np.argmin(np.abs(bin_centers - x))
bflx[i] += z
bnrm[i] += 1
bflx /= bnrm
# Pad the binned flux by 5% on each side to get rid of edge effects
pad = int(0.05 * len(bin_centers))
bin_centers = bin_centers[pad:-pad]
bflx = bflx[pad:-pad]
# This is the signal in the bin centered at the line
signal = bflx[len(bflx) // 2]
std = np.nanstd(bflx)
snr = (signal - 1.) / std
# The histogram inset
if plot:
ax[3] = pl.axes([0.195 + 0.01, 0.36 + 0.01, 0.2, 0.15], zorder = 2)
n, bins, _ = ax[3].hist((bflx - 1.) / std, bins = 30, color = 'w')
d = np.digitize(snr, bins)
if d == len(bins):
d -= 1
ax[3].axvline(bins[d] - 0.5 * (bins[1] - bins[0]), color = 'r', ls = '--')
ax[3].set_yscale('log')
ax[3].set_ylim(0.8, 1e3)
ax[3].margins(0.1, None)
ax[3].set_ylabel(r'$\log\ N$', fontsize = 14)
ax[3].set_xlabel(r'SNR', fontsize = 14)
rect = Rectangle((0.125 + 0.01, 0.36 - 0.055 + 0.01), 0.28, 0.235, facecolor='w', edgecolor='k',
transform=fig.transFigure, alpha = 0.85, zorder=1)
fig.patches.append(rect)
# The binned flux
if plot:
ax[2].plot(bin_centers, bflx, 'k.', alpha = 0.1)
ax[2].bar(bin_centers - fwhm / 2., bflx, color = 'None', width = fwhm)
ax[2].axhline(1., color = 'r', ls = '--', alpha = 0.5)
lo, hi = np.min(bflx), np.max(bflx)
rng = hi - lo
ax[2].set_ylim(lo - 0.1 * rng, hi + 0.1 * rng)
# Print some info
if not quiet:
print('Total integration time: %.1f hours' % (total_exp / 3600.))
print('SNR: %.2f' % snr)
# Appearance
if plot:
ax[0].axvline(line, color = 'k', ls = '--')
ax[0].set_xlim(line - plot_sz / 2., line + plot_sz / 2.)
ax[0].set_ylim(-0.025, 1.025)
ax[1].axvline(line, color = 'k', ls = '--')
ax[2].axvline(line, color = 'k', ls = '--')
ax[1].set_xlim(line - plot_sz / 2., line + plot_sz / 2.)
ax[2].set_xlim(line - plot_sz / 2., line + plot_sz / 2.)
ax[1].yaxis.set_major_locator(MaxNLocator(nbins = 4))
ax[2].yaxis.set_major_locator(MaxNLocator(nbins = 4))
ax[2].set_xlabel('Wavelength (Angstroms)', fontsize = 24)
ax[0].set_ylabel('Planet Orbital Phase', fontsize = 24)
ax[1].set_ylabel('Stacked', fontsize = 18)
[tick.label.set_fontsize(8) for tick in ax[1].yaxis.get_major_ticks()]
ax[2].set_ylabel('Binned', fontsize = 18)
[tick.label.set_fontsize(8) for tick in ax[2].yaxis.get_major_ticks()]
# Axes limits for the stacked/binned flux subplots
if stack_lims is not None:
ax[1].set_ylim(*stack_lims[0])
ax[2].set_ylim(*stack_lims[1])
pl.setp(ax[0].get_xticklabels(), visible = False)
pl.setp(ax[1].get_xticklabels(), visible = False)
ax[0].ticklabel_format(useOffset=False)
ax[1].yaxis.set_major_formatter(FormatStrFormatter("%.4f"))
ax[2].yaxis.set_major_formatter(FormatStrFormatter("%.4f"))
ax[0].plot(0, 0, color = 'b', lw = 2, label = 'Planet')
ax[0].plot(0, 0, color = 'g', lw = 2, label = 'Earth')
ax[0].plot(0, 0, color = 'r', lw = 2, label = 'Star')
ax[0].legend(loc = 'upper right', fontsize = 20)
return {'fig': fig, 'ax': ax, 'signal': bflx[len(bflx) // 2],
'bins': bin_centers, 'bflx': bflx, 'snr': snr}
class SearchWrap(object):
'''
Callable wrapper around ``Compute`` for a single point of the planet-parameter
grid; designed to be passed to ``map``-style functions (see ``Search``).
'''
def __init__(self, params, **kwargs):
'''
Store the parameter grid and the keyword arguments forwarded to ``Compute``;
plotting and console output are disabled.
'''
self.kwargs = kwargs
self.kwargs.update({'quiet': True, 'plot': False})
self.params = params
def __call__(self, i):
'''
Evaluate grid point ``i`` with ``Compute`` and return ``(i, binned_flux)``.
'''
inclination, period, mean_longitude, stellar_mass = self.params[i]
planet = ProxCenB()
planet.inclination = inclination
planet.mass = 1.27 / np.sin(planet.inclination * np.pi / 180)
planet.period = period
planet.stellar_mass = stellar_mass
planet.mean_longitude = mean_longitude
res = Compute(planet = planet, **self.kwargs)
return (i, res['bflx'])
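# Usage sketch: SearchWrap is meant to be mapped over grid indices, either
# serially or with a process pool, as Search() does via its ``fmap`` argument.
# The grid values below are placeholders.
#
#     params = list(itertools.product([60., 70.], [11.186], [110.], [0.120]))
#     sw = SearchWrap(params, line=Spectrum.OxygenGreen)
#     results = list(map(sw, range(len(params))))  # or a multiprocessing Pool's map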
def PBSSearch(line = Spectrum.OxygenGreen, nodes = 8, ppn = 16, walltime = 100):
'''
Submits a PBS cluster job to do the line search.
:param int walltime: The number of hours to request. Default `100`
:param int nodes: The number of nodes to request. Default `8`
:param int ppn: The number of processors per node to request. Default `16`
'''
# Cache the data
if not os.path.exists(os.path.join(SEARCH_DIR, 'data.npz')):
GetData()
# Submit the cluster job
pbsfile = os.path.join(SEARCH_DIR, 'search.pbs')
name = '%d' % np.floor(line)
str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % (nodes, ppn, ppn, 40 * nodes)
str_w = 'walltime=%d:00:00' % walltime
str_v = 'LINE=%.3f,NODES=%d,SEARCH_DIR=%s' % (line, nodes, SEARCH_DIR)
str_out = os.path.join(SEARCH_DIR, '%s.log' % name)
qsub_args = ['qsub', pbsfile,
'-v', str_v,
'-o', str_out,
'-j', 'oe',
'-N', name,
'-l', str_n,
'-l', str_w]
print("Submitting the job...")
subprocess.call(qsub_args)
def Search(inclination = np.arange(30., 90., 1.),
period = np.arange(11.186 - 3 * 0.002, 11.186 + 3 * 0.002, 0.002 / 4),
mean_longitude = np.arange(110. - 3 * 8., 110. + 3 * 8., 8. / 4),
stellar_mass = [0.120], clobber = False,
period_ticks = [11.182, 11.184, 11.186, 11.188, 11.190],
mean_longitude_ticks = [90., 100., 110., 120., 130.],
inclination_ticks = [35, 45, 55, 65, 75, 85], fmap = map, **kwargs):
'''
Run (or load from disk) a grid search over planet inclination, period,
mean longitude, and stellar mass for the given line, then plot the injection
test, the river plot of the strongest signal, and a triangle plot of the
signal across the grid.
'''
line = kwargs.get('line', Spectrum.OxygenGreen)
pref = "%d" % np.floor(line)
search_file = os.path.join(SEARCH_DIR, '%s_search.npz' % pref)
if clobber or not os.path.exists(search_file):
# Get the bin array
wpca_sz = kwargs.get('wpca_sz', 250)
fwhm = kwargs.get('fwhm', 0.05)
bins = np.append(np.arange(line, line - wpca_sz / 2., -fwhm)[::-1], np.arange(line, line + wpca_sz / 2., fwhm)[1:])
pad = int(0.05 * len(bins))
bins = bins[pad:-pad]
# Loop over planet params
print("Running grid search...")
params = list(itertools.product(inclination, period, mean_longitude, stellar_mass))
sw = SearchWrap(params, **kwargs)
res = list(fmap(sw, range(len(params))))
bflx = np.zeros((len(params), len(bins)))
for i, b in res:
bflx[i] = b
bflx = bflx.reshape(len(inclination), len(period), len(mean_longitude), len(stellar_mass), -1)
bflx = np.rollaxis(bflx, 4)
print("Saving...")
np.savez(search_file, bins = bins, bflx = bflx, inclination = inclination, period = period,
mean_longitude = mean_longitude, stellar_mass = stellar_mass, params = params)
else:
print("Loading saved search...")
data = np.load(search_file)
bins = data['bins']
bflx = data['bflx']
inclination = data['inclination']
period = data['period']
mean_longitude = data['mean_longitude']
stellar_mass = data['stellar_mass']
params = data['params']
# Here we compute the distribution of the values of the
# maximum signals at each wavelength (to compute significance later)
bmax = np.max(bflx, axis = (1,2,3,4))
bmu = np.nanmean(bmax)
bstd = np.nanstd(bmax)
# The binned flux at the line as a function of all the grid params
bline = bflx[np.argmin(np.abs(line - bins))]
blinemax = bmax[np.argmin(np.abs(line - bins))]
# The best-fitting planet params
planet = ProxCenB()
ibest, pbest, mbest, sbest = params[np.nanargmax(bline)]
planet.inclination = ibest
planet.mass = 1.27 / np.sin(planet.inclination * np.pi / 180)
planet.period = pbest
planet.mean_longitude = mbest
planet.stellar_mass = sbest
# --- FIGURE: Injection test (~8 sigma). This is our nominal detection threshold.
# Note that we inject 10 angstroms redward of the line we're interested in, so we
# don't stack an injected signal on top of an actual signal. Note also that we
# purposefully inject assuming the same planet params as those of the peak signal,
# so that the number of spectra and the noise properties are the same.
print("Plotting injection test...")
inj_kwargs = dict(kwargs)
inj_kwargs.update({'line': line - 10., 'quiet': True})
res = Compute(inject_contrast = 1.8e-2, inject_planet = planet, planet = planet, **inj_kwargs)
fig1 = res['fig']
inj_sig = (res['signal'] - bmu) / bstd
fig1.suptitle('%d$\sigma$ Injected Signal' % inj_sig, fontsize = 30, y = 0.95)
fig1.savefig('%s_injection_river.pdf' % pref, bbox_inches = 'tight')
#--- FIGURE: Plot the "river plot" for the best solution
print("Plotting river plot...")
res = Compute(planet = planet, quiet = True, **kwargs)
fig5 = res['fig']
fig5.suptitle('Strongest Signal', fontsize = 30, y = 0.95)
fig5.savefig('%s_strongest_river.pdf' % pref, bbox_inches = 'tight')
# --- FIGURE: Triangle plot
print("Plotting triangle plot...")
fig2, ax2 = pl.subplots(3,3)
fig2.subplots_adjust(wspace = 0.08, hspace = 0.1, top = 0.975, bottom = 0.15)
# The marginalized distributions
ax2[0,0].plot(inclination, np.max(bline, axis = (1,2,3)) - 1., color = 'k')
ax2[1,1].plot(period, np.max(bline, axis = (0,2,3)) - 1., color = 'k')
ax2[2,2].plot(mean_longitude, np.max(bline, axis = (0,1,3)) - 1., color = 'k')
# Indicate the 1-sigma bounds
ax2[1,1].axvline(11.186, color = 'k', ls = '--')
ax2[1,1].axvspan(11.186 - 0.002, 11.186 + 0.002, color = 'k', alpha = 0.075)
ax2[2,2].axvline(110., color = 'k', ls = '--')
ax2[2,2].axvspan(110. - 8., 110. + 8., color = 'k', alpha = 0.075)
# The two-parameter heatmaps
ax2[1,0].imshow(np.max(bline, axis = (2,3)).T, aspect = 'auto', extent = (np.min(inclination), np.max(inclination), np.min(period), np.max(period)), cmap = pl.get_cmap('Greys'), origin = 'lower')
ax2[2,0].imshow(np.max(bline, axis = (1,3)).T, aspect = 'auto', extent = (np.min(inclination), np.max(inclination), np.min(mean_longitude), np.max(mean_longitude)), cmap = pl.get_cmap('Greys'), origin = 'lower')
ax2[2,1].imshow(np.max(bline, axis = (0,3)).T, aspect = 'auto', extent = (np.min(period), np.max(period), np.min(mean_longitude), np.max(mean_longitude)), cmap = pl.get_cmap('Greys'), origin = 'lower')
######################################
# DO NOT USE ON WALLABY #
# USE THE ONE IN /usr/lib/wallaby.py #
# INSTEAD #
######################################
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_wallaby', [dirname(__file__)])
except ImportError:
import _wallaby
return _wallaby
if fp is not None:
try:
_mod = imp.load_module('_wallaby', fp, pathname, description)
finally:
fp.close()
return _mod
_wallaby = swig_import_helper()
del swig_import_helper
else:
import _wallaby
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class IntSensor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntSensor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntSensor, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _wallaby.delete_IntSensor
__del__ = lambda self : None;
def value(self): return _wallaby.IntSensor_value(self)
IntSensor_swigregister = _wallaby.IntSensor_swigregister
IntSensor_swigregister(IntSensor)
class ShortSensor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ShortSensor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ShortSensor, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _wallaby.delete_ShortSensor
__del__ = lambda self : None;
def value(self): return _wallaby.ShortSensor_value(self)
ShortSensor_swigregister = _wallaby.ShortSensor_swigregister
ShortSensor_swigregister(ShortSensor)
class UnsignedShortSensor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, UnsignedShortSensor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, UnsignedShortSensor, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _wallaby.delete_UnsignedShortSensor
__del__ = lambda self : None;
def value(self): return _wallaby.UnsignedShortSensor_value(self)
UnsignedShortSensor_swigregister = _wallaby.UnsignedShortSensor_swigregister
UnsignedShortSensor_swigregister(UnsignedShortSensor)
class BoolSensor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BoolSensor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BoolSensor, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _wallaby.delete_BoolSensor
__del__ = lambda self : None;
def value(self): return _wallaby.BoolSensor_value(self)
BoolSensor_swigregister = _wallaby.BoolSensor_swigregister
BoolSensor_swigregister(BoolSensor)
class Motor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Motor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Motor, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Motor(*args)
try: self.this.append(this)
except: self.this = this
def clearPositionCounter(self): return _wallaby.Motor_clearPositionCounter(self)
def moveAtVelocity(self, *args): return _wallaby.Motor_moveAtVelocity(self, *args)
def moveToPosition(self, *args): return _wallaby.Motor_moveToPosition(self, *args)
def moveRelativePosition(self, *args): return _wallaby.Motor_moveRelativePosition(self, *args)
def freeze(self): return _wallaby.Motor_freeze(self)
def isMotorDone(self): return _wallaby.Motor_isMotorDone(self)
def blockMotorDone(self): return _wallaby.Motor_blockMotorDone(self)
def forward(self): return _wallaby.Motor_forward(self)
def backward(self): return _wallaby.Motor_backward(self)
def motor(self, *args): return _wallaby.Motor_motor(self, *args)
def motorPower(self, *args): return _wallaby.Motor_motorPower(self, *args)
def off(self): return _wallaby.Motor_off(self)
def port(self): return _wallaby.Motor_port(self)
__swig_destroy__ = _wallaby.delete_Motor
__del__ = lambda self : None;
Motor_swigregister = _wallaby.Motor_swigregister
Motor_swigregister(Motor)
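# Minimal usage sketch for the Motor proxy; the constructor argument is assumed
# to be a motor port number, as in the underlying libwallaby C++ API, and the
# velocity value is a placeholder.
#
#     m = Motor(0)
#     m.moveAtVelocity(500)
#     m.off()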
class BackEMF(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BackEMF, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BackEMF, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_BackEMF(*args)
try: self.this.append(this)
except: self.this = this
def value(self): return _wallaby.BackEMF_value(self)
def port(self): return _wallaby.BackEMF_port(self)
__swig_destroy__ = _wallaby.delete_BackEMF
__del__ = lambda self : None;
BackEMF_swigregister = _wallaby.BackEMF_swigregister
BackEMF_swigregister(BackEMF)
class Servo(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Servo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Servo, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Servo(*args)
try: self.this.append(this)
except: self.this = this
def setPosition(self, *args): return _wallaby.Servo_setPosition(self, *args)
def position(self): return _wallaby.Servo_position(self)
def disable(self): return _wallaby.Servo_disable(self)
def enable(self): return _wallaby.Servo_enable(self)
def setEnabled(self, *args): return _wallaby.Servo_setEnabled(self, *args)
def isEnabled(self): return _wallaby.Servo_isEnabled(self)
__swig_destroy__ = _wallaby.delete_Servo
__del__ = lambda self : None;
Servo_swigregister = _wallaby.Servo_swigregister
Servo_swigregister(Servo)
class Rgb(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Rgb, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Rgb, name)
__repr__ = _swig_repr
__swig_setmethods__["r"] = _wallaby.Rgb_r_set
__swig_getmethods__["r"] = _wallaby.Rgb_r_get
if _newclass:r = _swig_property(_wallaby.Rgb_r_get, _wallaby.Rgb_r_set)
__swig_setmethods__["g"] = _wallaby.Rgb_g_set
__swig_getmethods__["g"] = _wallaby.Rgb_g_get
if _newclass:g = _swig_property(_wallaby.Rgb_g_get, _wallaby.Rgb_g_set)
__swig_setmethods__["b"] = _wallaby.Rgb_b_set
__swig_getmethods__["b"] = _wallaby.Rgb_b_get
if _newclass:b = _swig_property(_wallaby.Rgb_b_get, _wallaby.Rgb_b_set)
def __init__(self):
this = _wallaby.new_Rgb()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Rgb
__del__ = lambda self : None;
Rgb_swigregister = _wallaby.Rgb_swigregister
Rgb_swigregister(Rgb)
class Hsv(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Hsv, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Hsv, name)
__repr__ = _swig_repr
__swig_setmethods__["h"] = _wallaby.Hsv_h_set
__swig_getmethods__["h"] = _wallaby.Hsv_h_get
if _newclass:h = _swig_property(_wallaby.Hsv_h_get, _wallaby.Hsv_h_set)
__swig_setmethods__["s"] = _wallaby.Hsv_s_set
__swig_getmethods__["s"] = _wallaby.Hsv_s_get
if _newclass:s = _swig_property(_wallaby.Hsv_s_get, _wallaby.Hsv_s_set)
__swig_setmethods__["v"] = _wallaby.Hsv_v_set
__swig_getmethods__["v"] = _wallaby.Hsv_v_get
if _newclass:v = _swig_property(_wallaby.Hsv_v_get, _wallaby.Hsv_v_set)
def __init__(self):
this = _wallaby.new_Hsv()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Hsv
__del__ = lambda self : None;
Hsv_swigregister = _wallaby.Hsv_swigregister
Hsv_swigregister(Hsv)
class Battery(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Battery, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Battery, name)
__repr__ = _swig_repr
__swig_getmethods__["isCharging"] = lambda x: _wallaby.Battery_isCharging
if _newclass:isCharging = staticmethod(_wallaby.Battery_isCharging)
__swig_getmethods__["powerLevel"] = lambda x: _wallaby.Battery_powerLevel
if _newclass:powerLevel = staticmethod(_wallaby.Battery_powerLevel)
__swig_getmethods__["rawPowerADC"] = lambda x: _wallaby.Battery_rawPowerADC
if _newclass:rawPowerADC = staticmethod(_wallaby.Battery_rawPowerADC)
def __init__(self):
this = _wallaby.new_Battery()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Battery
__del__ = lambda self : None;
Battery_swigregister = _wallaby.Battery_swigregister
Battery_swigregister(Battery)
def Battery_isCharging():
return _wallaby.Battery_isCharging()
Battery_isCharging = _wallaby.Battery_isCharging
def Battery_powerLevel(battery_type=0):
return _wallaby.Battery_powerLevel(battery_type)
Battery_powerLevel = _wallaby.Battery_powerLevel
def Battery_rawPowerADC():
return _wallaby.Battery_rawPowerADC()
Battery_rawPowerADC = _wallaby.Battery_rawPowerADC
class Analog(UnsignedShortSensor):
__swig_setmethods__ = {}
for _s in [UnsignedShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Analog, name, value)
__swig_getmethods__ = {}
for _s in [UnsignedShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, Analog, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Analog(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Analog
__del__ = lambda self : None;
def value(self): return _wallaby.Analog_value(self)
def setPullup(self, *args): return _wallaby.Analog_setPullup(self, *args)
def pullup(self): return _wallaby.Analog_pullup(self)
def port(self): return _wallaby.Analog_port(self)
Analog_swigregister = _wallaby.Analog_swigregister
Analog_swigregister(Analog)
class Analog8(Analog):
__swig_setmethods__ = {}
for _s in [Analog]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Analog8, name, value)
__swig_getmethods__ = {}
for _s in [Analog]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, Analog8, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Analog8(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Analog8
__del__ = lambda self : None;
def value(self): return _wallaby.Analog8_value(self)
Analog8_swigregister = _wallaby.Analog8_swigregister
Analog8_swigregister(Analog8)
class Analog10(Analog):
__swig_setmethods__ = {}
for _s in [Analog]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, Analog10, name, value)
__swig_getmethods__ = {}
for _s in [Analog]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, Analog10, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _wallaby.new_Analog10(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Analog10
__del__ = lambda self : None;
def value(self): return _wallaby.Analog10_value(self)
Analog10_swigregister = _wallaby.Analog10_swigregister
Analog10_swigregister(Analog10)
class Acceleration(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Acceleration, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Acceleration, name)
__repr__ = _swig_repr
__swig_getmethods__["x"] = lambda x: _wallaby.Acceleration_x
if _newclass:x = staticmethod(_wallaby.Acceleration_x)
__swig_getmethods__["y"] = lambda x: _wallaby.Acceleration_y
if _newclass:y = staticmethod(_wallaby.Acceleration_y)
__swig_getmethods__["z"] = lambda x: _wallaby.Acceleration_z
if _newclass:z = staticmethod(_wallaby.Acceleration_z)
__swig_getmethods__["calibrate"] = lambda x: _wallaby.Acceleration_calibrate
if _newclass:calibrate = staticmethod(_wallaby.Acceleration_calibrate)
def __init__(self):
this = _wallaby.new_Acceleration()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _wallaby.delete_Acceleration
__del__ = lambda self : None;
Acceleration_swigregister = _wallaby.Acceleration_swigregister
Acceleration_swigregister(Acceleration)
def Acceleration_x():
return _wallaby.Acceleration_x()
Acceleration_x = _wallaby.Acceleration_x
def Acceleration_y():
return _wallaby.Acceleration_y()
Acceleration_y = _wallaby.Acceleration_y
def Acceleration_z():
return _wallaby.Acceleration_z()
Acceleration_z = _wallaby.Acceleration_z
def Acceleration_calibrate():
return _wallaby.Acceleration_calibrate()
Acceleration_calibrate = _wallaby.Acceleration_calibrate
class AccelX(ShortSensor):
__swig_setmethods__ = {}
for _s in [ShortSensor]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, AccelX, name, value)
__swig_getmethods__ = {}
for _s in [ShortSensor]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
classification in [
# First we try rerunning every pass we've previously seen succeed.
PassClassification.DUBIOUS,
# If that didn't work, we pull in some new candidate passes.
PassClassification.CANDIDATE,
# If that still didn't work, we now pull out all the stops and
# bring in the desperation passes. These are either passes that
# started as CANDIDATE but we have never seen work, or ones that
# are so expensive that they begin life as AVOID.
PassClassification.AVOID
]:
if self.run_queued_until_change(classification):
return True
assert self.shrink_target is initial
return False
@property
def buffer(self):
return self.shrink_target.buffer
@property
def blocks(self):
return self.shrink_target.blocks
def pass_to_descendant(self):
"""Attempt to replace each example with a descendant example.
This is designed to deal with strategies that call themselves
recursively. For example, suppose we had:
binary_tree = st.deferred(
lambda: st.one_of(
st.integers(), st.tuples(binary_tree, binary_tree)))
This pass guarantees that we can replace any binary tree with one of
its subtrees - each of those will create an interval that the parent
could validly be replaced with, and this pass will try doing that.
This is pretty expensive - it takes O(len(intervals)^2) - so we run it
late in the process when we've got the number of intervals as far down
as possible.
"""
for ex in self.each_non_trivial_example():
st = self.shrink_target
descendants = sorted(set(
st.buffer[d.start:d.end] for d in self.shrink_target.examples
if d.start >= ex.start and d.end <= ex.end and
d.length < ex.length and d.label == ex.label
), key=sort_key)
for d in descendants:
if self.incorporate_new_buffer(
self.buffer[:ex.start] + d + self.buffer[ex.end:]
):
break
def is_shrinking_block(self, i):
"""Checks whether block i has been previously marked as a shrinking
block.
If the shrink target has changed since i was last checked, will
attempt to calculate if an equivalent block in a previous shrink
target was marked as shrinking.
"""
if not self.__shrinking_prefixes:
return False
try:
return self.__shrinking_block_cache[i]
except KeyError:
pass
t = self.shrink_target
return self.__shrinking_block_cache.setdefault(
i,
t.buffer[:t.blocks[i][0]] in self.__shrinking_prefixes
)
def is_payload_block(self, i):
"""A block is payload if it is entirely non-structural: We can tinker
with its value freely and this will not affect the shape of the input
language.
This is mostly a useful concept when we're doing lexicographic
minimization on multiple blocks at once - by restricting ourselves to
payload blocks, we expect the shape of the language to not change
under us (but must still guard against it doing so).
"""
return not (
self.is_shrinking_block(i) or
i in self.shrink_target.forced_blocks
)
def lower_common_block_offset(self):
"""Sometimes we find ourselves in a situation where changes to one part
of the byte stream unlock changes to other parts. Sometimes this is
good, but sometimes this can cause us to exhibit exponential slow
downs!
e.g. suppose we had the following:
m = draw(integers(min_value=0))
n = draw(integers(min_value=0))
assert abs(m - n) > 1
If this fails then we'll end up with a loop where on each iteration we
reduce each of m and n by 2 - m can't go lower because of n, then n
can't go lower because of m.
This will take us O(m) iterations to complete, which is exponential in
the data size, as we gradually zig zag our way towards zero.
This can only happen if we're failing to reduce the size of the byte
stream: The number of iterations that reduce the length of the byte
stream is bounded by that length.
So what we do is this: We keep track of which blocks are changing, and
then if there's some non-zero common offset to them we try and minimize
them all at once by lowering that offset.
This may not work, and it definitely won't get us out of all possible
exponential slow downs (an example of where it doesn't is where the
shape of the blocks changes as a result of this bouncing behaviour),
but it fails fast when it doesn't work and gets us out of a really
nastily slow case when it does.
"""
if len(self.__changed_blocks) <= 1:
return
current = self.shrink_target
blocked = [current.buffer[u:v] for u, v in current.blocks]
changed = [
i for i in sorted(self.__changed_blocks)
if any(blocked[i]) and i not in self.shrink_target.forced_blocks
]
if not changed:
return
ints = [int_from_bytes(blocked[i]) for i in changed]
offset = min(ints)
assert offset > 0
for i in hrange(len(ints)):
ints[i] -= offset
def reoffset(o):
new_blocks = list(blocked)
for i, v in zip(changed, ints):
new_blocks[i] = int_to_bytes(v + o, len(blocked[i]))
return self.incorporate_new_buffer(hbytes().join(new_blocks))
new_offset = Integer.shrink(offset, reoffset, random=self.random)
if new_offset == offset:
self.clear_change_tracking()
def shrink_offset_pairs(self):
"""Lower any two blocks offset from each other the same ammount.
Before this shrink pass, two blocks explicitly offset from each
other would not get minimized properly:
>>> b = st.integers(0, 255)
>>> find(st.tuples(b, b), lambda x: x[0] == x[1] + 1)
(149,148)
This expensive (O(n^2)) pass goes through every pair of non-zero
blocks in the current shrink target and sees if the shrink
target can be improved by applying an offset to both of them.
"""
current = [self.shrink_target.buffer[u:v] for u, v in self.blocks]
def int_from_block(i):
return int_from_bytes(current[i])
def block_len(i):
u, v = self.blocks[i]
return v - u
# Try re-offsetting every pair
def reoffset_pair(pair, o):
n = len(self.blocks)
# Number of blocks may have changed, need to validate
valid_pair = [
p for p in pair if p < n and int_from_block(p) > 0 and
self.is_payload_block(p)
]
if len(valid_pair) < 2:
return
m = min([int_from_block(p) for p in valid_pair])
new_blocks = [self.shrink_target.buffer[u:v]
for u, v in self.blocks]
for i in valid_pair:
new_blocks[i] = int_to_bytes(
int_from_block(i) + o - m, block_len(i))
buffer = hbytes().join(new_blocks)
return self.incorporate_new_buffer(buffer)
i = 0
while i < len(self.blocks):
if self.is_payload_block(i) and int_from_block(i) > 0:
j = i + 1
while j < len(self.shrink_target.blocks):
block_val = int_from_block(j)
i_block_val = int_from_block(i)
if self.is_payload_block(j) \
and block_val > 0 and i_block_val > 0:
offset = min(int_from_block(i),
int_from_block(j))
# Save current before shrinking
current = [self.shrink_target.buffer[u:v]
for u, v in self.blocks]
Integer.shrink(
offset, lambda o: reoffset_pair((i, j), o),
random=self.random
)
j += 1
i += 1
def mark_shrinking(self, blocks):
"""Mark each of these blocks as a shrinking block: That is, lowering
its value lexicographically may cause less data to be drawn after."""
t = self.shrink_target
for i in blocks:
if self.__shrinking_block_cache.get(i) is True:
continue
self.__shrinking_block_cache[i] = True
prefix = t.buffer[:t.blocks[i][0]]
self.__shrinking_prefixes.add(prefix)
def clear_change_tracking(self):
self.__changed_blocks.clear()
def mark_changed(self, i):
self.__changed_blocks.add(i)
def update_shrink_target(self, new_target):
assert new_target.frozen
if self.shrink_target is not None:
current = self.shrink_target.buffer
new = new_target.buffer
assert sort_key(new) < sort_key(current)
self.shrinks += 1
if new_target.blocks != self.shrink_target.blocks:
self.clear_change_tracking()
else:
for i, (u, v) in enumerate(self.shrink_target.blocks):
if (
i not in self.__changed_blocks and
current[u:v] != new[u:v]
):
self.mark_changed(i)
else:
self.__changed_blocks = set()
self.shrink_target = new_target
self.__shrinking_block_cache = {}
def try_shrinking_blocks(self, blocks, b):
"""Attempts to replace each block in the blocks list with b. Returns
True if it succeeded (which may include some additional modifications
to shrink_target).
May call mark_shrinking with b if this causes a reduction in size.
In current usage it is expected that each of the blocks currently have
the same value, although this is not essential. Note that b must be
< the block at min(blocks) or this is not a valid shrink.
This method will attempt to do some small amount of work to delete data
that occurs after the end of the blocks. This is useful for cases where
there is some size dependency on the value of a block.
"""
initial_attempt = bytearray(self.shrink_target.buffer)
for i, block in enumerate(blocks):
if block >= len(self.blocks):
blocks = blocks[:i]
break
u, v = self.blocks[block]
n = min(v - u, len(b))
initial_attempt[v - n:v] = b[-n:]
start = self.shrink_target.blocks[blocks[0]][0]
end = self.shrink_target.blocks[blocks[-1]][1]
initial_data = self.cached_test_function(initial_attempt)
if initial_data.status == Status.INTERESTING:
return initial_data is self.shrink_target
# If this produced something completely invalid we ditch it
# here rather than trying to persevere.
if initial_data.status < Status.VALID:
return False
# We've shrunk inside
fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context_update_with_http_info(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str request_id: The ID of the Update Request (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'request_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_parameter_context_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_parameter_context_update`")
# verify the required parameter 'request_id' is set
if ('request_id' not in params) or (params['request_id'] is None):
raise ValueError("Missing the required parameter `request_id` when calling `get_parameter_context_update`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'request_id' in params:
path_params['requestId'] = params['request_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests/{requestId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_validation_request(self, context_id, id, **kwargs):
"""
Returns the Validation Request with the given ID
Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_validation_request(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Validation Request (required)
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_validation_request_with_http_info(context_id, id, **kwargs)
else:
(data) = self.get_validation_request_with_http_info(context_id, id, **kwargs)
return data
def get_validation_request_with_http_info(self, context_id, id, **kwargs):
"""
Returns the Validation Request with the given ID
Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_validation_request_with_http_info(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Validation Request (required)
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_validation_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_validation_request`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_validation_request`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextValidationRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def submit_parameter_context_update(self, context_id, body, **kwargs):
"""
Initiate the Update Request of a Parameter Context
This will initiate the process of updating a Parameter Context. Changing the value of a Parameter may require that one or more components be stopped and restarted, so this action may take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextUpdateRequestEntity, and the process of updating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/update-requests/{requestId}.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.submit_parameter_context_update(context_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: (required)
:param ParameterContextEntity body: The updated version of the parameter context. (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.submit_parameter_context_update_with_http_info(context_id, body, **kwargs)
else:
(data) = self.submit_parameter_context_update_with_http_info(context_id, body, **kwargs)
return data
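# Sketch of the documented asynchronous update flow. Entity attribute names and
# the final DELETE step are assumptions; only submit_parameter_context_update
# and get_parameter_context_update are defined in this class, and the polling
# interval is arbitrary.
#
#     import time
#     update = api.submit_parameter_context_update(context_id, context_entity)
#     request_id = update.request.request_id
#     while not api.get_parameter_context_update(context_id, request_id).request.complete:
#         time.sleep(2)
#     # then DELETE /parameter-contexts/{contextId}/update-requests/{requestId}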
def submit_parameter_context_update_with_http_info(self, context_id, body, **kwargs):
"""
Initiate the Update Request of a Parameter Context
This will initiate the process of updating a Parameter Context. Changing the value of a Parameter may require that one or more components be stopped and restarted, so this action may take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextUpdateRequestEntity, and the process of updating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/update-requests/{requestId}.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.submit_parameter_context_update_with_http_info(context_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: (required)
:param ParameterContextEntity body: The updated version of the parameter context. (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method submit_parameter_context_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `submit_parameter_context_update`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `submit_parameter_context_update`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']